Merge "drivers: soc: Clean up voice_svc on kernel 4.9" into msm-4.9
diff --git a/Documentation/crypto/msm/msm_ice_driver.txt b/Documentation/crypto/msm/msm_ice_driver.txt
new file mode 100644
index 0000000..ddb8176
--- /dev/null
+++ b/Documentation/crypto/msm/msm_ice_driver.txt
@@ -0,0 +1,235 @@
+Introduction:
+=============
+Storage encryption has long been one of the most requested features from a
+security point of view. The QTI storage encryption solution has so far used a
+general purpose crypto engine. While this kind of solution provides decent
+performance, it falls short as storage speeds keep improving significantly.
+To overcome this performance gap, newer chips embed an Inline Crypto Engine
+(ICE) in the storage device. ICE is designed to meet the line speed of
+storage devices.
+
+Hardware Description
+====================
+ICE is a HW block embedded into storage devices such as UFS/eMMC. By
+default, ICE works in bypass mode, i.e. ICE HW does not perform any crypto
+operation on data to be processed by the storage device. If required, ICE can
+be configured to perform crypto operations in one direction (i.e. either
+encryption or decryption) or in both directions (both encryption & decryption).
+
+When a switch between operation modes (plain to crypto or crypto to plain)
+is desired for a particular partition, SW must complete all transactions for
+that partition before switching the crypto mode, i.e. no crypto, one-direction
+crypto or both-direction crypto operation. Requests for other partitions are
+not impacted by a crypto mode switch.
+
+ICE HW currently supports AES 128/256-bit ECB & XTS mode encryption algorithms.
+
+Keys for crypto operations are loaded from SW. Keys are stored in a lookup
+table (LUT) located inside ICE HW. A maximum of 32 keys can be loaded into the
+ICE key LUT. A key inside the LUT can be referenced using a key index.
+
+SW Description
+==============
+ICE HW registers are categorized into two groups: those which can be accessed
+only by the secure side, i.e. TZ, and those which can also be accessed by the
+non-secure side, e.g. HLOS. This requires the ICE driver to be split in two
+pieces: one running in TZ space and another in HLOS space.
+
+The ICE driver in TZ configures keys as requested by the HLOS side.
+
+The ICE driver on the HLOS side is responsible for initialization of ICE HW.
+
+SW Architecture Diagram
+=======================
+The following components are involved in the ICE driver control path:
+
++++++++++++++++++++++++++++++++++++++++++
++ App layer +
++++++++++++++++++++++++++++++++++++++++++
++ System layer +
++ ++++++++ +++++++ +
++ + VOLD + + PFM + +
++ ++++++++ +++++++ +
++ || || +
++ || || +
++ \/ \/ +
++ ++++++++++++++ +
++ + LibQSEECom + +
++ ++++++++++++++ +
++++++++++++++++++++++++++++++++++++++++++
++ Kernel + +++++++++++++++++
++ + + KMS +
++ +++++++ +++++++++++ +++++++++++ + +++++++++++++++++
++ + ICE + + Storage + + QSEECom + + + ICE Driver +
++++++++++++++++++++++++++++++++++++++++++ <===> +++++++++++++++++
+ || ||
+ || ||
+ \/ \/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++ Storage Device +
++ ++++++++++++++ +
++ + ICE HW + +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Use Cases:
+----------
+a) Device bootup
+ICE HW is detected at boot time and the corresponding probe function is
+called. The ICE driver parses its data from the device tree node. ICE HW and
+storage HW are tightly coupled: storage device probing depends on ICE device
+probing. The ICE driver configures all the required registers to put the ICE
+HW in bypass mode.
+
+b) Configuring keys
+Currently, there are a couple of use cases for configuring keys.
+
+1) Full Disk Encryption (FDE)
+The system layer (VOLD), on invocation by the apps layer, calls libqseecom to
+create the encryption key. Libqseecom calls the qseecom driver to communicate
+with the KMS module on the secure side, i.e. TZ. KMS calls the ICE driver on
+the TZ side to create and set the keys in ICE HW. At the end of the
+transaction, VOLD holds the key index of the key LUT entry where the
+encryption key is stored.
+
+2) Per File Encryption (PFE)
+Per File Manager (PFM) calls the QSEECom API to create the key. PFM has a peer
+component (PFT) at the kernel layer which gets the corresponding key index
+from PFM.
+
+The following components are involved in the ICE driver data path:
+
++++++++++++++++++++++++++++++++++++++++++
++ App layer +
++++++++++++++++++++++++++++++++++++++++++
++ VFS +
++---------------------------------------+
++ File System (EXT4) +
++---------------------------------------+
++ Block Layer +
++ --------------------------------------+
++ +++++++ +
++ dm-req-crypt => + PFT + +
++ +++++++ +
++ +
++---------------------------------------+
++ +++++++++++ +++++++ +
++ + Storage + + ICE + +
++++++++++++++++++++++++++++++++++++++++++
++ || +
++ || (Storage Req with +
++ \/ ICE parameters ) +
++++++++++++++++++++++++++++++++++++++++++
++ Storage Device +
++ ++++++++++++++ +
++ + ICE HW + +
++++++++++++++++++++++++++++++++++++++++++
+
+c) Data transaction
+Once the crypto key has been configured, VOLD/PFM creates a device mapping for
+the data partition. As part of device mapping, VOLD passes the key index,
+crypto algorithm, mode and key length to the DM layer. In the case of PFE,
+keys are provided by PFT as and when requests are processed by dm-req-crypt.
+When any application needs to read/write data, the request goes through the DM
+layer, which adds the crypto information provided by VOLD/PFT to the Request.
+For each Request, the storage driver asks the ICE driver to configure the
+crypto part of the request. The ICE driver extracts the crypto data from the
+Request structure and provides it to the storage driver, which finally
+dispatches the request to the storage device.
+
+d) Error Handling
+Due to issue #1 mentioned in "Known Issues", the ICE driver does not register
+for any interrupt. However, it enables the interrupt sources in ICE HW. After
+each data transaction, the storage driver receives a transaction completion
+event. As part of event handling, the storage driver calls the ICE driver to
+check whether any ICE interrupt status is set. If so, the storage driver
+returns an error to the upper layer.
+
+Error handling will change in future chips.
+
+Interfaces
+==========
+The ICE driver exposes interfaces for the storage driver to:
+1. Get the global instance of ICE driver
+2. Get the implemented interfaces of the particular ice instance
+3. Initialize the ICE HW
+4. Reset the ICE HW
+5. Resume/Suspend the ICE HW
+6. Get the Crypto configuration for the data request for storage
+7. Check if current data transaction has generated any interrupt
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+The ICE driver does not do power management on its own, as ICE is part of the
+storage hardware. Whenever the storage driver receives a power
+collapse/suspend/resume request, it calls the ICE driver, which exposes APIs
+for the storage HW. ICE HW wipes its crypto configuration data during power
+collapse or reset. When the ICE driver receives a resume request, it asks the
+ICE driver on the TZ side to restore the configuration. The ICE driver does
+nothing as part of a power collapse or suspend event.
+
+Interface:
+==========
+ICE driver exposes following APIs for storage driver to use:
+
+int (*init)(struct platform_device *, void *, ice_success_cb, ice_error_cb);
+	-- This function is invoked by the storage controller during its
+	initialization. The storage controller provides success and error
+	callbacks which are invoked asynchronously once ICE HW init is done.
+
+int (*reset)(struct platform_device *);
+	-- ICE HW is reset as part of storage controller reset. When the
+	storage controller receives a reset command, it calls reset on ICE HW.
+	As of now, ICE HW does not need to do anything as part of reset.
+
+int (*resume)(struct platform_device *);
+	-- ICE HW, while going through reset, wipes all crypto keys and other
+	data from ICE HW. The ICE driver reconfigures that data as part of the
+	resume operation.
+
+int (*suspend)(struct platform_device *);
+	-- This API is called by the storage driver when the storage device is
+	going into suspend mode. As of today, the ICE driver does nothing to
+	handle suspend.
+
+int (*config)(struct platform_device *, struct request* , struct ice_data_setting*);
+	-- The storage driver calls this interface to get all crypto data
+	required to perform the crypto operation on the request.
+
+int (*status)(struct platform_device *);
+	-- The storage driver calls this interface to check whether the
+	previous data transfer generated any error.
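+
+As an illustration, below is a minimal sketch of how a storage driver might
+drive these ops around a single request. The ops-table type (ice_hw_ops), the
+ice_data_setting field usage and the error handling are assumptions for
+illustration only; the function signatures are the ones listed above.
+
+	/* hypothetical storage-driver glue; error paths abbreviated */
+	struct ice_hw_ops {
+		int (*init)(struct platform_device *, void *,
+			    ice_success_cb, ice_error_cb);
+		int (*reset)(struct platform_device *);
+		int (*resume)(struct platform_device *);
+		int (*suspend)(struct platform_device *);
+		int (*config)(struct platform_device *, struct request *,
+			      struct ice_data_setting *);
+		int (*status)(struct platform_device *);
+	};
+
+	static int storage_issue_request(struct platform_device *ice_pdev,
+					 const struct ice_hw_ops *ops,
+					 struct request *rq)
+	{
+		struct ice_data_setting setting; /* crypto config for this rq */
+		int ret;
+
+		/* extract key index/algorithm/mode for this request from ICE */
+		ret = ops->config(ice_pdev, rq, &setting);
+		if (ret)
+			return ret;
+
+		/* ... program 'setting' into the storage command descriptor
+		 * and dispatch rq to the storage device; then, on the
+		 * transaction completion event ... */
+
+		/* check whether ICE flagged an error for this transfer */
+		return ops->status(ice_pdev) ? -EIO : 0;
+	}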
+
+Config options
+==============
+This driver is enabled by the kernel config option CONFIG_CRYPTO_DEV_MSM_ICE.
+
+Dependencies
+============
+The ICE driver depends on the corresponding ICE driver on the TZ side to
+function properly.
+
+Known Issues
+============
+1. ICE HW emits 0s even if it has generated an interrupt
+This issue has a significant impact on how ICE interrupts are handled.
+Currently, the ICE driver does not register for any of the ICE interrupts but
+enables the interrupt sources. Once the storage driver asks to check the
+interrupt status, the ICE driver reads and clears the interrupt status and
+provides the read status to the storage driver. This mechanism, though not
+optimal, prevents filesystem corruption.
+This issue has been fixed in newer chips.
+
+2. ICE HW wipes all crypto data during power collapse
+This issue necessitates that the ICE driver on the TZ side store the crypto
+material, which is not required in the case of a general purpose crypto
+engine.
+This issue has been fixed in newer chips.
+
+Further Improvements
+====================
+Currently, due to the PFE use case, the ICE driver depends on dm-req-crypt to
+provide the keys as part of the request structure. This couples the ICE driver
+with the dm-req-crypt based solution. It is under discussion to expose
+IOCTL-based and registration-based interface APIs from the ICE driver. The ICE
+driver would use these two interfaces to find out whether any key exists for
+the current request. If yes, it would choose the right key index received from
+the IOCTL-based or registration-based APIs. If not, it would not set any
+crypto parameters in the request.
diff --git a/Documentation/crypto/msm/qce.txt b/Documentation/crypto/msm/qce.txt
new file mode 100644
index 0000000..9f1b313b
--- /dev/null
+++ b/Documentation/crypto/msm/qce.txt
@@ -0,0 +1,228 @@
+Introduction:
+=============
+
+The QTI crypto engine (qce) driver is a module that
+provides common services for accessing the QTI crypto device.
+Currently, the two main clients of qce are
+-qcrypto driver (module provided for accessing CE HW by kernel space apps)
+-qcedev driver (module provided for accessing CE HW by user space apps)
+
+
+The crypto engine (qce) driver is a client to the DMA driver for the QTI
+DMA device - Application Data Mover (ADM). ADM is used to provide the DMA
+transfer capability between QTI crypto device hardware and DDR memory
+for crypto operations.
+
+ Figure 1.
+ ---------
+
+ Linux kernel
+ (ex:IPSec)<-----* QTI crypto driver----+
+ (qcrypto) |
+ (for kernel space app) |
+ |
+ +-->|
+ |
+ | *qce <----> QTI
+ | driver ADM driver <---> ADM HW
+ +-->| | |
+ | | |
+ | | |
+ | | |
+ Linux kernel | | |
+ misc device <--- *QCEDEV Driver-------+ | |
+ interface (qcedev) (Reg interface) (DMA interface)
+ (for user space app) \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ \ /
+ QTI crypto CE3 HW
+
+
+ The entities marked with (*) in the Figure 1, are the software components of
+ the Linux QTI crypto modules.
+
+===============
+IMPORTANT NOTE:
+===============
+(1) The CE hardware can be accessed either from user space OR kernel space,
+    but only by one at a time. User space and kernel space clients cannot
+    access the qce driver (and the CE hardware) at the same time.
+    - If your device has user space apps that need to access the crypto
+      hardware, make sure the qcrypto module is disabled/unloaded.
+      This will result in kernel space apps using the registered
+      software implementation of the crypto algorithms.
+    - If your device has kernel space apps that need to access the
+      crypto hardware, make sure the qcedev module is disabled/unloaded
+      and implement your user space application to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has the Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that, for performance reasons, the CE hardware will
+    need to be dedicated to the Playready application. Any other user space
+    application should be implemented to use the SW implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+QTI Crypto HW device family provides a series of algorithms implemented
+in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, e.g. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+The qce driver provides the common services of HW crypto access to the two
+drivers listed above (qcedev, qcrypto). It sets up the crypto HW device for
+the operation, then it requests the ADM driver to perform the DMA of the
+crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+The command list for the first DMA channel is set up as follows:
+
+  The 1st command of the list is for the DMA transfer from DDR memory to the
+  crypto device, to feed input data into the crypto device. The dst crci of
+  the command is set to crci-in for this crypto device.
+
+  The 2nd command is for the DMA transfer from the crypto device to DDR
+  memory for the authentication result. The src crci is set to crci-hash-done
+  of the crypto device. If authentication is not required in the operation,
+  the 2nd command is not used.
+
+The command list for the second DMA channel is set up as follows:
+
+ One command to DMA data from crypto device to DDR memory for encryption or
+ decryption output from crypto device.
+
+To accomplish ciphering and authentication concurrent operations, the driver
+performs the following steps:
+ (a). set up HW crypto device
+ (b). hit the crypto go register.
+ (c). issue the DMA command of first channel to the ADM driver,
+ (d). issue the DMA command of 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm. To accomplish a
+hash operation (or any authentication-only algorithm), the 2nd DMA channel is
+not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered with the DMA driver. This signifies the end of
+the DMA operation(s). The driver reads the status and other information from
+the CE hardware registers and then invokes the callback to the qce driver
+client. This signals the completion and the results of the DMA, along with the
+status of the CE hardware, to the qce driver client. This completes a crypto
+operation.
+
+In qce driver initialization, memory for the two command lists and the
+descriptor lists for each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM command
+lists at initialization. During each crypto operation, minimal setup is
+required. The src_dscr and/or dst_dscr descriptor lists of the ADM command are
+populated from the information obtained from the corresponding data structure,
+e.g. for an AEAD request, the following data structure provides the
+information:
+
+ struct aead_request *req
+ ......
+ req->assoc
+ req->src
+ req->dst
+
+The DMA address of a scatter list will be retrieved and set up in the
+descriptor list of an ADM command.
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+
+The interface is defined in qce.h.
+
+The qcrypto and qcedev drivers are the clients of these interfaces.
+
+The following services are provided by the qce driver -
+
+ qce_open(), qce_close(), qce_ablk_cipher_req(),
+ qce_hw_support(), qce_process_sha_req()
+
+  qce_open() is the first request from the client, e.g. the QTI crypto
+  driver (qcedev, qcrypto), to open a crypto engine. It is normally
+  called from the probe function of the client for a device. During the
+  probe,
+ - ADM command list structure will be set up
+ - Crypto device will be initialized.
+ - Resource associated with the crypto engine is retrieved by doing
+ platform_get_resource() or platform_get_resource_byname().
+
+ The resources for a device are
+ - crci-in, crci-out, crci-hash-done
+ - two DMA channel IDs, one for encryption and decryption input, one for
+ output.
+ - base address of the HW crypto device.
+
+ qce_close() is the last request from the client. Normally, it is
+ called from the remove function of the client.
+
+ qce_hw_support() allows the client to query what is supported
+ by the crypto engine hardware.
+
+  qce_ablk_cipher_req() provides the ciphering service to the client.
+  qce_process_sha_req() provides the hashing service to the client.
+  qce_aead_req() provides the AEAD service to the client.
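+
+  As a rough illustration, the open/query/close life cycle from a client's
+  probe/remove path might look as below. The exact prototypes live in qce.h;
+  the handle-based signatures and the ce_hw_support structure name used here
+  are assumptions for illustration only.
+
+	/* assumed prototypes; see qce.h for the authoritative ones */
+	void *qce_open(struct platform_device *pdev, int *rc);
+	int qce_hw_support(void *handle, struct ce_hw_support *support);
+	int qce_close(void *handle);
+
+	static int client_probe(struct platform_device *pdev)
+	{
+		int rc = 0;
+		void *qce;
+
+		/* sets up the ADM command lists, initializes the CE device */
+		qce = qce_open(pdev, &rc);
+		if (!qce)
+			return rc;
+
+		/* query HW capabilities, then register the supported
+		 * algorithms, e.g. qce_hw_support(qce, &support); */
+
+		platform_set_drvdata(pdev, qce);
+		return 0;
+	}
+
+	static int client_remove(struct platform_device *pdev)
+	{
+		/* last request from the client: release the crypto engine */
+		return qce_close(platform_get_drvdata(pdev));
+	}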
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file:
+-CE hardware base register address
+-Data mover channel used for transfers to/from the CE hardware
+These parameters differ on each platform.
+
+
+Dependencies:
+=============
+
+Existing DMA driver.
+The transfers are DMA'ed between the crypto hardware and DDR memory via the
+data mover, ADM. The data transfers are set up to use the existing DMA driver.
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ n/a
diff --git a/Documentation/crypto/msm/qcedev.txt b/Documentation/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..0638dd9
--- /dev/null
+++ b/Documentation/crypto/msm/qcedev.txt
@@ -0,0 +1,231 @@
+Introduction:
+=============
+
+This driver provides IOCTLs for user space applications to access the crypto
+engine hardware for the qcedev crypto services. The driver supports the
+following crypto algorithms:
+- AES-128, AES-256 (ECB, CBC and CTR mode)
+- AES-192, (ECB, CBC and CTR mode)
+ (support exists on platform supporting CE 3.x hardware)
+- SHA1/SHA256
+- AES-128, AES-256 (XTS), AES CMAC, SHA1/SHA256 HMAC
+ (support exists on platform supporting CE 4.x hardware)
+
+Hardware description:
+=====================
+Crypto 3E provides cipher and hash algorithms as defined in the
+3GPP forum specifications.
+
+
+Software description
+====================
+
+The driver is a Linux platform device driver. For an msm target,
+there can be multiple crypto devices assigned for QCEDEV.
+
+The driver is also a misc device driver.
+The following operations are registered by the driver:
+-qcedev_ioctl()
+-qcedev_open()
+-qcedev_release()
+
+The following IOCTLs are available to user space application(s):
+
+ Cipher IOCTLs:
+ --------------
+ QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+ QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+ Hashing/HMAC IOCTLs
+ -------------------
+
+ QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+ QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+ QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+ QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+ packet of known size.
+ QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+ algorithm) for data packet of known size.
+
+The requests are synchronous. The driver puts the calling process to sleep,
+waiting for the completion of the request using wait_for_completion().
+
+Since the requests come from user space applications, before handing the
+requests to the low level qce driver, the ioctl requests and the associated
+input/output buffers have to be sanity checked and copied to/from kernel
+space.
+
+The extra copying of requests/buffers can affect performance. The issue with
+copying the data buffer is resolved by having the client use PMEM-allocated
+buffers.
+
+NOTE: Using memory allocated via PMEM is supported only for in-place
+      operations, where the source and destination buffers point to the same
+      location. Different source and destination buffers are not currently
+      supported.
+      Furthermore, when using PMEM in AES CTR mode, a non-zero byteoffset is
+      not supported when issuing an encryption or decryption request.
+
+The design of the driver allows multiple opens, and multiple requests to be
+issued from application(s). Therefore, the driver internally queues the
+requests, and serializes the requests to the low level qce (or qce40) driver.
+
+On an IOCTL request from an application, if there is no outstanding request,
+the driver issues a "qce" request; otherwise, the request is queued in the
+driver queue. The process is suspended waiting for completion.
+
+On completion of a request by the low level qce driver, the internal tasklet
+(done_tasklet) is scheduled. The sole purpose of done_tasklet is to signal
+completion of the current active request (complete()), and to issue more
+requests to the qce, if any.
+When the process wakes up from wait_for_completion(), it collects the return
+code and returns from the ioctl.
+
+A spin lock is used to protect the critical section of the internal queue,
+which can be accessed from multiple tasks, on SMP, and from the completion
+callback from qce.
+
+The driver maintains a set of statistics using debug fs. The files are
+in /debug/qcedev/stats1, /debug/qcedev/stats2, /debug/qcedev/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+
+Power Management
+================
+n/a
+
+
+Interface:
+==========
+
+Linux user space applications need to open a handle (file descriptor) to the
+qcedev device. This is achieved by doing the following to retrieve a file
+descriptor to the device:
+
+ fd = open("/dev/qce", O_RDWR);
+ ..
+ ioctl(fd, ...);
+
+Once a valid fd is retrieved, the user can call the following IOCTLs with the
+fd as the first parameter and a pointer to an appropriate data structure,
+qcedev_cipher_op_req or qcedev_sha_op_req (depending on cipher/hash
+functionality), as the second parameter.
+
+The following IOCTLs are available to user space application(s):
+
+ Cipher IOCTLs:
+ --------------
+ QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+ QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+ The caller of the IOCTL passes a pointer to the structure shown
+ below, as the second parameter.
+
+ struct qcedev_cipher_op_req {
+ int use_pmem;
+ union{
+ struct qcedev_pmem_info pmem;
+ struct qcedev_vbuf_info vbuf;
+ };
+ uint32_t entries;
+ uint32_t data_len;
+ uint8_t in_place_op;
+ uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
+ uint32_t encklen;
+ uint8_t iv[QCEDEV_MAX_IV_SIZE];
+ uint32_t ivlen;
+ uint32_t byteoffset;
+ enum qcedev_cipher_alg_enum alg;
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_oper_enum op;
+ };
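+
+  As a rough illustration, a minimal user space encryption request using
+  virtual buffers (use_pmem = 0) might look as below, assuming AES-128 in
+  CBC mode. The enum values and the vbuf/buf_info member layout are taken
+  from qcedev.h and should be treated as assumptions here; key, IV and data
+  contents are placeholders and error handling is abbreviated.
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <linux/qcedev.h>
+
+	int encrypt_in_place(uint8_t *buf, uint32_t len,
+			     const uint8_t key[16], const uint8_t iv[16])
+	{
+		struct qcedev_cipher_op_req req;
+		int fd = open("/dev/qce", O_RDWR);
+
+		if (fd < 0)
+			return -1;
+
+		memset(&req, 0, sizeof(req));
+		req.use_pmem = 0;		/* virtual (vbuf) buffers */
+		req.in_place_op = 1;		/* src == dst */
+		req.vbuf.src[0].vaddr = buf;	/* assumed buf_info layout */
+		req.vbuf.src[0].len = len;
+		req.vbuf.dst[0].vaddr = buf;
+		req.vbuf.dst[0].len = len;
+		req.entries = 1;
+		req.data_len = len;
+		memcpy(req.enckey, key, 16);
+		req.encklen = 16;		/* 16-byte key => AES-128 */
+		memcpy(req.iv, iv, 16);
+		req.ivlen = 16;
+		req.byteoffset = 0;
+		req.alg = QCEDEV_ALG_AES;
+		req.mode = QCEDEV_AES_MODE_CBC;
+		req.op = QCEDEV_OPER_ENC;
+
+		if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0) {
+			close(fd);
+			return -1;
+		}
+		close(fd);
+		return 0;	/* buf now holds the ciphertext */
+	}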
+
+ Hashing/HMAC IOCTLs
+ -------------------
+
+ QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+ QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+ QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+ QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+ packet of known size.
+ QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+ algorithm) for data packet of known size.
+
+ The caller of the IOCTL passes a pointer to the structure shown
+ below, as the second parameter.
+
+ struct qcedev_sha_op_req {
+ struct buf_info data[QCEDEV_MAX_BUFFERS];
+ uint32_t entries;
+ uint32_t data_len;
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ uint32_t diglen;
+ uint8_t *authkey;
+ uint32_t authklen;
+ enum qcedev_sha_alg_enum alg;
+ struct qcedev_sha_ctxt ctxt;
+ };
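+
+  The hash IOCTLs follow an init/update/final sequence over the same
+  qcedev_sha_op_req. A minimal sketch is shown below; the enum value
+  QCEDEV_ALG_SHA256 and the data[] member layout are taken from qcedev.h and
+  should be treated as assumptions here, and return codes are unchecked for
+  brevity.
+
+	struct qcedev_sha_op_req sreq;
+
+	memset(&sreq, 0, sizeof(sreq));
+	sreq.alg = QCEDEV_ALG_SHA256;
+	ioctl(fd, QCEDEV_IOCTL_SHA_INIT_REQ, &sreq);
+
+	sreq.data[0].vaddr = msg;	/* message to hash */
+	sreq.data[0].len = msg_len;
+	sreq.entries = 1;
+	sreq.data_len = msg_len;
+	ioctl(fd, QCEDEV_IOCTL_SHA_UPDATE_REQ, &sreq); /* repeat per chunk */
+
+	ioctl(fd, QCEDEV_IOCTL_SHA_FINAL_REQ, &sreq);
+	/* sreq.digest[0 .. sreq.diglen - 1] now holds the digest */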
+
+The IOCTLs and associated request data structures are defined in qcedev.h.
+
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file:
+-CE hardware base register address
+-Data mover channel used for transfers to/from the CE hardware
+These parameters differ on each platform.
+
+
+
+Dependencies:
+=============
+qce driver. Please see Documentation/crypto/msm/qce.txt.
+
+
+User space utilities:
+=====================
+
+none
+
+Known issues:
+=============
+
+none.
+
+
+To do:
+======
+ Enhance cipher functionality:
+ (1) Add support for handling > 32KB for ciphering functionality when the
+     operation is not an "in place" operation (source != destination),
+     when using PMEM allocated memory.
+
+Limitations:
+============
+ (1) In the case of cipher functionality, the driver does not support
+     a combination of different memory sources for source/destination.
+     In other words, the memory pointed to by src and dst
+     must BOTH (src/dst) be "pmem" or BOTH (src/dst) be "vbuf".
+
+ (2) In the case of hash functionality, the driver does not support data
+     buffers allocated via PMEM.
+
+ (3) Do not load this driver if your device already has kernel space apps
+ that need to access the crypto hardware.
+ Make sure to have qcedev module disabled/unloaded and implement your user
+ space application to use the software implementation (ex: openssl/crypto)
+ of the crypto algorithms.
+ (NOTE: Please refer to details on the limitations listed in qce.txt)
+
+ (4) If your device has the Playready (Windows Media DRM) application enabled
+     and uses the qcedev module to access the crypto hardware accelerator,
+     please be informed that, for performance reasons, the CE hardware will
+     need to be dedicated to the Playready application. Any other user space
+     application should be implemented to use the software implementation
+     (ex: openssl/crypto) of the crypto algorithms.
diff --git a/Documentation/crypto/msm/qcrypto.txt b/Documentation/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..2503103
--- /dev/null
+++ b/Documentation/crypto/msm/qcrypto.txt
@@ -0,0 +1,142 @@
+Introduction:
+=============
+
+The QTI Crypto (qcrypto) driver is a Linux crypto driver which interfaces
+with the Linux kernel crypto API layer to provide the HW crypto functions.
+This driver is accessed by kernel space apps via the kernel crypto API layer.
+At present there is no means for user space apps to access this module.
+
+Hardware description:
+=====================
+
+QTI Crypto HW device family provides a series of algorithms implemented
+in the device.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 provides fast
+AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 4.0 provides
+HMAC-SHA1/SHA256, AES CBC-MAC hashing algorithm and AES XTS/CCM cipher
+algorithms.
+
+
+Software description
+====================
+
+The module init function (_qcrypto_init()) calls platform_driver_register()
+to register the driver. As a result, the driver probe function,
+_qcrypto_probe(), will be invoked for each registered device.
+
+In the probe function, the driver opens the low level CE (qce_open), and
+registers the supported algorithms with the kernel crypto API layer.
+Currently, qcrypto supports the following algorithms:
+
+ ablkcipher -
+ cbc(aes),ecb(aes),ctr(aes)
+ ahash -
+ sha1, sha256
+ aead -
+ authenc(hmac(sha1),cbc(aes))
+
+	The hmac(sha1), hmac(sha256), authenc(hmac(sha1),cbc(aes)), ccm(aes)
+	and xts(aes) algorithms are registered on some platforms that
+	support these in the CE hardware.
+
+The HW device can support various algorithms. However, the most important
+algorithms for gaining performance from a HW crypto accelerator are
+AEAD and ABLKCIPHER.
+
+AEAD stands for "authenticated encryption with associated data".
+ABLKCIPHER stands for "asynchronous block cipher".
+
+The AEAD structure is described in the header file aead.h.
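+
+As a rough illustration, a kernel client reaches these algorithms through the
+generic crypto API rather than through qcrypto directly. The sketch below uses
+the symmetric-cipher client API for "cbc(aes)"; whether the request is actually
+serviced by qcrypto depends on the priority it registered with. Asynchronous
+completion handling (-EINPROGRESS/-EBUSY) is abbreviated, and the buffer must
+be DMA-able (not on the stack).
+
+	#include <crypto/skcipher.h>
+	#include <linux/scatterlist.h>
+
+	static int demo_cbc_aes_encrypt(u8 *buf, const u8 *key, u8 *iv)
+	{
+		struct crypto_skcipher *tfm;
+		struct skcipher_request *req;
+		struct scatterlist sg;
+		int ret;
+
+		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+		if (IS_ERR(tfm))
+			return PTR_ERR(tfm);
+
+		ret = crypto_skcipher_setkey(tfm, key, 16); /* AES-128 */
+		if (ret)
+			goto free_tfm;
+
+		req = skcipher_request_alloc(tfm, GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto free_tfm;
+		}
+
+		sg_init_one(&sg, buf, 16);	/* one block, in place */
+		skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
+
+		/* may return -EINPROGRESS for an async (HW) implementation;
+		 * completion-callback handling is elided in this sketch */
+		ret = crypto_skcipher_encrypt(req);
+
+		skcipher_request_free(req);
+	free_tfm:
+		crypto_free_skcipher(tfm);
+		return ret;
+	}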
+
+The design of the driver allows multiple requests to be issued from kernel
+client SW (e.g. IPSec).
+Therefore, the driver internally queues the requests, and serializes the
+requests to the low level qce driver.
+
+When a request is received from a client, if there is no outstanding request,
+a qce (or qce40) request is issued; otherwise, the request is queued in the
+driver queue.
+
+On completion of a request, the qce (or qce40) invokes the callback registered
+by qcrypto. The internal tasklet (done_tasklet) is scheduled in this callback
+function. The sole purpose of done_tasklet is to signal completion of the
+current active request, and to issue more requests to the qce (or qce40), if
+any exist.
+
+A spin lock is used to protect the critical section of the internal queue,
+which can be accessed from multiple tasks, on SMP, and from the completion
+callback from qce.
+
+The driver maintains a set of statistics using debug fs. The files are
+in /debug/qcrypto/stats1, /debug/qcrypto/stats2, /debug/qcrypto/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+Test vectors for the authenc(hmac(sha1),cbc(aes)) algorithm were developed
+offline, and imported into crypto/testmgr.c and crypto/testmgr.h.
+
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+The kernel interface is defined in crypto.h.
+
+
+Module parameters:
+==================
+
+All the platform specific parameters are defined in the board init
+file, e.g. arch/arm/mach-msm/board-msm7x30.c for msm7x30.
+
+Dependencies:
+=============
+qce driver.
+
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ Add Hashing algorithms.
+
+
+Limitations:
+===============
+(1) Each packet transfer size (for cipher and hash) is limited to maximum of
+ 32KB. This is a limitation in the crypto engine hardware. Client will
+ have to break packets larger than 32KB into multiple requests of smaller
+ size data packets.
+
+(2) Do not load this driver if your device has user space apps that need to
+    access the crypto hardware. Please make sure the qcrypto module is
+    disabled/unloaded.
+    Not having the driver loaded will result in kernel space apps using the
+    registered software implementation of the crypto algorithms.
+
+(3) If your device has the Playready application enabled and uses the qcedev
+    module to access the crypto hardware accelerator, please be informed that,
+    for performance reasons, the CE hardware will need to be dedicated to the
+    Playready application. Any other user space or kernel application should be
+    implemented to use the software implementation of the crypto algorithms.
+
+ (NOTE: Please refer to details on the limitations listed in qce/40.txt)
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index ef5fbe9..ad440a2 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -38,6 +38,11 @@
architecturally-defined reset values. Only supported for 32-bit
systems which follow the ARMv7 architected reset values.
+- arm,no-tick-in-suspend : The main counter does not tick when the system is in
+ low-power system suspend on some SoCs. This behavior does not match the
+ Architecture Reference Manual's specification that the system counter "must
+ be implemented in an always-on power domain."
+
Example:
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 53e4295..592fcef 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -73,9 +73,6 @@
AMBA markee):
- "arm,coresight-replicator"
- "qcom,coresight-csr"
- - "arm,coresight-cti"
- - "qcom,coresight-tpda"
- - "qcom,coresight-tpdm"
- "qcom,coresight-remote-etm"
- "qcom,coresight-hwevent"
- "qcom,coresight-dummy"
@@ -144,6 +141,20 @@
* qcom,msr-fix-req: boolean, indicating if MSRs need to be programmed
after enabling the subunit.
+* Optional properties for CTI:
+
+ * qcom,cti-gpio-trigin: cti trigger input driven by gpio.
+
+ * qcom,cti-gpio-trigout: cti trigger output sent to gpio.
+
+   * pinctrl-names: names corresponding to the numbered pinctrl. The
+     allowed names are a subset of the following: cti-trigin-pinctrl,
+     cti-trigout-pctrl.
+
+ * pinctrl-<n>: list of pinctrl phandles for the different pinctrl
+ states. Refer to
+ "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+
* Required property for Remote ETMs:
* qcom,inst-id: must be present. QMI instance id for remote ETMs.
@@ -264,7 +275,7 @@
};
tpda_mss: tpda@7043000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "qcom,coresight-tpda", "arm,primecell";
reg = <0x7043000 0x1000>;
reg-names = "tpda-base";
@@ -274,9 +285,8 @@
qcom,dsb-elem-size = <0 32>;
qcom,cmb-elem-size = <0 32>;
- clocks = <&clock_gcc clk_qdss_clk>,
- <&clock_gcc clk_qdss_a_clk>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop clk_qdss_clk>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -345,15 +355,14 @@
};
tpdm_mss: tpdm@7042000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "qcom,coresight-tpdm", "arm,primecell";
reg = <0x7042000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-mss";
- clocks = <&clock_gcc clk_qdss_clk>,
- <&clock_gcc clk_qdss_a_clk>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop qdss_clk>;
+ clock-names = "apb_pclk";
port{
tpdm_mss_out_tpda_mss: endpoint {
@@ -364,15 +373,14 @@
4. CTIs
cti0: cti@6010000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,coresight-cti", "arm,primecell";
reg = <0x6010000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti0";
- clocks = <&clock_gcc clk_qdss_clk>,
- <&clock_gcc clk_qdss_a_clk>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop qdss_clk>;
+ clock-names = "apb_pclk";
};
[1]. There is currently two version of STM: STM32 and STM500. Both
diff --git a/Documentation/devicetree/bindings/arm/msm/cmd-db.txt b/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
index b989d8a..e704d70 100644
--- a/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
+++ b/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
@@ -24,10 +24,12 @@
Value type: <prop-encoded-array>
Definition: First element is the base address of shared memory
Second element is the size of the shared memory region
+ Points to the dictionary address that houses the command DB
+ start address and the size of the command DB region
Example:
qcom,cmd-db@861e0000 {
compatible = "qcom,cmd-db";
- reg = <0x861e0000 0x4000>;
+ reg = <0xc3f000c 0x8>;
}
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt
index d1f8ce1..eaa7146b 100644
--- a/Documentation/devicetree/bindings/arm/msm/imem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/imem.txt
@@ -57,6 +57,11 @@
-compatible: "qcom,msm-imem-emergency_download_mode"
-reg: start address and size of emergency_download_mode region in imem
+Kaslr Offset:
+------------------------
+-compatible: "qcom,msm-imem-kaslr_offset"
+-reg: start address and size of kaslr_offset region in imem
+
USB Diag Cookies:
-----------------
Memory region used to store USB PID and serial numbers to be used by
@@ -95,6 +100,12 @@
reg = <0x6b0 32>;
};
+ kaslr_offset@6d0 {
+ compatible = "qcom,msm-imem-kaslr_offset";
+ reg = <0x6d0 12>;
+ };
+
pil@94c {
compatible = "qcom,msm-imem-pil";
reg = <0x94c 200>;
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index baae281..bf93a2a 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -110,6 +110,9 @@
- MDMCALIFORNIUM
compatible = "qcom,mdmcalifornium"
+- SDXPOORWILLS
+ compatible = "qcom,sdxpoorwills"
+
- VPIPA
compatible = "qcom,msmvpipa"
@@ -302,3 +305,4 @@
compatible = "qcom,mdmcalifornium-mtp"
compatible = "qcom,apq8009-cdp"
compatible = "qcom,apq8009-mtp"
+compatible = "qcom,sdxpoorwills-rumi"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
index 96e42c5..6bf6a57 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
@@ -218,7 +218,16 @@
master-id, slave-id, arbitrated bandwidth
in KBps, instantaneous bandwidth in KBps
-Example:
+The following are optional properties for client's device nodes:
+
+- qcom,msm-bus,alc-voter: Boolean alc_voter flag to indicate that client
+ will vote as an Active Latency Client.
+- qcom,msm-bus,vectors-alc: Array of unsigned integers representing the
+			first access latency and idle time in ns. This
+			property is required if qcom,msm-bus,alc-voter
+			is present.
+
+Example for default client:
qcom,msm-bus,name = "client-name";
qcom,msm-bus,num-cases = <3>;
@@ -229,3 +238,12 @@
<22 512 320000 3200000>, <26 512 3200000 3200000>,
<22 512 160000 1600000>, <26 512 1600000 1600000>;
+Example for ALC client:
+
+ qcom,msm-bus,name = "client-name";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,alc-voter;
+ qcom,msm-bus,vectors-alc =
+ <0 0>,
+ <500 1600>;
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 964fea6..2347477 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -25,9 +25,8 @@
Value type: <stringlist>
Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
"osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
- "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer" or
- "apps_itm_ctl". Optionally, "l3_efuse", "pwrcl_efuse"
- "perfcl_efuse".
+ "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer".
+ Optionally, "l3_efuse", "pwrcl_efuse", "perfcl_efuse".
Must be specified in the same order as the corresponding
addresses are specified in the reg property.
@@ -350,12 +349,11 @@
<0x178b0000 0x1000>,
<0x17d42400 0x0c00>,
<0x17d44400 0x0c00>,
- <0x17d46c00 0x0c00>,
- <0x17810090 0x8>;
+ <0x17d46c00 0x0c00>;
reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
"l3_pll", "pwrcl_pll", "perfcl_pll",
"l3_sequencer", "pwrcl_sequencer",
- "perfcl_sequencer", "apps_itm_ctl";
+ "perfcl_sequencer";
vdd-l3-supply = <&apc0_l3_vreg>;
vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt b/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt
new file mode 100644
index 0000000..02dab4c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt
@@ -0,0 +1,33 @@
+* RPM Stats
+
+RPM maintains a counter of the number of times the SoC entered a deeper sleep
+mode involving lowering or powering down the backbone rails - Cx and Mx and
+the oscillator clock, XO.
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,rpm-stats".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+	Definition: The address in RPM RAM from which the stats are read
+		    should be provided as "phys_addr_base". The offset from
+		    which the stats are available should be provided as
+		    "offset_addr".
+
+- reg-names:
+ Usage: required
+	Value type: <stringlist>
+ Definition: Provides labels for the reg property.
+
+EXAMPLE:
+
+ qcom,rpm-stats@c000000 {
+ compatible = "qcom,rpm-stats";
+ reg = <0xC000000 0x1000>, <0x3F0000 0x4>;
+ reg-names = "phys_addr_base", "offset_addr";
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/spss_utils.txt b/Documentation/devicetree/bindings/arm/msm/spss_utils.txt
new file mode 100644
index 0000000..d325574
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/spss_utils.txt
@@ -0,0 +1,35 @@
+Qualcomm Technologies, Inc. Secure Processor SubSystem Utilities (spss_utils)
+
+The Secure Processor SubSystem (SPSS) is a dedicated subsystem for security.
+It has its own CPU, memories, and cryptographic engine.
+It shall provide cryptographic services to other subsystems.
+The SPSS firmware is loaded by the PIL driver.
+Communication with SPSS is done via the spcom driver, using glink.
+
+The spss_utils driver selects the SPSS firmware file
+according to a dedicated fuse and the platform HW version.
+
+Required properties:
+-compatible : should be "qcom,spss-utils"
+-qcom,spss-fuse1-addr: fuse1 register physical address
+-qcom,spss-fuse1-bit: fuse1 relevant bit
+-qcom,spss-fuse2-addr: fuse2 register physical address
+-qcom,spss-fuse2-bit: fuse2 relevant bit
+-qcom,spss-dev-firmware-name: dev firmware file name
+-qcom,spss-test-firmware-name: test firmware file name
+-qcom,spss-prod-firmware-name: production firmware file name
+-qcom,spss-debug-reg-addr: debug register physical address
+
+Example:
+ qcom,spss_utils {
+ compatible = "qcom,spss-utils";
+
+ qcom,spss-fuse1-addr = <0x007841c4>;
+ qcom,spss-fuse1-bit = <27>;
+ qcom,spss-fuse2-addr = <0x007841c4>;
+ qcom,spss-fuse2-bit = <26>;
+ qcom,spss-dev-firmware-name = "spss1d"; /* 8 chars max */
+ qcom,spss-test-firmware-name = "spss1t"; /* 8 chars max */
+ qcom,spss-prod-firmware-name = "spss1p"; /* 8 chars max */
+ qcom,spss-debug-reg-addr = <0x01886020>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/tz-log.txt b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
new file mode 100644
index 0000000..d7e84a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
@@ -0,0 +1,24 @@
+* TZLOG (Trust Zone Log)
+
+The tz_log driver is a platform device driver that exposes a debugfs
+interface for accessing and displaying diagnostic information
+related to secure code (Trustzone/QSEE).
+
+Required properties:
+- compatible : Should be "qcom,tz-log"
+- reg : Offset and size of the register set for the device
+
+Optional properties:
+- qcom,hyplog-enabled : (boolean) indicates if driver supports HYP logger service.
+- hyplog-address-offset : Register offset to get the HYP log base address.
+- hyplog-size-offset : Register offset to get the HYP log size parameter.
+
+Example:
+
+ qcom,tz-log@fe805720 {
+ compatible = "qcom,tz-log";
+ reg = <0xfe805720 0x1000>;
+ qcom,hyplog-enabled;
+		hyplog-address-offset = <0x410>;
+		hyplog-size-offset = <0x414>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index b381bdeb..c467327 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -10,6 +10,10 @@
- compatible: "qcom,wil6210"
- qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
+- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
+- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
+- qcom,smmu-coherent: Boolean flag indicating SMMU dma and page table coherency
+- qcom,smmu-mapping: specifies the base address and size of SMMU space
- qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
the below optional properties:
@@ -33,6 +37,10 @@
wil6210: qcom,wil6210 {
compatible = "qcom,wil6210";
qcom,smmu-support;
+ qcom,smmu-s1-en;
+ qcom,smmu-fast-map;
+ qcom,smmu-coherent;
+ qcom,smmu-mapping = <0x20000000 0xe0000000>;
qcom,pcie-parent = <&pcie1>;
qcom,wigig-en = <&tlmm 94 0>;
qcom,msm-bus,name = "wil6210";
diff --git a/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt b/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
new file mode 100644
index 0000000..231b8a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
@@ -0,0 +1,17 @@
+Qualcomm Technologies, Inc. Always On Processor Clock controller Binding
+------------------------------------------------------------------------
+
+Required properties :
+- compatible : must be "qcom,aop-qmp-clk"
+- #clock-cells : must contain 1
+- mboxes : list of QMP mailbox phandle and channel identifier tuples.
+- mbox-names: List of identifier strings for each mailbox channel.
+ Must contain "qdss_clk".
+
+Example :
+ clock_qdss: qcom,aopclk {
+ compatible = "qcom,aop-qmp-clk";
+ #clock-cells = <1>;
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "qdss_clk";
+ };
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index 15feda3..c801e848 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -12,13 +12,22 @@
- reg-names: Names of the memory regions defined in reg entry
- interrupts: Copy engine interrupt table
- qcom,wlan-msa-memory: MSA memory size
+ - clocks: List of clock phandles
+ - clock-names: List of clock names corresponding to the "clocks" property
- iommus: SMMUs and corresponding Stream IDs needed by WLAN
- qcom,wlan-smmu-iova-address: I/O virtual address range as <start length>
format to be used for allocations associated between WLAN and SMMU
Optional properties:
+  - <supply-name>-supply: phandle to the regulator device tree node.
+    The optional "supply-name" is "vdd-0.8-cx-mx".
+  - qcom,<supply>-config: Specifies voltage levels for the supply. Should be
+			  specified in pairs (min, max), in units of uV. There
+			  can be an optional load in uA and a regulator settle
+			  delay in uS.
- qcom,icnss-vadc: VADC handle for vph_pwr read APIs.
- qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs.
+ - qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
Example:
@@ -26,6 +35,8 @@
compatible = "qcom,icnss";
reg = <0x0a000000 0x1000000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_aggre2_noc_clk>;
+ clock-names = "smmu_aggre2_noc_clk";
iommus = <&anoc2_smmu 0x1900>,
<&anoc2_smmu 0x1901>;
qcom,wlan-smmu-iova-address = <0 0x10000000>;
@@ -43,4 +54,7 @@
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x200000>;
+ qcom,smmu-s1-bypass;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
};
diff --git a/Documentation/devicetree/bindings/crypto/msm/ice.txt b/Documentation/devicetree/bindings/crypto/msm/ice.txt
new file mode 100644
index 0000000..2d0e580
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/ice.txt
@@ -0,0 +1,32 @@
+* Inline Crypto Engine (ICE)
+
+Required properties:
+ - compatible : should be "qcom,ice"
+ - reg : <register mapping>
+
+Optional properties:
+ - interrupt-names : name describing the interrupts for ICE IRQ
+ - interrupts : <interrupt mapping for ICE IRQ>
+ - qcom,enable-ice-clk : should enable clocks for ICE HW
+ - clocks : List of phandle and clock specifier pairs
+ - clock-names : List of clock input name strings sorted in the same
+ order as the clocks property.
+	- qcom,op-freq-hz : max clock speed sorted in the same order as the clocks
+	  property.
+	- qcom,instance-type : describes the storage type for which the ICE node
+	  is defined; currently, only the "ufs" and "sdcc" storage types are supported.
+
+Example:
+ ufs_ice: ufsice@630000 {
+ compatible = "qcom,ice";
+ reg = <0x630000 0x8000>;
+ interrupt-names = "ufs_ice_nonsec_level_irq", "ufs_ice_sec_level_irq";
+ interrupts = <0 258 0>, <0 257 0>;
+ qcom,enable-ice-clk;
+ clock-names = "ice_core_clk_src", "ice_core_clk";
+ clocks = <&clock_gcc clk_ufs_ice_core_clk_src>,
+ <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+ qcom,op-freq-hz = <300000000>, <0>;
+ qcom,instance-type = "ufs";
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..c8077cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -0,0 +1,43 @@
+* QCEDEV (QTI Crypto Engine Device)
+
+Required properties:
+ - compatible : should be "qcom,qcedev"
+ - reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and bam base register names.
+ - interrupts : should contain crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+ - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm-bus,name: Should be "qcedev-noc"
+  - qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+  - qcom,msm-bus,active-only: Boolean flag for context of request (active/dual)
+  - qcom,msm-bus,num-paths: The paths for source and destination ports
+  - qcom,msm-bus,vectors: Vectors for bus topology.
+  - qcom,ce-device: Device number.
+  - qcom,ce-opp-freq: indicates the CE operating frequency in Hz; changes from target to target.
+
+Optional properties:
+  - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EEs.
+  - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+  - qcom,support-core-clk-only : optional, indicates if the HW supports a single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+ qcom,qcedev@fd440000 {
+ compatible = "qcom,qcedev";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 235 0>;
+ qcom,bam-pipe-pair = <0>;
+ qcom,ce-hw-instance = <1>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcedev-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <56 512 0 0>,
+				<56 512 3936000 393600>;
+ qcom,ce-opp-freq = <100000000>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcota.txt b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
new file mode 100644
index 0000000..3ce63af
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
@@ -0,0 +1,42 @@
+* QCOTA (Over The Air Crypto Device)
+
+Required properties:
+ - compatible : should be "qcom,qcota"
+ - reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and bam base register names.
+ - interrupts : should contain crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+ - qcom,ce-hw-instance : should contain crypto HW instance.
+ - qcom,ce-device: Unique QCOTA device identifier. 0 for first
+ instance, 1 for second instance, n-1 for n-th instance.
+ - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+
+Optional properties:
+  - qcom,support-core-clk-only : optional, indicates if the HW supports a single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+ qcom,qcota@fe140000 {
+ compatible = "qcom,qcota";
+ reg = <0xfe140000 0x20000>,
+ <0xfe144000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 111 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <2>;
+ qcom,ce-device = <0>;
+ qcom,ce-opp-freq = <100000000>;
+ };
+
+ qcom,qcota@fe0c0000 {
+ compatible = "qcom,qcota";
+ reg = <0xfe0c0000 0x20000>,
+ <0xfe0c4000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 113 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <4>;
+ qcom,ce-device = <1>;
+ qcom,ce-opp-freq = <100000000>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..06b219a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -0,0 +1,61 @@
+* QCRYPTO (QTI Crypto)
+
+Required properties:
+ - compatible : should be "qcom,qcrypto"
+ - reg : should contain crypto, BAM register map.
+ - reg-names : should contain the crypto and bam base register names.
+ - interrupts : should contain crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+ - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm-bus,name: Should be "qcrypto-noc"
+  - qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+  - qcom,msm-bus,active-only: Boolean flag for context of request (active/dual)
+  - qcom,msm-bus,num-paths: The paths for source and destination ports
+ - qcom,ce-device: Device number. Device number is encoded with the following:
+  - qcom,ce-device: Device number. The device number is encoded as follows:
+		bits 3-0, device type: 0 for full disk encryption (fde),
+				       1 for per file encryption (pfe);
+		bits 7-4, unit number within the device type.
+
+
+Optional properties:
+  - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EEs.
+  - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+  - qcom,use-sw-aes-cbc-ecb-ctr-algo : optional, indicates whether to use the SW aes-cbc/ecb/ctr algorithm.
+  - qcom,use-sw-aes-xts-algo : optional, indicates whether to use the SW aes-xts algorithm.
+  - qcom,use-sw-aead-algo : optional, indicates whether to use the SW aead algorithm.
+  - qcom,use-sw-ahash-algo : optional, indicates whether to use the SW hash algorithm.
+  - qcom,use-sw-hmac-algo : optional, indicates whether to use the SW hmac algorithm.
+  - qcom,use-sw-aes-ccm-algo : optional, indicates whether to use the SW aes-ccm algorithm.
+  - qcom,clk-mgmt-sus-res : optional, indicates whether the CE clocks need to be disabled/enabled in the suspend/resume functions.
+  - qcom,support-core-clk-only : optional, indicates whether the HW supports a single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+ - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
+ changes from target to target. If not specified, by default the
+ frequency is set as 100MHZ.
+
+ - qcom,msm_bus,vectors: optional, indicates vectors for bus topology.
+ This attribute is required for msm targets where bus scaling is
+ required. For other targets such as fsm, they do not perform
+ bus scaling. It is not required for those targets.
+
+Example:
+
+ qcom,qcrypto@fd444000 {
+ compatible = "qcom,qcrypto";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 235 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <1>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,msm-bus,name = "qcrypto-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <56 512 0 0>,
+				<56 512 3936000 393600>;
+ qcom,ce-opp-freq = <100000000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index bc226a7..a3ef34c 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -128,6 +128,8 @@
feature is available or not.
- qcom,sde-has-dim-layer: Boolean property to indicate if mixer has dim layer
feature is available or not.
+- qcom,sde-has-idle-pc: Boolean property to indicate if target has idle
+ power collapse feature available or not.
- qcom,sde-has-mixer-gc: Boolean property to indicate if mixer has gamma correction
feature available or not.
- qcom,sde-has-cdp: Boolean property to indicate if cdp feature is
@@ -420,6 +422,7 @@
qcom,sde-csc-type = "csc-10bit";
qcom,sde-highest-bank-bit = <15>;
qcom,sde-has-mixer-gc;
+ qcom,sde-has-idle-pc;
qcom,sde-sspp-max-rects = <1 1 1 1
1 1 1 1
1 1
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index b95696d..4f7ae75 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -28,6 +28,8 @@
Required properties:
- compatible: value must be either:
* allwinner,sun5i-a13-tcon
+ * allwinner,sun6i-a31-tcon
+ * allwinner,sun6i-a31s-tcon
* allwinner,sun8i-a33-tcon
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
@@ -50,7 +52,7 @@
second the block connected to the TCON channel 1 (usually the TV
encoder)
-On the A13, there is one more clock required:
+On SoCs other than the A33, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
DRC
@@ -87,6 +89,7 @@
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-backend
+ * allwinner,sun6i-a31-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the frontend and backend
@@ -117,6 +120,7 @@
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-frontend
+ * allwinner,sun6i-a31-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -142,6 +146,8 @@
Required properties:
- compatible: value must be one of:
* allwinner,sun5i-a13-display-engine
+ * allwinner,sun6i-a31-display-engine
+ * allwinner,sun6i-a31s-display-engine
* allwinner,sun8i-a33-display-engine
- allwinner,pipelines: list of phandle to the display engine
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 3e7fcb7..ffba081 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -35,7 +35,7 @@
- qcom,mdss-dsi-panel-destination: A string that specifies the destination display for the panel.
"display_1" = DISPLAY_1
"display_2" = DISPLAY_2
-- qcom,mdss-dsi-panel-timings: An array of length 12 that specifies the PHY
+- qcom,mdss-dsi-panel-phy-timings: An array of length 12 that specifies the PHY
timing settings for the panel.
- qcom,mdss-dsi-panel-timings-8996: An array of length 40 char that specifies the 8996 PHY lane
timing settings for the panel.
@@ -456,28 +456,6 @@
with the supply entry index. For a detailed description of
fields in the supply entry, refer to the qcom,ctrl-supply-entries
binding above.
-- qcom,config-select: Optional property to select default configuration.
-
-[[Optional config sub-nodes]] These subnodes provide different configurations for a given same panel.
- Default configuration can be chosen by specifying phandle of the
- selected subnode in the qcom,config-select.
-Required properties for sub-nodes: None
-Optional properites:
-- qcom,lm-split: An array of two values indicating MDP should use two layer
- mixers to reduce power.
- Ex: Normally 1080x1920 display uses single DSI and thus one layer
- mixer. But if we use two layer mixers then mux the output of
- those two mixers into single stream and route it to single DSI
- then we can lower the clock requirements of MDP. To use this
- configuration we need two fill this array with <540 540>.
- Both values doesn't have to be same, but recommended, however sum of
- both values has to be equal to the panel-width.
- By default two mixer streams are merged using 2D mux, however if
- 2 DSC encoders are used then merge is performed within compression
- engine.
-- qcom,split-mode: String property indicating which split mode MDP should use. Valid
- entries are "pingpong-split" and "dualctl-split".
- This property is mutually exclusive with qcom,lm-split.
- qcom,mdss-dsc-version: An 8 bit value indicates the DSC version supported by panel. Bits[0.3]
provides information about minor version while Bits[4.7] provides
major version information. It supports only DSC rev 1(Major).1(Minor)
@@ -500,6 +478,21 @@
- qcom,mdss-dsc-block-prediction-enable: A boolean value to enable/disable the block prediction at decoder.
- qcom,mdss-dsc-config-by-manufacture-cmd: A boolean to indicates panel use manufacture command to setup pps
instead of standard dcs type 0x0A.
+- qcom,display-topology: Array of u32 values which specifies the list of topologies available
+ for the display. A display topology is defined by a
+ set of 3 values in the order:
+ - number of mixers
+ - number of compression encoders
+ - number of interfaces
+	Therefore, the array length should always be a multiple of 3.
+- qcom,default-topology-index: A u32 value which indexes the topology set
+ specified by the node "qcom,display-topology"
+ to identify the default topology for the
+ display. The first set is indexed by the
+ value 0.
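+	For example, qcom,display-topology = <1 1 1>, <2 2 1> lists two
+	topologies (the second using two layer mixers, two compression
+	encoders and one interface), and qcom,default-topology-index = <1>
+	would select that second topology.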
+
+Required properties for sub-nodes: None
+Optional properties:
- qcom,dba-panel: Indicates whether the current panel is used as a display bridge
to a non-DSI interface.
- qcom,bridge-name: A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name
@@ -692,18 +685,15 @@
29 00 00 00 00 00 02 F1 00];
qcom,mdss-dsi-timing-switch-command-state = "dsi_lp_mode";
- qcom,config-select = <&dsi_sim_vid_config0>;
- dsi_sim_vid_config0: config0 {
- qcom,lm-split = <360 360>;
- qcom,mdss-dsc-encoders = <2>;
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <360>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- qcom,mdss-dsc-config-by-manufacture-cmd;
- };
+ qcom,mdss-dsc-slice-height = <16>;
+ qcom,mdss-dsc-slice-width = <360>;
+ qcom,mdss-dsc-slice-per-pkt = <2>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ qcom,mdss-dsc-config-by-manufacture-cmd;
+ qcom,display-topology = <1 1 1>;
+ qcom,default-topology-index = <0>;
};
};
qcom,panel-supply-entries {
@@ -737,41 +727,19 @@
};
};
- qcom,config-select = <&dsi_sim_vid_config0>;
qcom,dba-panel;
qcom,bridge-name = "adv7533";
qcom,mdss-dsc-version = <0x11>;
qcom,mdss-dsc-scr-version = <0x1>;
-
- dsi_sim_vid_config0: config0 {
- qcom,lm-split = <360 360>;
- qcom,mdss-dsc-encoders = <2>;
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <360>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- qcom,mdss-dsc-config-by-manufacture-cmd;
- };
-
- dsi_sim_vid_config1: config1 {
- qcom,mdss-dsc-encoders = <1>;
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <360>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
- qcom,mdss-dsc-config-by-manufacture-cmd;
- };
-
- dsi_sim_vid_config2: config2 {
- qcom,split-mode = "dualctl-split";
- };
-
- dsi_sim_vid_config3: config3 {
- qcom,split-mode = "pingpong-split";
- };
+ qcom,mdss-dsc-slice-height = <16>;
+ qcom,mdss-dsc-slice-width = <360>;
+ qcom,mdss-dsc-slice-per-pkt = <2>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ qcom,mdss-dsc-config-by-manufacture-cmd;
+ qcom,display-topology = <1 1 1>,
+ <2 2 1>;
+ qcom,default-topology-index = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
index c6626d1..3ad0986 100644
--- a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
@@ -6,8 +6,9 @@
DSI Controller:
Required properties:
- compatible: Should be "qcom,dsi-ctrl-hw-v<version>". Supported
- versions include 1.4 and 2.0.
- eg: qcom,dsi-ctrl-hw-v1.4, qcom,dsi-ctrl-hw-v2.0
+ versions include 1.4, 2.0 and 2.2.
+ eg: qcom,dsi-ctrl-hw-v1.4, qcom,dsi-ctrl-hw-v2.0,
+ qcom,dsi-ctrl-hw-v2.2
And for dsi phy driver:
qcom,dsi-phy-v0.0-hpm, qcom,dsi-phy-v0.0-lpm,
qcom,dsi-phy-v1.0, qcom,dsi-phy-v2.0,
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
index af0b903..dfc14f7 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -5,7 +5,10 @@
Required properties:
- compatible: Should be "linux,extcon-usb-gpio"
+
+Either one of id-gpio or vbus-gpio must be present. Both can be present as well.
- id-gpio: gpio for USB ID pin. See gpio binding.
+- vbus-gpio: gpio for USB VBUS pin.
Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
extcon_usb1 {
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index b028dda..d0d7fff 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -15,7 +15,7 @@
"qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
"qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
"qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
- "qcom,mdss_dsi_pll_8998", "qcom,mdss_dp_pll_8998",
+ "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
"qcom,mdss_hdmi_pll_8998"
- cell-index: Specifies the controller used
- reg: offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
new file mode 100644
index 0000000..8e2bdee
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -0,0 +1,363 @@
+* Qualcomm Technologies, Inc. MSM CCI
+
+[First level nodes]
+Required properties:
+- cell-index: cci hardware core index
+- compatible :
+ - "qcom,cci"
+- reg : offset and length of the register set for the device
+ for the cci operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the cci interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+ property defined.
+- gpios : should contain phandle to gpio controller node and array of
+ #gpio-cells specifying specific gpio (controller specific)
+- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-req-tbl-label : should contain name of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- clock-names: name of the clocks required for the device
+- clock-rates: clock rate in Hz
+
+Optional properties:
+- qcom,cam-vreg-name : name of the voltage regulators required for the device.
+- gdscr-supply : should contain the gdscr regulator used for cci clocks.
+- mmagic-supply : should contain the mmagic regulator used for mmagic clocks.
+
+- I2C speed settings (*)
+  - i2c_freq_100Khz: qcom,i2c_standard_mode - node should contain clock settings for
+    100 kHz
+  - i2c_freq_400Khz: qcom,i2c_fast_mode - node should contain clock settings for
+    400 kHz
+  - i2c_freq_custom: qcom,i2c_custom_mode - node can contain clock settings for
+    frequencies other than 100 kHz and 400 kHz, specific to the use case.
+    Currently it has settings for 375 kHz.
+  - i2c_freq_1Mhz: qcom,i2c_fast_plus_mode - node should contain clock
+    settings for 1 MHz
+* If a speed setting is not defined, the low level driver can use "i2c_freq_custom"
+as the default.
+
+[Second level nodes]
+* Qualcomm Technologies, Inc. CCI clock settings
+
+Optional properties:
+- qcom,hw-thigh : should contain high period of the SCL clock in terms of CCI
+ clock cycle
+- qcom,hw-tlow : should contain the low period of the SCL clock in terms of CCI
+	clock cycle
+- qcom,hw-tsu-sto : should contain setup time for STOP condition
+- qcom,hw-tsu-sta : should contain setup time for Repeated START condition
+- qcom,hw-thd-dat : should contain hold time for the data
+- qcom,hw-thd-sta : should contain hold time for START condition
+- qcom,hw-tbuf : should contain free time between a STOP and a START condition
+- qcom,hw-scl-stretch-en : should contain enable or disable clock stretching
+- qcom,hw-trdhld : should contain internal hold time for SDA
+- qcom,hw-tsp : should contain filtering of glitches
+
+* Qualcomm Technologies, Inc. MSM Sensor
+
+MSM sensor node contains properties of camera sensor
+
+Required properties:
+- compatible : should be manufacturer name followed by sensor name
+ - "qcom,camera"
+- reg : should contain i2c slave address of the device
+- qcom,csiphy-sd-index : should contain the csiphy instance that will be used to
+	receive sensor data
+ - 0, 1, 2
+- cam_vdig-supply : should contain regulator from which digital voltage is
+ supplied
+- cam_vana-supply : should contain regulator from which analog voltage is
+ supplied
+- cam_vio-supply : should contain regulator from which IO voltage is supplied
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+ sensor
+ - "cam_vdig", "cam_vana", "cam_vio", "cam_vaf"
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level for
+ regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level for
+ regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain optimum voltage level for regulators
+ mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,sensor-position-roll : should contain sensor rotational angle with respect
+ to axis of reference
+ - 0, 90, 180, 360
+- qcom,sensor-position-pitch : should contain sensor rotational angle with respect
+ to axis of reference
+ - 0, 90, 180, 360
+- qcom,sensor-position-yaw : should contain sensor rotational angle with respect
+ to axis of reference
+ - 0, 90, 180, 360
+Optional properties:
+- qcom,slave-id : should contain i2c slave address, device id address, expected
+ id read value and device id mask
+- qcom,sensor-name : should contain a unique sensor name to differentiate it from
+	other sensors
+ - "s5k3l1yx"
+- qcom,sensor-mode : should contain sensor mode supported
+ - 0 -> back camera 2D
+ - 1 -> front camera 2D
+ - 2 -> back camera 3D
+ - 3 -> back camera int 3D
+- qcom,sensor-type : should contain format of data that sensor streams
+ - 0 -> bayer format
+ - 1 -> yuv format
+- qcom,secure : should be enabled to operate the camera in secure mode
+ - 0, 1
+- qcom,gpio-no-mux : should contain field to indicate whether gpio mux table is
+ available
+ - 1 if gpio mux is not available, 0 otherwise
+- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+- gpios : should contain phandle to gpio controller node and array of
+ #gpio-cells specifying specific gpio (controller specific)
+- qcom,gpio-reset : should contain index to gpio used by the sensor's reset_n
+- qcom,gpio-standby : should contain index to gpio used by the sensor's standby_n
+- qcom,gpio-vio : should contain index to gpio used by the sensor's io vreg enable
+- qcom,gpio-vana : should contain index to gpio used by the sensor's analog vreg enable
+- qcom,gpio-vdig : should contain index to gpio used by the sensor's digital vreg enable
+- qcom,gpio-vaf : should contain index to gpio used by the sensor's af vreg enable
+- qcom,gpio-af-pwdm : should contain index to gpio used by the sensor's af pwdm_n
+- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-req-tbl-label : should contain name of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-set-tbl-num : should contain index of gpios that need to be
+ configured by msm
+- qcom,gpio-set-tbl-flags : should contain value to be configured for the gpios
+ present in qcom,gpio-set-tbl-num property (in the same order)
+- qcom,gpio-set-tbl-delay : should contain amount of delay after configuring
+	gpios as specified in the qcom,gpio-set-tbl-flags property (in the same order)
+- qcom,csi-phy-sel : should contain CSIPHY core instance from which CSID should
+ receive data
+- qcom,actuator-cam-name : should contain actuator cam name associated with
+ this sensor
+	- If an actuator does not exist, this property should not be initialized
+	- If an actuator exists, this field should indicate the index of the
+	  actuator to be used
+- qcom,actuator-vcm-pwd : should contain the gpio pin of vcm power to be enabled
+ for actuator
+- qcom,actuator-vcm-enable : should contain value to be set for actuator vcm
+ gpio
+- qcom,sensor-position : should contain the mount angle of the camera sensor
+ - 0 -> back camera
+ - 1 -> front camera
+- qcom,cci-master : should contain i2c master id to be used for this camera
+ sensor
+ - 0 -> MASTER 0
+ - 1 -> MASTER 1
+- qcom,actuator-src : if auto focus is supported by this sensor, this
+ property should contain phandle of respective actuator node
+- qcom,led-flash-src : if LED flash is supported by this sensor, this
+ property should contain phandle of respective LED flash node
+- qcom,vdd-cx-supply : should contain regulator from which cx voltage is
+ supplied
+- qcom,vdd-cx-name : should contain names of cx regulator
+- qcom,eeprom-src : if eeprom memory is supported by this sensor, this
+ property should contain phandle of respective eeprom nodes
+- qcom,ois-src : if optical image stabilization is supported by this sensor,
+ this property should contain phandle of respective ois node
+- qcom,ir-led-src : if ir led is supported by this sensor, this property
+ should contain phandle of respective ir-led node
+- qcom,ir-cut-src : if ir cut is supported by this sensor, this property
+ should contain phandle of respective ir-cut node
+- qcom,special-support-sensors: if only some special sensors are supported
+ on this board, add sensor name in this property.
+
+* Qualcomm Technologies, Inc. MSM ACTUATOR
+
+Required properties:
+- cell-index : should contain unique identifier to differentiate
+ between multiple actuators
+- reg : should contain i2c slave address of the actuator and length of
+ data field which is 0x0
+- compatible :
+ - "qcom,actuator"
+- qcom,cci-master : should contain i2c master id to be used for this camera
+ sensor
+ - 0 -> MASTER 0
+ - 1 -> MASTER 1
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+ actuator
+ - "cam_vaf"
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+	for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+	for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+ required from the regulators mentioned in the qcom,cam-vreg-name property
+ (in the same order).
+- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+
+* Qualcomm Technologies, Inc. MSM OIS
+
+Required properties:
+- cell-index : should contain unique identifier to differentiate
+ between multiple ois drivers
+- reg : should contain i2c slave address of the ois and length of
+ data field which is 0x0
+- compatible :
+ - "qcom,ois"
+- qcom,cci-master : should contain i2c master id to be used for this camera
+ sensor
+ - 0 -> MASTER 0
+ - 1 -> MASTER 1
+
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+ ois
+ - "cam_vaf"
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+	for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+	for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+ required from the regulators mentioned in the qcom,cam-vreg-name property
+ (in the same order).
+- cam_vaf-supply : should contain regulator from which ois voltage is supplied
+
+Example:
+
+led_flash0: qcom,camera-flash@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
+ qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ qcom,switch-source = <&pmi8998_switch>;
+ status = "ok";
+};
+
+qcom,cci@0xfda0c000 {
+ cell-index = <0>;
+ compatible = "qcom,cci";
+ reg = <0xfda0c000 0x300>;
+ reg-names = "cci";
+ interrupts = <0 50 0>;
+ interrupt-names = "cci";
+ clock-names = "camnoc_axi_clk", "soc_ahb_clk",
+ "slow_ahb_src_clk", "cpas_ahb_clk",
+ "cci_clk", "cci_clk_src";
+ qcom,clock-rates = <0 0 80000000 0 0 37500000>;
+ gpios = <&tlmm 17 0>,
+ <&tlmm 18 0>,
+ <&tlmm 19 0>,
+ <&tlmm 20 0>;
+ qcom,gpio-tbl-num = <0 1 2 3>;
+ qcom,gpio-tbl-flags = <1 1 1 1>;
+ qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
+ i2c_freq_100Khz: qcom,i2c_standard_mode {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+ status = "ok";
+ };
+ i2c_freq_400Khz: qcom,i2c_fast_mode {
+ qcom,hw-thigh = <20>;
+ qcom,hw-tlow = <28>;
+ qcom,hw-tsu-sto = <21>;
+ qcom,hw-tsu-sta = <21>;
+ qcom,hw-thd-dat = <13>;
+ qcom,hw-thd-sta = <18>;
+ qcom,hw-tbuf = <25>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ status = "ok";
+ };
+ i2c_freq_custom: qcom,i2c_custom_mode {
+ qcom,hw-thigh = <15>;
+ qcom,hw-tlow = <28>;
+ qcom,hw-tsu-sto = <21>;
+ qcom,hw-tsu-sta = <21>;
+ qcom,hw-thd-dat = <13>;
+ qcom,hw-thd-sta = <18>;
+ qcom,hw-tbuf = <25>;
+ qcom,hw-scl-stretch-en = <1>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ status = "ok";
+ };
+ i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+ qcom,hw-thigh = <16>;
+ qcom,hw-tlow = <22>;
+ qcom,hw-tsu-sto = <17>;
+ qcom,hw-tsu-sta = <18>;
+ qcom,hw-thd-dat = <16>;
+ qcom,hw-thd-sta = <15>;
+ qcom,hw-tbuf = <19>;
+ qcom,hw-scl-stretch-en = <1>;
+ qcom,hw-trdhld = <3>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <180>;
+ qcom,secure = <1>;
+ qcom,led-flash-src = <&led_flash0>;
+ qcom,actuator-src = <&actuator0>;
+ qcom,eeprom-src = <&eeprom0>;
+ cam_vdig-supply = <&pm845_s3>;
+ cam_vio-supply = <&pm845_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-standby = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
new file mode 100644
index 0000000..e8a74b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
@@ -0,0 +1,34 @@
+* Qualcomm Technologies, Inc. MSM CSI Phy
+
+Required properties:
+- cell-index: csi phy hardware core index
+- compatible :
+ - "qcom,csiphy-v5.01"
+- reg : offset and length of the register set for the device
+ for the csiphy operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the csiphy interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+ property defined.
+- clock-names: name of the clocks required for the device
+- qcom,clock-rates: clock rate in Hz
+	- 0 if the clock is required but no specific rate needs to be applied
+
+Example:
+
+qcom,csiphy@ac65000 {
+ cell-index = <0>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0xac65000 0x200>;
+ reg-names = "csiphy";
+ interrupts = <0 477 0>;
+ interrupt-names = "csiphy";
+ clock-names = "camnoc_axi_clk", "soc_ahb_clk",
+ "slow_ahb_src_clk", "cpas_ahb_clk",
+ "cphy_rx_clk_src", "csiphy0_clk",
+ "csi0phytimer_clk_src", "csi0phytimer_clk",
+ "ife_0_csid_clk", "ife_0_csid_clk_src";
+ qcom,clock-rates =
+ <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ status = "ok";
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
new file mode 100644
index 0000000..2ed913c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -0,0 +1,130 @@
+* Qualcomm Technologies, Inc. MSM Camera SMMU
+
+The MSM camera SMMU device provides SMMU context bank definitions
+for all HW blocks that need to map IOVA to physical memory. These
+definitions consist of various properties that define how the
+IOVA address space is laid out for each HW block in the camera
+subsystem.
+
+=======================
+Required Node Structure
+=======================
+The camera SMMU device must be described in three levels of device nodes. The
+first level describes the overall SMMU device. Within it, second level nodes
+describe individual context banks that map different stream ids. There can
+also be second level nodes describing firmware device nodes. Each HW block
+such as IFE or ICP maps into these second level device nodes. All context bank
+specific properties that define how the IOVA is laid out are contained within
+third level device nodes within the second level device nodes.
+
+During kernel initialization, all the devices are probed recursively and
+a device pointer is created for each context bank, keeping track of the IOVA
+mapping information.
+
+Duplicate regions of the same type are not allowed within the same
+context bank. All context banks must contain an IO region at the very least.
+
+==================================
+First Level Node - CAM SMMU device
+==================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,msm-cam-smmu".
+
+===================================================================
+Second Level Node - CAM SMMU context bank device or firmware device
+===================================================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,msm-cam-smmu-cb" or "qcom,msm-cam-smmu-fw-dev".
+
+- memory-region
+ Usage: optional
+ Value type: <phandle>
+	Definition: Should specify the phandle of the memory region for
+		    firmware allocation.
+
+- iommus
+ Usage: required
+ Value type: <phandle>
+ Definition: Should specify the phandle of the iommu sid.
+
+- label
+ Usage: required
+ Value type: <string>
+ Definition: Should specify a string label to identify the context bank.
+
+=============================================
+Third Level Node - CAM SMMU memory map device
+=============================================
+- iova-region-name
+ Usage: required
+ Value type: <string>
+ Definition: Should specify a string label to identify the IOVA region.
+
+- iova-region-start
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify start IOVA for region.
+
+- iova-region-len
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify length for IOVA region.
+
+- iova-region-id
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the numerical identifier for IOVA region.
+ Allowed values are: 0x00 to 0x03
+ - Firmware region: 0x00
+ - Shared region: 0x01
+ - Scratch region: 0x02
+ - IO region: 0x03
+
+Example:
+ qcom,cam_smmu@0 {
+ compatible = "qcom,msm-cam-smmu";
+
+ msm_cam_smmu_icp {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1078>,
+ <&apps_smmu 0x1020>,
+ <&apps_smmu 0x1028>,
+ <&apps_smmu 0x1040>,
+ <&apps_smmu 0x1048>,
+ <&apps_smmu 0x1030>,
+ <&apps_smmu 0x1050>;
+ label = "icp";
+ icp_iova_mem_map: iova-mem-map {
+ iova-mem-region-firmware {
+ /* Firmware region is 5MB */
+ iova-region-name = "firmware";
+ iova-region-start = <0x0>;
+ iova-region-len = <0x500000>;
+ iova-region-id = <0x0>;
+ status = "ok";
+ };
+
+ iova-mem-region-shared {
+ /* Shared region is 100MB long */
+ iova-region-name = "shared";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0x6400000>;
+ iova-region-id = <0x1>;
+ status = "ok";
+ };
+
+ iova-mem-region-io {
+ /* IO region is approximately 3.5 GB */
+ iova-region-name = "io";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0xd2800000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 058dab1..0295e1b 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -84,6 +84,7 @@
- qcom,mdss-rot-mode: This is integer value indicates operation mode
of the rotator device
- qcom,mdss-sbuf-headroom: This integer value indicates stream buffer headroom in lines.
+- qcom,mdss-rot-linewidth: This integer value indicates the rotator line width supported, in pixels.
- cache-slice-names: A set of names that identify the usecase names of a client that uses
cache slice. These strings are used to look up the cache slice
entries by name.
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index 6d72e8b..bdc0eba 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -10,6 +10,10 @@
of macroblocks per second. The load is a reflection of hardware capability
rather than a performance guarantee. Performance is guaranteed only up to
advertised capability of the chipset.
+- qcom,max-hq-mbs-per-frame : Maximum number of macroblocks per frame beyond which
+	"High Quality" encoding is not supported.
+- qcom,max-hq-frames-per-sec : Maximum number of frames per second beyond which
+	"High Quality" encoding is not supported.
Optional properties:
- reg : offset and length of the register set for the device.
@@ -157,7 +161,6 @@
Example:
-
qcom,vidc@fdc00000 {
compatible = "qcom,msm-vidc";
reg = <0xfdc00000 0xff000>;
@@ -182,6 +185,8 @@
qcom,use_dynamic_bw_update;
qcom,fw-bias = <0xe000000>;
qcom,allowed-clock-rates = <200000000 300000000 400000000>;
+ qcom,max-hq-mbs-per-frame = <8160>;
+ qcom,max-hq-frames-per-sec = <60>;
msm_vidc_cb1: msm_vidc_cb1 {
compatible = "qcom,msm-vidc,context-bank";
label = "venus_ns";
diff --git a/Documentation/devicetree/bindings/misc/qpnp-misc.txt b/Documentation/devicetree/bindings/misc/qpnp-misc.txt
new file mode 100644
index 0000000..a34cbde
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/qpnp-misc.txt
@@ -0,0 +1,25 @@
+QPNP-MISC
+
+QPNP-MISC provides a way to read the PMIC part number and revision.
+
+Required properties:
+- compatible : should be "qcom,qpnp-misc"
+- reg : offset and length of the PMIC peripheral register map.
+
+Optional properties:
+- qcom,pwm-sel: Select PWM source. Possible values:
+ 0: LOW
+ 1: PWM1_in
+ 2: PWM2_in
+ 3: PWM1_in & PWM2_in
+- qcom,enable-gp-driver: Enable the GP driver. Should only be specified
+			 if a non-zero PWM source is specified under the
+			 "qcom,pwm-sel" property.
+
+Example:
+ qcom,misc@900 {
+ compatible = "qcom,qpnp-misc";
+ reg = <0x900 0x100>;
+ qcom,pwm-sel = <2>;
+ qcom,enable-gp-driver;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 485483a..da9a632 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -1,55 +1,163 @@
-* Qualcomm SDHCI controller (sdhci-msm)
+Qualcomm Technologies, Inc. Standard Secure Digital Host Controller (SDHC)
-This file documents differences between the core properties in mmc.txt
-and the properties used by the sdhci-msm driver.
+The Secure Digital Host Controller provides a standard host interface to SD/MMC/SDIO cards.
Required properties:
-- compatible: Should contain "qcom,sdhci-msm-v4".
-- reg: Base address and length of the register in the following order:
- - Host controller register map (required)
- - SD Core register map (required)
-- interrupts: Should contain an interrupt-specifiers for the interrupts:
- - Host controller interrupt (required)
-- pinctrl-names: Should contain only one value - "default".
-- pinctrl-0: Should specify pin control groups used for this controller.
-- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
-- clock-names: Should contain the following:
- "iface" - Main peripheral bus clock (PCLK/HCLK - AHB Bus clock) (required)
- "core" - SDC MMC clock (MCLK) (required)
- "bus" - SDCC bus voter clock (optional)
+ - compatible : should be "qcom,sdhci-msm"
+	For SDCC version 5.0.0, MCI registers are removed from the SDCC
+	interface and some registers are moved to HC. A new compatible string
+	is added to support this change - "qcom,sdhci-msm-v5".
+ - reg : should contain SDHC, SD Core register map.
+ - reg-names : indicates various resources passed to driver (via reg property) by name.
+		Required "reg-names" are "hc_mem" and "core_mem";
+		optional ones are "tlmm_mem".
+ - interrupts : should contain SDHC interrupts.
+ - interrupt-names : indicates interrupts passed to driver (via interrupts property) by name.
+ Required "interrupt-names" are "hc_irq" and "pwr_irq".
+ - <supply-name>-supply: phandle to the regulator device tree node
+ Required "supply-name" are "vdd" and "vdd-io".
+
+Required alias:
+- The slot number is specified via an alias with the following format
+ 'sdhc{n}' where n is the slot number.
+
+Optional Properties:
+ - interrupt-names - "status_irq". This status_irq will be used for card
+ detection.
+ - qcom,bus-width - defines the bus I/O width that the controller supports.
+		Units - number of bits. The valid bus-width values are
+		1, 4 and 8.
+ - qcom,nonremovable - specifies whether the card in the slot is
+		hot pluggable or hard wired.
+ - qcom,nonhotplug - specifies that the card in the slot is not hot pluggable.
+		If the card is lost or removed manually at runtime, do not retry
+		to redetect it until the next reboot probe.
+ - qcom,bus-speed-mode - specifies supported bus speed modes by host.
+ The supported bus speed modes are :
+ "HS200_1p8v" - indicates that host can support HS200 at 1.8v.
+ "HS200_1p2v" - indicates that host can support HS200 at 1.2v.
+ "DDR_1p8v" - indicates that host can support DDR mode at 1.8v.
+ "DDR_1p2v" - indicates that host can support DDR mode at 1.2v.
+ - qcom,devfreq,freq-table - specifies supported frequencies for clock scaling.
+ Clock scaling logic shall toggle between these frequencies based
+	on card load. In case the defined frequencies are above or below
+	the supported card frequencies, they will be overridden
+	during card init. In case this entry is not supplied,
+ the driver will construct one based on the card
+ supported max and min frequencies.
+ The frequencies must be ordered from lowest to highest.
+ - qcom,pm-qos-irq-type - the PM QoS request type to be used for IRQ voting.
+ Can be either "affine_cores" or "affine_irq". If not specified, will default
+ to "affine_cores". Use "affine_irq" setting in case an IRQ balancer is active,
+ and IRQ affinity changes during runtime.
+ - qcom,pm-qos-irq-cpu - specifies the CPU for which IRQ voting shall be done.
+ If "affine_cores" was specified for property 'qcom,pm-qos-irq-type'
+ then this property must be defined, and is not relevant otherwise.
+ - qcom,pm-qos-irq-latency - a tuple defining two latency values with which
+	PM QoS IRQ voting shall be done. The first value is the latency to be used
+	when load is high (performance mode) and the second is for low loads
+	(power saving mode).
+ - qcom,pm-qos-cpu-groups - defines the cpu groups mapping.
+	Each cell represents a group, which is a cpu bitmask defining which cpus belong
+	to that group.
+ - qcom,pm-qos-<mode>-latency-us - where <mode> is either "cmdq" or "legacy".
+	An array of latency value tuples, each tuple corresponding to a cpu group in the order
+	defined in property 'qcom,pm-qos-cpu-groups'. The first value is the latency to be used
+	when load is high (performance mode) and the second is for low loads
+	(power saving mode). These values will be used for cpu group voting for
+	command-queueing mode or legacy mode respectively.
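+	For example, qcom,pm-qos-cpu-groups = <0x03 0x0c> defines two groups,
+	cpus 0-1 (mask 0x03) and cpus 2-3 (mask 0x0c), and a matching
+	qcom,pm-qos-legacy-latency-us = <50 100>, <50 100> supplies one
+	<performance, power-save> latency tuple per group.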
+ - qcom,core_3_0v_support: an optional property that is used to fake
+ 3.0V support for SDIO devices.
+ - qcom,scaling-lower-bus-speed-mode: specifies the lower bus speed mode to be used
+ during clock scaling. If this property is not
+ defined, then it falls back to the default HS
+ bus speed mode to maintain backward compatibility.
+ - qcom,sdr104-wa: On certain chipsets, SDR104 mode might be unstable causing CRC errors
+ on the interface. So there is a workaround implemented to skip printing
+ register dumps on CRC errors and also downgrade bus speed mode to
+ SDR50/DDR50 in case of continuous CRC errors. Set this flag to enable
+ this workaround.
+
+In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
+ - qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
+ - qcom,<supply>-lpm_sup - specifies whether supply can be kept in low power mode (lpm).
+ - qcom,<supply>-voltage_level - specifies voltage levels for supply. Should be
+ specified in pairs (min, max), units uV.
+ - qcom,<supply>-current_level - specifies load levels for supply in lpm or
+ high power mode (hpm). Should be specified in
+ pairs (lpm, hpm), units uA.
+
+ - gpios - specifies gpios assigned for sdhc slot.
+ - qcom,gpio-names - a list of strings that map in order to the list of gpios
+
+ TLMM pins are specified as <clk cmd data> and, starting with eMMC 5.0, as
+ <clk cmd data rclk>.
+
+ - Refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+ for following optional properties:
+ - pinctrl-names
+ - pinctrl-0, pinctrl-1,.. pinctrl-n
+
+ - qcom,large-address-bus - specifies whether the soc is capable of
+	supporting an address bus wider than 32 bits.
+
+ - qcom,wakeup-on-idle: if configured, the mmcqd thread will call
+ set_wake_up_idle(), thereby voting for it to be called on idle CPUs.
Example:
- sdhc_1: sdhci@f9824900 {
- compatible = "qcom,sdhci-msm-v4";
- reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
- interrupts = <0 123 0>;
- bus-width = <8>;
- non-removable;
-
- vmmc-supply = <&pm8941_l20>;
- vqmmc-supply = <&pm8941_s3>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&sdc1_clk &sdc1_cmd &sdc1_data>;
-
- clocks = <&gcc GCC_SDCC1_APPS_CLK>, <&gcc GCC_SDCC1_AHB_CLK>;
- clock-names = "core", "iface";
+ aliases {
+ sdhc1 = &sdhc_1;
};
- sdhc_2: sdhci@f98a4900 {
- compatible = "qcom,sdhci-msm-v4";
- reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
- interrupts = <0 125 0>;
- bus-width = <4>;
- cd-gpios = <&msmgpio 62 0x1>;
+ sdhc_1: qcom,sdhc@f9824900 {
+ compatible = "qcom,sdhci-msm";
+ reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+ interrupts = <0 123 0>, <0 138 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
- vmmc-supply = <&pm8941_l21>;
- vqmmc-supply = <&pm8941_l13>;
+ vdd-supply = <&pm8941_l21>;
+ vdd-io-supply = <&pm8941_l13>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
- pinctrl-names = "default";
- pinctrl-0 = <&sdc2_clk &sdc2_cmd &sdc2_data>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
- clocks = <&gcc GCC_SDCC2_APPS_CLK>, <&gcc GCC_SDCC2_AHB_CLK>;
- clock-names = "core", "iface";
+ qcom,devfreq,freq-table = <52000000 200000000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_on &sdc1_data_on>;
+
+
+ qcom,bus-width = <4>;
+ qcom,nonremovable;
+ qcom,large-address-bus;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ qcom,scaling-lower-bus-speed-mode = "DDR52";
+
+ gpios = <&msmgpio 40 0>, /* CLK */
+ <&msmgpio 39 0>, /* CMD */
+ <&msmgpio 38 0>, /* DATA0 */
+ <&msmgpio 37 0>, /* DATA1 */
+ <&msmgpio 36 0>, /* DATA2 */
+ <&msmgpio 35 0>; /* DATA3 */
+ qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
+
+ qcom,pm-qos-irq-type = "affine_cores";
+ qcom,pm-qos-irq-cpu = <0>;
+ qcom,pm-qos-irq-latency = <500 100>;
+ qcom,pm-qos-cpu-groups = <0x03 0x0c>;
+ qcom,pm-qos-cmdq-latency-us = <50 100>, <50 100>;
+ qcom,pm-qos-legacy-latency-us = <50 100>, <50 100>;
+ };
+
+ sdhc_2: qcom,sdhc@f98a4900 {
+ qcom,pm-qos-irq-type = "affine_irq";
+ qcom,pm-qos-irq-latency = <120 200>;
};
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index a50e0c2..fc019bd 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -79,8 +79,12 @@
PCIe port PHY.
Should be specified in groups (offset, value, delay).
- qcom,use-19p2mhz-aux-clk: The frequency of PCIe AUX clock is 19.2MHz.
- - qcom,ep-wakeirq: The endpoint will issue wake signal when it is up, and the
- root complex has the capability to enumerate the endpoint for this case.
+ - qcom,boot-option: Bits that alter PCIe bus driver boot sequence.
+	The following details what happens when each bit is set:
+ BIT(0): PCIe bus driver will not start enumeration during its probe.
+ Clients will control when PCIe bus driver should do enumeration.
+ BIT(1): PCIe bus driver will not start enumeration if it receives a WAKE
+ interrupt.
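+	For example, an illustrative value that sets both bits, deferring
+	enumeration to the client and ignoring WAKE interrupts:
+	qcom,boot-option = <0x3>;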
- qcom,msi-gicm-addr: MSI address for GICv2m.
- qcom,msi-gicm-base: MSI IRQ base for GICv2m.
- qcom,ext-ref-clk: The reference clock is external.
@@ -263,7 +267,7 @@
qcom,aux-clk-sync;
qcom,n-fts = <0x50>;
qcom,pcie-phy-ver = <1>;
- qcom,ep-wakeirq;
+ qcom,boot-option = <0x1>;
qcom,msi-gicm-addr = <0xf9040040>;
qcom,msi-gicm-base = <0x160>;
qcom,ext-ref-clk;
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index ea828da..bc844de 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -16,6 +16,10 @@
If "halt_base" is in same 4K pages this register then
this will be defined else "halt_q6", "halt_modem",
"halt_nc" is required.
+ "pdc_sync" is the power domain register introduced in
+ sdm845 for power domain of subsystems.
+ If alternative reset is required, "alt_reset" maps to
+ mss_alt_ares.
- interrupts: The modem watchdog interrupt
- vdd_cx-supply: Reference to the regulator that supplies the vdd_cx domain.
- vdd_cx-voltage: Voltage corner/level(max) for cx rail.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl
new file mode 100644
index 0000000..9a69084
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl
@@ -0,0 +1,186 @@
+Qualcomm Technologies, Inc. SDXPOORWILLS TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+SDXPOORWILLS platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,sdxpoorwills-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+	Definition: must be 2, specifying the pin number and flags, as defined
+		    in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+	Definition: must be 2, specifying the pin number and flags, as defined
+		    in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode.
+
+ Valid pins are:
+ gpio0-gpio149
+ Supports mux, bias and drive-strength
+
+		    sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+		    sdc2_data, sdc1_rclk
+ Supports bias and drive-strength
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+
+ blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens,
+ bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8,
+ qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b,
+ dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10,
+ blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12,
+ mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11,
+ atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char,
+ cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b,
+ pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c,
+ qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4,
+ qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5,
+ atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6,
+ atest_usb20, atest_char0, dac_calib10, qdss_stm10,
+ qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6,
+ blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11,
+ qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1,
+ qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11,
+ dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6,
+ qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14,
+ dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem,
+ dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto,
+ dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0,
+ dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25,
+ sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2,
+ qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3,
+ uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9,
+ blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7,
+ qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11,
+ blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0,
+ cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4,
+ blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4,
+ qdss_stm14, dac_calib26, spkr_i2s, audio_ref, lpass_slimbus,
+ isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s,
+ qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b,
+ sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b,
+ gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12,
+ qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29,
+ tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27,
+ qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk,
+ sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b,
+ sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b,
+ ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b,
+ blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt,
+ pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11,
+ qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx,
+ qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3,
+ gpio
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+	Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+	Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+	Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@03900000 {
+ compatible = "qcom,sdxpoorwills-pinctrl";
+ reg = <0x03900000 0x300000>;
+ interrupts = <0 212 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
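+
+	A hypothetical pin configuration subnode placed under the tlmm node
+	above, assuming gpio4 and gpio5 are routed to blsp_uart1 (the pin
+	numbers are illustrative only):
+
+	blsp_uart1_active: blsp_uart1_active {
+		pins = "gpio4", "gpio5";
+		function = "blsp_uart1";
+		drive-strength = <2>;
+		bias-disable;
+	};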
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c7024e0..d8934c0 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -10,9 +10,13 @@
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean context flag to indicate whether
+		the NAPI framework is enabled
+- qcom,wan-rx-desc-size: size of the WAN rx descriptor fifo ring; default is 256
Example:
qcom,rmnet-ipa {
compatible = "qcom,rmnet-ipa";
+ qcom,wan-rx-desc-size = <256>;
}
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
index 3f55312..e9575f1 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -10,9 +10,13 @@
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean context flag to indicate whether
+		the NAPI framework is enabled
+- qcom,wan-rx-desc-size: size of the WAN rx descriptor fifo ring; default is 256
Example:
qcom,rmnet-ipa3 {
compatible = "qcom,rmnet-ipa3";
+ qcom,wan-rx-desc-size = <256>;
}
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 9638888..12d32ec 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -96,6 +96,14 @@
This value has to be specified in negative values for
the charging current.
+- qcom,fg-chg-term-base-current
+ Usage: optional
+ Value type: <u32>
+	Definition: Upper boundary of the battery current (in mA) at which the
+		    fuel gauge will issue an end of charge while discharging. If
+		    this property is not specified, then the default value used
+		    will be 75 mA.
+
- qcom,fg-delta-soc-thr
Usage: optional
Value type: <u32>
@@ -154,6 +162,20 @@
asleep and the battery is discharging. This option requires
qcom,fg-esr-timer-awake to be defined.
+- qcom,fg-esr-pulse-thresh-ma
+ Usage: optional
+ Value type: <u32>
+ Definition: ESR pulse qualification threshold in mA. If this is not
+ specified, a default value of 110 mA will be configured.
+ Allowed values are from 1 to 997.
+
+- qcom,fg-esr-meas-curr-ma
+ Usage: optional
+ Value type: <u32>
+ Definition: ESR measurement current in mA. If this is not specified,
+ a default value of 120 mA will be configured. Allowed
+ values are 60, 120, 180 and 240.
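+	For example, an illustrative selection of a non-default measurement
+	current from the allowed set:
+	qcom,fg-esr-meas-curr-ma = <180>;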
+
- qcom,cycle-counter-en
Usage: optional
Value type: <empty>
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index f4a22e0..e1f194f3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -169,6 +169,18 @@
Definition: Boolean flag which when present enables input suspend for
debug battery.
+- qcom,min-freq-khz
+ Usage: optional
+ Value type: <u32>
+	Definition: Specifies the minimum charger buck/boost switching frequency
+		in kHz. It overrides the min frequency defined for the charger.
+
+- qcom,max-freq-khz
+ Usage: optional
+ Value type: <u32>
+	Definition: Specifies the maximum charger buck/boost switching frequency
+		in kHz. It overrides the max frequency defined for the charger.
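+	For example, an illustrative pair of overrides (the values shown are
+	hypothetical):
+	qcom,min-freq-khz = <600>;
+	qcom,max-freq-khz = <1200>;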
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
index c8f2a5a..5529e308 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -22,7 +22,8 @@
Definition: String which indicates the charging mode. Can be one of the
following:
Standalone/Parallel Master - "qcom,smb138x-charger"
- Parallel Slave - "qcom,smb138x-parallel-slave"
+ smb138x Parallel Slave - "qcom,smb138x-parallel-slave"
+			smb1355 Parallel Slave - "qcom,smb1355-parallel-slave"
- qcom,pmic-revid
Usage: required
@@ -31,6 +32,13 @@
revid module. This is used to identify
the SMB subtype.
+- qcom,parallel-mode
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies parallel charging mode. If not specified, MID-MID
+ option is selected by default. Note that smb1355 can only
+ run in MID-MID configuration.
+
- qcom,suspend-input
Usage: optional
Value type: <empty>
@@ -119,7 +127,7 @@
=======
smb138x_charger: qcom,smb138x-charger {
- compatible = "qcom,qpnp-smb138x-charger";
+ compatible = "qcom,smb138x-charger";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/prng/msm-rng.txt b/Documentation/devicetree/bindings/prng/msm-rng.txt
new file mode 100644
index 0000000..917c2fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/prng/msm-rng.txt
@@ -0,0 +1,18 @@
+* RNG (Random Number Generator)
+
+Required properties:
+- compatible : Should be "qcom,msm-rng"
+- reg : Offset and length of the register set for the device
+
+Optional property:
+- qcom,msm-rng-iface-clk : If the device uses iface-clk.
+- qcom,no-qrng-config : Flag to decide whether the driver does the hardware configuration or not.
+
+Example:
+
+ qcom,msm-rng@f9bff000 {
+ compatible = "qcom,msm-rng";
+ reg = <0xf9bff000 0x200>;
+ qcom,msm-rng-iface-clk;
+ qcom,no-qrng-config;
+ };
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
new file mode 100644
index 0000000..8fbf8e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -0,0 +1,85 @@
+* QSEECOM (QTI Secure Execution Environment Communicator)
+
+Required properties:
+- compatible : Should be "qcom,qseecom"
+- reg : should contain memory region address reserved for loading secure apps.
+- qcom,disk-encrypt-pipe-pair : indicates which CE HW pipe pair is used for disk encryption
+- qcom,file-encrypt-pipe-pair : indicates which CE HW pipe pair is used for file encryption
+- qcom,support-multiple-ce-hw-instance : indicates whether multiple CE HW instances are supported.
+- qcom,hlos-num-ce-hw-instances : indicates the number of CE HW instances HLOS can use.
+- qcom,hlos-ce-hw-instance : indicates which CE HW is used by the HLOS crypto driver
+- qcom,qsee-ce-hw-instance : indicates which CE HW is used by the secure domain (TZ) crypto driver
+- qcom,msm_bus,name: Should be "qseecom-noc"
+- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
+- qcom,msm_bus,num_paths: The paths for source and destination ports
+- qcom,msm_bus,vectors: Vectors for bus topology.
+- qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+- qcom,full-disk-encrypt-info : Vectors defining full disk encryption unit, crypto engine, pipe pair configuration in <unit#, ce#, pipe-pair#>
+- qcom,per-file-encrypt-info : Vectors defining per file encryption unit, crypto engine, pipe pair configuration in <unit#, ce#, pipe-pair#>
+
+Optional properties:
+ - qcom,support-bus-scaling : indicates whether the driver supports scaling the bus for crypto operations.
+ - qcom,support-fde : indicates whether the driver supports key management for the full disk encryption feature.
+ - qcom,support-pfe : indicates whether the driver supports key management for the per file encryption feature.
+ - qcom,no-clock-support : indicates clocks are not handled by qseecom (could be handled by RPM)
+ - qcom,appsbl-qseecom-support : indicates whether qseecom is supported in the apps bootloader
+ - vdd-hba-supply : handle for a fixed power regulator
+ - qcom,qsee-reentrancy-support: indicates the qsee reentrancy phase supported by the target
+ - qcom,commonlib64-loaded-by-uefi: indicates commonlib64 is already loaded by UEFI
+ - qcom,fde-key-size: indicates which FDE key size is used on the device.
+
+Example:
+ qcom,qseecom@fe806000 {
+ compatible = "qcom,qseecom";
+ reg = <0x7f00000 0x500000>;
+ reg-names = "secapp-region";
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,file-encrypt-pipe-pair = <0>;
+ qcom,support-multiple-ce-hw-instance;
+ qcom,hlos-num-ce-hw-instances = <2>;
+ qcom,hlos-ce-hw-instance = <1 2>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,support-fde;
+ qcom,support-pfe;
+ qcom,msm_bus,name = "qseecom-noc";
+ qcom,msm_bus,num_cases = <4>;
+ qcom,msm_bus,active_only = <0>;
+ qcom,msm_bus,num_paths = <1>;
+ qcom,no-clock-support;
+ qcom,appsbl-qseecom-support;
+ qcom,fde-key-size;
+ qcom,msm_bus,vectors =
+ <55 512 0 0>,
+ <55 512 3936000000 393600000>,
+ <55 512 3936000000 393600000>,
+ <55 512 3936000000 393600000>;
+ qcom,ce-opp-freq = <100000000>;
+ vdd-hba-supply = <&gdsc_ufs>;
+ };
+
+Example: The following dts setup is the same as the example above.
+
+ qcom,qseecom@fe806000 {
+ compatible = "qcom,qseecom";
+ reg = <0x7f00000 0x500000>;
+ reg-names = "secapp-region";
+ qcom,support-fde;
+ qcom,full-disk-encrypt-info = <0 1 2>, <0 2 2>;
+ qcom,support-pfe;
+ qcom,per-file-encrypt-info = <0 1 0>, <0 2 0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,msm_bus,name = "qseecom-noc";
+ qcom,msm_bus,num_cases = <4>;
+ qcom,msm_bus,active_only = <0>;
+ qcom,msm_bus,num_paths = <1>;
+ qcom,no-clock-support;
+ qcom,appsbl-qseecom-support;
+ qcom,fde-key-size;
+ qcom,msm_bus,vectors =
+ <55 512 0 0>,
+ <55 512 3936000000 393600000>,
+ <55 512 3936000000 393600000>,
+ <55 512 3936000000 393600000>;
+ qcom,ce-opp-freq = <100000000>;
+ vdd-hba-supply = <&gdsc_ufs>;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
index 5bf560e..846bd22 100644
--- a/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpr3-regulator.txt
@@ -216,6 +216,15 @@
as the corresponding addresses are specified in
the qcom,cpr-panic-reg-addr-list property.
+- qcom,cpr-reset-step-quot-loop-en
+ Usage: optional; only meaningful for CPR4 and CPRh controllers
+ Value type: <empty>
+ Definition: Boolean value which indicates that the CPR controller should
+ be configured to reset step_quot on each loop_en = 0
+ transition. This configuration allows the CPR controller to
+ first use the default step_quot and then later switch to the
+ run-time calibrated step_quot.
+
- qcom,saw-avs-ctrl
Usage: required if "saw" registers are specified by reg and
reg-names properties
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
index e53b691..0173a3d 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
@@ -16,6 +16,11 @@
Should be "active" and "sleep" for the pin configuration when core is active
or when entering sleep state.
+Optional properties:
+- qcom,bus-mas: contains the bus master ID needed to place bus bandwidth votes
+  on the interconnect buses.
+- qcom,wakeup-byte: Byte to be injected into the TTY layer during the wakeup ISR.
+
Example:
qupv3_uart11: qcom,qup_uart@0xa88000 {
compatible = "qcom,msm-geni-uart";
@@ -29,4 +34,6 @@
pinctrl-0 = <&qup_1_uart_3_active>;
pinctrl-1 = <&qup_1_uart_3_sleep>;
interrupts = <0 355 0>;
+ qcom,bus-mas = <MASTER_BLSP_2>;
+ qcom,wakeup-byte = <0xFF>;
};
diff --git a/Documentation/devicetree/bindings/soc/qcom/dcc.txt b/Documentation/devicetree/bindings/soc/qcom/dcc.txt
index 0fd4e15..8a9761c 100644
--- a/Documentation/devicetree/bindings/soc/qcom/dcc.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/dcc.txt
@@ -14,6 +14,11 @@
of the component.
- reg-names : names corresponding to each reg property value.
+ dcc-base: Base address for DCC configuration reg
+ dcc-ram-base: Start of HLOS address space in SRAM
+ dcc-xpu-base: Base address for XPU configuration reg
+
+- dcc-ram-offset: Address offset from the start of the SRAM address space.
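+
+An illustrative reg/reg-names fragment (the addresses below are hypothetical):
+
+	reg = <0x4b3000 0x1000>,
+	      <0x4b4000 0x2000>,
+	      <0x4b0000 0x1>;
+	reg-names = "dcc-base", "dcc-ram-base", "dcc-xpu-base";
+	dcc-ram-offset = <0x6000>;
+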
Optional properties:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
new file mode 100644
index 0000000..d7aefbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
@@ -0,0 +1,30 @@
+QPNP PBS
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) PBS is a programmable boot
+sequence, and this driver helps client drivers trigger such sequences, which
+are configured in the PMIC.
+
+This document describes the bindings for QPNP PBS driver.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: should be "qcom,qpnp-pbs".
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Base address of the PBS registers.
+
+
+=======
+Example
+=======
+ pm660l_pbs: qcom,pbs@7300 {
+ compatible = "qcom,qpnp-pbs";
+ reg = <0x7300 0x100>;
+ };
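+
+A client device would typically refer to this node through a phandle, for
+example (the client property name below is illustrative; the actual name is
+defined by the client driver's own binding):
+
+	qcom,pbs-client = <&pm660l_pbs>;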
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
new file mode 100644
index 0000000..ceac719
--- /dev/null
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
@@ -0,0 +1,63 @@
+Qualcomm Technologies, Inc. SPMI Debug Controller (PMIC Arbiter)
+
+The SPMI PMIC Arbiter is found on various QTI chips. It is an SPMI controller
+with wrapping arbitration logic to allow for multiple on-chip devices to control
+a single SPMI master.
+
+The PMIC Arbiter debug bus is present starting at arbiter version 5. It has
+read and write access to all PMIC peripherals regardless of ownership
+configurations. It cannot be used on production devices because it is disabled
+by an eFuse.
+
+See spmi.txt for the generic SPMI controller binding requirements for child
+nodes.
+
+Supported Properties:
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must be "qcom,spmi-pmic-arb-debug".
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: List of address and size pairs. The address of the PMIC
+ arbiter module is required. The address of the debug bus
+ disabling fuse is optional.
+
+- reg-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: Address names. Must include "core" for the PMIC arbiter
+ module and may include "fuse" for the debug bus disabling
+ fuse. The strings must be specified in the same order as
+ the corresponding addresses are specified in the reg
+ property.
+
+- #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 2.
+
+- #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 0.
+
+- qcom,fuse-disable-bit
+ Usage: required if "fuse" is listed in reg-names property
+ Value type: <u32>
+ Definition: The bit within the fuse register which is set when the debug
+ bus is not available. Supported values are 0 to 31.
+
+Example:
+
+qcom,spmi-debug@6b22000 {
+ compatible = "qcom,spmi-pmic-arb-debug";
+ reg = <0x6b22000 0x60>, <0x7820A8 4>;
+ reg-names = "core", "fuse";
+ qcom,fuse-disable-bit = <12>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/qcom-bcl.txt b/Documentation/devicetree/bindings/thermal/qcom-bcl.txt
new file mode 100644
index 0000000..449cbad
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-bcl.txt
@@ -0,0 +1,44 @@
+===============================================================================
+BCL PMIC Peripheral driver:
+===============================================================================
+Qualcomm Technologies, Inc.'s PMIC has a battery current limiting peripheral,
+which can monitor for high battery current and low battery voltage in the
+hardware. The BCL peripheral driver interacts with the PMIC peripheral using
+the SPMI driver interface. The hardware accepts thresholds for notifying the
+software of high battery current or low battery voltage events.
+
+Required Parameters:
+- compatible: must be
+ 'qcom,msm-bcl-lmh' for bcl peripheral with LMH DCVSh interface.
+- reg: <a b> where 'a' is the starting register address of the PMIC
+ peripheral and 'b' is the size of the peripheral address space.
+	If the BCL inhibit current derating feature is enabled, this must also
+	include the PON spare registers. Example: <a b c d> where
+	c is the first PON spare register that will be written and d is the
+	size of the register space that needs to be written. Certain versions
+	of the PMIC can send the interrupt to the LMH hardware driver directly.
+	In that case the shadow peripheral address space should be specified
+	along with the BCL peripheral's address.
+- interrupts: <a b c> Where 'a' is the SLAVE ID of the PMIC, 'b' is
+ the peripheral ID and 'c' is the interrupt number in PMIC.
+- interrupt-names: user-defined names for the interrupts. These
+	interrupt names will be used by the drivers to identify the
+	interrupts, instead of specifying the IDs. The BCL driver will
+	accept these five standard interrupts:
+ "bcl-low-vbat"
+ "bcl-very-low-vbat"
+ "bcl-crit-low-vbat"
+ "bcl-high-ibat"
+ "bcl-very-high-ibat"
+
+
+Optional Parameters: none
+
+Example:
+	bcl@4200 {
+		compatible = "qcom,msm-bcl-lmh";
+		reg = <0x4200 0xFF 0x88e 0x2>;
+		interrupts = <0x2 0x42 0x0>,
+			<0x2 0x42 0x1>;
+		interrupt-names = "bcl-high-ibat",
+				"bcl-low-vbat";
+	};
diff --git a/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
new file mode 100644
index 0000000..8bead0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
@@ -0,0 +1,47 @@
+Limits Management Hardware - DCVS
+
+The LMH-DCVS block is a hardware IP block, present for every CPU cluster, that
+handles quick changes in thermal limits. The hardware responds to thermal
+variation among the CPUs in the cluster by requesting limits on the clock
+frequency and voltage from the OSM hardware.
+
+The LMH DCVS driver exports a virtual sensor that can be used to set the
+thermal limits on the hardware. The LMH DCVS driver can act as a platform CPU
+cooling device, which registers with the CPU cooling device interface. All CPU
+device nodes should reference the corresponding LMH DCVS hardware in the device
+tree. CPUs referencing the same LMH DCVS node will be associated with the
+corresponding cooling device as related CPUs.
+
+Properties:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: shall be "qcom,msm-hw-limits"
+- interrupts:
+ Usage: required
+ Value type: <interrupt_type interrupt_number interrupt_trigger_type>
+ Definition: Should specify interrupt information about the debug
+ interrupt generated by the LMH DCVSh hardware. LMH
+ DCVSh hardware will generate this interrupt whenever
+			it makes a new CPU DCVS decision.
+- qcom,affinity:
+	Usage: required
+ Value type: <u32>
+ Definition: Should specify the cluster affinity this hardware
+ corresponds to.
+
+Example:
+
+ lmh_dcvs0: qcom,limits-dcvs@0 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,affinity = <0>;
+ };
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0 0x0>;
+		qcom,lmh-dcvs = <&lmh_dcvs0>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
index bb20644..fc8ec87 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
@@ -12,10 +12,17 @@
- interrupts: PMIC temperature alarm interrupt
- label: A string used as a descriptive name for this thermal device.
This name should be 19 characters or less.
+- #thermal-sensor-cells: Must be 0. Please refer to
+ <devicetree/bindings/thermal/thermal.txt> for more
+ details.
Required structure:
- A qcom,qpnp-temp-alarm node must be a child of an SPMI node that has specified
the spmi-slave-container property
+- A top level device tree node named "thermal-zones" must exist. It must
+ contain a subnode with a property named "thermal-sensors" which is assigned
+ a phandle to the qpnp-temp-alarm device node. See
+ <devicetree/bindings/thermal/thermal.txt> for more details.
Optional properties:
- qcom,channel-num: VADC channel number associated with the PMIC DIE_TEMP thermistor.
@@ -38,11 +45,6 @@
1 = 50 Hz
2 = 25 Hz
3 = 12.5 Hz
-- qcom,allow-override: Boolean which controls the ability of software to
- override shutdowns. If present, then software is
- allowed to override automatic PMIC hardware stage 2 and
- stage 3 over temperature shutdowns. Otherwise, software
- is not allowed to override automatic shutdown.
- qcom,default-temp: Specifies the default temperature in millicelsius to use
if no ADC channel is present to read the real time
temperature.
@@ -64,7 +66,7 @@
#address-cells = <1>;
#size-cells = <1>;
- qcom,temp-alarm@2400 {
+ pm8941_tz: qcom,temp-alarm@2400 {
compatible = "qcom,qpnp-temp-alarm";
reg = <0x2400 0x100>;
interrupts = <0x0 0x24 0x0>;
@@ -72,6 +74,36 @@
qcom,channel-num = <8>;
qcom,threshold-set = <0>;
qcom,temp_alarm-vadc = <&pm8941_vadc>;
+ #thermal-sensor-cells = <0>;
+ };
+ };
+};
+
+Below is an example thermal zone definition for the temperature alarm
+peripheral.
+thermal-zones {
+ pm8941_tz {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
+ thermal-sensors = <&pm8941_tz>;
+
+ trips {
+ pm8941-trip0 {
+ temperature = <105000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ pm8941-trip1 {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ pm8941-trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 1065456..3e59c43 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -21,34 +21,25 @@
The compatible property is used to identify the respective controller to use
for the corresponding SoC.
- reg : offset and length of the TSENS registers with associated property in reg-names
- as "tsens_physical" for TSENS TM physical address region.
+	as "tsens_srot_physical" for the TSENS SROT physical address region and
+	"tsens_tm_physical" for the TSENS TM physical address region.
- reg-names : resource names used for the physical address of the TSENS
- registers. Should be "tsens_physical" for physical address of the TSENS.
+ registers. Should be "tsens_srot_physical" for physical address of the TSENS
+ SROT region and "tsens_tm_physical" for physical address of the TM region.
- interrupts : TSENS interrupt to notify Upper/Lower and Critical temperature threshold.
- interrupt-names: Should be "tsens-upper-lower" for temperature threshold.
Add "tsens-critical" for Critical temperature threshold notification
in addition to "tsens-upper-lower" for 8996 TSENS since
8996 supports Upper/Lower and Critical temperature threshold.
-- qcom,sensors : Total number of available Temperature sensors for TSENS.
-
-Optional properties:
-- qcom,sensor-id : If the flag is present map the TSENS sensors based on the
- remote sensors that are enabled in HW. Ensure the mapping is not
- more than the number of supported sensors.
-- qcom,client-id : If the flag is present use it to identify the SW ID mapping
- used to associate it with the controller and the physical sensor
- mapping within the controller. The physical sensor mapping within
- each controller is done using the qcom,sensor-id property. If the
- property is not present the SW ID mapping with default from 0 to
- total number of supported sensors with each controller instance.
Example:
tsens@fc4a8000 {
compatible = "qcom,msm-tsens";
- reg = <0xfc4a8000 0x2000>;,
- reg-names = "tsens_physical";
+ reg = <0xfc4a8000 0x10>,
+ <0xfc4b8000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
interrupts = <0 184 0>;
interrupt-names = "tsens-upper-lower";
- qcom,sensors = <11>;
};
diff --git a/Documentation/devicetree/bindings/uio/msm_sharedmem.txt b/Documentation/devicetree/bindings/uio/msm_sharedmem.txt
new file mode 100644
index 0000000..749c6e85
--- /dev/null
+++ b/Documentation/devicetree/bindings/uio/msm_sharedmem.txt
@@ -0,0 +1,18 @@
+msm_sharedmem provides the shared memory addresses for various clients in user-space
+
+Required properties:
+- compatible: Must be "qcom,sharedmem-uio"
+- reg : The address and size of the shared memory. The address/sizes may vary.
+	A reg address of zero indicates that the shared memory is dynamically
+	allocated using dma_alloc_coherent. A non-zero reg address is used
+	directly.
+- reg-names : Indicates various client-names.
+- qcom,client-id : The client id for the QMI clients.
+
+Example:
+ qcom,msm_sharedmem@0dc80000 {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0dc80000 0x00180000>,
+		reg = <0x0dc80000 0x00180000>;
+ qcom,client-id = <0x00000001>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 4a81034..609d853 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -56,6 +56,8 @@
fladj_30mhz_sdbnd signal is invalid or incorrect.
- snps,disable-clk-gating: If present, disable controller's internal clock
gating. By default it is enabled.
+ - snps,xhci-imod-value: Interrupt moderation interval for host mode
+ (in increments of 250nsec).
This is usually a subnode to DWC3 glue to which it is connected.
@@ -65,4 +67,5 @@
interrupts = <0 92 4>
usb-phy = <&usb2_phy>, <&usb3_phy>;
tx-fifo-resize;
+ snps,xhci-imod-value = <4000>;
};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 8e5782a..e508a4f 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -4,12 +4,12 @@
Required properties:
- compatible: Should be "qcom,usb-ssphy-qmp", "qcom,usb-ssphy-qmp-v1" or
- "qcom,usb-ssphy-qmp-v2"
+ "qcom,usb-ssphy-qmp-v2" or "qcom,usb-ssphy-qmp-dp-combo"
- reg: Address and length of the register set for the device
Required regs are:
"qmp_phy_base" : QMP PHY Base register set.
- "vls_clamp_reg" : top-level CSR register to be written to enable phy vls
- clamp which allows phy to detect autonomous mode.
+ clamp which allows phy to detect autonomous mode. (optional for USB DP PHY)
- <supply-name>-supply: phandle to the regulator device tree node
Required "supply-name" examples are:
"vdd" : vdd supply for SSPHY digital circuit operation
@@ -24,13 +24,28 @@
- qcom,qmp-phy-init-seq: QMP PHY initialization sequence with reg offset, its
value, delay after register write. It is not a mandatory property for emulation.
- qcom,qmp-phy-reg-offset: Provides important phy register offsets in an order
- defined in the phy driver. Provide below mentioned register offsets in order:
+ defined in the phy driver.
+  Provide the below-mentioned register offsets, in order, for a non USB DP combo PHY:
USB3_PHY_PCS_STATUS,
USB3_PHY_AUTONOMOUS_MODE_CTRL,
USB3_PHY_LFPS_RXTERM_IRQ_CLEAR,
USB3_PHY_POWER_DOWN_CONTROL,
USB3_PHY_SW_RESET,
USB3_PHY_START
+
+  In addition to the above, the following register offsets are needed for the
+  USB DP combo PHY, in the mentioned order (an illustrative fragment follows
+  at the end of this section):
+ USB3_DP_DP_PHY_PD_CTL,
+ USB3_DP_COM_POWER_DOWN_CTRL,
+ USB3_DP_COM_SW_RESET,
+ USB3_DP_COM_RESET_OVRD_CTRL,
+ USB3_DP_COM_PHY_MODE_CTRL,
+ USB3_DP_COM_TYPEC_CTRL,
+ USB3_DP_COM_SWI_CTRL,
+ USB3_PCS_MISC_CLAMP_ENABLE
+
+ Optional register for configuring USB Type-C port select if available:
+ USB3_PHY_PCS_MISC_TYPEC_CTRL
+
- resets: reset specifier pair consists of phandle for the reset controller
and reset lines used by this controller.
- reset-names: reset signal name strings sorted in the same order as the resets
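+
+An illustrative fragment for a non USB DP combo PHY (the offsets below are
+hypothetical; actual values depend on the PHY revision and must follow the
+order listed above):
+
+	qcom,qmp-phy-reg-offset =
+		<0x974>, /* USB3_PHY_PCS_STATUS */
+		<0xd88>, /* USB3_PHY_AUTONOMOUS_MODE_CTRL */
+		<0xd8c>, /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */
+		<0x804>, /* USB3_PHY_POWER_DOWN_CONTROL */
+		<0x800>, /* USB3_PHY_SW_RESET */
+		<0x808>; /* USB3_PHY_START */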
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 18056ee..bc66690 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -66,6 +66,7 @@
event buffers. 1 event buffer is needed per h/w accelerated endpoint.
- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
which is used as a vote by driver to get max performance in perf mode.
+- qcom,smmu-s1-bypass: If present, configure SMMU to bypass stage 1 translation.
Sub nodes:
- Sub node for "DWC3- USB3 controller".
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 966885c..7790c81 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -26,6 +26,7 @@
Optional properties:
- clocks: reference to a clock
- usb3-lpm-capable: determines if platform is USB3 LPM capable
+ - quirk-broken-port-ped: set if the controller has broken port disable mechanism
Example:
usb@f0931000 {
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
index 8f3d96a..1f6e101 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
@@ -6,10 +6,11 @@
Required properties:
- compatible : should be one among the following
- (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs
- (b) "samsung,exynos5250-wdt" for Exynos5250
- (c) "samsung,exynos5420-wdt" for Exynos5420
- (c) "samsung,exynos7-wdt" for Exynos7
+ - "samsung,s3c2410-wdt" for S3C2410
+ - "samsung,s3c6410-wdt" for S3C6410, S5PV210 and Exynos4
+ - "samsung,exynos5250-wdt" for Exynos5250
+ - "samsung,exynos5420-wdt" for Exynos5420
+ - "samsung,exynos7-wdt" for Exynos7
- reg : base physical address of the controller and length of memory mapped
region.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9877ebf..8527965 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -306,6 +306,16 @@
use by PCI
Format: <irq>,<irq>...
+ acpi_mask_gpe= [HW,ACPI]
+ Due to the existence of _Lxx/_Exx, some GPEs triggered
+ by unsupported hardware/firmware features can result in
+ GPE floodings that cannot be automatically disabled by
+ the GPE dispatcher.
+ This facility can be used to prevent such uncontrolled
+ GPE floodings.
+ Format: <int>
+ Support masking of GPEs numbered from 0x00 to 0x7f.
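+			Example: "acpi_mask_gpe=0x10" masks GPE 0x10 (the
+			GPE number here is illustrative).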
+
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
AML control methods that contain the opcodes to create
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 379dc99..1d832d4 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,12 +8,29 @@
force_ro Enforce read-only access even if write protect switch is off.
+ num_wr_reqs_to_start_packing This attribute is used to determine
+ the trigger for activating the write packing, in case the write
+ packing control feature is enabled.
+
+ When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+ write requests could be packed, it enables the write packing feature.
+ This allows us to start the write packing only when it is beneficial
+	and has minimal effect on the read latency.
+
+ The number of potential packed requests that will trigger the packing
+ can be configured via sysfs by writing the required value to:
+ /sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
+
+ The default value of num_wr_reqs_to_start_packing was determined by
+ running parallel lmdd write and lmdd read operations and calculating
+	the maximum number of packed write requests.
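+
+	For example, to set the trigger to 17 potential packed requests (the
+	value and block device name are illustrative):
+	echo 17 > /sys/block/mmcblk0/num_wr_reqs_to_start_packing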
+
SD and MMC Device Attributes
============================
All attributes are read-only.
- cid Card Identifaction Register
+ cid Card Identification Register
csd Card Specific Data Register
scr SD Card Configuration Register (SD only)
date Manufacturing Date (from CID Register)
@@ -84,3 +101,41 @@
clkgate_delay Tune the clock gating delay with desired value in milliseconds.
echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
+
+SD/MMC/SDIO Clock Scaling Attributes
+====================================
+
+Read and write accesses are provided to following attributes.
+
+ polling_interval Measured in milliseconds, this attribute
+ defines how often we need to check the card
+ usage and make decisions on frequency scaling.
+
+ up_threshold This attribute defines what should be the
+ average card usage between the polling
+ interval for the mmc core to make a decision
+ on whether it should increase the frequency.
+				For example, when it is set to '35', it means
+				that between the checking intervals the card
+				needs to be on average more than 35% in use to
+				scale up the frequency. The value should be
+				between 0 and 100 so that it can be compared
+				against the load percentage.
+
+ down_threshold Similar to up_threshold, but on lowering the
+ frequency. For example, when it is set to '2'
+ it means that between the checking intervals
+ the card needs to be on average less than 2%
+				in use to scale down the clocks to the minimum
+				frequency. The value should be between 0 and
+				100 so that it can be compared against the
+				load percentage.
+
+ enable Enable clock scaling for hosts (and cards)
+ that support ultrahigh speed modes
+ (SDR104, DDR50, HS200).
+
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/polling_interval
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/up_threshold
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/down_threshold
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/enable
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 4d82e31..501af5d 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -124,7 +124,7 @@
.. code-block:: none
- Cc: <stable@vger.kernel.org> # 3.3.x-
+ Cc: <stable@vger.kernel.org> # 3.3.x
The tag has the meaning of:
diff --git a/Makefile b/Makefile
index e70a1eb..06a55b5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 20
+SUBLEVEL = 26
EXTRAVERSION =
NAME = Roaring Lionus
@@ -374,7 +374,7 @@
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
+CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index b65930a..54b54da 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,10 +17,11 @@
#include <asm/barrier.h>
#include <asm/smp.h>
+#define ATOMIC_INIT(i) { (i) }
+
#ifndef CONFIG_ARC_PLAT_EZNPS
#define atomic_read(v) READ_ONCE((v)->counter)
-#define ATOMIC_INIT(i) { (i) }
#ifdef CONFIG_ARC_HAS_LLSC
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e..aee1a77 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
;
; Now manually save: r12, sp, fp, gp, r25
+ PUSH r30
PUSH r12
; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
POPAX AUX_USER_SP
1:
POP r12
+ POP r30
.endm
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da..47111d5 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
- unsigned long r12;
+ unsigned long r12, r30;
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b174261..d04e168 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -550,7 +550,7 @@
config ARCH_QCOM
bool "Qualcomm MSM (non-multiplatform)"
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select CPU_V7
select AUTO_ZRELADDR
select HAVE_SMP
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index ae4b388..4616452 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
timer@20200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x20200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
local-timer@20600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x20600 0x100>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 14422e5..7eb0c7f 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -1,4 +1,5 @@
+dtb-$(CONFIG_ARCH_SDXPOORWILLS) += sdxpoorwills-rumi.dtb
ifeq ($(CONFIG_ARM64),y)
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
new file mode 100644
index 0000000..ac02429
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ tlmm: pinctrl@3900000 {
+ compatible = "qcom,sdxpoorwills-pinctrl";
+ reg = <0x3900000 0x300000>;
+ interrupts = <0 212 0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart2_console_active: uart2_console_active {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ };
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
new file mode 100644
index 0000000..78a26c2
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
@@ -0,0 +1,30 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+
+#include "sdxpoorwills.dtsi"
+#include "sdxpoorwills-pinctrl.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDXPOORWILLS RUMI";
+ compatible = "qcom,sdxpoorwills-rumi",
+ "qcom,sdxpoorwills", "qcom,rumi";
+ qcom,board-id = <15 0>;
+};
+
+&blsp1_uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_console_active>;
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
new file mode 100644
index 0000000..0078617
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -0,0 +1,160 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDX POORWILLS";
+ compatible = "qcom,sdxpoorwills";
+ qcom,msm-id = <334 0x0>;
+ interrupt-parent = <&intc>;
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ peripheral2_mem: peripheral2_region@8fd00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x8fd00000 0x300000>;
+ label = "peripheral2_mem";
+ };
+ };
+
+ cpus {
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ CPU0: cpu@0 {
+			device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x0>;
+ };
+ };
+
+ soc: soc { };
+};
+
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ intc: interrupt-controller@17800000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x17800000 0x1000>,
+ <0x17802000 0x1000>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 12 0xf08>,
+ <1 10 0xf08>,
+ <1 11 0xf08>;
+ clock-frequency = <19200000>;
+ };
+
+ timer@17820000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17820000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@17821000 {
+ frame-number = <0>;
+ interrupts = <0 7 0x4>,
+ <0 6 0x4>;
+ reg = <0x17821000 0x1000>,
+ <0x17822000 0x1000>;
+ };
+
+ frame@17823000 {
+ frame-number = <1>;
+ interrupts = <0 8 0x4>;
+ reg = <0x17823000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17824000 {
+ frame-number = <2>;
+ interrupts = <0 9 0x4>;
+ reg = <0x17824000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17825000 {
+ frame-number = <3>;
+ interrupts = <0 10 0x4>;
+ reg = <0x17825000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17826000 {
+ frame-number = <4>;
+ interrupts = <0 11 0x4>;
+ reg = <0x17826000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17827000 {
+ frame-number = <5>;
+ interrupts = <0 12 0x4>;
+ reg = <0x17827000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17828000 {
+ frame-number = <6>;
+ interrupts = <0 13 0x4>;
+ reg = <0x17828000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17829000 {
+ frame-number = <7>;
+ interrupts = <0 14 0x4>;
+ reg = <0x17829000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ clock_gcc: qcom,gcc@100000 {
+ compatible = "qcom,dummycc";
+ clock-output-names = "gcc_clocks";
+ #clock-cells = <1>;
+ };
+
+ clock_cpu: qcom,clock-a7@17810008 {
+ compatible = "qcom,dummycc";
+ clock-output-names = "cpu_clocks";
+ #clock-cells = <1>;
+ };
+
+ blsp1_uart2: serial@831000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x831000 0x200>;
+ interrupts = <0 26 0>;
+ status = "disabled";
+ clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&clock_gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ };
+};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 8f79b41..acdcbf9 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -680,6 +680,7 @@
phy-names = "usb2-phy", "usb3-phy";
phys = <&usb2_picophy0>,
<&phy_port2 PHY_TYPE_USB3>;
+ snps,dis_u3_susphy_quirk;
};
};
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
new file mode 100644
index 0000000..1f6d2cc
--- /dev/null
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -0,0 +1,289 @@
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDXPOORWILLS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMA=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_ARP=y
+CONFIG_BRIDGE_EBT_IP=y
+CONFIG_BRIDGE_EBT_IP6=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_PINCTRL_SDXPOORWILLS=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_THERMAL=y
+CONFIG_REGULATOR=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_GSI=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_USB_BAM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_QCOM_SCM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_SMEM=y
+CONFIG_TRACER_PKT=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
+CONFIG_ANDROID=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
new file mode 100644
index 0000000..5d61163
--- /dev/null
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -0,0 +1,295 @@
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDXPOORWILLS=y
+# CONFIG_VDSO is not set
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMA=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_ARP=y
+CONFIG_BRIDGE_EBT_IP=y
+CONFIG_BRIDGE_EBT_IP6=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_HVC_DCC=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SLIMBUS=y
+CONFIG_PINCTRL_SDXPOORWILLS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_THERMAL=y
+CONFIG_MSM_CDC_PINCTRL=y
+CONFIG_MSM_CDC_SUPPLY=y
+CONFIG_REGULATOR=y
+CONFIG_FB=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD=y
+CONFIG_QCOM_SCM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_TRACER_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
+CONFIG_ANDROID=y
+CONFIG_STM=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DEBUG_USER=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_XZ_DEC=y
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f..b4e74af 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -24,6 +24,8 @@
struct kref kref;
};
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
@@ -33,5 +35,29 @@
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
+#else /* !CONFIG_ARM_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
+{
+ return NULL;
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif /* CONFIG_ARM_DMA_USE_IOMMU */
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 7e45f69..8e8d20c 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -178,6 +178,6 @@
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC
-EXPORT_SYMBOL(arm_smccc_smc);
-EXPORT_SYMBOL(arm_smccc_hvc);
+EXPORT_SYMBOL(__arm_smccc_smc);
+EXPORT_SYMBOL(__arm_smccc_hvc);
#endif
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 2e48b67..e5d4306 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -46,17 +46,19 @@
/*
* void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
*/
-ENTRY(arm_smccc_smc)
+ENTRY(__arm_smccc_smc)
SMCCC SMCCC_SMC
-ENDPROC(arm_smccc_smc)
+ENDPROC(__arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
*/
-ENTRY(arm_smccc_hvc)
+ENTRY(__arm_smccc_hvc)
SMCCC SMCCC_HVC
-ENDPROC(arm_smccc_hvc)
+ENDPROC(__arm_smccc_hvc)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index a5265ed..2fd5c13 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
+ assert_spin_locked(&kvm->mmu_lock);
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
if (!stage2_pgd_none(*pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
+ /*
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+ if (next != end)
+ cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
@@ -803,6 +810,7 @@
int idx;
idx = srcu_read_lock(&kvm->srcu);
+ down_read(¤t->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+ up_read(¤t->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -829,7 +838,10 @@
if (kvm->arch.pgd == NULL)
return;
+ spin_lock(&kvm->mmu_lock);
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ spin_unlock(&kvm->mmu_lock);
+
/* Free the HW pgd, one page at a time */
free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
kvm->arch.pgd = NULL;
@@ -1804,6 +1816,7 @@
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
+ down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -1847,8 +1860,10 @@
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
- return -EINVAL;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@@ -1860,7 +1875,7 @@
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
- return ret;
+ goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@@ -1868,6 +1883,8 @@
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+out:
+ up_read(&current->mm->mmap_sem);
return ret;
}
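
unmap_stage2_range() now requires kvm->mmu_lock to be held and may
briefly drop it between PGD entries via cond_resched_lock(), which is
why kvm_free_stage2_pgd() above takes the lock around the call.
Schematically, cond_resched_lock() amounts to the following
(illustrative, not the exact kernel implementation):

    /* Drop a contended spinlock, yield if needed, then retake it. */
    if (spin_needbreak(&kvm->mmu_lock) || need_resched()) {
        spin_unlock(&kvm->mmu_lock);
        cond_resched();
        spin_lock(&kvm->mmu_lock);
    }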
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c
index c8830a2..fe067f6 100644
--- a/arch/arm/mach-bcm/bcm_5301x.c
+++ b/arch/arm/mach-bcm/bcm_5301x.c
@@ -9,14 +9,42 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define FSR_EXTERNAL (1 << 12)
+#define FSR_READ (0 << 10)
+#define FSR_IMPRECISE 0x0406
static const char *const bcm5301x_dt_compat[] __initconst = {
"brcm,bcm4708",
NULL,
};
+static int bcm5301x_abort_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ /*
+ * We want to ignore aborts forwarded from the PCIe bus that are
+ * expected and shouldn't really be passed by the PCIe controller.
+ * The biggest disadvantage is the same FSR code may be reported when
+ * reading non-existing APB register and we shouldn't ignore that.
+ */
+ if (fsr == (FSR_EXTERNAL | FSR_READ | FSR_IMPRECISE))
+ return 0;
+
+ return 1;
+}
+
+static void __init bcm5301x_init_early(void)
+{
+ hook_fault_code(16 + 6, bcm5301x_abort_handler, SIGBUS, BUS_OBJERR,
+ "imprecise external abort");
+}
+
DT_MACHINE_START(BCM5301X, "BCM5301X")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.dt_compat = bcm5301x_dt_compat,
+ .init_early = bcm5301x_init_early,
MACHINE_END
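
Worked out from the masks above, the handler swallows exactly one
fault-status encoding:

    FSR_EXTERNAL | FSR_READ | FSR_IMPRECISE
        = (1 << 12) | (0 << 10) | 0x0406
        = 0x1406

so only an external, imprecise read abort is ignored; any other FSR
value makes bcm5301x_abort_handler() return 1 and the abort is
reported normally.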
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index c9f7e92..aed44dc 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -46,6 +46,7 @@
static void __init da850_init_machine(void)
{
of_platform_default_populate(NULL, da850_auxdata_lookup, NULL);
+ davinci_pm_init();
}
static const char *const da850_boards_compat[] __initconst = {
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 28866e9..f4d7965 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -38,5 +38,18 @@
select CLKSRC_OF
select COMMON_CLK
+config ARCH_SDXPOORWILLS
+ bool "Enable support for SDXPOORWILLS"
+ select CPU_V7
+ select HAVE_ARM_ARCH_TIMER
+ select MSM_CORTEX_A7
+ select COMMON_CLK_MSM
+ select PINCTRL
+ select QCOM_SCM if SMP
+ select MSM_JTAG_MM if CORESIGHT_ETM
+ select PM_DEVFREQ
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
endmenu
endif
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index e7ffa04..d893b27 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_USE_OF) += board-dt.o
obj-$(CONFIG_SMP) += platsmp.o
+obj-$(CONFIG_ARCH_SDXPOORWILLS) += board-poorwills.o
diff --git a/arch/arm/mach-qcom/board-poorwills.c b/arch/arm/mach-qcom/board-poorwills.c
new file mode 100644
index 0000000..31f5d3e
--- /dev/null
+++ b/arch/arm/mach-qcom/board-poorwills.c
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *sdxpoorwills_dt_match[] __initconst = {
+ "qcom,sdxpoorwills",
+ NULL
+};
+
+static void __init sdxpoorwills_init(void)
+{
+ board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(SDXPOORWILLS_DT,
+ "Qualcomm Technologies, Inc. SDX POORWILLS (Flattened Device Tree)")
+ .init_machine = sdxpoorwills_init,
+ .dt_compat = sdxpoorwills_dt_match,
+MACHINE_END
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e46907c..19f444e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1925,7 +1925,11 @@
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
- int ret, prot, len = PAGE_ALIGN(size + offset);
+ int ret, prot, len, start_offset, map_offset;
+
+ map_offset = offset & ~PAGE_MASK;
+ start_offset = offset & PAGE_MASK;
+ len = PAGE_ALIGN(map_offset + size);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_ERROR_CODE)
@@ -1933,11 +1937,12 @@
prot = __dma_direction_to_prot(dir);
- ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+ start_offset, len, prot);
if (ret < 0)
goto fail;
- return dma_addr + offset;
+ return dma_addr + map_offset;
fail:
__free_iova(mapping, dma_addr, len);
return DMA_ERROR_CODE;
@@ -2396,6 +2401,7 @@
set_dma_ops(dev, dma_ops);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
void arch_teardown_dma_ops(struct device *dev)
{
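
The new offset handling in the IOMMU map path splits the byte offset
into whole pages to skip and a residual in-page offset, so the IOVA
allocation no longer over-covers. A worked example with PAGE_SIZE =
0x1000, mapping size = 0x100 at offset = 0x1840 into a compound page:

    map_offset   = 0x1840 & ~PAGE_MASK  = 0x840   /* offset in page */
    start_offset = 0x1840 &  PAGE_MASK  = 0x1000  /* pages to skip  */
    len          = PAGE_ALIGN(0x840 + 0x100) = 0x1000

The old code computed len = PAGE_ALIGN(0x100 + 0x1840) = 0x2000, i.e.
it allocated two pages of IOVA for data that fits in one and mapped
from the start of the compound page rather than from the page that
actually contains the data.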
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ba0695b..b861876 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -83,7 +83,6 @@
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_DMA_COHERENT
- select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
@@ -687,7 +686,7 @@
config ARM64_DMA_IOMMU_ALIGNMENT
int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
range 4 9
- default 8
+ default 9
help
DMA mapping framework by default aligns all buffers to the smallest
PAGE_SIZE order which is greater than or equal to the requested buffer
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index af45041..f2eb12c 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -590,7 +590,7 @@
reg = <0 0xa2000000 0 0x10000>;
sas-addr = [50 01 88 20 16 00 00 00];
hisilicon,sas-syscon = <&pcie_subctl>;
- am-max-trans;
+ hip06-sas-v2-quirk-amt;
ctrl-reset-reg = <0xa18>;
ctrl-reset-sts-reg = <0x5a0c>;
ctrl-clock-ena-reg = <0x318>;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index c32324f..ff2cc3e 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -11,7 +11,8 @@
sdm845-v2-cdp.dtb \
sdm845-qrd.dtb \
sdm845-4k-panel-mtp.dtb \
- sdm845-4k-panel-cdp.dtb
+ sdm845-4k-panel-cdp.dtb \
+ sdm845-4k-panel-qrd.dtb
dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi
new file mode 100644
index 0000000..a83d860
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,ascent_2800mah {
+ /* #Ascent_860_82912_0000_2800mAh_averaged_MasterSlave_Jan11th2017*/
+ qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
+ qcom,fastchg-current-ma = <2800>;
+ qcom,batt-id-kohm = <20>;
+ qcom,battery-beta = <3450>;
+ qcom,battery-type = "ascent_2800mah_averaged_masterslave_jan11th2017";
+ qcom,checksum = <0x0110>;
+ qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+ qcom,fg-profile-data = [
+ 21 21 F5 0D
+ 82 0B 6E 05
+ 0C 1D 5F FA
+ 74 06 97 01
+ 0E 18 F7 22
+ A8 45 B1 52
+ 76 00 00 00
+ 0E 00 00 00
+ 00 00 3D C4
+ 6E CD 2A CB
+ 21 00 08 00
+ 28 D3 2E E5
+ 0E 06 BA F3
+ 59 E3 22 12
+ 08 E5 54 32
+ 22 06 09 20
+ 27 00 14 00
+ 4B 20 F6 04
+ CF 0A 04 06
+ 25 1D B7 FA
+ DD F4 BB 06
+ FE 18 E1 22
+ 73 45 32 53
+ 5F 00 00 00
+ 0E 00 00 00
+ 00 00 D5 D5
+ 9C CC 8E D3
+ 1A 00 00 00
+ 6E EA 2E E5
+ 6E 06 A9 00
+ 6D F5 73 0B
+ 2A 02 61 1B
+ B1 33 CC FF
+ 07 10 00 00
+ 14 0B 99 45
+ 1A 00 40 00
+ 7D 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
new file mode 100644
index 0000000..c7cecbc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,ascent_3450mah {
+ /* Ascent_with_connector_3450mAh_averaged_MasterSlave_Jan6th2017 */
+ qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
+ qcom,fastchg-current-ma = <3450>;
+ qcom,batt-id-kohm = <60>;
+ qcom,battery-beta = <3435>;
+ qcom,battery-type = "ascent_3450mah_averaged_masterslave_jan6th2017";
+ qcom,checksum = <0x96AC>;
+ qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+ qcom,fg-profile-data = [
+ 9C 1F 85 05
+ 82 0A 73 FC
+ 2B 1D 72 EA
+ EE 03 66 0C
+ C8 17 F4 22
+ E0 45 1F 52
+ 5C 00 00 00
+ 10 00 00 00
+ 00 00 4A C4
+ C7 BC 48 C2
+ 0F 00 08 00
+ E1 DA 5D ED
+ 8D FD B2 F3
+ 96 E2 A7 12
+ 7E F4 0E 3B
+ 24 06 09 20
+ 27 00 14 00
+ 83 1F EE 05
+ 1F 0A 45 FD
+ 6B 1D 53 E5
+ EC 0B 31 14
+ 44 18 49 23
+ 18 45 A6 53
+ 55 00 00 00
+ 0E 00 00 00
+ 00 00 61 CC
+ B7 C3 0F BC
+ 0F 00 00 00
+ 92 00 5D ED
+ E3 06 E0 00
+ 75 FD 9C 03
+ 47 DB B3 22
+ CB 33 CC FF
+ 07 10 00 00
+ 99 0D 99 45
+ 0F 00 40 00
+ AB 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-demo-6000mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-demo-6000mah.dtsi
new file mode 100644
index 0000000..1e8cd16
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-demo-6000mah.dtsi
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,demo_6000mah {
+ qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
+ qcom,fastchg-current-ma = <6000>;
+ qcom,batt-id-kohm = <75>;
+ qcom,battery-beta = <3435>;
+ qcom,battery-type = "Demo_battery_6000mah";
+ qcom,fg-profile-data = [
+ 2C 1F 3F FC
+ E9 03 A1 FD
+ 58 1D FD F5
+ 27 12 2C 14
+ 3F 18 FF 22
+ 9B 45 A3 52
+ 55 00 00 00
+ 0E 00 00 00
+ 00 00 1C AC
+ F7 CD 71 B5
+ 1A 00 0C 00
+ 3C EB 54 E4
+ EC 05 7F FA
+ 76 05 F5 02
+ CA F3 82 3A
+ 2A 09 40 40
+ 07 00 05 00
+ 58 1F 42 06
+ 85 03 35 F4
+ 4D 1D 37 F2
+ 23 0A 79 15
+ B7 18 32 23
+ 26 45 72 53
+ 55 00 00 00
+ 0D 00 00 00
+ 00 00 13 CC
+ 03 00 98 BD
+ 16 00 00 00
+ 3C EB 54 E4
+ 9F FC A3 F3
+ 0F FC DF FA
+ FF E5 A9 23
+ CB 33 08 33
+ 07 10 00 00
+ 81 0D 99 45
+ 16 00 19 00
+ 75 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
new file mode 100644
index 0000000..3888047
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,itech_3000mah {
+ /* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jan10th2017*/
+ qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
+ qcom,fastchg-current-ma = <2000>;
+ qcom,batt-id-kohm = <100>;
+ qcom,battery-beta = <3435>;
+ qcom,battery-type = "itech_b00826lf_3000mah_ver1660_jan10th2017";
+ qcom,checksum = <0xFB8F>;
+ qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+ qcom,fg-profile-data = [
+ A4 1F 6E 05
+ 9C 0A 2B FC
+ 32 1D 23 E5
+ 60 0B 1B 15
+ AD 17 8C 22
+ EA 3C 89 4A
+ 5B 00 00 00
+ 12 00 00 00
+ 00 00 62 C2
+ 0C CD D8 C2
+ 19 00 08 00
+ 85 EA C7 EC
+ E2 05 2F 01
+ 9B F5 12 12
+ 5E 05 88 3B
+ 22 06 09 20
+ 27 00 14 00
+ 7D 1F DD 05
+ 3F 0A E5 FC
+ 72 1D E3 F5
+ 6F 12 C0 1D
+ 88 18 FB 22
+ 8D 45 C6 52
+ 54 00 00 00
+ 0F 00 00 00
+ 00 00 BD CD
+ 55 C2 5D C5
+ 14 00 00 00
+ 7E 00 C7 EC
+ 60 06 BB 00
+ 59 06 61 03
+ D9 FC 75 1B
+ B3 33 CC FF
+ 07 10 00 00
+ 3E 0B 99 45
+ 14 00 40 00
+ AE 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
new file mode 100644
index 0000000..11600ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,qrd_msm8998_skuk_3000mah {
+ /* QRD8997_ST1031GA_3000mAh_averaged_MasterSlave_Jan10th2017 */
+ qcom,max-voltage-uv = <4400000>;
+ qcom,fg-cc-cv-threshold-mv = <4390>;
+ qcom,fastchg-current-ma = <3000>;
+ qcom,batt-id-kohm = <68>;
+ qcom,battery-beta = <3380>;
+ qcom,battery-type = "qrd8997_st1031ga_3000mah";
+ qcom,checksum = <0xD299>;
+ qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+ qcom,fg-profile-data = [
+ 70 1F B1 05
+ 6F 0A A1 FC
+ 8C 1D D7 FD
+ C4 12 AC 1D
+ 7E 18 01 23
+ 8C 45 B6 52
+ 55 00 00 00
+ 0F 00 00 00
+ 00 00 92 C5
+ 95 CD A0 CA
+ 1F 00 08 00
+ 9F E3 C3 EC
+ F7 FC 25 F3
+ 02 01 FF 12
+ 29 DC 1D 3A
+ 1C 06 09 20
+ 27 00 14 00
+ AC 1F B4 05
+ 57 0A EF FC
+ 6A 1D E9 E2
+ 11 0B BB 14
+ 40 19 DC 22
+ 79 45 03 53
+ 53 00 00 00
+ 0E 00 00 00
+ 00 00 05 CC
+ 3A BB 24 CA
+ 1C 00 00 00
+ 56 F2 C3 EC
+ A6 06 A2 F2
+ 9A 06 CC 01
+ 8C EA CF 1A
+ BA 33 CC FF
+ 07 10 00 00
+ 3A 0C 66 46
+ 1C 00 40 00
+ 98 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index a94a716..6a3e8b4 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -11,6 +11,7 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
&soc {
kgsl_smmu: arm,smmu-kgsl@5040000 {
@@ -32,6 +33,14 @@
<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+ clock-names = "gcc_ddrss_gpu_axi_clk",
+ "gcc_gpu_memnoc_gfx_clk",
+ "gpu_cc_ahb_clk",
+ "gpu_cc_cx_gmu_clk";
+ clocks = <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>,
+ <&clock_gpucc GPU_CC_CX_GMU_CLK>;
attach-impl-defs =
<0x6000 0x2378>,
<0x6060 0x1055>,
@@ -52,7 +61,7 @@
reg = <0x15000000 0x80000>,
<0x150c2000 0x20>;
reg-names = "base", "tcu-base";
- #iommu-cells = <1>;
+ #iommu-cells = <2>;
qcom,skip-init;
#global-interrupts = <1>;
#size-cells = <1>;
@@ -123,9 +132,19 @@
<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,msm-bus,name = "apps_smmu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 0>,
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 1000>;
anoc_1_tbu: anoc_1_tbu@0x150c5000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150c5000 0x1000>,
<0x150c2200 0x8>;
@@ -133,10 +152,20 @@
qcom,stream-id-range = <0x0 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>;
+ qcom,msm-bus,name = "apps_smmu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 0>,
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 1000>;
};
anoc_2_tbu: anoc_2_tbu@0x150c9000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150c9000 0x1000>,
<0x150c2208 0x8>;
@@ -144,10 +173,20 @@
qcom,stream-id-range = <0x400 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>;
+ qcom,msm-bus,name = "apps_smmu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 0>,
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 1000>;
};
mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x150cd000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150cd000 0x1000>,
<0x150c2210 0x8>;
@@ -155,10 +194,20 @@
qcom,stream-id-range = <0x800 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
+ qcom,msm-bus,name = "mnoc_hf_0_tbu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_MDP_PORT0>,
+ <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+ <0 0>,
+ <MSM_BUS_MASTER_MDP_PORT0>,
+ <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+ <0 1000>;
};
mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x150d1000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150d1000 0x1000>,
<0x150c2218 0x8>;
@@ -166,10 +215,20 @@
qcom,stream-id-range = <0xc00 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
+ qcom,msm-bus,name = "mnoc_hf_1_tbu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_MDP_PORT0>,
+ <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+ <0 0>,
+ <MSM_BUS_MASTER_MDP_PORT0>,
+ <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+ <0 1000>;
};
mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x150d5000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150d5000 0x1000>,
<0x150c2220 0x8>;
@@ -177,20 +236,40 @@
qcom,stream-id-range = <0x1000 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+ qcom,msm-bus,name = "mnoc_sf_0_tbu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_SF>,
+ <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>,
+ <0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF>,
+ <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>,
+ <0 1000>;
};
compute_dsp_tbu: compute_dsp_tbu@0x150d9000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150d9000 0x1000>,
<0x150c2228 0x8>;
reg-names = "base", "status-reg";
qcom,stream-id-range = <0x1400 0x400>;
/* No GDSC */
+ qcom,msm-bus,name = "apps_smmu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 0>,
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 1000>;
};
adsp_tbu: adsp_tbu@0x150dd000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150dd000 0x1000>,
<0x150c2230 0x8>;
@@ -198,10 +277,20 @@
qcom,stream-id-range = <0x1800 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>;
+ qcom,msm-bus,name = "apps_smmu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 0>,
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 1000>;
};
anoc_1_pcie_tbu: anoc_1_pcie_tbu@0x150e1000 {
- status = "disabled";
compatible = "qcom,qsmmuv500-tbu";
reg = <0x150e1000 0x1000>,
<0x150c2238 0x8>;
@@ -209,26 +298,39 @@
qcom,stream-id-range = <0x1c00 0x400>;
qcom,regulator-names = "vdd";
vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>;
+ clock-names = "gcc_aggre_noc_pcie_tbu_clk";
+ clocks = <&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+ qcom,msm-bus,name = "apps_smmu";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 0>,
+ <MSM_BUS_MASTER_GNOC_SNOC>,
+ <MSM_BUS_SLAVE_IMEM_CFG>,
+ <0 1000>;
};
};
- iommu_test_device {
+ kgsl_iommu_test_device {
compatible = "iommu-debug-test";
/*
- * 42 shouldn't be used by anyone on the mmss_smmu. We just
- * need _something_ here to get this node recognized by the
- * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+ * 0x7 isn't a valid sid, but should pass the sid sanity check.
+ * We just need _something_ here to get this node recognized by
+ * the SMMU driver. Our test uses ATOS, which doesn't use SIDs
* anyways, so using a dummy value is ok.
*/
- iommus = <&kgsl_smmu 0x3>;
+ iommus = <&kgsl_smmu 0x7>;
};
- iommu_test_device2 {
+ apps_iommu_test_device {
compatible = "iommu-debug-test";
/*
- * This SID belongs to PCIE. We can't use a fake SID for
+ * This SID belongs to QUP1-GSI. We can't use a fake SID for
* the apps_smmu device.
*/
- iommus = <&apps_smmu 0x1c03>;
+ iommus = <&apps_smmu 0x16 0>;
};
};
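
With #iommu-cells bumped to 2, every "iommus" entry under apps_smmu now
carries two argument cells -- the stream ID plus a second cell (used as
a mask/flags value in this tree), as in the apps_iommu_test_device node
above. A sketch, under those assumptions, of how a consumer walks such
a property:

    struct of_phandle_args args;
    int err;

    err = of_parse_phandle_with_args(dev->of_node, "iommus",
                                     "#iommu-cells", 0, &args);
    if (!err) {
        u32 sid  = args.args[0];   /* e.g. 0x16 */
        u32 mask = args.args[1];   /* e.g. 0x0  */
        /* match sid against qcom,stream-id-range ... */
        of_node_put(args.np);
    }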
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 4036ce5..655f447 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -362,7 +362,7 @@
compatible = "qcom,msm-audio-ion";
qcom,smmu-version = <2>;
qcom,smmu-enabled;
- iommus = <&apps_smmu 0x1821>;
+ iommus = <&apps_smmu 0x1821 0x0>;
};
qcom,msm-adsp-loader {
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 5290f46..b9a6c79 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/spmi/spmi.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/msm/power-on.h>
&spmi_bus {
qcom,pm8998@0 {
@@ -42,10 +43,6 @@
qcom,pon-type = <0>;
qcom,pull-up = <1>;
linux,code = <116>;
- qcom,support-reset = <1>;
- qcom,s1-timer = <10256>;
- qcom,s2-timer = <2000>;
- qcom,s2-type = <1>;
};
qcom,pon_2 {
@@ -60,16 +57,19 @@
qcom,pull-up = <1>;
qcom,s1-timer = <6720>;
qcom,s2-timer = <2000>;
- qcom,s2-type = <7>;
+ qcom,s2-type = <PON_POWER_OFF_DVDD_HARD_RESET>;
qcom,use-bark;
};
};
- qcom,temp-alarm@2400 {
+ pm8998_tz: qcom,temp-alarm@2400 {
compatible = "qcom,qpnp-temp-alarm";
reg = <0x2400 0x100>;
interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
label = "pm8998_tz";
+ qcom,channel-num = <6>;
+ qcom,temp_alarm-vadc = <&pm8998_vadc>;
+ #thermal-sensor-cells = <0>;
};
pm8998_gpios: pinctrl@c000 {
@@ -139,6 +139,7 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1875>;
+ #thermal-sensor-cells = <1>;
chan@6 {
label = "die_temp";
@@ -199,3 +200,30 @@
#size-cells = <0>;
};
};
+
+&thermal_zones {
+ pm8998_temp_alarm: pm8998_tz {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
+ thermal-sensors = <&pm8998_tz>;
+
+ trips {
+ pm8998_trip0: pm8998-trip0 {
+ temperature = <105000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ pm8998_trip1: pm8998-trip1 {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ pm8998_trip2: pm8998-trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 1f27b21..b53f7ac 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -31,11 +31,12 @@
reg = <0x800 0x100>;
};
- qcom,temp-alarm@2400 {
+ pmi8998_tz: qcom,temp-alarm@2400 {
compatible = "qcom,qpnp-temp-alarm";
reg = <0x2400 0x100>;
interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
label = "pmi8998_tz";
+ #thermal-sensor-cells = <0>;
};
pmi8998_gpios: pinctrl@c000 {
@@ -63,6 +64,194 @@
qcom,gpios-disallowed = <4 7 13>;
};
+ qcom,qpnp-qnovo@1500 {
+ compatible = "qcom,qpnp-qnovo";
+ reg = <0x1500 0x100>;
+ interrupts = <0x2 0x15 0x0 IRQ_TYPE_NONE>;
+ interrupt-names = "ptrain-done";
+ qcom,pmic-revid = <&pmi8998_revid>;
+ };
+
+ pmi8998_charger: qcom,qpnp-smb2 {
+ compatible = "qcom,qpnp-smb2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,pmic-revid = <&pmi8998_revid>;
+
+ io-channels = <&pmi8998_rradc 8>,
+ <&pmi8998_rradc 10>,
+ <&pmi8998_rradc 3>,
+ <&pmi8998_rradc 4>;
+ io-channel-names = "charger_temp",
+ "charger_temp_max",
+ "usbin_i",
+ "usbin_v";
+
+ qcom,boost-threshold-ua = <100000>;
+ qcom,wipower-max-uw = <5000000>;
+
+ qcom,thermal-mitigation
+ = <3000000 1500000 1000000 500000>;
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts =
+ <0x2 0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "chg-error",
+ "chg-state-change",
+ "step-chg-state-change",
+ "step-chg-soc-update-fail",
+ "step-chg-soc-update-request";
+ };
+
+ qcom,otg@1100 {
+ reg = <0x1100 0x100>;
+ interrupts = <0x2 0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+ interrupt-names = "otg-fail",
+ "otg-overcurrent",
+ "otg-oc-dis-sw-sts",
+ "testmode-change-detect";
+ };
+
+ qcom,bat-if@1200 {
+ reg = <0x1200 0x100>;
+ interrupts =
+ <0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+ interrupt-names = "bat-temp",
+ "bat-ocp",
+ "bat-ov",
+ "bat-low",
+ "bat-therm-or-id-missing",
+ "bat-terminal-missing";
+ };
+
+ qcom,usb-chgpth@1300 {
+ reg = <0x1300 0x100>;
+ interrupts =
+ <0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "usbin-collapse",
+ "usbin-lt-3p6v",
+ "usbin-uv",
+ "usbin-ov",
+ "usbin-plugin",
+ "usbin-src-change",
+ "usbin-icl-change",
+ "type-c-change";
+ };
+
+ qcom,dc-chgpth@1400 {
+ reg = <0x1400 0x100>;
+ interrupts =
+ <0x2 0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x14 0x6 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "dcin-collapse",
+ "dcin-lt-3p6v",
+ "dcin-uv",
+ "dcin-ov",
+ "dcin-plugin",
+ "div2-en-dg",
+ "dcin-icl-change";
+ };
+
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts =
+ <0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x16 0x6 IRQ_TYPE_EDGE_FALLING>,
+ <0x2 0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+ interrupt-names = "wdog-snarl",
+ "wdog-bark",
+ "aicl-fail",
+ "aicl-done",
+ "high-duty-cycle",
+ "input-current-limiting",
+ "temperature-change",
+ "switcher-power-ok";
+ };
+ };
+
+ pmi8998_pdphy: qcom,usb-pdphy@1700 {
+ compatible = "qcom,qpnp-pdphy";
+ reg = <0x1700 0x100>;
+ vdd-pdphy-supply = <&pm8998_l24>;
+ vbus-supply = <&smb2_vbus>;
+ vconn-supply = <&smb2_vconn>;
+ interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-names = "sig-tx",
+ "sig-rx",
+ "msg-tx",
+ "msg-rx",
+ "msg-tx-failed",
+ "msg-tx-discarded",
+ "msg-rx-discarded";
+
+ qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+ <9000 3000>, /* 9V @ 3A */
+ <12000 2250>; /* 12V @ 2.25A */
+ };
+
+ bcl_sensor: bcl@4200 {
+ compatible = "qcom,msm-bcl-lmh";
+ reg = <0x4200 0xff>,
+ <0x4300 0xff>;
+ reg-names = "fg_user_adc",
+ "fg_lmh";
+ interrupts = <0x2 0x42 0x0 IRQ_TYPE_NONE>,
+ <0x2 0x42 0x1 IRQ_TYPE_NONE>,
+ <0x2 0x42 0x2 IRQ_TYPE_NONE>,
+ <0x2 0x42 0x3 IRQ_TYPE_NONE>,
+ <0x2 0x42 0x4 IRQ_TYPE_NONE>;
+ interrupt-names = "bcl-high-ibat",
+ "bcl-very-high-ibat",
+ "bcl-low-vbat",
+ "bcl-very-low-vbat",
+ "bcl-crit-low-vbat";
+ #thermal-sensor-cells = <1>;
+ };
+
pmi8998_rradc: rradc@4500 {
compatible = "qcom,rradc";
reg = <0x4500 0x100>;
@@ -71,6 +260,70 @@
#io-channel-cells = <1>;
qcom,pmic-revid = <&pmi8998_revid>;
};
+
+ pmi8998_fg: qpnp,fg {
+ compatible = "qcom,fg-gen3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,pmic-revid = <&pmi8998_revid>;
+ io-channels = <&pmi8998_rradc 0>;
+ io-channel-names = "rradc_batt_id";
+ qcom,rradc-base = <0x4500>;
+ qcom,fg-esr-timer-awake = <96>;
+ qcom,fg-esr-timer-asleep = <256>;
+ qcom,cycle-counter-en;
+ status = "okay";
+
+ qcom,fg-batt-soc@4000 {
+ status = "okay";
+ reg = <0x4000 0x100>;
+ interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x2
+ IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x40 0x3
+ IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x5
+ IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "soc-update",
+ "soc-ready",
+ "bsoc-delta",
+ "msoc-delta",
+ "msoc-low",
+ "msoc-empty",
+ "msoc-high",
+ "msoc-full";
+ };
+
+ qcom,fg-batt-info@4100 {
+ status = "okay";
+ reg = <0x4100 0x100>;
+ interrupts = <0x2 0x41 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x2 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x41 0x6 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "vbatt-pred-delta",
+ "vbatt-low",
+ "esr-delta",
+ "batt-missing",
+ "batt-temp-delta";
+ };
+
+ qcom,fg-memif@4400 {
+ status = "okay";
+ reg = <0x4400 0x100>;
+ interrupts = <0x2 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "ima-rdy",
+ "mem-xcp",
+ "dma-grant";
+ };
+ };
};
qcom,pmi8998@3 {
@@ -418,3 +671,154 @@
};
};
};
+
+&thermal_zones {
+ ibat-high {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
+ thermal-sensors = <&bcl_sensor 0>;
+
+ trips {
+ ibat_high: low-ibat {
+ temperature = <4200>;
+ hysteresis = <200>;
+ type = "passive";
+ };
+ };
+ };
+ ibat-vhigh {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
+ thermal-sensors = <&bcl_sensor 1>;
+
+ trips {
+ ibat_vhigh: ibat_vhigh {
+ temperature = <4300>;
+ hysteresis = <100>;
+ type = "passive";
+ };
+ };
+ };
+ vbat {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_cap";
+ thermal-sensors = <&bcl_sensor 2>;
+ tracks-low;
+
+ trips {
+ low_vbat: low-vbat {
+ temperature = <3300>;
+ hysteresis = <100>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ vbat_cpu4 {
+ trip = <&low_vbat>;
+ cooling-device = <&CPU4 22 22>;
+ };
+ vbat_cpu5 {
+ trip = <&low_vbat>;
+ cooling-device = <&CPU5 22 22>;
+ };
+ vbat_map6 {
+ trip = <&low_vbat>;
+ cooling-device = <&CPU6 22 22>;
+ };
+ vbat_map7 {
+ trip = <&low_vbat>;
+ cooling-device = <&CPU7 22 22>;
+ };
+ };
+ };
+ vbat_low {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_cap";
+ thermal-sensors = <&bcl_sensor 3>;
+ tracks-low;
+
+ trips {
+ low-vbat {
+ temperature = <3100>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ };
+ };
+ vbat_too_low {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_cap";
+ thermal-sensors = <&bcl_sensor 4>;
+ tracks-low;
+
+ trips {
+ low-vbat {
+ temperature = <2900>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ };
+ };
+ soc {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_cap";
+ thermal-sensors = <&bcl_sensor 5>;
+ tracks-low;
+
+ trips {
+ low_soc: low-soc {
+ temperature = <10>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ soc_cpu4 {
+ trip = <&low_soc>;
+ cooling-device = <&CPU4 22 22>;
+ };
+ soc_cpu5 {
+ trip = <&low_soc>;
+ cooling-device = <&CPU5 22 22>;
+ };
+ soc_map6 {
+ trip = <&low_soc>;
+ cooling-device = <&CPU6 22 22>;
+ };
+ soc_map7 {
+ trip = <&low_soc>;
+ cooling-device = <&CPU7 22 22>;
+ };
+ };
+ };
+
+ pmi8998_tz {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pmi8998_tz>;
+
+ trips {
+ pmi8998_trip0: pmi8998-trip0 {
+ temperature = <105000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ pmi8998_trip1: pmi8998-trip1 {
+ temperature = <125000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ pmi8998_trip2: pmi8998-trip2 {
+ temperature = <145000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index d5646bf..6569219 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -21,3 +21,26 @@
compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
qcom,board-id = <1 1>;
};
+
+&dsi_dual_nt35597_truly_video_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index d641276..2e893de 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -21,3 +21,26 @@
compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
qcom,board-id = <8 1>;
};
+
+&dsi_dual_nt35597_truly_video_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
new file mode 100644
index 0000000..6171c7b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel QRD";
+ compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+ qcom,board-id = <11 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index b66ca94..69dfe46 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -17,8 +17,8 @@
#include <dt-bindings/clock/qcom,audio-ext-clk.h>
&msm_audio_ion {
- iommus = <&apps_smmu 0x1821>;
- qcom,smmu-sid-mask = <0xf>;
+ iommus = <&apps_smmu 0x1821 0x0>;
+ qcom,smmu-sid-mask = /bits/ 64 <0xf>;
};
&soc {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index a51f411..1702e80 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -191,6 +191,7 @@
cell-id = <MSM_BUS_BCM_QUP0>;
label = "QUP0";
qcom,bcm-name = "QUP0";
+ qcom,rscs = <&rsc_apps>;
qcom,bcm-dev;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
new file mode 100644
index 0000000..922e990
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ qcom,switch-source = <&pmi8998_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8998_flash2>;
+ qcom,torch-source = <&pmi8998_torch2>;
+ qcom,switch-source = <&pmi8998_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ vin-supply = <&pmi8998_bob>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm8998_gpios 12 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm8998_s3>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm8998_gpios 9 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm8998_s3>;
+ };
+};
+
+&cci {
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <0>;
+ };
+
+ actuator_front: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>,
+ <&tlmm 27 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-vaf = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA0",
+ "CAM_VAF";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ eeprom_rear_aux: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+ qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&tlmm 8 0>,
+ <&tlmm 27 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-vaf = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VANA2",
+ "CAM_VAF";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <180>;
+ qcom,led-flash-src = <&led_flash_rear>;
+ qcom,actuator-src = <&actuator_rear>;
+ qcom,ois-src = <&ois_rear>;
+ qcom,eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA";
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <180>;
+ qcom,eeprom-src = <&eeprom_rear_aux>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+ qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <0>;
+ qcom,eeprom-src = <&eeprom_front>;
+ qcom,actuator-src = <&actuator_front>;
+ qcom,led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VANA1";
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
new file mode 100644
index 0000000..922e990
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ qcom,switch-source = <&pmi8998_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmi8998_flash2>;
+ qcom,torch-source = <&pmi8998_torch2>;
+ qcom,switch-source = <&pmi8998_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ vin-supply = <&pmi8998_bob>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm8998_gpios 12 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm8998_s3>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm8998_gpios 9 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm8998_s3>;
+ };
+};
+
+&cci {
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <0>;
+ };
+
+ actuator_front: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <2800000>;
+ qcom,cam-vreg-op-mode = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>,
+ <&tlmm 27 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-vaf = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA0",
+ "CAM_VAF";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ eeprom_rear_aux: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+ qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&tlmm 8 0>,
+ <&tlmm 27 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-vaf = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VANA2",
+ "CAM_VAF";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <180>;
+ qcom,led-flash-src = <&led_flash_rear>;
+ qcom,actuator-src = <&actuator_rear>;
+ qcom,ois-src = <&ois_rear>;
+ qcom,eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA";
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <180>;
+ qcom,eeprom-src = <&eeprom_rear_aux>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
+ qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
+ qcom,cam-vreg-op-mode = <105000 0 80000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,sensor-position-roll = <90>;
+ qcom,sensor-position-pitch = <0>;
+ qcom,sensor-position-yaw = <0>;
+ qcom,eeprom-src = <&eeprom_front>;
+ qcom,actuator-src = <&actuator_front>;
+ qcom,led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
+ qcom,cam-vreg-op-mode = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VANA1";
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ qcom,clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index c197d65..3b9c26f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -16,4 +16,317 @@
compatible = "qcom,cam-req-mgr";
status = "ok";
};
+
+ qcom,csiphy@ac65000 {
+ cell-index = <0>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0x0ac65000 0x1000>;
+ reg-names = "csiphy";
+ interrupts = <0 477 0>;
+ interrupt-names = "csiphy";
+ gdscr-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "gdscr";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY0_CLK>,
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cphy_rx_clk_src",
+ "csiphy0_clk",
+ "csi0phytimer_clk_src",
+ "csi0phytimer_clk",
+ "ife_0_csid_clk",
+ "ife_0_csid_clk_src";
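+		/*
+		 * A rate of 0 below is assumed to leave that clock at its
+		 * default; only the slow AHB, CPHY RX, phytimer source and
+		 * CSID source clocks are pinned here.
+		 */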
+ qcom,clock-rates =
+ <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ status = "ok";
+ };
+
+	qcom,csiphy@ac66000 {
+ cell-index = <1>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0xac66000 0x1000>;
+ reg-names = "csiphy";
+ interrupts = <0 478 0>;
+ interrupt-names = "csiphy";
+ gdscr-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "gdscr";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY1_CLK>,
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cphy_rx_clk_src",
+ "csiphy1_clk",
+ "csi1phytimer_clk_src",
+ "csi1phytimer_clk",
+ "ife_1_csid_clk",
+ "ife_1_csid_clk_src";
+ qcom,clock-rates =
+ <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ status = "ok";
+ };
+
+ qcom,csiphy@ac67000 {
+ cell-index = <2>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0xac67000 0x1000>;
+ reg-names = "csiphy";
+ interrupts = <0 479 0>;
+ interrupt-names = "csiphy";
+ gdscr-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "gdscr";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY2_CLK>,
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cphy_rx_clk_src",
+ "csiphy2_clk",
+ "csi2phytimer_clk_src",
+ "csi2phytimer_clk",
+ "ife_lite_csid_clk",
+ "ife_lite_csid_clk_src";
+ qcom,clock-rates =
+ <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ status = "ok";
+ };
+
+ cci: qcom,cci@ac4a000 {
+ cell-index = <0>;
+ compatible = "qcom,cci";
+ reg = <0xac4a000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "cci";
+ interrupts = <0 460 0>;
+ interrupt-names = "cci";
+ status = "ok";
+ gdscr-supply = <&titan_top_gdsc>;
+ qcom,cam-vreg-name = "gdscr";
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK_SRC>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cci_clk",
+ "cci_clk_src";
+ qcom,clock-rates = <0 0 80000000 0 0 37500000>;
+ pinctrl-names = "cci_default", "cci_suspend";
+ pinctrl-0 = <&cci0_active &cci1_active>;
+ pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+ gpios = <&tlmm 17 0>,
+ <&tlmm 18 0>,
+ <&tlmm 19 0>,
+ <&tlmm 20 0>;
+ qcom,gpio-tbl-num = <0 1 2 3>;
+ qcom,gpio-tbl-flags = <1 1 1 1>;
+ qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
+
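+		/*
+		 * The timing values below are assumed to be in cycles of the
+		 * 37.5 MHz CCI source clock; e.g. in standard mode,
+		 * hw-thigh + hw-tlow = 201 + 174 = 375 cycles = 10 us,
+		 * one 100 kHz SCL period.
+		 */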
+ i2c_freq_100Khz: qcom,i2c_standard_mode {
+ qcom,hw-thigh = <201>;
+ qcom,hw-tlow = <174>;
+ qcom,hw-tsu-sto = <204>;
+ qcom,hw-tsu-sta = <231>;
+ qcom,hw-thd-dat = <22>;
+ qcom,hw-thd-sta = <162>;
+ qcom,hw-tbuf = <227>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ i2c_freq_400Khz: qcom,i2c_fast_mode {
+ qcom,hw-thigh = <38>;
+ qcom,hw-tlow = <56>;
+ qcom,hw-tsu-sto = <40>;
+ qcom,hw-tsu-sta = <40>;
+ qcom,hw-thd-dat = <22>;
+ qcom,hw-thd-sta = <35>;
+ qcom,hw-tbuf = <62>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ i2c_freq_custom: qcom,i2c_custom_mode {
+ qcom,hw-thigh = <38>;
+ qcom,hw-tlow = <56>;
+ qcom,hw-tsu-sto = <40>;
+ qcom,hw-tsu-sta = <40>;
+ qcom,hw-thd-dat = <22>;
+ qcom,hw-thd-sta = <35>;
+ qcom,hw-tbuf = <62>;
+ qcom,hw-scl-stretch-en = <1>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+ qcom,hw-thigh = <16>;
+ qcom,hw-tlow = <22>;
+ qcom,hw-tsu-sto = <17>;
+ qcom,hw-tsu-sta = <18>;
+ qcom,hw-thd-dat = <16>;
+ qcom,hw-thd-sta = <15>;
+ qcom,hw-tbuf = <24>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <3>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+ };
+ };
+
+ qcom,cam_smmu {
+ compatible = "qcom,msm-cam-smmu";
+ status = "ok";
+
+ msm_cam_smmu_ife {
+ compatible = "qcom,msm-cam-smmu-cb";
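+			/*
+			 * Each iommus entry below is an SMMU stream ID that
+			 * is routed to this context bank (assumed).
+			 */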
+ iommus = <&apps_smmu 0x808>,
+ <&apps_smmu 0x810>,
+ <&apps_smmu 0x818>,
+ <&apps_smmu 0xc08>,
+ <&apps_smmu 0xc10>,
+ <&apps_smmu 0xc18>;
+ label = "ife";
+ ife_iova_mem_map: iova-mem-map {
+ /* IO region is approximately 3.4 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+ msm_cam_icp_fw {
+ compatible = "qcom,msm-cam-smmu-fw-dev";
+ label="icp";
+ memory-region = <&pil_camera_mem>;
+ };
+
+ msm_cam_smmu_icp {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1078>,
+ <&apps_smmu 0x1020>,
+ <&apps_smmu 0x1028>,
+ <&apps_smmu 0x1040>,
+ <&apps_smmu 0x1048>,
+ <&apps_smmu 0x1030>,
+ <&apps_smmu 0x1050>;
+ label = "icp";
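+			/*
+			 * The 32-bit IOVA space is carved into the firmware,
+			 * shared and IO regions below; the region IDs are
+			 * assumed to be driver-defined indices.
+			 */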
+ icp_iova_mem_map: iova-mem-map {
+ iova-mem-region-firmware {
+ /* Firmware region is 5MB */
+ iova-region-name = "firmware";
+ iova-region-start = <0x0>;
+ iova-region-len = <0x500000>;
+ iova-region-id = <0x0>;
+ status = "ok";
+ };
+
+ iova-mem-region-shared {
+ /* Shared region is 100MB long */
+ iova-region-name = "shared";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0x6400000>;
+ iova-region-id = <0x1>;
+ status = "ok";
+ };
+
+ iova-mem-region-io {
+ /* IO region is approximately 3.3 GB */
+ iova-region-name = "io";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0xd2800000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+ msm_cam_smmu_cpas_cdm {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1000>;
+ label = "cpas-cdm0";
+ cpas_cdm_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ /* IO region is approximately 3.4 GB */
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+ msm_cam_smmu_secure {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1001>;
+ label = "cam-secure";
+ cam_secure_iova_mem_map: iova-mem-map {
+ /* Secure IO region is approximately 3.4 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 06f620b..af28003 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -11,6 +11,30 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include "sdm845-camera-sensor-cdp.dtsi"
+
+/ {
+ bluetooth: bt_wcn3990 {
+ compatible = "qca,wcn3990";
+ qca,bt-vdd-io-supply = <&pm8998_s3>;
+ qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+ qca,bt-vdd-core-supply = <&pm8998_l7>;
+ qca,bt-vdd-pa-supply = <&pm8998_l17>;
+ qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+ qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+ qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+ qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+ qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+ qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+
+ qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+ };
+};
&soc {
sound-tavil {
@@ -69,6 +93,10 @@
};
};
+&mdss_mdp {
+ #cooling-cells = <2>;
+};
+
&ufsphy_mem {
compatible = "qcom,ufs-phy-qmp-v3";
@@ -118,3 +146,234 @@
status = "ok";
};
+
+&sdhc_2 {
+ vdd-supply = <&pm8998_l21>;
+ qcom,vdd-voltage-level = <2950000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm8998_l13>;
+ qcom,vdd-io-voltage-level = <1808000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
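+	/*
+	 * 400 kHz covers card identification; the higher rates are assumed
+	 * to map onto the bus speed modes listed below, up to 200 MHz for
+	 * SDR104.
+	 */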
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ status = "ok";
+};
+
+&pmi8998_switch1 {
+ pinctrl-names = "led_enable", "led_disable";
+ pinctrl-0 = <&flash_led3_front_en>;
+ pinctrl-1 = <&flash_led3_front_dis>;
+};
+
+&pmi8998_charger {
+ qcom,batteryless-platform;
+};
+
+/ {
+ extcon_usb1: extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&pmi8998_gpios 9 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&pmi8998_gpios 8 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_default
+ &usb2_id_det_default>;
+ };
+
+ usb1_vbus_vreg: usb1_vbus_vreg {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus_vreg";
+ gpio = <&pmi8998_gpios 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ /* Typical EN-to-VBUS turn on time for NX5P1100 */
+ regulator-enable-ramp-delay = <630>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_boost_default>;
+ };
+
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
+	};
+};
+
+&qupv3_se9_2uart {
+ status = "ok";
+};
+
+&labibb {
+ status = "ok";
+ qcom,qpnp-labibb-mode = "lcd";
+};
+
+&dsi_dual_nt35597_truly_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+ qcom,dsi-display-active;
+};
+
+&pmi8998_wled {
+ status = "okay";
+ qcom,led-strings-list = [01 02];
+};
+
+&qupv3_se8_spi {
+ status = "ok";
+};
+
+&qupv3_se3_i2c {
+ status = "ok";
+};
+
+&qupv3_se10_i2c {
+ status = "ok";
+};
+
+&qupv3_se6_4uart {
+ status = "ok";
+};
+
+&usb1 {
+ status = "okay";
+ extcon = <&extcon_usb1>;
+ vbus_dwc3-supply = <&usb1_vbus_vreg>;
+};
+
+&qusb_phy1 {
+ status = "okay";
+};
+
+&usb_qmp_phy {
+ status = "okay";
+};
+
+&pm8998_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@85 {
+ label = "vcoin";
+ reg = <0x85>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4f {
+ label = "pa_therm1";
+ reg = <0x4f>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+};
+
+&pm8998_adc_tm {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,btm-channel-number = <0x60>;
+ };
+};
+
+&thermal_zones {
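+	/*
+	 * Zero polling delays disable timer-based polling in the thermal
+	 * core; zone updates are assumed to come from ADC notifications
+	 * instead.
+	 */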
+ xo-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x4c>;
+ };
+
+ msm-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x4d>;
+ };
+
+ pa-therm1-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x4f>;
+ };
+
+ quiet-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x51>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index e7ff343..d1712ad 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -21,9 +21,8 @@
coresight-name = "coresight-replicator";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -57,9 +56,8 @@
coresight-name = "coresight-replicator-swao";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -106,9 +104,8 @@
coresight-name = "coresight-tmc-etf-swao";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -143,9 +140,8 @@
coresight-name = "coresight-funnel-swao";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -171,7 +167,8 @@
};
tpda_swao: tpda@6b01000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
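+		/*
+		 * The static periphid is assumed to let the AMBA bus bind
+		 * the QTI TPDA driver without reading hardware ID registers.
+		 */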
reg = <0x6b01000 0x1000>;
reg-names = "tpda-base";
@@ -181,9 +178,8 @@
qcom,dsb-elem-size = <1 32>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -220,16 +216,16 @@
};
tpdm_swao0: tpdm@6b02000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6b02000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-swao-0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_swao0_out_tpda_swao: endpoint {
@@ -239,15 +235,15 @@
};
tpdm_swao1: tpdm@6b03000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6b03000 0x1000>;
reg-names = "tpdm-base";
coresight-name="coresight-tpdm-swao-1";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_swao1_out_tpda_swao: endpoint {
@@ -265,13 +261,13 @@
reg-names = "tmc-base", "bam-base";
arm,buffer-size = <0x400000>;
+ arm,sg-enable;
coresight-name = "coresight-tmc-etr";
coresight-ctis = <&cti0 &cti8>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tmc_etr_in_replicator: endpoint {
@@ -292,9 +288,8 @@
coresight-ctis = <&cti0 &cti8>;
arm,default-sink;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -329,9 +324,8 @@
coresight-name = "coresight-funnel-merg";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -375,9 +369,8 @@
coresight-name = "coresight-stm";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
stm_out_funnel_in0: endpoint {
@@ -387,6 +380,32 @@
};
+ hwevent: hwevent@0x014066f0 {
+ compatible = "qcom,coresight-hwevent";
+ reg = <0x14066f0 0x4>,
+ <0x14166f0 0x4>,
+ <0x1406038 0x4>,
+ <0x1416038 0x4>;
+ reg-names = "ddr-ch0-cfg", "ddr-ch23-cfg", "ddr-ch0-ctrl",
+ "ddr-ch23-ctrl";
+
+ coresight-name = "coresight-hwevent";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+ };
+
+ csr: csr@6001000 {
+ compatible = "qcom,coresight-csr";
+ reg = <0x6001000 0x1000>;
+ reg-names = "csr-base";
+
+ coresight-name = "coresight-csr";
+
+ qcom,blk-size = <1>;
+ };
+
funnel_in0: funnel@0x6041000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b908>;
@@ -396,9 +415,8 @@
coresight-name = "coresight-funnel-in0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -449,9 +467,8 @@
coresight-name = "coresight-funnel-in2";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -466,6 +483,16 @@
};
port@1 {
+ reg = <0>;
+ funnel_in2_in_modem_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&modem_etm0_out_funnel_in2>;
+ };
+
+ };
+
+ port@2 {
reg = <1>;
funnel_in2_in_replicator_swao: endpoint {
slave-mode;
@@ -475,7 +502,17 @@
};
- port@2 {
+ port@3 {
+ reg = <2>;
+ funnel_in2_in_funnel_modem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_modem_out_funnel_in2>;
+ };
+
+ };
+
+ port@4 {
reg = <5>;
funnel_in2_in_funnel_apss_merg: endpoint {
slave-mode;
@@ -488,24 +525,31 @@
};
tpda: tpda@6004000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x6004000 0x1000>;
reg-names = "tpda-base";
coresight-name = "coresight-tpda";
qcom,tpda-atid = <65>;
- qcom,bc-elem-size = <13 32>;
- qcom,tc-elem-size = <7 32>,
+ qcom,bc-elem-size = <10 32>,
<13 32>;
- qcom,dsb-elem-size = <13 32>;
- qcom,cmb-elem-size = <7 32>,
- <8 32>,
+ qcom,tc-elem-size = <13 32>;
+ qcom,dsb-elem-size = <0 32>,
+ <2 32>,
+ <3 32>,
+ <5 32>,
+ <6 32>,
+ <10 32>,
+ <11 32>,
+ <13 32>;
+ qcom,cmb-elem-size = <3 64>,
+ <7 64>,
<13 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -520,6 +564,51 @@
};
port@1 {
+ reg = <0>;
+ tpda_in_tpdm_center: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_center_out_tpda>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ tpda_in_funnel_dl_mm: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_dl_mm_out_tpda>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ tpda_in_funnel_ddr_0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_ddr_0_out_tpda>;
+ };
+ };
+
+ port@4 {
+ reg = <5>;
+ tpda_in_funnel_lpass: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_lpass_out_tpda>;
+ };
+ };
+
+ port@5 {
+ reg = <6>;
+ tpda_in_funnel_turing: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_turing_out_tpda>;
+ };
+ };
+
+ port@6 {
reg = <7>;
tpda_in_tpdm_vsense: endpoint {
slave-mode;
@@ -528,16 +617,25 @@
};
};
- port@2 {
- reg = <8>;
- tpda_in_tpdm_dcc: endpoint {
+ port@7 {
+ reg = <10>;
+ tpda_in_tpdm_qm: endpoint {
slave-mode;
remote-endpoint =
- <&tpdm_dcc_out_tpda>;
+ <&tpdm_qm_out_tpda>;
};
};
- port@3 {
+ port@8 {
+ reg = <11>;
+ tpda_in_tpdm_north: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_north_out_tpda>;
+ };
+ };
+
+ port@9 {
reg = <13>;
tpda_in_tpdm_pimem: endpoint {
slave-mode;
@@ -548,16 +646,544 @@
};
};
- tpdm_pimem: tpdm@6850000 {
+ funnel_modem: funnel@6832000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6832000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-modem";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_modem_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_funnel_modem>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_modem_in_tpda_modem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_modem_out_funnel_modem>;
+ };
+ };
+ };
+ };
+
+ tpda_modem: tpda@6831000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x6831000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-modem";
+
+ qcom,tpda-atid = <67>;
+ qcom,dsb-elem-size = <0 32>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_modem_out_funnel_modem: endpoint {
+ remote-endpoint =
+ <&funnel_modem_in_tpda_modem>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_modem_in_tpdm_modem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_modem_out_tpda_modem>;
+ };
+ };
+ };
+ };
+
+ tpdm_modem: tpdm@6830000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6830000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-modem";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_modem_out_tpda_modem: endpoint {
+ remote-endpoint = <&tpda_modem_in_tpdm_modem>;
+ };
+ };
+ };
+
+ funnel_lpass: funnel@6845000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6845000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-lpass";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_lpass_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_lpass>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_lpass_in_tpdm_lpass: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_lpass_out_funnel_lpass>;
+ };
+ };
+ };
+ };
+
+ tpdm_lpass: tpdm@6844000 {
compatible = "qcom,coresight-tpdm";
+ reg = <0x6844000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-lpass";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "core_clk";
+
+ port {
+ tpdm_lpass_out_funnel_lpass: endpoint {
+ remote-endpoint = <&funnel_lpass_in_tpdm_lpass>;
+ };
+ };
+ };
+
+ tpdm_center: tpdm@6c28000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6c28000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-center";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_center_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_center>;
+ };
+ };
+ };
+
+ tpdm_north: tpdm@6a24000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6a24000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-north";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_north_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_north>;
+ };
+ };
+ };
+
+ tpdm_qm: tpdm@69d0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x69d0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-qm";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_qm_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_qm>;
+ };
+ };
+ };
+
+ tpda_apss: tpda@7862000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x7862000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-apss";
+
+ qcom,tpda-atid = <66>;
+ qcom,dsb-elem-size = <0 32>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_apss_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_tpda_apss>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_apss_in_tpdm_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_apss_out_tpda_apss>;
+ };
+ };
+ };
+ };
+
+ tpdm_apss: tpdm@7860000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x7860000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-apss";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_apss_out_tpda_apss: endpoint {
+ remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+ };
+ };
+ };
+
+ tpda_llm_silver: tpda@78c0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x78c0000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-llm-silver";
+
+ qcom,tpda-atid = <72>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_llm_silver_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_tpda_llm_silver>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_llm_silver_in_tpdm_llm_silver: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_llm_silver_out_tpda_llm_silver>;
+ };
+ };
+ };
+ };
+
+ tpdm_llm_silver: tpdm@78a0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x78a0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-llm-silver";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_llm_silver_out_tpda_llm_silver: endpoint {
+ remote-endpoint =
+ <&tpda_llm_silver_in_tpdm_llm_silver>;
+ };
+ };
+ };
+
+ tpda_llm_gold: tpda@78d0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x78d0000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-llm-gold";
+
+ qcom,tpda-atid = <73>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_llm_gold_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_tpda_llm_gold>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_llm_gold_in_tpdm_llm_gold: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_llm_gold_out_tpda_llm_gold>;
+ };
+ };
+ };
+ };
+
+ tpdm_llm_gold: tpdm@78b0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x78b0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-llm-gold";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_llm_gold_out_tpda_llm_gold: endpoint {
+ remote-endpoint =
+ <&tpda_llm_gold_in_tpdm_llm_gold>;
+ };
+ };
+ };
+
+ funnel_dl_mm: funnel@6c0b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6c0b000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-dl-mm";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_dl_mm_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_dl_mm>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel_dl_mm_in_tpdm_mm: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_mm_out_funnel_dl_mm>;
+ };
+ };
+ };
+ };
+
+ tpdm_mm: tpdm@6c08000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6c08000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-mm";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_mm_out_funnel_dl_mm: endpoint {
+ remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>;
+ };
+ };
+ };
+
+ funnel_turing: funnel@6861000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6861000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-turing";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_turing_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_turing>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_turing_in_tpdm_turing: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_turing_out_funnel_turing>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_turing_in_turing_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&turing_etm0_out_funnel_turing>;
+ };
+ };
+ };
+ };
+
+ tpdm_turing: tpdm@6860000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6860000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-turing";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_turing_out_funnel_turing: endpoint {
+ remote-endpoint =
+ <&funnel_turing_in_tpdm_turing>;
+ };
+ };
+ };
+
+ funnel_ddr_0: funnel@69e2000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x69e2000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-ddr-0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_ddr_0_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_ddr_0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_ddr_0_in_tpdm_ddr: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_ddr_out_funnel_ddr_0>;
+ };
+ };
+ };
+ };
+
+ tpdm_ddr: tpdm@69e0000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x69e0000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-ddr";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_ddr_out_funnel_ddr_0: endpoint {
+ remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
+ };
+ };
+ };
+
+ tpdm_pimem: tpdm@6850000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6850000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-pimem";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port {
tpdm_pimem_out_tpda: endpoint {
@@ -566,35 +1192,16 @@
};
};
-
- tpdm_dcc: tpdm@6870000 {
- compatible = "qcom,coresight-tpdm";
- reg = <0x6870000 0x1000>;
- reg-names = "tpdm-base";
-
- coresight-name = "coresight-tpdm-dcc";
-
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
-
- port {
- tpdm_dcc_out_tpda: endpoint {
- remote-endpoint = <&tpda_in_tpdm_dcc>;
- };
- };
- };
-
tpdm_vsense: tpdm@6840000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6840000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-vsense";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port{
tpdm_vsense_out_tpda: endpoint {
@@ -604,7 +1211,8 @@
};
tpda_olc: tpda@7832000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x7832000 0x1000>;
reg-names = "tpda-base";
@@ -613,9 +1221,8 @@
qcom,tpda-atid = <69>;
qcom,cmb-elem-size = <0 64>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -639,15 +1246,15 @@
};
tpdm_olc: tpdm@7830000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x7830000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-olc";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
port{
tpdm_olc_out_tpda_olc: endpoint {
@@ -657,7 +1264,8 @@
};
tpda_spss: tpda@6882000 {
- compatible = "qcom,coresight-tpda";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
reg = <0x6882000 0x1000>;
reg-names = "tpda-base";
@@ -666,9 +1274,8 @@
qcom,tpda-atid = <70>;
qcom,dsb-elem-size = <0 32>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -692,16 +1299,15 @@
};
tpdm_spss: tpdm@6880000 {
- compatible = "qcom,coresight-tpdm";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
reg = <0x6880000 0x1000>;
reg-names = "tpdm-base";
coresight-name = "coresight-tpdm-spss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
-
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
qcom,msr-fix-req;
port{
@@ -720,9 +1326,8 @@
coresight-name = "coresight-funnel-spss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -756,9 +1361,8 @@
coresight-name = "coresight-funnel-qatb";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -783,300 +1387,358 @@
};
};
+ cti_ddr0: cti@69e1000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x69e1000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ddr0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_ddr1: cti@69e4000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b969>;
+ reg = <0x69e4000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ddr1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
cti0: cti@6010000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6010000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti0";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti1: cti@6011000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6011000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti1";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti2: cti@6012000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6012000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti2";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ qcom,cti-gpio-trigout = <4>;
+ pinctrl-names = "cti-trigout-pctrl";
+ pinctrl-0 = <&trigout_a>;
};
cti3: cti@6013000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6013000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti3";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti4: cti@6014000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6014000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti4";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti5: cti@6015000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6015000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti5";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti6: cti@6016000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6016000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti6";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti7: cti@6017000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6017000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti7";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti8: cti@6018000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6018000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti8";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti9: cti@6019000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6019000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti9";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti10: cti@601a000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601a000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti10";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti11: cti@601b000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601b000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti11";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti12: cti@601c000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601c000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti12";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti13: cti@601d000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601d000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti13";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti14: cti@601e000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601e000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti14";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti15: cti@601f000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x601f000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti15";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti_cpu0: cti@7020000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7020000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu0";
cpu = <&CPU0>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
};
cti_cpu1: cti@7120000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7120000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu1";
cpu = <&CPU1>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu2: cti@7220000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7220000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu2";
cpu = <&CPU2>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu3: cti@7320000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7320000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu3";
cpu = <&CPU3>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu4: cti@7420000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7420000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu4";
cpu = <&CPU4>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu5: cti@7520000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7520000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu5";
cpu = <&CPU5>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu6: cti@7620000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7620000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu6";
cpu = <&CPU6>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
};
cti_cpu7: cti@7720000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7720000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu7";
cpu = <&CPU7>;
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "core_clk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ turing_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-turing-etm0";
+ qcom,inst-id = <1>;
+
+		port {
+ turing_etm0_out_funnel_turing: endpoint {
+ remote-endpoint =
+ <&funnel_turing_in_turing_etm0>;
+ };
+ };
};
dummy_eud: dummy_sink {
@@ -1094,6 +1756,20 @@
};
};
+ modem_etm0 {
+ compatible = "qcom,coresight-remote-etm";
+
+ coresight-name = "coresight-modem-etm0";
+ qcom,inst-id = <2>;
+
+ port {
+ modem_etm0_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_modem_etm0>;
+ };
+ };
+ };
+
funnel_apss_merg: funnel@7810000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b908>;
@@ -1103,9 +1779,8 @@
coresight-name = "coresight-funnel-apss-merg";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1129,13 +1804,192 @@
};
port@2 {
- reg = <1>;
+ reg = <2>;
funnel_apss_merg_in_tpda_olc: endpoint {
slave-mode;
remote-endpoint =
<&tpda_olc_out_funnel_apss_merg>;
};
};
+
+ port@3 {
+ reg = <4>;
+ funnel_apss_merg_in_tpda_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_apss_out_funnel_apss_merg>;
+ };
+ };
+
+ port@4 {
+ reg = <5>;
+ funnel_apss_merg_in_tpda_llm_silver: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_llm_silver_out_funnel_apss_merg>;
+ };
+ };
+
+ port@5 {
+ reg = <6>;
+ funnel_apss_merg_in_tpda_llm_gold: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_llm_gold_out_funnel_apss_merg>;
+ };
+ };
+ };
+ };
+
+ etm0: etm@7040000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7040000 0x1000>;
+ cpu = <&CPU0>;
+
+ coresight-name = "coresight-etm0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm0_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm0>;
+ };
+ };
+ };
+
+ etm1: etm@7140000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7140000 0x1000>;
+ cpu = <&CPU1>;
+
+ coresight-name = "coresight-etm1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm1_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm1>;
+ };
+ };
+ };
+
+ etm2: etm@7240000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7240000 0x1000>;
+ cpu = <&CPU2>;
+
+ coresight-name = "coresight-etm2";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm2_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm2>;
+ };
+ };
+ };
+
+ etm3: etm@7340000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7340000 0x1000>;
+ cpu = <&CPU3>;
+
+ coresight-name = "coresight-etm3";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm3_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm3>;
+ };
+ };
+ };
+
+ etm4: etm@7440000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7440000 0x1000>;
+ cpu = <&CPU4>;
+
+ coresight-name = "coresight-etm4";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm4_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm4>;
+ };
+ };
+ };
+
+ etm5: etm@7540000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7540000 0x1000>;
+ cpu = <&CPU5>;
+
+ coresight-name = "coresight-etm5";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm5_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm5>;
+ };
+ };
+ };
+
+ etm6: etm@7640000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7640000 0x1000>;
+ cpu = <&CPU6>;
+
+ coresight-name = "coresight-etm6";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm6_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm6>;
+ };
+ };
+ };
+
+ etm7: etm@7740000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb95d>;
+
+ reg = <0x7740000 0x1000>;
+ cpu = <&CPU7>;
+
+ coresight-name = "coresight-etm7";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm7_out_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_in_etm7>;
+ };
};
};
@@ -1148,9 +2002,8 @@
coresight-name = "coresight-funnel-apss";
- clocks = <&clock_gcc RPMH_QDSS_CLK>,
- <&clock_gcc RPMH_QDSS_A_CLK>;
- clock-names = "apb_pclk", "core_a_clk";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
ports {
#address-cells = <1>;
@@ -1163,6 +2016,77 @@
<&funnel_apss_merg_in_funnel_apss>;
};
};
+ port@1 {
+ reg = <0>;
+ funnel_apss_in_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm0_out_funnel_apss>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_apss_in_etm1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm1_out_funnel_apss>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel_apss_in_etm2: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm2_out_funnel_apss>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel_apss_in_etm3: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm3_out_funnel_apss>;
+ };
+ };
+
+ port@5 {
+ reg = <4>;
+ funnel_apss_in_etm4: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm4_out_funnel_apss>;
+ };
+ };
+
+ port@6 {
+ reg = <5>;
+ funnel_apss_in_etm5: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm5_out_funnel_apss>;
+ };
+ };
+
+ port@7 {
+ reg = <6>;
+ funnel_apss_in_etm6: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm6_out_funnel_apss>;
+ };
+ };
+
+ port@8 {
+ reg = <7>;
+ funnel_apss_in_etm7: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&etm7_out_funnel_apss>;
+ };
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
new file mode 100644
index 0000000..77edb85
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -0,0 +1,300 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+ pil_gpu: qcom,kgsl-hyp {
+ compatible = "qcom,pil-tz-generic";
+ qcom,pas-id = <13>;
+ qcom,firmware-name = "a630_zap";
+ };
+
+	msm_bus: qcom,kgsl-busmon {
+ label = "kgsl-busmon";
+ compatible = "qcom,kgsl-busmon";
+ };
+
+ gpubw: qcom,gpubw {
+ compatible = "qcom,devbw";
+ governor = "bw_vbif";
+ qcom,src-dst-ports = <26 512>;
+ /*
+		 * The active-only flag is used while registering the bus
+		 * governor. It helps release the bus vote when the CPU
+		 * subsystem is inactive.
+ */
+ qcom,active-only;
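+		/*
+		 * Bandwidth table: values are presumably in MB/s, one entry
+		 * per DDR rate noted in the inline comments.
+		 */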
+ qcom,bw-tbl =
+ < 0 /* off */ >,
+ < 762 /* 100 MHz */ >,
+ < 1144 /* 150 MHz */ >,
+ < 1525 /* 200 MHz */ >,
+ < 2288 /* 300 MHz */ >,
+ < 3143 /* 412 MHz */ >,
+ < 4173 /* 547 MHz */ >,
+ < 5195 /* 681 MHz */ >,
+ < 5859 /* 768 MHz */ >,
+ < 7759 /* 1017 MHz */ >,
+ < 9887 /* 1296 MHz */ >,
+ < 11863 /* 1555 MHz */ >,
+ < 13763 /* 1804 MHz */ >;
+ };
+
+ msm_gpu: qcom,kgsl-3d0@5000000 {
+ label = "kgsl-3d0";
+ compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+ status = "ok";
+ reg = <0x5000000 0x40000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <0 300 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ qcom,id = <0>;
+
+ qcom,chipid = <0x06030000>;
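+		/* chipid 0x06030000: Adreno 630, matching the a630_zap PIL firmware */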
+
+ qcom,initial-pwrlevel = <2>;
+
+ qcom,gpu-quirk-hfi-use-reg;
+ qcom,gpu-quirk-two-pass-use-wfi;
+
+ qcom,idle-timeout = <100000000>; //msecs
+ qcom,no-nap;
+
+ qcom,highest-bank-bit = <15>;
+
+ qcom,min-access-length = <32>;
+
+ qcom,ubwc-mode = <2>;
+
+ qcom,snapshot-size = <1048576>; //bytes
+
+ qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size
+
+ qcom,tsens-name = "tsens_tz_sensor12";
+ #cooling-cells = <2>;
+
+ clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK>,
+ <&clock_gpucc GPU_CC_CXO_CLK>,
+ <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gpucc GPU_CC_CX_GMU_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>,
+ <&clock_gpucc GPU_CC_GX_CXO_CLK>;
+
+ clock-names = "core_clk", "rbbmtimer_clk", "mem_clk",
+ "mem_iface_clk", "gmu_clk", "ahb_clk",
+ "cxo_clk";
+
+ qcom,isense-clk-on-level = <1>;
+
+ /* Bus Scale Settings */
+ qcom,gpubw-dev = <&gpubw>;
+ qcom,bus-control;
+ qcom,msm-bus,name = "grp3d";
+ qcom,msm-bus,num-cases = <13>;
+ qcom,msm-bus,num-paths = <1>;
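+		/*
+		 * Each vector is <master slave ab ib>, where ab/ib are the
+		 * average and instantaneous bandwidth votes in KBps.
+		 */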
+ qcom,msm-bus,vectors-KBps =
+ <26 512 0 0>,
+
+ <26 512 0 800000>, // 1 bus=100
+ <26 512 0 1200000>, // 2 bus=150
+ <26 512 0 1600000>, // 3 bus=200
+ <26 512 0 2400000>, // 4 bus=300
+ <26 512 0 3296000>, // 5 bus=412
+ <26 512 0 4376000>, // 6 bus=547
+ <26 512 0 5448000>, // 7 bus=681
+ <26 512 0 6144000>, // 8 bus=768
+ <26 512 0 8136000>, // 9 bus=1017
+ <26 512 0 10368000>, // 10 bus=1296
+ <26 512 0 12440000>, // 11 bus=1555
+ <26 512 0 14432000>; // 12 bus=1804
+
+ /* GDSC regulator names */
+ regulator-names = "vddcx", "vdd";
+ /* GDSC oxili regulators */
+ vddcx-supply = <&gpu_cx_gdsc>;
+ vdd-supply = <&gpu_gx_gdsc>;
+
+ /* GPU related llc slices */
+ cache-slice-names = "gpu", "gpuhtw";
+ cache-slices = <&llcc 12>, <&llcc 11>;
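+		/* Slice IDs follow cache-slice-names: 12 = gpu, 11 = gpuhtw */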
+
+ /* GPU Mempools */
+ qcom,gpu-mempools {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "qcom,gpu-mempools";
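+			/*
+			 * mempool-reserved is the page count kept resident in
+			 * each pool; the allocate flag lets a pool grow past
+			 * its reservation (assumed kgsl mempool semantics).
+			 */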
+
+ /* 4K Page Pool configuration */
+ qcom,gpu-mempool@0 {
+ reg = <0>;
+ qcom,mempool-page-size = <4096>;
+ qcom,mempool-reserved = <2048>;
+ qcom,mempool-allocate;
+ };
+ /* 8K Page Pool configuration */
+ qcom,gpu-mempool@1 {
+ reg = <1>;
+ qcom,mempool-page-size = <8192>;
+ qcom,mempool-reserved = <1024>;
+ qcom,mempool-allocate;
+ };
+ /* 64K Page Pool configuration */
+ qcom,gpu-mempool@2 {
+ reg = <2>;
+ qcom,mempool-page-size = <65536>;
+ qcom,mempool-reserved = <256>;
+ };
+ /* 1M Page Pool configuration */
+ qcom,gpu-mempool@3 {
+ reg = <3>;
+ qcom,mempool-page-size = <1048576>;
+ qcom,mempool-reserved = <32>;
+ };
+ };
+
+ /* Power levels */
+ qcom,gpu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevels";
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <280000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <280000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <280000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <280000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+ };
+
+ };
+
+ kgsl_msm_iommu: qcom,kgsl-iommu {
+ compatible = "qcom,kgsl-smmu-v2";
+
+ reg = <0x05040000 0x10000>;
+ qcom,protect = <0x40000 0x10000>;
+ qcom,micro-mmu-control = <0x6000>;
+
+		clocks = <&clock_gcc GCC_GPU_CFG_AHB_CLK>,
+ <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+
+ clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
+
+ qcom,secure_align_mask = <0xfff>;
+ qcom,global_pt;
+
+ gfx3d_user: gfx3d_user {
+ compatible = "qcom,smmu-kgsl-cb";
+ label = "gfx3d_user";
+ iommus = <&kgsl_smmu 0>;
+ qcom,gpu-offset = <0x48000>;
+ };
+
+ gfx3d_secure: gfx3d_secure {
+ compatible = "qcom,smmu-kgsl-cb";
+ iommus = <&kgsl_smmu 2>;
+ };
+ };
+
+ gmu: qcom,gmu {
+ label = "kgsl-gmu";
+ compatible = "qcom,gpu-gmu";
+
+ reg = <0x506a000 0x26000>, <0xb200000 0x300000>;
+ reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg";
+
+ interrupts = <0 304 0>, <0 305 0>;
+ interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
+
+ qcom,msm-bus,name = "cnoc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <26 10036 0 0>, // CNOC off
+ <26 10036 0 100>; // CNOC on
+
+ regulator-names = "vddcx", "vdd";
+ vddcx-supply = <&gpu_cx_gdsc>;
+ vdd-supply = <&gpu_gx_gdsc>;
+
+ clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>,
+ <&clock_gpucc GPU_CC_CXO_CLK>,
+ <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>;
+
+ clock-names = "gmu_clk", "cxo_clk", "axi_clk",
+ "memnoc_clk", "ahb_clk";
+
+ qcom,gmu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gmu-pwrlevels";
+
+ qcom,gmu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gmu-freq = <400000000>;
+ };
+
+ qcom,gmu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gmu-freq = <19200000>;
+ };
+
+ qcom,gmu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gmu-freq = <0>;
+ };
+ };
+
+ gmu_user: gmu_user {
+ compatible = "qcom,smmu-gmu-user-cb";
+ iommus = <&kgsl_smmu 4>;
+ };
+
+ gmu_kernel: gmu_kernel {
+ compatible = "qcom,smmu-gmu-kernel-cb";
+ iommus = <&kgsl_smmu 5>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 734b6a9..715b566 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -11,6 +11,30 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include "sdm845-camera-sensor-mtp.dtsi"
+
+/ {
+ bluetooth: bt_wcn3990 {
+ compatible = "qca,wcn3990";
+ qca,bt-vdd-io-supply = <&pm8998_s3>;
+ qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+ qca,bt-vdd-core-supply = <&pm8998_l7>;
+ qca,bt-vdd-pa-supply = <&pm8998_l17>;
+ qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+ qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+ qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+ qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+ qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+ qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
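+		/* Each *-voltage-level pair is <min max> in microvolts */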
+
+ qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+ };
+};
&soc {
gpio_keys {
@@ -54,6 +78,34 @@
};
};
+&labibb {
+ status = "ok";
+ qcom,qpnp-labibb-mode = "lcd";
+};
+
+&dsi_dual_nt35597_truly_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+ qcom,dsi-display-active;
+};
+
+&pmi8998_wled {
+ status = "okay";
+ qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+ #cooling-cells = <2>;
+};
+
&ufsphy_mem {
compatible = "qcom,ufs-phy-qmp-v3";
@@ -103,3 +155,203 @@
status = "ok";
};
+
+&sdhc_2 {
+ vdd-supply = <&pm8998_l21>;
+ qcom,vdd-voltage-level = <2950000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm8998_l13>;
+ qcom,vdd-io-voltage-level = <1808000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ status = "ok";
+};
+
+&pmi8998_switch1 {
+ pinctrl-names = "led_enable", "led_disable";
+ pinctrl-0 = <&flash_led3_front_en>;
+ pinctrl-1 = <&flash_led3_front_dis>;
+};
+
+/ {
+ mtp_batterydata: qcom,battery-data {
+ qcom,batt-id-range-pct = <15>;
+ #include "fg-gen3-batterydata-itech-3000mah.dtsi"
+ #include "fg-gen3-batterydata-ascent-3450mah.dtsi"
+ #include "fg-gen3-batterydata-demo-6000mah.dtsi"
+ };
+
+ extcon_usb1: extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ vbus-gpio = <&pmi8998_gpios 8 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_default>;
+ };
+};
+
+&pmi8998_fg {
+ qcom,battery-data = <&mtp_batterydata>;
+};
+
+/ {
+	aliases {
+ serial0 = &qupv3_se9_2uart;
+ spi0 = &qupv3_se8_spi;
+ i2c0 = &qupv3_se10_i2c;
+ i2c1 = &qupv3_se3_i2c;
+ hsuart0 = &qupv3_se6_4uart;
+ };
+};
+
+&qupv3_se9_2uart {
+ status = "ok";
+};
+
+&qupv3_se8_spi {
+ status = "ok";
+};
+
+&qupv3_se3_i2c {
+ status = "ok";
+};
+
+&qupv3_se10_i2c {
+ status = "ok";
+};
+
+&qupv3_se6_4uart {
+ status = "ok";
+};
+
+&usb1 {
+ status = "okay";
+ extcon = <&extcon_usb1>;
+};
+
+&qusb_phy1 {
+ status = "okay";
+};
+
+&usb_qmp_phy {
+ status = "okay";
+};
+
+&pm8998_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@85 {
+ label = "vcoin";
+ reg = <0x85>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4d {
+ label = "msm_therm";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4f {
+ label = "pa_therm1";
+ reg = <0x4f>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@51 {
+ label = "quiet_therm";
+ reg = <0x51>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+};
+
+&pm8998_adc_tm {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,btm-channel-number = <0x60>;
+ };
+};
+
+&thermal_zones {
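+	/*
+	 * Zero polling delays: these zones presumably rely on VADC
+	 * threshold notifications rather than timer-based polling.
+	 */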
+ xo-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x4c>;
+ };
+
+ msm-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x4d>;
+ };
+
+ pa-therm1-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x4f>;
+ };
+
+ quiet-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8998_vadc 0x51>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
new file mode 100644
index 0000000..da5d6fa
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+
+&soc {
+ pcie0: qcom,pcie@0x1c00000 {
+ compatible = "qcom,pci-msm";
+ cell-index = <0>;
+
+ reg = <0x1c00000 0x2000>,
+ <0x1c06000 0x1000>,
+ <0x60000000 0xf1d>,
+ <0x60000f20 0xa8>,
+ <0x60100000 0x100000>,
+ <0x60200000 0x100000>,
+ <0x60300000 0xd00000>;
+
+ reg-names = "parf", "phy", "dm_core", "elbi",
+ "conf", "io", "bars";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x60200000 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x60300000 0x0 0xd00000>;
+ interrupt-parent = <&pcie0>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
+ 36 37>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0xffffffff>;
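+		/*
+		 * The mask selects the fourth child interrupt cell, mapping
+		 * INTA-INTD and the 32 MSI vectors below onto GIC SPIs.
+		 */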
+ interrupt-map = <0 0 0 0 &intc 0 141 0
+ 0 0 0 1 &intc 0 149 0
+ 0 0 0 2 &intc 0 150 0
+ 0 0 0 3 &intc 0 151 0
+ 0 0 0 4 &intc 0 152 0
+ 0 0 0 5 &intc 0 140 0
+ 0 0 0 6 &intc 0 672 0
+ 0 0 0 7 &intc 0 673 0
+ 0 0 0 8 &intc 0 674 0
+ 0 0 0 9 &intc 0 675 0
+ 0 0 0 10 &intc 0 676 0
+ 0 0 0 11 &intc 0 677 0
+ 0 0 0 12 &intc 0 678 0
+ 0 0 0 13 &intc 0 679 0
+ 0 0 0 14 &intc 0 680 0
+ 0 0 0 15 &intc 0 681 0
+ 0 0 0 16 &intc 0 682 0
+ 0 0 0 17 &intc 0 683 0
+ 0 0 0 18 &intc 0 684 0
+ 0 0 0 19 &intc 0 685 0
+ 0 0 0 20 &intc 0 686 0
+ 0 0 0 21 &intc 0 687 0
+ 0 0 0 22 &intc 0 688 0
+ 0 0 0 23 &intc 0 689 0
+ 0 0 0 24 &intc 0 690 0
+ 0 0 0 25 &intc 0 691 0
+ 0 0 0 26 &intc 0 692 0
+ 0 0 0 27 &intc 0 693 0
+ 0 0 0 28 &intc 0 694 0
+ 0 0 0 29 &intc 0 695 0
+ 0 0 0 30 &intc 0 696 0
+ 0 0 0 31 &intc 0 697 0
+ 0 0 0 32 &intc 0 698 0
+ 0 0 0 33 &intc 0 699 0
+ 0 0 0 34 &intc 0 700 0
+ 0 0 0 35 &intc 0 701 0
+ 0 0 0 36 &intc 0 702 0
+ 0 0 0 37 &intc 0 703 0>;
+
+ interrupt-names = "int_msi", "int_a", "int_b", "int_c",
+ "int_d", "int_global_int",
+ "msi_0", "msi_1", "msi_2", "msi_3",
+ "msi_4", "msi_5", "msi_6", "msi_7",
+ "msi_8", "msi_9", "msi_10", "msi_11",
+ "msi_12", "msi_13", "msi_14", "msi_15",
+ "msi_16", "msi_17", "msi_18", "msi_19",
+ "msi_20", "msi_21", "msi_22", "msi_23",
+ "msi_24", "msi_25", "msi_26", "msi_27",
+ "msi_28", "msi_29", "msi_30", "msi_31";
+
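+		/* PHY init sequence, assumed <register-offset value delay> triplets */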
+ qcom,phy-sequence = <0x804 0x01 0x0
+ 0x034 0x14 0x0
+ 0x138 0x30 0x0
+ 0x048 0x07 0x0
+ 0x15c 0x06 0x0
+ 0x090 0x01 0x0
+ 0x088 0x20 0x0
+ 0x0f0 0x00 0x0
+ 0x0f8 0x01 0x0
+ 0x0f4 0xc9 0x0
+ 0x11c 0xff 0x0
+ 0x120 0x3f 0x0
+ 0x164 0x01 0x0
+ 0x154 0x00 0x0
+ 0x148 0x0a 0x0
+ 0x05c 0x19 0x0
+ 0x038 0x90 0x0
+ 0x0b0 0x82 0x0
+ 0x0c0 0x02 0x0
+ 0x0bc 0xea 0x0
+ 0x0b8 0xab 0x0
+ 0x0a0 0x00 0x0
+ 0x09c 0x0d 0x0
+ 0x098 0x04 0x0
+ 0x13c 0x00 0x0
+ 0x060 0x06 0x0
+ 0x068 0x16 0x0
+ 0x070 0x36 0x0
+ 0x184 0x01 0x0
+ 0x15c 0x16 0x0
+ 0x138 0x33 0x0
+ 0x03c 0x02 0x0
+ 0x040 0x07 0x0
+ 0x080 0x04 0x0
+ 0x0dc 0x00 0x0
+ 0x0d8 0x3f 0x0
+ 0x00c 0x09 0x0
+ 0x010 0x01 0x0
+ 0x01c 0x40 0x0
+ 0x020 0x01 0x0
+ 0x014 0x02 0x0
+ 0x018 0x00 0x0
+ 0x024 0x7e 0x0
+ 0x028 0x15 0x0
+ 0x244 0x02 0x0
+ 0x2a4 0x12 0x0
+ 0x260 0x10 0x0
+ 0x28c 0x06 0x0
+ 0x504 0x03 0x0
+ 0x500 0x1c 0x0
+ 0x50c 0x14 0x0
+ 0x4d4 0x0e 0x0
+ 0x4d8 0x04 0x0
+ 0x4dc 0x1a 0x0
+ 0x434 0x4b 0x0
+ 0x414 0x04 0x0
+ 0x40c 0x04 0x0
+ 0x4f8 0x71 0x0
+ 0x564 0x59 0x0
+ 0x568 0x59 0x0
+ 0x4fc 0x80 0x0
+ 0x51c 0x40 0x0
+ 0x444 0x71 0x0
+ 0x43c 0x40 0x0
+ 0x854 0x04 0x0
+ 0x62c 0x52 0x0
+ 0x654 0x50 0x0
+ 0x65c 0x1a 0x0
+ 0x660 0x06 0x0
+ 0x8c8 0x83 0x0
+ 0x8cc 0x09 0x0
+ 0x8d0 0xa2 0x0
+ 0x8d4 0x40 0x0
+ 0x8c4 0x02 0x0
+ 0x9ac 0x00 0x0
+ 0x8a0 0x01 0x0
+ 0x9e0 0x00 0x0
+ 0x9dc 0x20 0x0
+ 0x9a8 0x00 0x0
+ 0x8a4 0x01 0x0
+ 0x8a8 0x73 0x0
+ 0x9d8 0xaa 0x0
+ 0x9b0 0x03 0x0
+ 0xa0c 0x0d 0x0
+ 0x86c 0x00 0x0
+ 0x644 0x00 0x0
+ 0x804 0x03 0x0
+ 0x800 0x00 0x0
+ 0x808 0x03 0x0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_clkreq_default
+ &pcie0_perst_default
+ &pcie0_wake_default>;
+
+ perst-gpio = <&tlmm 35 0>;
+ wake-gpio = <&tlmm 37 0>;
+
+ gdsc-vdd-supply = <&pcie_0_gdsc>;
+ vreg-1.8-supply = <&pm8998_l26>;
+ vreg-0.9-supply = <&pm8998_l1>;
+ vreg-cx-supply = <&pm8998_s9_level>;
+
+ qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
+ qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
+ qcom,vreg-cx-voltage-level = <RPMH_REGULATOR_LEVEL_MAX
+ RPMH_REGULATOR_LEVEL_SVS 0>;
+
+ qcom,l1-supported;
+ qcom,l1ss-supported;
+ qcom,aux-clk-sync;
+
+ qcom,ep-latency = <10>;
+
+ qcom,boot-option = <0x1>;
+
+ linux,pci-domain = <0>;
+
+ qcom,msi-gicm-addr = <0x17a00040>;
+ qcom,msi-gicm-base = <0x2c0>;
+
+ qcom,pcie-phy-ver = <0x30>;
+ qcom,use-19p2mhz-aux-clk;
+
+ qcom,smmu-sid-base = <0x1c10>;
+
+ iommu-map = <0x100 &apps_smmu 0x1c11 0x1>,
+ <0x200 &apps_smmu 0x1c12 0x1>,
+ <0x300 &apps_smmu 0x1c13 0x1>,
+ <0x400 &apps_smmu 0x1c14 0x1>,
+ <0x500 &apps_smmu 0x1c15 0x1>,
+ <0x600 &apps_smmu 0x1c16 0x1>,
+ <0x700 &apps_smmu 0x1c17 0x1>,
+ <0x800 &apps_smmu 0x1c18 0x1>,
+ <0x900 &apps_smmu 0x1c19 0x1>,
+ <0xa00 &apps_smmu 0x1c1a 0x1>,
+ <0xb00 &apps_smmu 0x1c1b 0x1>,
+ <0xc00 &apps_smmu 0x1c1c 0x1>,
+ <0xd00 &apps_smmu 0x1c1d 0x1>,
+ <0xe00 &apps_smmu 0x1c1e 0x1>,
+ <0xf00 &apps_smmu 0x1c1f 0x1>;
+
+ qcom,msm-bus,name = "pcie0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <45 512 0 0>,
+ <45 512 500 800>;
+
+ clocks = <&clock_gcc GCC_PCIE_0_PIPE_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_PCIE_0_AUX_CLK>,
+ <&clock_gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&clock_gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_0_SLV_AXI_CLK>,
+ <&clock_gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&clock_gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
+ <&clock_gcc GCC_PCIE_PHY_REFGEN_CLK>,
+ <&clock_gcc GCC_PCIE_PHY_AUX_CLK>;
+
+ clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+ "pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+ "pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+ "pcie_0_ldo", "pcie_0_slv_q2a_axi_clk",
+ "pcie_tbu_clk", "pcie_phy_refgen_clk",
+ "pcie_phy_aux_clk";
+
+ max-clock-frequency-hz = <0>, <0>, <19200000>, <0>, <0>,
+ <0>, <0>, <0>, <0>, <100000000>, <0>;
+
+ resets = <&clock_gcc GCC_PCIE_0_BCR>,
+ <&clock_gcc GCC_PCIE_0_PHY_BCR>;
+
+ reset-names = "pcie_0_core_reset",
+ "pcie_0_phy_reset";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 19c6543..3ab0c70 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -66,6 +66,35 @@
};
};
+ flash_led3_front {
+ flash_led3_front_en: flash_led3_front_en {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+				drive-strength = <2>;
+ output-high;
+ bias-disable;
+ };
+ };
+
+ flash_led3_front_dis: flash_led3_front_dis {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+				drive-strength = <2>;
+ output-low;
+ bias-disable;
+ };
+ };
+ };
wcd9xxx_intr {
wcd_intr_default: wcd_intr_default{
@@ -83,6 +112,95 @@
};
};
+ sdc2_clk_on: sdc2_clk_on {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_off: sdc2_clk_off {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cmd_on: sdc2_cmd_on {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_off: sdc2_cmd_off {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_data_on: sdc2_data_on {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_off: sdc2_data_off {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pcie0 {
+ pcie0_clkreq_default: pcie0_clkreq_default {
+ mux {
+ pins = "gpio36";
+ function = "pci_e0";
+ };
+
+ config {
+ pins = "gpio36";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie0_perst_default: pcie0_perst_default {
+ mux {
+ pins = "gpio35";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio35";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ pcie0_wake_default: pcie0_wake_default {
+ mux {
+ pins = "gpio37";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio37";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+
cdc_reset_ctrl {
cdc_reset_sleep: cdc_reset_sleep {
mux {
@@ -2256,6 +2374,243 @@
};
};
};
+
+ cci0_active: cci0_active {
+ mux {
+ /* CLK, DATA */
+				pins = "gpio17","gpio18";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio17","gpio18";
+ bias-pull-up; /* PULL UP*/
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci0_suspend: cci0_suspend {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio17","gpio18";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio17","gpio18";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci1_active: cci1_active {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio19","gpio20";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio19","gpio20";
+ bias-pull-up; /* PULL UP*/
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci1_suspend: cci1_suspend {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio19","gpio20";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio19","gpio20";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk0_active: cam_sensor_mclk0_active {
+ /* MCLK0 */
+ mux {
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk0_suspend: cam_sensor_mclk0_suspend {
+ /* MCLK0 */
+ mux {
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_active: cam_sensor_rear_active {
+ /* RESET, AVDD LDO */
+ mux {
+ pins = "gpio80","gpio79";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80","gpio79";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+ /* RESET, AVDD LDO */
+ mux {
+ pins = "gpio80","gpio79";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80","gpio79";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+ /* MCLK1 */
+ mux {
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk1_suspend: cam_sensor_mclk1_suspend {
+ /* MCLK1 */
+ mux {
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_active: cam_sensor_front_active {
+ /* RESET AVDD_LDO*/
+ mux {
+ pins = "gpio28", "gpio8";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28", "gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_suspend: cam_sensor_front_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+			/* MCLK2 */
+ mux {
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk2_suspend: cam_sensor_mclk2_suspend {
+			/* MCLK2 */
+ mux {
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_active: cam_sensor_rear2_active {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio9","gpio8";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9","gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+ /* RESET, STANDBY */
+ mux {
+ pins = "gpio9","gpio8";
+ function = "gpio";
+			};
+
+			config {
+ pins = "gpio9","gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ trigout_a: trigout_a {
+ mux {
+ pins = "gpio62", "gpio51";
+ function = "qdss_cti";
+			};
+
+			config {
+ pins = "gpio62", "gpio51";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
};
@@ -2299,4 +2654,53 @@
power-source = <0>;
};
};
+
+ camera_dvdd_en {
+ camera_dvdd_en_default: camera_dvdd_en_default {
+ pins = "gpio9";
+ function = "normal";
+ power-source = <0>;
+ output-low;
+ };
+ };
+
+ camera_rear_dvdd_en {
+ camera_rear_dvdd_en_default: camera_rear_dvdd_en_default {
+ pins = "gpio12";
+ function = "normal";
+ power-source = <0>;
+ output-low;
+ };
+ };
+};
+
+&pmi8998_gpios {
+ usb2_vbus_boost {
+ usb2_vbus_boost_default: usb2_vbus_boost_default {
+ pins = "gpio2";
+ function = "normal";
+ output-low;
+ power-source = <0>;
+ };
+ };
+
+ usb2_vbus_det {
+ usb2_vbus_det_default: usb2_vbus_det_default {
+ pins = "gpio8";
+ function = "normal";
+ input-enable;
+ bias-pull-down;
+ power-source = <1>; /* VPH input supply */
+ };
+ };
+
+ usb2_id_det {
+ usb2_id_det_default: usb2_id_det_default {
+ pins = "gpio9";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ power-source = <0>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 21b5659..70e749b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,4 +139,10 @@
};
};
};
+
+ qcom,rpm-stats@c300000 {
+ compatible = "qcom,rpm-stats";
+ reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
+ reg-names = "phys_addr_base", "offset_addr";
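+		/* Presumably exposes RPMh low-power-mode residency statistics */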
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 6ea92ee..a4dc4753 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -9,3 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+/ {
+ qrd_batterydata: qcom,battery-data {
+ qcom,batt-id-range-pct = <15>;
+ #include "fg-gen3-batterydata-itech-3000mah.dtsi"
+ #include "fg-gen3-batterydata-ascent-3450mah.dtsi"
+ };
+};
+
+&pmi8998_fg {
+ qcom,battery-data = <&qrd_batterydata>;
+};
+
+&mdss_mdp {
+ #cooling-cells = <2>;
+};
+
+&soc {
+ sound-tavil {
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+ qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 1c31a7a..e5d1a74 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/msm/msm-bus-ids.h>
+
&soc {
/* QUPv3 South instances */
@@ -28,8 +30,11 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se6_4uart_active>;
pinctrl-1 = <&qupv3_se6_4uart_sleep>;
- interrupts = <GIC_SPI 607 0>;
+ interrupts-extended = <&intc GIC_SPI 607 0>,
+ <&tlmm 48 0>;
status = "disabled";
+ qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
+ qcom,wakeup-byte = <0xFD>;
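+		/*
+		 * The tlmm entry in interrupts-extended makes the RX pad a
+		 * wakeup source; 0xFD is presumably the in-band wakeup byte
+		 * expected from the client (e.g. the BT SoC).
+		 */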
};
qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
@@ -43,8 +48,11 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se7_4uart_active>;
pinctrl-1 = <&qupv3_se7_4uart_sleep>;
- interrupts = <GIC_SPI 608 0>;
+ interrupts-extended = <&intc GIC_SPI 608 0>,
+ <&tlmm 96 0>;
status = "disabled";
+ qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
+ qcom,wakeup-byte = <0xFD>;
};
/* I2C */
@@ -336,6 +344,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se9_2uart_active>;
pinctrl-1 = <&qupv3_se9_2uart_sleep>;
+ qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
interrupts = <GIC_SPI 354 0>;
status = "disabled";
};
@@ -353,6 +362,7 @@
pinctrl-0 = <&qupv3_se10_2uart_active>;
pinctrl-1 = <&qupv3_se10_2uart_sleep>;
interrupts = <GIC_SPI 355 0>;
+ qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index ca325c0..f6c1d76 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -28,6 +28,32 @@
};
};
+&spmi_bus {
+ qcom,pm8998@1 {
+ /* PM8998 S12 + S11 + S10 = VDD_APC1 supply */
+ pm8998_s12: regulator@3500 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x3500 0x100>;
+ regulator-name = "pm8998_s12";
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+
+ /* PM8998 S13 = VDD_APC0 supply */
+ pm8998_s13: regulator@3800 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x3800 0x100>;
+ regulator-name = "pm8998_s13";
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <928000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+ };
+};
+
&soc {
/* CPR controller regulators */
apc0_cpr: cprh-ctrl@17dc0000 {
@@ -48,18 +74,22 @@
qcom,cpr-step-quot-init-min = <11>;
qcom,cpr-step-quot-init-max = <12>;
qcom,cpr-count-mode = <0>; /* All at once */
- qcom,cpr-count-repeat = <1>;
+ qcom,cpr-count-repeat = <20>;
qcom,cpr-down-error-step-limit = <1>;
qcom,cpr-up-error-step-limit = <1>;
qcom,cpr-corner-switch-delay-time = <1042>;
qcom,cpr-voltage-settling-time = <1760>;
+ qcom,cpr-reset-step-quot-loop-en;
qcom,voltage-step = <4000>;
qcom,voltage-base = <352000>;
qcom,cpr-saw-use-unit-mV;
qcom,saw-avs-ctrl = <0x101C031>;
- qcom,saw-avs-limit = <0x3A00000>;
+ qcom,saw-avs-limit = <0x3A003A0>;
+
+ qcom,cpr-enable;
+ qcom,cpr-hw-closed-loop;
qcom,cpr-panic-reg-addr-list =
<0x17dc3a84 0x17dc3a88 0x17840c18>;
@@ -68,10 +98,13 @@
"APSS_SILVER_CPRH_STATUS_1",
"SILVER_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <928000>;
+ vdd-supply = <&pm8998_s13>;
+
thread@1 {
qcom,cpr-thread-id = <1>;
qcom,cpr-consecutive-up = <0>;
- qcom,cpr-consecutive-down = <2>;
+ qcom,cpr-consecutive-down = <0>;
qcom,cpr-up-threshold = <2>;
qcom,cpr-down-threshold = <2>;
@@ -89,9 +122,9 @@
qcom,cpr-corner-fmax-map = <6 12 17>;
qcom,cpr-voltage-ceiling =
- <688000 688000 688000 688000 688000
- 688000 756000 756000 756000 812000
- 812000 812000 872000 872000 872000
+ <872000 872000 872000 872000 872000
+ 872000 872000 872000 872000 872000
+ 872000 872000 872000 872000 872000
872000 928000>;
qcom,cpr-voltage-floor =
@@ -114,19 +147,41 @@
1286400000 1363200000 1440000000
1516800000 1593600000>;
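+
+			/*
+			 * One 16-entry row of ring-oscillator scaling factors
+			 * per fuse corner (assumed CPRh binding semantics).
+			 */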
+ qcom,cpr-ro-scaling-factor =
+ <2594 2795 2576 2761 2469 2673 2198
+ 2553 3188 3255 3191 2962 3055 2984
+ 2043 2947>,
+ <2594 2795 2576 2761 2469 2673 2198
+ 2553 3188 3255 3191 2962 3055 2984
+ 2043 2947>,
+ <2259 2389 2387 2531 2294 2464 2218
+ 2476 2525 2855 2817 2836 2740 2490
+ 1950 2632>;
+
qcom,cpr-open-loop-voltage-fuse-adjustment =
<100000 100000 100000>;
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <100000 100000 100000>;
+
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <17>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
+ qcom,allow-aging-voltage-adjustment =
+ <0 1 1 1 1 1 1 1>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
- qcom,cpr-consecutive-down = <2>;
+ qcom,cpr-consecutive-down = <0>;
qcom,cpr-up-threshold = <2>;
qcom,cpr-down-threshold = <2>;
@@ -144,8 +199,8 @@
qcom,cpr-corner-fmax-map = <4 7 9>;
qcom,cpr-voltage-ceiling =
- <688000 688000 688000 688000 756000
- 812000 812000 872000 928000>;
+ <872000 872000 872000 872000 872000
+ 872000 872000 872000 928000>;
qcom,cpr-voltage-floor =
<568000 568000 568000 568000 568000
@@ -160,12 +215,34 @@
576000000 652800000 729600000
806400000 883200000 960000000>;
+ qcom,cpr-ro-scaling-factor =
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2439 2577 2552 2667 2461 2577 2394
+ 2536 2132 2307 2191 2903 2838 2912
+ 2501 2095>;
+
qcom,cpr-open-loop-voltage-fuse-adjustment =
<100000 100000 100000>;
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <100000 100000 100000>;
+
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <9>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
+ qcom,allow-aging-voltage-adjustment =
+ <0 1 1 1 1 1 1 1>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
@@ -188,11 +265,12 @@
qcom,cpr-step-quot-init-min = <9>;
qcom,cpr-step-quot-init-max = <14>;
qcom,cpr-count-mode = <0>; /* All at once */
- qcom,cpr-count-repeat = <1>;
+ qcom,cpr-count-repeat = <20>;
qcom,cpr-down-error-step-limit = <1>;
qcom,cpr-up-error-step-limit = <1>;
qcom,cpr-corner-switch-delay-time = <1042>;
qcom,cpr-voltage-settling-time = <1760>;
+ qcom,cpr-reset-step-quot-loop-en;
qcom,apm-threshold-voltage = <800000>;
qcom,apm-crossover-voltage = <880000>;
@@ -204,17 +282,23 @@
qcom,cpr-saw-use-unit-mV;
qcom,saw-avs-ctrl = <0x101C031>;
- qcom,saw-avs-limit = <0x4200000>;
+ qcom,saw-avs-limit = <0x4200420>;
+
+ qcom,cpr-enable;
+ qcom,cpr-hw-closed-loop;
qcom,cpr-panic-reg-addr-list =
<0x17db3a84 0x17830c18>;
qcom,cpr-panic-reg-name-list =
"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1056000>;
+ vdd-supply = <&pm8998_s12>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
- qcom,cpr-consecutive-down = <2>;
+ qcom,cpr-consecutive-down = <0>;
qcom,cpr-up-threshold = <2>;
qcom,cpr-down-threshold = <2>;
@@ -233,9 +317,9 @@
<10 17 22>;
qcom,cpr-voltage-ceiling =
- <756000 756000 756000 756000 756000
- 756000 756000 756000 756000 756000
- 812000 812000 828000 828000 828000
+ <828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
828000 828000 884000 952000 952000
1056000 1056000>;
@@ -263,12 +347,34 @@
1728000000 1804800000 1881600000
1958400000>;
+ qcom,cpr-ro-scaling-factor =
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2086 2208 2273 2408 2203 2327 2213
+ 2340 1755 2039 2049 2474 2437 2618
+ 2003 1675>;
+
qcom,cpr-open-loop-voltage-fuse-adjustment =
<100000 100000 100000>;
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <100000 100000 100000>;
+
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <22>;
+ qcom,cpr-aging-ro-scaling-factor = <1700>;
+ qcom,allow-aging-voltage-adjustment =
+ <0 1 1 1 1 1 1 1>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
@@ -391,7 +497,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l1: regulator-l1 {
regulator-name = "pm8998_l1";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -409,7 +515,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 30000>;
pm8998_l2: regulator-l2 {
regulator-name = "pm8998_l2";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -417,6 +523,7 @@
regulator-max-microvolt = <1200000>;
qcom,init-voltage = <1200000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ regulator-always-on;
};
};
@@ -427,7 +534,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l3: regulator-l3 {
regulator-name = "pm8998_l3";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -458,7 +565,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l5: regulator-l5 {
regulator-name = "pm8998_l5";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -476,7 +583,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l6: regulator-l6 {
regulator-name = "pm8998_l6";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -494,7 +601,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l7: regulator-l7 {
regulator-name = "pm8998_l7";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -512,12 +619,12 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l8: regulator-l8 {
regulator-name = "pm8998_l8";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-max-microvolt = <1248000>;
qcom,init-voltage = <1200000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -530,13 +637,13 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l9: regulator-l9 {
regulator-name = "pm8998_l9";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1808000>;
- regulator-max-microvolt = <2960000>;
- qcom,init-voltage = <1808000>;
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ qcom,init-voltage = <1704000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -548,13 +655,13 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l10: regulator-l10 {
regulator-name = "pm8998_l10";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1808000>;
- regulator-max-microvolt = <2960000>;
- qcom,init-voltage = <1808000>;
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ qcom,init-voltage = <1704000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -566,12 +673,12 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l11: regulator-l11 {
regulator-name = "pm8998_l11";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
+ regulator-max-microvolt = <1048000>;
qcom,init-voltage = <1000000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -584,7 +691,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l12: regulator-l12 {
regulator-name = "pm8998_l12";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -606,9 +713,9 @@
pm8998_l13: regulator-l13 {
regulator-name = "pm8998_l13";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1808000>;
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2960000>;
- qcom,init-voltage = <1808000>;
+ qcom,init-voltage = <1800000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -625,7 +732,7 @@
regulator-name = "pm8998_l14";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
qcom,init-voltage = <1800000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -638,7 +745,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l15: regulator-l15 {
regulator-name = "pm8998_l15";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -656,7 +763,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l16: regulator-l16 {
regulator-name = "pm8998_l16";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -674,7 +781,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l17: regulator-l17 {
regulator-name = "pm8998_l17";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -692,12 +799,12 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l18: regulator-l18 {
regulator-name = "pm8998_l18";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <2704000>;
- regulator-max-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
qcom,init-voltage = <2704000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -710,13 +817,13 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l19: regulator-l19 {
regulator-name = "pm8998_l19";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <3008000>;
- regulator-max-microvolt = <3008000>;
- qcom,init-voltage = <3008000>;
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ qcom,init-voltage = <2856000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -732,9 +839,9 @@
pm8998_l20: regulator-l20 {
regulator-name = "pm8998_l20";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <2960000>;
+ regulator-min-microvolt = <2704000>;
regulator-max-microvolt = <2960000>;
- qcom,init-voltage = <2960000>;
+ qcom,init-voltage = <2704000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -750,9 +857,9 @@
pm8998_l21: regulator-l21 {
regulator-name = "pm8998_l21";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <2960000>;
+ regulator-min-microvolt = <2704000>;
regulator-max-microvolt = <2960000>;
- qcom,init-voltage = <2960000>;
+ qcom,init-voltage = <2704000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -769,7 +876,7 @@
regulator-name = "pm8998_l22";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <2864000>;
- regulator-max-microvolt = <2864000>;
+ regulator-max-microvolt = <3312000>;
qcom,init-voltage = <2864000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -782,13 +889,13 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l23: regulator-l23 {
regulator-name = "pm8998_l23";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <3312000>;
+ regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3312000>;
- qcom,init-voltage = <3312000>;
+ qcom,init-voltage = <3000000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -819,13 +926,13 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l25: regulator-l25 {
regulator-name = "pm8998_l25";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <3104000>;
- regulator-max-microvolt = <3104000>;
- qcom,init-voltage = <3104000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ qcom,init-voltage = <3000000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -837,7 +944,7 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l26: regulator-l26 {
regulator-name = "pm8998_l26";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
@@ -868,13 +975,13 @@
qcom,supported-modes =
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
- qcom,mode-threshold-currents = <0 10000>;
+ qcom,mode-threshold-currents = <0 1>;
pm8998_l28: regulator-l28 {
regulator-name = "pm8998_l28";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <3008000>;
+ regulator-min-microvolt = <2856000>;
regulator-max-microvolt = <3008000>;
- qcom,init-voltage = <3008000>;
+ qcom,init-voltage = <2856000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -955,3 +1062,13 @@
};
};
};
+
+&pmi8998_charger {
+ smb2_vbus: qcom,smb2-vbus {
+ regulator-name = "smb2-vbus";
+ };
+
+ smb2_vconn: qcom,smb2-vconn {
+ regulator-name = "smb2-vconn";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index 0f31c0a..be41858 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -16,7 +16,6 @@
#include "sdm845.dtsi"
#include "sdm845-rumi.dtsi"
-#include "sdm845-usb.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 RUMI";
compatible = "qcom,sdm845-rumi", "qcom,sdm845", "qcom,rumi";
@@ -28,32 +27,3 @@
status = "disabled";
};
};
-
-&usb0 {
- /delete-property/ qcom,usb-dbm;
- qcom,charging-disabled;
- dwc3@a600000 {
- maximum-speed = "high-speed";
- };
-};
-
-&qusb_phy0 {
- reg = <0x088e2000 0x4>,
- <0x0a720000 0x9500>;
- reg-names = "qusb_phy_base",
- "emu_phy_base";
- qcom,emulation;
- qcom,emu-init-seq = <0x19 0x1404
- 0x20 0x1414
- 0x79 0x1410
- 0x00 0x1418
- 0x99 0x1404
- 0x04 0x1408
- 0xd9 0x1404>;
-
- qcom,emu-dcm-reset-seq = <0x5 0x14 /* 0x1 0x14 for E1.2 */
- 0x100000 0x20
- 0x0 0x20
- 0x1a0 0x20 /* 0x220 0x20 for E1.2 */
- 0x80 0x28>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 80f34bf..6991b17 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -44,6 +44,26 @@
status = "ok";
};
+&sdhc_2 {
+ vdd-supply = <&pm8998_l21>;
+ qcom,vdd-voltage-level = <2950000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm8998_l13>;
+ qcom,vdd-io-voltage-level = <1808000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 200000000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+ status = "ok";
+};
+
&soc {
qcom,icnss@18800000 {
compatible = "qcom,icnss";
@@ -147,3 +167,37 @@
spm-level = <0>;
status = "ok";
};
+
+&pmi8998_charger {
+ qcom,suspend-input;
+};
+
+&usb0 {
+ /delete-property/ qcom,usb-dbm;
+ extcon = <0>, <0>, <&eud>;
+ qcom,charging-disabled;
+ dwc3@a600000 {
+ maximum-speed = "high-speed";
+ };
+};
+
+&qusb_phy0 {
+ reg = <0x088e2000 0x4>,
+ <0x0a720000 0x9500>;
+ reg-names = "qusb_phy_base",
+ "emu_phy_base";
+ qcom,emulation;
+ qcom,emu-init-seq = <0x19 0x1404
+ 0x20 0x1414
+ 0x79 0x1410
+ 0x00 0x1418
+ 0x99 0x1404
+ 0x04 0x1408
+ 0xd9 0x1404>;
+
+ qcom,emu-dcm-reset-seq = <0x5 0x14 /* 0x1 0x14 for E1.2 */
+ 0x100000 0x20
+ 0x0 0x20
+ 0x1a0 0x20 /* 0x220 0x20 for E1.2 */
+ 0x80 0x28>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 2ff9b2f..efd8f45 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -23,6 +23,7 @@
#include "dsi-panel-sharp-1080p-cmd.dtsi"
#include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -106,8 +107,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -115,6 +116,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_sharp_4k_dsc_video>;
vddio-supply = <&pm8998_l14>;
@@ -129,8 +131,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -138,6 +140,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_sharp_4k_dsc_cmd>;
vddio-supply = <&pm8998_l14>;
@@ -152,8 +155,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -161,6 +164,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_sharp_1080_cmd>;
vddio-supply = <&pm8998_l14>;
@@ -175,8 +179,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -184,6 +188,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_dual_sharp_1080_120hz_cmd>;
vddio-supply = <&pm8998_l14>;
@@ -198,15 +203,15 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
- qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_dual_nt35597_truly_video>;
vddio-supply = <&pm8998_l14>;
@@ -221,8 +226,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -230,6 +235,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_dual_nt35597_truly_cmd>;
vddio-supply = <&pm8998_l14>;
@@ -244,8 +250,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -253,6 +259,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_nt35597_truly_dsc_cmd>;
vddio-supply = <&pm8998_l14>;
@@ -267,8 +274,8 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
clock-names = "src_byte_clk", "src_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
@@ -276,6 +283,7 @@
pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,dsi-panel = <&dsi_nt35597_truly_dsc_video>;
vddio-supply = <&pm8998_l14>;
@@ -291,47 +299,47 @@
};
&mdss_mdp {
- connectors = <&sde_wb>;
+ connectors = <&sde_wb &dsi_dual_nt35597_truly_video_display>;
};
&dsi_dual_nt35597_truly_video {
- qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
};
&dsi_dual_nt35597_truly_cmd {
- qcom,mdss-dsi-panel-timings = [00 1c 07 07 23 21 07 07 05 03 04];
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
};
&dsi_nt35597_truly_dsc_cmd {
- qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+ qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
};
&dsi_nt35597_truly_dsc_video {
- qcom,mdss-dsi-panel-timings = [00 15 05 05 20 1f 05 05 03 03 04];
+ qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
};
&dsi_sharp_4k_dsc_video {
- qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
- qcom,mdss-dsi-t-clk-post = <0x0a>;
- qcom,mdss-dsi-t-clk-pre = <0x1e>;
+ qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x0c>;
+ qcom,mdss-dsi-t-clk-pre = <0x27>;
};
&dsi_sharp_4k_dsc_cmd {
- qcom,mdss-dsi-panel-timings = [00 12 04 04 1e 1e 04 04 02 03 04];
- qcom,mdss-dsi-t-clk-post = <0x0a>;
- qcom,mdss-dsi-t-clk-pre = <0x1e>;
+ qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x0c>;
+ qcom,mdss-dsi-t-clk-pre = <0x27>;
};
&dsi_dual_sharp_1080_120hz_cmd {
- qcom,mdss-dsi-panel-timings = [00 24 09 09 26 24 09 09 06 03 04];
+ qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09 09 06 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0f>;
qcom,mdss-dsi-t-clk-pre = <0x36>;
};
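Note: every panel override in this hunk follows the same conversion, qcom,mdss-dsi-panel-timings (11 bytes) becomes qcom,mdss-dsi-panel-phy-timings with a twelfth byte appended, which presumably matches the timing-register layout of the 10nm DSI PHY (dsi-phy-v3.0) configured elsewhere in this patch. A minimal sketch of one such override, reusing values from this patch:

	&dsi_dual_nt35597_truly_cmd {
		/* 12 PHY timing bytes consumed by the dsi-phy-v3.0 driver */
		qcom,mdss-dsi-panel-phy-timings =
			[00 1c 07 07 23 21 07 07 05 03 04 00];
	};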
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
new file mode 100644
index 0000000..168f2a9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ mdss_dsi0_pll: qcom,mdss_dsi_pll@ae94a00 {
+ compatible = "qcom,mdss_dsi_pll_10nm";
+ label = "MDSS DSI 0 PLL";
+ cell-index = <0>;
+ #clock-cells = <1>;
+ reg = <0xae94a00 0x1e0>,
+ <0xae94400 0x800>,
+ <0xaf03000 0x8>;
+ reg-names = "pll_base", "phy_base", "gdsc_base";
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+ clock-rate = <0>;
+ gdsc-supply = <&mdss_core_gdsc>;
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+ };
+
+ mdss_dsi1_pll: qcom,mdss_dsi_pll@ae96a00 {
+ compatible = "qcom,mdss_dsi_pll_10nm";
+ label = "MDSS DSI 1 PLL";
+ cell-index = <1>;
+ #clock-cells = <1>;
+ reg = <0xae96a00 0x1e0>,
+ <0xae96400 0x800>,
+ <0xaf03000 0x8>;
+ reg-names = "pll_base", "phy_base", "gdsc_base";
+ clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+ clock-rate = <0>;
+ gdsc-supply = <&mdss_core_gdsc>;
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+ };
+};
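Note: each PLL node above is a clock provider (#clock-cells = <1>), so display consumers select a PLL output with a single index cell. The display hunks earlier in this patch do exactly that through the mux macros; a sketch of the consumer side, assuming BYTECLK_MUX_0_CLK and PCLK_MUX_0_CLK come from the DSI PLL clock bindings header:

	clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
		 <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
	clock-names = "src_byte_clk", "src_pixel_clk";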
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index ab4c253..af63d22 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -13,29 +13,32 @@
&soc {
mdss_mdp: qcom,mdss_mdp@ae00000 {
compatible = "qcom,sde-kms";
- reg = <0x0ae00000 0x81a24>,
+ reg = <0x0ae00000 0x81d40>,
<0x0aeb0000 0x2008>;
reg-names = "mdp_phys",
"vbif_phys";
- clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+ clocks =
+ <&clock_gcc GCC_DISP_AHB_CLK>,
+ <&clock_gcc GCC_DISP_AXI_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
<&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
- <&clock_dispcc DISP_CC_MDSS_MDP_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
- clock-names = "iface_clk", "bus_clk",
- "core_clk_src", "core_clk";
- clock-rate = <0 0 300000000 300000000>;
- clock-max-rate = <0 0 430000000 430000000>;
+ <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "gcc_iface", "gcc_bus", "iface_clk",
+ "bus_clk", "core_clk", "vsync_clk";
+ clock-rate = <0 0 0 0 300000000 19200000 0>;
+ clock-max-rate = <0 0 0 0 430000000 19200000 0>;
- mdp-vdd-supply = <&mdss_core_gdsc>;
+ sde-vdd-supply = <&mdss_core_gdsc>;
/* interrupt config */
interrupt-parent = <&intc>;
interrupts = <0 83 0>;
interrupt-controller;
#interrupt-cells = <1>;
- iommus = <&apps_smmu 0x880>, <&apps_smmu 0x888>,
- <&apps_smmu 0xc80>, <&apps_smmu 0xc88>;
+ iommus = <&apps_smmu 0x880 0x0>, <&apps_smmu 0x888 0x0>,
+ <&apps_smmu 0xc80 0x0>, <&apps_smmu 0xc88 0x0>;
#address-cells = <1>;
#size-cells = <0>;
@@ -94,8 +97,6 @@
1 5 9 13>;
qcom,sde-sspp-excl-rect = <1 1 1 1
1 1 1 1>;
- qcom,sde-sspp-smart-dma-priority = <5 6 7 8 1 2 3 4>;
- qcom,sde-smart-dma-rev = "smart_dma_v2";
qcom,sde-mixer-pair-mask = <2 1 6 0 0 3>;
@@ -115,6 +116,9 @@
qcom,sde-wb-linewidth = <4096>;
qcom,sde-mixer-blendstages = <0xb>;
qcom,sde-highest-bank-bit = <0x2>;
+ qcom,sde-ubwc-version = <0x200>;
+ qcom,sde-ubwc-static = <0x100>;
+ qcom,sde-ubwc-swizzle = <1>;
qcom,sde-panic-per-pipe;
qcom,sde-has-cdp;
qcom,sde-has-src-split;
@@ -142,7 +146,7 @@
qcom,platform-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "mdp-vdd";
+ qcom,supply-name = "sde-vdd";
qcom,supply-min-voltage = <0>;
qcom,supply-max-voltage = <0>;
qcom,supply-enable-load = <0>;
@@ -184,6 +188,11 @@
qcom,sde-rsc-version = <1>;
vdd-supply = <&mdss_core_gdsc>;
+ clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
+ clock-names = "iface_clk", "vsync_clk";
+ clock-rate = <0 0>;
+
qcom,sde-dram-channels = <2>;
/* data and reg bus scale settings */
@@ -200,7 +209,6 @@
};
mdss_rotator: qcom,mdss_rotator@ae00000 {
- status = "disabled";
compatible = "qcom,sde_rotator";
reg = <0x0ae00000 0xac000>,
<0x0aeb8000 0x3000>;
@@ -230,12 +238,10 @@
<&clock_gcc GCC_DISP_AHB_CLK>,
<&clock_gcc GCC_DISP_AXI_CLK>,
<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
- <&clock_dispcc DISP_CC_MDSS_ROT_CLK_SRC>,
<&clock_dispcc DISP_CC_MDSS_ROT_CLK>,
<&clock_dispcc DISP_CC_MDSS_AXI_CLK>;
clock-names = "gcc_iface", "gcc_bus",
- "iface_clk", "rot_core_clk",
- "rot_clk", "axi_clk";
+ "iface_clk", "rot_clk", "axi_clk";
interrupt-parent = <&mdss_mdp>;
interrupts = <2 0>;
@@ -252,168 +258,145 @@
smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
compatible = "qcom,smmu_sde_rot_unsec";
- iommus = <&apps_smmu 0x1090>;
- gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+ iommus = <&apps_smmu 0x1090 0x0>;
};
smmu_rot_sec: qcom,smmu_rot_sec_cb {
- status = "disabled";
compatible = "qcom,smmu_sde_rot_sec";
- iommus = <&apps_smmu 0x1091>;
- gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+ iommus = <&apps_smmu 0x1091 0x0>;
};
};
mdss_dsi0: qcom,mdss_dsi_ctrl0@ae94000 {
- compatible = "qcom,dsi-ctrl-hw-v2.0";
+ compatible = "qcom,dsi-ctrl-hw-v2.2";
label = "dsi-ctrl-0";
- status = "disabled";
cell-index = <0>;
- reg = <0xae94000 0x400>;
- reg-names = "dsi_ctrl";
+ reg = <0xae94000 0x400>,
+ <0xaf08000 0x4>;
+ reg-names = "dsi_ctrl", "disp_cc_base";
interrupt-parent = <&mdss_mdp>;
interrupts = <4 0>;
vdda-1p2-supply = <&pm8998_l26>;
- vdda-0p9-supply = <&pm8998_l1>;
clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>,
+ <&clock_dispcc DISP_CC_MDSS_ESC0_CLK>;
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
- "pixel_clk", "pixel_clk_rcg";
+ "pixel_clk", "pixel_clk_rcg",
+ "esc_clk";
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
+
qcom,ctrl-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-0p9";
- qcom,supply-min-voltage = <925000>;
- qcom,supply-max-voltage = <925000>;
- qcom,supply-enable-load = <17000>;
- qcom,supply-disable-load = <32>;
- };
-
- qcom,ctrl-supply-entry@1 {
- reg = <0>;
qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <18160>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
};
};
};
mdss_dsi1: qcom,mdss_dsi_ctrl1@ae96000 {
- compatible = "qcom,dsi-ctrl-hw-v2.0";
+ compatible = "qcom,dsi-ctrl-hw-v2.2";
label = "dsi-ctrl-1";
- status = "disabled";
cell-index = <1>;
- reg = <0xae96000 0x400>;
- reg-names = "dsi_ctrl";
+ reg = <0xae96000 0x400>,
+ <0xaf08000 0x4>;
+ reg-names = "dsi_ctrl", "disp_cc_base";
interrupt-parent = <&mdss_mdp>;
interrupts = <5 0>;
vdda-1p2-supply = <&pm8998_l26>;
- vdda-0p9-supply = <&pm8998_l1>;
- clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
- <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
- <&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
- <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ clocks = <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&clock_dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_PCLK1_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>,
+ <&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
- "pixel_clk", "pixel_clk_rcg";
+ "pixel_clk", "pixel_clk_rcg", "esc_clk";
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
qcom,ctrl-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-0p9";
- qcom,supply-min-voltage = <925000>;
- qcom,supply-max-voltage = <925000>;
- qcom,supply-enable-load = <17000>;
- qcom,supply-disable-load = <32>;
- };
-
- qcom,ctrl-supply-entry@1 {
- reg = <0>;
qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <18160>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
};
};
};
mdss_dsi_phy0: qcom,mdss_dsi_phy0@ae94400 {
compatible = "qcom,dsi-phy-v3.0";
- status = "disabled";
label = "dsi-phy-0";
cell-index = <0>;
reg = <0xae94400 0x7c0>;
reg-names = "dsi_phy";
gdsc-supply = <&mdss_core_gdsc>;
- vdda-1p2-supply = <&pm8998_l26>;
- qcom,platform-strength-ctrl = [ff 06
- ff 06
- ff 06
- ff 00];
- qcom,platform-regulator-settings = [1d
- 1d 1d 1d 1d];
- qcom,platform-lane-config = [00 00 10 0f
- 00 00 10 0f
- 00 00 10 0f
- 00 00 10 0f
- 00 00 10 8f];
-
+ vdda-0p9-supply = <&pm8998_l1>;
+ qcom,platform-strength-ctrl = [55 03
+ 55 03
+ 55 03
+ 55 03
+ 55 00];
+ qcom,platform-lane-config = [00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 80];
+ qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
qcom,phy-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
qcom,phy-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <2500>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
};
};
};
	mdss_dsi_phy1: qcom,mdss_dsi_phy1@ae96400 {
compatible = "qcom,dsi-phy-v3.0";
- status = "disabled";
label = "dsi-phy-1";
cell-index = <1>;
reg = <0xae96400 0x7c0>;
reg-names = "dsi_phy";
gdsc-supply = <&mdss_core_gdsc>;
- vdda-1p2-supply = <&pm8998_l26>;
- qcom,platform-strength-ctrl = [ff 06
- ff 06
- ff 06
- ff 00];
- qcom,platform-regulator-settings = [1d
- 1d 1d 1d 1d];
- qcom,platform-lane-config = [00 00 10 0f
- 00 00 10 0f
- 00 00 10 0f
- 00 00 10 0f
- 00 00 10 8f];
-
+ vdda-0p9-supply = <&pm8998_l1>;
+ qcom,platform-strength-ctrl = [55 03
+ 55 03
+ 55 03
+ 55 03
+ 55 00];
+ qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+ qcom,platform-lane-config = [00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 80];
qcom,phy-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
qcom,phy-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1250000>;
- qcom,supply-max-voltage = <1250000>;
- qcom,supply-enable-load = <2500>;
- qcom,supply-disable-load = <1>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
};
};
};
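Note: throughout this patch, iommus specifiers grow from one cell to two, e.g. <&apps_smmu 0x880> becomes <&apps_smmu 0x880 0x0>. This tracks an apps_smmu binding with #iommu-cells = <2>, where (on the usual assumption for QTI SMMU bindings) the second cell is an SID mask, so 0x0 matches exactly one stream ID:

	iommus = <&apps_smmu 0x880 0x0>,	/* SID 0x880 only */
		 <&apps_smmu 0xc80 0x0>;	/* SID 0xc80 only */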
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
index 0f94d812..a03148d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,3 +9,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+&pmi8998_charger {
+ qcom,suspend-input;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
index a75b6a7..7b8b425 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -307,4 +307,27 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ /* ipa - outbound entry to mss */
+ smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "ipa";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ipa - inbound entry from mss */
+ smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "ipa";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
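Note: the two new IPA entries follow the established smp2p-gpio pattern, one outbound and one inbound virtual GPIO/interrupt controller keyed by (entry-name, remote-pid), with qcom,is-inbound marking the receive side. The IPA node later in this patch consumes the outbound entry through a standard two-cell gpios specifier:

	qcom,smp2pgpio_map_ipa_1_out {
		compatible = "qcom,smp2pgpio-map-ipa-1-out";
		gpios = <&smp2pgpio_ipa_1_out 0 0>;	/* bit 0, no flags */
	};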
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 7f090ad..aac63ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -12,6 +12,8 @@
*/
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+
&soc {
/* Primary USB port related DWC3 controller */
usb0: ssusb@a600000 {
@@ -19,7 +21,6 @@
reg = <0x0a600000 0xf8c00>,
<0x088ee000 0x400>;
reg-names = "core_base", "ahb2phy_base";
- iommus = <&apps_smmu 0x740>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -31,7 +32,7 @@
qcom,usb-dbm = <&dbm_1p5>;
qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
qcom,num-gsi-evt-buffs = <0x3>;
- extcon = <0>, <0>, <&eud>;
+ extcon = <&pmi8998_pdphy>, <&pmi8998_pdphy>, <&eud>;
clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
<&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
@@ -50,18 +51,62 @@
resets = <&clock_gcc GCC_USB30_PRIM_BCR>;
reset-names = "core_reset";
+ qcom,msm-bus,name = "usb0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <3>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_IPA_CFG 0 0>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 0>,
+ <MSM_BUS_MASTER_USB3
+ MSM_BUS_SLAVE_EBI_CH0 240000 800000>,
+ <MSM_BUS_MASTER_USB3
+ MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 80000>;
+
dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0x0a600000 0xcd00>;
interrupt-parent = <&intc>;
interrupts = <0 133 0>;
- usb-phy = <&qusb_phy0>, <&usb_nop_phy>;
+ usb-phy = <&qusb_phy0>, <&usb_qmp_dp_phy>;
tx-fifo-resize;
linux,sysdev_is_parent;
snps,disable-clk-gating;
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
- maximum-speed = "high-speed";
+ };
+
+ qcom,usbbam@a704000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xa704000 0x17000>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 132 0>;
+
+ qcom,bam-type = <0>;
+ qcom,usb-bam-fifo-baseaddr = <0x146bb000>;
+ qcom,usb-bam-num-pipes = <8>;
+ qcom,ignore-core-reset-ack;
+ qcom,disable-clk-gating;
+ qcom,usb-bam-override-threshold = <0x4001>;
+ qcom,usb-bam-max-mbps-highspeed = <400>;
+ qcom,usb-bam-max-mbps-superspeed = <3600>;
+ qcom,reset-bam-on-connect;
+
+ qcom,pipe0 {
+ label = "ssusb-qdss-in-0";
+ qcom,usb-bam-mem-type = <2>;
+ qcom,dir = <1>;
+ qcom,pipe-num = <0>;
+ qcom,peer-bam = <0>;
+ qcom,peer-bam-physical-address = <0x6064000>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0x0>;
+ qcom,data-fifo-size = <0x1800>;
+ qcom,descriptor-fifo-offset = <0x1800>;
+ qcom,descriptor-fifo-size = <0x800>;
+ };
};
};
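Note: the qcom,msm-bus,vectors-KBps table added to usb0 is read as num-cases rows of num-paths four-cell entries, <master slave average-KBps peak-KBps>. With num-cases = <2> and num-paths = <3>, the first three quadruples form the no-vote (suspend) case and the last three the active case. A sketch of the minimal two-case, one-path shape, using the same bus IDs as above:

	qcom,msm-bus,num-cases = <2>;
	qcom,msm-bus,num-paths = <1>;
	qcom,msm-bus,vectors-KBps =
		<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 0 0>,
		<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_EBI_CH0 240000 800000>;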
@@ -76,16 +121,26 @@
vdda33-supply = <&pm8998_l24>;
qcom,vdd-voltage-level = <0 880000 880000>;
qcom,qusb-phy-init-seq =
- /* <value reg_offset> */
- <0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
- 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
- 0x80 0x2c /* PLL_CMODE */
- 0x0a 0x184 /* PLL_LOCK_DELAY */
- 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
- 0xa5 0x240 /* TUNE1 */
- 0x09 0x244 /* TUNE2 */
- 0x00 0x220 /* IMP_CTRL1 */
- 0x58 0x224>; /* IMP_CTRL2 */
+ /* <value reg_offset> */
+ <0x23 0x210 /* PWR_CTRL1 */
+ 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+ 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+ 0x80 0x2c /* PLL_CMODE */
+ 0x0a 0x184 /* PLL_LOCK_DELAY */
+ 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
+ 0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+ 0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x21 0x214 /* PWR_CTRL2 */
+ 0x00 0x220 /* IMP_CTRL1 */
+ 0x58 0x224 /* IMP_CTRL2 */
+ 0x32 0x240 /* TUNE1 */
+ 0x29 0x244 /* TUNE2 */
+ 0xca 0x248 /* TUNE3 */
+ 0x04 0x24c /* TUNE4 */
+ 0x03 0x250 /* TUNE5 */
+ 0x00 0x23c /* CHG_CTRL2 */
+ 0x22 0x210>; /* PWR_CTRL1 */
+
		phy_type = "utmi";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
@@ -95,12 +150,166 @@
reset-names = "phy_reset";
};
- dbm_1p5: dbm@a8f8000 {
+ /* Primary USB port related QMP USB DP Combo PHY */
+ usb_qmp_dp_phy: ssphy@88e8000 {
+ compatible = "qcom,usb-ssphy-qmp-dp-combo";
+ reg = <0x88e8000 0x3000>;
+ reg-names = "qmp_phy_base";
+
+ vdd-supply = <&pm8998_l1>;
+ core-supply = <&pm8998_l26>;
+ qcom,vdd-voltage-level = <0 880000 880000>;
+ qcom,vbus-valid-override;
+ qcom,qmp-phy-init-seq =
+ /* <reg_offset, value, delay> */
+ <0x1048 0x07 0x00 /* COM_PLL_IVCO */
+ 0x1080 0x14 0x00 /* COM_SYSCLK_EN_SEL */
+ 0x1034 0x08 0x00 /* COM_BIAS_EN_CLKBUFLR_EN */
+ 0x1138 0x30 0x00 /* COM_CLK_SELECT */
+ 0x103c 0x02 0x00 /* COM_SYS_CLK_CTRL */
+ 0x108c 0x08 0x00 /* COM_RESETSM_CNTRL2 */
+ 0x115c 0x16 0x00 /* COM_CMN_CONFIG */
+ 0x1164 0x01 0x00 /* COM_SVS_MODE_CLK_SEL */
+ 0x113c 0x80 0x00 /* COM_HSCLK_SEL */
+ 0x10b0 0x82 0x00 /* COM_DEC_START_MODE0 */
+ 0x10b8 0xab 0x00 /* COM_DIV_FRAC_START1_MODE0 */
+ 0x10bc 0xea 0x00 /* COM_DIV_FRAC_START2_MODE0 */
+ 0x10c0 0x02 0x00 /* COM_DIV_FRAC_START3_MODE0 */
+ 0x1060 0x06 0x00 /* COM_CP_CTRL_MODE0 */
+ 0x1068 0x16 0x00 /* COM_PLL_RCTRL_MODE0 */
+ 0x1070 0x36 0x00 /* COM_PLL_CCTRL_MODE0 */
+ 0x10dc 0x00 0x00 /* COM_INTEGLOOP_GAIN1_MODE0 */
+ 0x10d8 0x3f 0x00 /* COM_INTEGLOOP_GAIN0_MODE0 */
+ 0x10f8 0x01 0x00 /* COM_VCO_TUNE2_MODE0 */
+ 0x10f4 0xc9 0x00 /* COM_VCO_TUNE1_MODE0 */
+ 0x1148 0x0a 0x00 /* COM_CORECLK_DIV_MODE0 */
+ 0x10a0 0x00 0x00 /* COM_LOCK_CMP3_MODE0 */
+ 0x109c 0x34 0x00 /* COM_LOCK_CMP2_MODE0 */
+ 0x1098 0x15 0x00 /* COM_LOCK_CMP1_MODE0 */
+ 0x1090 0x04 0x00 /* COM_LOCK_CMP_EN */
+ 0x1154 0x00 0x00 /* COM_CORE_CLK_EN */
+ 0x1094 0x00 0x00 /* COM_LOCK_CMP_CFG */
+ 0x10f0 0x00 0x00 /* COM_VCO_TUNE_MAP */
+ 0x1040 0x0a 0x00 /* COM_SYSCLK_BUF_ENABLE */
+ 0x1010 0x01 0x00 /* COM_SSC_EN_CENTER */
+ 0x101c 0x31 0x00 /* COM_SSC_PER1 */
+ 0x1020 0x01 0x00 /* COM_SSC_PER2 */
+ 0x1014 0x00 0x00 /* COM_SSC_ADJ_PER1 */
+ 0x1018 0x00 0x00 /* COM_SSC_ADJ_PER2 */
+ 0x1024 0x85 0x00 /* COM_SSC_STEP_SIZE1 */
+ 0x1028 0x07 0x00 /* COM_SSC_STEP_SIZE2 */
+ 0x1430 0x0b 0x00 /* RXA_UCDR_FASTLOCK_FO_GAIN */
+ 0x14d4 0x0f 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL2 */
+ 0x14d8 0x4e 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL3 */
+ 0x14dc 0x18 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL4 */
+ 0x14f8 0x77 0x00 /* RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+ 0x14fc 0x80 0x00 /* RXA_RX_OFFSET_ADAPTOR_CNTRL2 */
+ 0x1504 0x03 0x00 /* RXA_SIGDET_CNTRL */
+ 0x150c 0x16 0x00 /* RXA_SIGDET_DEGLITCH_CNTRL */
+ 0x1830 0x0b 0x00 /* RXB_UCDR_FASTLOCK_FO_GAIN */
+ 0x18d4 0x0f 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL2 */
+ 0x18d8 0x4e 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL3 */
+ 0x18dc 0x18 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL4 */
+ 0x18f8 0x77 0x00 /* RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+ 0x18fc 0x80 0x00 /* RXB_RX_OFFSET_ADAPTOR_CNTRL2 */
+ 0x1904 0x03 0x00 /* RXB_SIGDET_CNTRL */
+ 0x190c 0x16 0x00 /* RXB_SIGDET_DEGLITCH_CNTRL */
+ 0x1260 0x10 0x00 /* TXA_HIGHZ_DRVR_EN */
+ 0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */
+ 0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */
+ 0x1248 0x09 0x00 /* TXA_RES_CODE_LANE_OFFSET_RX */
+ 0x1244 0x0d 0x00 /* TXA_RES_CODE_LANE_OFFSET_TX */
+ 0x1660 0x10 0x00 /* TXB_HIGHZ_DRVR_EN */
+ 0x16a4 0x12 0x00 /* TXB_RCV_DETECT_LVL_2 */
+ 0x168c 0x16 0x00 /* TXB_LANE_MODE_1 */
+ 0x1648 0x09 0x00 /* TXB_RES_CODE_LANE_OFFSET_RX */
+ 0x1644 0x0d 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
+ 0x1cc8 0x83 0x00 /* PCS_FLL_CNTRL2 */
+ 0x1ccc 0x09 0x00 /* PCS_FLL_CNT_VAL_L */
+ 0x1cd0 0xa2 0x00 /* PCS_FLL_CNT_VAL_H_TOL */
+ 0x1cd4 0x40 0x00 /* PCS_FLL_MAN_CODE */
+ 0x1cc4 0x02 0x00 /* PCS_FLL_CNTRL1 */
+ 0x1c80 0xd1 0x00 /* PCS_LOCK_DETECT_CONFIG1 */
+ 0x1c84 0x1f 0x00 /* PCS_LOCK_DETECT_CONFIG2 */
+ 0x1c88 0x47 0x00 /* PCS_LOCK_DETECT_CONFIG3 */
+ 0x1c64 0x1b 0x00 /* PCS_POWER_STATE_CONFIG2 */
+ 0x1434 0x75 0x00 /* RXA_UCDR_SO_SATURATION */
+ 0x1834 0x75 0x00 /* RXB_UCDR_SO_SATURATION */
+ 0x1dd8 0xba 0x00 /* PCS_RX_SIGDET_LVL */
+ 0x1c0c 0x9f 0x00 /* PCS_TXMGN_V0 */
+ 0x1c10 0x9f 0x00 /* PCS_TXMGN_V1 */
+ 0x1c14 0xb7 0x00 /* PCS_TXMGN_V2 */
+ 0x1c18 0x4e 0x00 /* PCS_TXMGN_V3 */
+ 0x1c1c 0x65 0x00 /* PCS_TXMGN_V4 */
+ 0x1c20 0x6b 0x00 /* PCS_TXMGN_LS */
+ 0x1c24 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V0 */
+ 0x1c28 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V0 */
+ 0x1c2c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V1 */
+ 0x1c30 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V1 */
+ 0x1c34 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V2 */
+ 0x1c38 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V2 */
+ 0x1c3c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V3 */
+ 0x1c40 0x1d 0x00 /* PCS_TXDEEMPH_M3P5DB_V3 */
+ 0x1c44 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V4 */
+ 0x1c48 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V4 */
+ 0x1c4c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_LS */
+ 0x1c50 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_LS */
+ 0x1c5c 0x02 0x00 /* PCS_RATE_SLEW_CNTRL */
+ 0x1ca0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
+ 0x1c8c 0x44 0x00 /* PCS_TSYNC_RSYNC_TIME */
+ 0x1c70 0xe7 0x00 /* PCS_RCVR_DTCT_DLY_P1U2_L */
+ 0x1c74 0x03 0x00 /* PCS_RCVR_DTCT_DLY_P1U2_H */
+ 0x1c78 0x40 0x00 /* PCS_RCVR_DTCT_DLY_U3_L */
+ 0x1c7c 0x00 0x00 /* PCS_RCVR_DTCT_DLY_U3_H */
+ 0x1cb8 0x75 0x00 /* PCS_RXEQTRAINING_WAIT_TIME */
+ 0x1cb0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
+ 0x1cbc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+ 0xffffffff 0xffffffff 0x00>;
+
+ qcom,qmp-phy-reg-offset =
+ <0x1d74 /* USB3_DP_PCS_PCS_STATUS */
+ 0x1cd8 /* USB3_DP_PCS_AUTONOMOUS_MODE_CTRL */
+ 0x1cdc /* USB3_DP_PCS_LFPS_RXTERM_IRQ_CLEAR */
+ 0x1c04 /* USB3_DP_PCS_POWER_DOWN_CONTROL */
+ 0x1c00 /* USB3_DP_PCS_SW_RESET */
+ 0x1c08 /* USB3_DP_PCS_START_CONTROL */
+ 0x2a18 /* USB3_DP_DP_PHY_PD_CTL */
+ 0x0008 /* USB3_DP_COM_POWER_DOWN_CTRL */
+ 0x0004 /* USB3_DP_COM_SW_RESET */
+ 0x001c /* USB3_DP_COM_RESET_OVRD_CTRL */
+ 0x0000 /* USB3_DP_COM_PHY_MODE_CTRL */
+ 0x0010 /* USB3_DP_COM_TYPEC_CTRL */
+ 0x000c /* USB3_DP_COM_SWI_CTRL */
+ 0x1a0c>; /* USB3_DP_PCS_MISC_CLAMP_ENABLE */
+
+ clocks = <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+
+ clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+ "ref_clk", "com_aux_clk", "cfg_ahb_clk";
+
+ resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_BCR>,
+ <&clock_gcc GCC_USB3_PHY_PRIM_BCR>;
+ reset-names = "global_phy_reset", "phy_reset";
+ };
+
+ dbm_1p5: dbm@a6f8000 {
compatible = "qcom,usb-dbm-1p5";
- reg = <0xa8f8000 0x400>;
+ reg = <0xa6f8000 0x400>;
qcom,reset-ep-after-lpm-resume;
};
+ usb_audio_qmi_dev {
+ compatible = "qcom,usb-audio-qmi-dev";
+ iommus = <&apps_smmu 0x182c>;
+ qcom,usb-audio-stream-id = <0xc>;
+ qcom,usb-audio-intr-num = <2>;
+ };
+
usb_nop_phy: usb_nop_phy {
compatible = "usb-nop-xceiv";
};
@@ -111,7 +320,6 @@
reg = <0x0a800000 0xf8c00>,
<0x088ee000 0x400>;
reg-names = "core_base", "ahb2phy_base";
- iommus = <&apps_smmu 0x760>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -140,6 +348,16 @@
reset-names = "core_reset";
status = "disabled";
+ qcom,msm-bus,name = "usb1";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 0>,
+ <MSM_BUS_MASTER_USB3_1
+ MSM_BUS_SLAVE_EBI_CH0 240000 800000>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 80000>;
+
		dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0x0a800000 0xcd00>;
@@ -165,16 +383,26 @@
vdda33-supply = <&pm8998_l24>;
qcom,vdd-voltage-level = <0 880000 880000>;
qcom,qusb-phy-init-seq =
- /* <value reg_offset> */
- <0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
- 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
- 0x80 0x2c /* PLL_CMODE */
- 0x0a 0x184 /* PLL_LOCK_DELAY */
- 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
- 0xa5 0x240 /* TUNE1 */
- 0x09 0x244 /* TUNE2 */
- 0x00 0x220 /* IMP_CTRL1 */
- 0x58 0x224>; /* IMP_CTRL2 */
+ /* <value reg_offset> */
+ <0x23 0x210 /* PWR_CTRL1 */
+ 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+ 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+ 0x80 0x2c /* PLL_CMODE */
+ 0x0a 0x184 /* PLL_LOCK_DELAY */
+ 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
+ 0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+ 0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x21 0x214 /* PWR_CTRL2 */
+ 0x00 0x220 /* IMP_CTRL1 */
+ 0x58 0x224 /* IMP_CTRL2 */
+ 0x32 0x240 /* TUNE1 */
+ 0x29 0x244 /* TUNE2 */
+ 0xca 0x248 /* TUNE3 */
+ 0x04 0x24c /* TUNE4 */
+ 0x03 0x250 /* TUNE5 */
+ 0x00 0x23c /* CHG_CTRL2 */
+ 0x22 0x210>; /* PWR_CTRL1 */
+
		phy_type = "utmi";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
@@ -309,10 +537,11 @@
clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>,
<&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>,
<&clock_rpmh RPMH_CXO_CLK>,
- <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+ <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
- "ref_clk";
+ "ref_clk", "cfg_ahb_clk";
resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>,
<&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>;
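Note: the two PHY programming formats in this file differ. The QUSB sequences are <value reg_offset> pairs, while the QMP combo-PHY sequence is <reg_offset value delay> triples terminated by an all-ones sentinel row; qcom,qmp-phy-reg-offset then locates the status and control registers inside the same register block. Shape of a minimal (hypothetical, two-entry) sequence:

	qcom,qmp-phy-init-seq =
		/* <reg_offset value delay> */
		<0x1048 0x07 0x00	/* COM_PLL_IVCO */
		 0xffffffff 0xffffffff 0x00>;	/* terminator */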
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 4fdf383..efd8c32 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -16,3 +16,7 @@
model = "Qualcomm Technologies, Inc. SDM845 V2";
qcom,msm-id = <321 0x20000>;
};
+
+&spmi_debug_bus {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index af88108..6f4b4ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -17,11 +17,13 @@
&soc {
msm_vidc: qcom,vidc@aa00000 {
compatible = "qcom,msm-vidc";
- status = "disabled";
+ status = "ok";
reg = <0xaa00000 0x200000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
qcom,hfi = "venus";
qcom,firmware-name = "venus";
+ qcom,never-unload-fw;
+ qcom,sw-power-collapse;
qcom,max-secure-instances = <5>;
qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
@@ -32,17 +34,23 @@
/* Clocks */
clock-names = "core_clk", "iface_clk", "bus_clk",
- "core0_clk", "core1_clk";
+ "core0_clk", "core0_bus_clk",
+ "core1_clk", "core1_bus_clk";
clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
<&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
<&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
<&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
- <&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
+ <&clock_videocc VIDEO_CC_VCODEC0_AXI_CLK>,
+ <&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>,
+ <&clock_videocc VIDEO_CC_VCODEC1_AXI_CLK>;
qcom,proxy-clock-names = "core_clk", "iface_clk",
- "bus_clk", "core0_clk", "core1_clk";
- qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0>;
+ "bus_clk", "core0_clk", "core0_bus_clk",
+ "core1_clk", "core1_bus_clk";
+ qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>;
qcom,allowed-clock-rates = <200000000 320000000 380000000
444000000 533000000>;
+ qcom,max-hq-mbs-per-frame = <8160>;
+ qcom,max-hq-frames-per-sec = <60>;
qcom,clock-freq-tbl {
qcom,profile-enc {
qcom,codec-mask = <0x55555555>;
@@ -89,9 +97,9 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_ns";
iommus =
- <&apps_smmu 0x10a0>,
- <&apps_smmu 0x10a8>,
- <&apps_smmu 0x10b0>;
+ <&apps_smmu 0x10a0 0x0>,
+ <&apps_smmu 0x10a8 0x0>,
+ <&apps_smmu 0x10b0 0x0>;
buffer-types = <0xfff>;
virtual-addr-pool = <0x70800000 0x6f800000>;
};
@@ -100,10 +108,10 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_sec_bitstream";
iommus =
- <&apps_smmu 0x10a1>,
- <&apps_smmu 0x10a9>,
- <&apps_smmu 0x10a5>,
- <&apps_smmu 0x10ad>;
+ <&apps_smmu 0x10a1 0x0>,
+ <&apps_smmu 0x10a9 0x0>,
+ <&apps_smmu 0x10a5 0x0>,
+ <&apps_smmu 0x10ad 0x0>;
buffer-types = <0x241>;
virtual-addr-pool = <0x4b000000 0x25800000>;
qcom,secure-context-bank;
@@ -113,8 +121,8 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_sec_pixel";
iommus =
- <&apps_smmu 0x10a3>,
- <&apps_smmu 0x10ab>;
+ <&apps_smmu 0x10a3 0x0>,
+ <&apps_smmu 0x10ab 0x0>;
buffer-types = <0x106>;
virtual-addr-pool = <0x25800000 0x25800000>;
qcom,secure-context-bank;
@@ -124,9 +132,9 @@
compatible = "qcom,msm-vidc,context-bank";
label = "venus_sec_non_pixel";
iommus =
- <&apps_smmu 0x10a4>,
- <&apps_smmu 0x10ac>,
- <&apps_smmu 0x10b4>;
+ <&apps_smmu 0x10a4 0x0>,
+ <&apps_smmu 0x10ac 0x0>,
+ <&apps_smmu 0x10b4 0x0>;
buffer-types = <0x480>;
virtual-addr-pool = <0x1000000 0x24800000>;
qcom,secure-context-bank;
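Note: qcom,clock-configs carries one flag word per entry in clock-names, in order. In the updated table the 0x1 flags line up with core_clk, core0_clk and core1_clk, which suggests (an assumption about the msm-vidc binding, not stated in this patch) that 0x1 marks a clock as DVFS-scalable while the AHB/AXI clocks stay at fixed rates:

	clock-names = "core_clk", "iface_clk", "bus_clk",
		"core0_clk", "core0_bus_clk",
		"core1_clk", "core1_bus_clk";
	/* one word per name above; 0x1 = scalable (assumed) */
	qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>;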
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 18f0186..72c2efa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -18,9 +18,12 @@
#include <dt-bindings/clock/qcom,videocc-sdm845.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/qcom,tcs-mbox.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
model = "Qualcomm Technologies, Inc. SDM845";
@@ -31,6 +34,8 @@
aliases {
ufshc1 = &ufshc_mem; /* Embedded UFS slot */
ufshc2 = &ufshc_card; /* Removable UFS slot */
+ pci-domain0 = &pcie0;
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
};
cpus {
@@ -45,6 +50,8 @@
efficiency = <1024>;
cache-size = <0x8000>;
cpu-release-addr = <0x0 0x90000000>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
+ #cooling-cells = <2>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
compatible = "arm,arch-cache";
@@ -68,7 +75,7 @@
};
};
- CPU1: cpu@1 {
+ CPU1: cpu@100 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x100>;
@@ -76,24 +83,26 @@
efficiency = <1024>;
cache-size = <0x8000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_1: l1-icache {
+ L1_I_100: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x9000>;
};
- L1_D_1: l1-dcache {
+ L1_D_100: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x9000>;
};
};
- CPU2: cpu@2 {
+ CPU2: cpu@200 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x200>;
@@ -101,24 +110,26 @@
efficiency = <1024>;
cache-size = <0x8000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_2>;
- L2_2: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_2: l1-icache {
+ L1_I_200: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x9000>;
};
- L1_D_2: l1-dcache {
+ L1_D_200: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x9000>;
};
};
- CPU3: cpu@3 {
+ CPU3: cpu@300 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x300>;
@@ -126,24 +137,26 @@
efficiency = <1024>;
cache-size = <0x8000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_3>;
- L2_3: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x20000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_3: l1-icache {
+ L1_I_300: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x9000>;
};
- L1_D_3: l1-dcache {
+ L1_D_300: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x9000>;
};
};
- CPU4: cpu@100 {
+ CPU4: cpu@400 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x400>;
@@ -151,24 +164,26 @@
efficiency = <1740>;
cache-size = <0x20000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_4>;
- L2_4: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_100: l1-icache {
+ L1_I_400: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
- L1_D_100: l1-dcache {
+ L1_D_400: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
};
- CPU5: cpu@101 {
+ CPU5: cpu@500 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x500>;
@@ -176,24 +191,26 @@
efficiency = <1740>;
cache-size = <0x20000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_5>;
- L2_5: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_101: l1-icache {
+ L1_I_500: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
- L1_D_101: l1-dcache {
+ L1_D_500: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
};
- CPU6: cpu@102 {
+ CPU6: cpu@600 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x600>;
@@ -201,24 +218,26 @@
efficiency = <1740>;
cache-size = <0x20000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_6>;
- L2_6: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_102: l1-icache {
+ L1_I_600: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
- L1_D_102: l1-dcache {
+ L1_D_600: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
};
- CPU7: cpu@103 {
+ CPU7: cpu@700 {
device_type = "cpu";
compatible = "arm,armv8";
reg = <0x0 0x700>;
@@ -226,18 +245,20 @@
efficiency = <1740>;
cache-size = <0x20000>;
cpu-release-addr = <0x0 0x90000000>;
- next-level-cache = <&L2_7>;
- L2_7: l2-cache {
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
+ #cooling-cells = <2>;
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
compatible = "arm,arch-cache";
cache-size = <0x40000>;
cache-level = <2>;
next-level-cache = <&L3_0>;
};
- L1_I_103: l1-icache {
+ L1_I_700: l1-icache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
- L1_D_103: l1-dcache {
+ L1_D_700: l1-dcache {
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
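Note: the CPU nodes are renamed so each unit address matches the value in reg (cpu@100 for reg = <0x0 0x100>, and so on), and the per-CPU cache labels are renumbered to match. Every core also gains #cooling-cells = <2>, letting it serve as a passive cooling device; a thermal cooling map (the map and trip below are hypothetical, not part of this patch, and THERMAL_NO_LIMIT comes from the dt-bindings/thermal/thermal.h include added above) would bind it as:

	cooling-maps {
		map0 {
			trip = <&cpu_alert>;	/* hypothetical trip point */
			cooling-device =
				<&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
		};
	};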
@@ -308,37 +329,61 @@
pil_modem_mem: modem_region@8b000000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x8b000000 0 0x6e00000>;
+ reg = <0 0x8b000000 0 0x7300000>;
};
- pil_video_mem: pil_video_region@91e00000 {
+ pil_video_mem: pil_video_region@92300000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x91e00000 0 0x500000>;
+ reg = <0 0x92300000 0 0x500000>;
};
- pil_cdsp_mem: cdsp_regions@92300000 {
+ pil_cdsp_mem: cdsp_regions@92800000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x92300000 0 0x800000>;
+ reg = <0 0x92800000 0 0x800000>;
};
- pil_adsp_mem: pil_adsp_region@92b00000 {
+ pil_adsp_mem: pil_adsp_region@93000000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x92b00000 0 0x1a00000>;
+ reg = <0 0x93000000 0 0x1a00000>;
};
- pil_slpi_mem: pil_slpi_region@94500000 {
+	pil_mba_mem: pil_mba_region@94a00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x94500000 0 0xf00000>;
+ reg = <0 0x94a00000 0 0x200000>;
};
- pil_spss_mem: spss_region@95400000 {
+ pil_slpi_mem: pil_slpi_region@94c00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x95400000 0 0x700000>;
+ reg = <0 0x94c00000 0 0x1400000>;
+ };
+
+ pil_ipa_fw_mem: pil_ipa_fw_region@96000000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96000000 0 0x10000>;
+ };
+
+ pil_ipa_gsi_mem: pil_ipa_gsi_region@96010000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96010000 0 0x5000>;
+ };
+
+ pil_gpu_mem: pil_gpu_region@96015000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96015000 0 0x1000>;
+ };
+
+ pil_spss_mem: spss_region@96100000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96100000 0 0x100000>;
};
adsp_mem: adsp_region {
@@ -346,7 +391,7 @@
alloc-ranges = <0 0x00000000 0 0xffffffff>;
reusable;
alignment = <0 0x400000>;
- size = <0 0x800000>;
+ size = <0 0xc00000>;
};
qseecom_mem: qseecom_region {
@@ -386,6 +431,7 @@
};
#include "msm-gdsc-sdm845.dtsi"
+#include "sdm845-sde-pll.dtsi"
#include "sdm845-sde.dtsi"
#include "sdm845-sde-display.dtsi"
#include "sdm845-qupv3.dtsi"
@@ -501,6 +547,58 @@
cell-index = <0>;
};
+ spmi_debug_bus: qcom,spmi-debug@6b22000 {
+ compatible = "qcom,spmi-pmic-arb-debug";
+ reg = <0x6b22000 0x60>, <0x7820A8 4>;
+ reg-names = "core", "fuse";
+ qcom,fuse-disable-bit = <12>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ qcom,pm8998-debug@0 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ qcom,pm8998-debug@1 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ qcom,pmi8998-debug@2 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ qcom,pmi8998-debug@3 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x3 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ qcom,pm8005-debug@4 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x4 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+
+ qcom,pm8005-debug@5 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x5 SPMI_USID>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+ };
+
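Note: each child of the debug bus names one PMIC by SPMI slave ID; reg = <sid SPMI_USID> uses the SPMI_USID address-type macro from the dt-bindings/spmi/spmi.h include added at the top of this file, so USIDs 0/1, 2/3 and 4/5 map to PM8998, PMI8998 and PM8005 respectively, presumably mirroring the main SPMI arbiter layout. The fuse region plus qcom,fuse-disable-bit presumably lets the driver skip probing when the debug path is fused off. Minimal child shape:

	qcom,pm8998-debug@0 {
		compatible = "qcom,spmi-pmic";
		reg = <0x0 SPMI_USID>;	/* slave ID 0 */
	};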
msm_cpufreq: qcom,msm-cpufreq {
compatible = "qcom,msm-cpufreq";
clock-names = "cpu0_clk", "cpu4_clk";
@@ -618,6 +716,16 @@
< 6881 /* 1804 MHz */ >;
};
+ snoc_cnoc_keepalive: qcom,snoc_cnoc_keepalive {
+ compatible = "qcom,devbw";
+ governor = "powersave";
+ qcom,src-dst-ports = <139 627>;
+ qcom,active-only;
+ status = "ok";
+ qcom,bw-tbl =
+ < 1 >;
+ };
+
devfreq_memlat_0: qcom,cpu0-memlat-mon {
compatible = "qcom,arm-memlat-mon";
qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
@@ -707,6 +815,12 @@
< 1651200 960000 >;
};
+ cpu_pmu: cpu-pmu {
+ compatible = "arm,armv8-pmuv3";
+ qcom,irq-is-percpu;
+ interrupts = <1 5 4>;
+ };
+
clock_gcc: qcom,gcc@100000 {
compatible = "qcom,gcc-sdm845", "syscon";
reg = <0x100000 0x1f0000>;
@@ -738,7 +852,7 @@
clock_dispcc: qcom,dispcc@af00000 {
compatible = "qcom,dispcc-sdm845", "syscon";
- reg = <0xaf00000 0x100000>;
+ reg = <0xaf00000 0x10000>;
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
#clock-cells = <1>;
@@ -750,6 +864,7 @@
reg = <0x5090000 0x9000>;
reg-names = "cc_base";
vdd_cx-supply = <&pm8998_s9_level>;
+ qcom,gpu_cc_gmu_clk_src-opp-handle = <&gmu>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -760,6 +875,7 @@
reg-names = "cc_base";
vdd_gfx-supply = <&pm8005_s1_level>;
vdd_mx-supply = <&pm8998_s6_level>;
+ qcom,gpu_cc_gx_gfx3d_clk_src-opp-handle = <&msm_gpu>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -774,12 +890,11 @@
<0x178b0000 0x1000>,
<0x17d42400 0x0c00>,
<0x17d44400 0x0c00>,
- <0x17d46c00 0x0c00>,
- <0x17810090 0x8>;
+ <0x17d46c00 0x0c00>;
reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
"l3_pll", "pwrcl_pll", "perfcl_pll",
"l3_sequencer", "pwrcl_sequencer",
- "perfcl_sequencer", "apps_itm_ctl";
+ "perfcl_sequencer";
vdd-l3-supply = <&apc0_l3_vreg>;
vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
@@ -790,17 +905,17 @@
< 422400000 0x50140116 0x00002020 0x1 2 >,
< 499200000 0x5014021a 0x00002020 0x1 3 >,
< 576000000 0x5014031e 0x00002020 0x1 4 >,
- < 652800000 0x501c0422 0x00002020 0x1 5 >,
- < 729600000 0x501c0526 0x00002020 0x1 6 >,
- < 806400000 0x501c062a 0x00002222 0x1 7 >;
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 729600000 0x401c0526 0x00002020 0x1 6 >,
+ < 806400000 0x401c062a 0x00002222 0x1 7 >;
qcom,pwrcl-speedbin0-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
< 422400000 0x50140116 0x00002020 0x1 2 >,
< 499200000 0x5014021a 0x00002020 0x1 3 >,
< 576000000 0x5014031e 0x00002020 0x1 4 >,
- < 652800000 0x501c0422 0x00002020 0x1 5 >,
- < 748800000 0x501c0527 0x00002020 0x1 6 >,
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 748800000 0x401c0527 0x00002020 0x1 6 >,
< 825600000 0x401c062b 0x00002222 0x1 7 >,
< 902400000 0x4024072f 0x00002626 0x1 8 >,
< 979200000 0x40240833 0x00002929 0x1 9 >,
@@ -813,10 +928,10 @@
< 422400000 0x50140116 0x00002020 0x1 2 >,
< 499200000 0x5014021a 0x00002020 0x1 3 >,
< 576000000 0x5014031e 0x00002020 0x1 4 >,
- < 652800000 0x501c0422 0x00002020 0x1 5 >,
- < 729600000 0x501c0526 0x00002020 0x1 6 >,
- < 806400000 0x501c062a 0x00002222 0x1 7 >,
- < 883200000 0x4024072b 0x00002525 0x1 8 >,
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 729600000 0x401c0526 0x00002020 0x1 6 >,
+ < 806400000 0x401c062a 0x00002222 0x1 7 >,
+ < 883200000 0x4024072e 0x00002525 0x1 8 >,
< 960000000 0x40240832 0x00002828 0x1 9 >,
< 1036800000 0x40240936 0x00002b2b 0x1 10 >,
< 1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
@@ -873,7 +988,6 @@
qcom,safe-fsm-en;
qcom,ps-fsm-en;
qcom,droop-fsm-en;
- qcom,osm-pll-setup;
clock-names = "xo_ao";
clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
@@ -901,6 +1015,36 @@
#clock-cells = <1>;
};
+ clock_aop: qcom,aopclk {
+ compatible = "qcom,aop-qmp-clk";
+ #clock-cells = <1>;
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "qdss_clk";
+ };
+
+ ufs_ice: ufsice@1d90000 {
+ compatible = "qcom,ice";
+ reg = <0x1d90000 0x8000>;
+ qcom,enable-ice-clk;
+ clock-names = "ufs_core_clk", "bus_clk",
+ "iface_clk", "ice_core_clk";
+ clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+ <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+ <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+ qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+ vdd-hba-supply = <&ufs_phy_gdsc>;
+ qcom,msm-bus,name = "ufs_ice_noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 650 0 0>, /* No vote */
+ <1 650 1000 0>; /* Max. bandwidth */
+ qcom,bus-vector-names = "MIN",
+ "MAX";
+ qcom,instance-type = "ufs";
+ };
+
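Note: this is the inline crypto engine node for the embedded UFS controller; ufshc_mem below attaches to it through the ufs-qcom-crypto phandle so that requests on crypto-enabled partitions are routed through ICE:

	&ufshc_mem {
		ufs-qcom-crypto = <&ufs_ice>;
	};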
ufsphy_mem: ufsphy_mem@1d87000 {
reg = <0x1d87000 0xda8>; /* PHY regs */
reg-names = "phy_mem";
@@ -911,19 +1055,20 @@
clock-names = "ref_clk_src",
"ref_clk",
"ref_aux_clk";
- clocks = <&clock_rpmh RPMH_LN_BB_CLK1>,
+ clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
status = "disabled";
};
- ufshc_mem: ufshc_mem@1d84000 {
+ ufshc_mem: ufshc@1d84000 {
compatible = "qcom,ufshc";
reg = <0x1d84000 0x2500>;
interrupts = <0 265 0>;
phys = <&ufsphy_mem>;
phy-names = "ufsphy";
+ ufs-qcom-crypto = <&ufs_ice>;
lanes-per-direction = <2>;
dev-ref-clk-freq = <0>; /* 19.2 MHz */
@@ -945,7 +1090,7 @@
<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
- <&clock_rpmh RPMH_LN_BB_CLK1>,
+ <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
@@ -1031,7 +1176,7 @@
clock-names = "ref_clk_src",
"ref_clk",
"ref_aux_clk";
- clocks = <&clock_rpmh RPMH_LN_BB_CLK1>,
+ clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
<&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
@@ -1064,7 +1209,7 @@
<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
<&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
- <&clock_rpmh RPMH_LN_BB_CLK1>,
+ <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
freq-table-hz =
@@ -1112,6 +1257,57 @@
status = "disabled";
};
+ sdhc_2: sdhci@8804000 {
+ compatible = "qcom,sdhci-msm-v5";
+ reg = <0x8804000 0x1000>;
+ reg-names = "hc_mem";
+
+ interrupts = <0 204 0>, <0 222 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <4>;
+ qcom,large-address-bus;
+
+ qcom,msm-bus,name = "sdhc2";
+ qcom,msm-bus,num-cases = <8>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ /* No vote */
+ <81 512 0 0>, <1 608 0 0>,
+ /* 400 KB/s*/
+ <81 512 1046 1600>,
+ <1 608 1600 1600>,
+ /* 20 MB/s */
+ <81 512 52286 80000>,
+ <1 608 80000 80000>,
+ /* 25 MB/s */
+ <81 512 65360 100000>,
+ <1 608 100000 100000>,
+ /* 50 MB/s */
+ <81 512 130718 200000>,
+ <1 608 133320 133320>,
+ /* 100 MB/s */
+ <81 512 261438 200000>,
+ <1 608 150000 150000>,
+ /* 200 MB/s */
+ <81 512 261438 400000>,
+ <1 608 300000 300000>,
+ /* Max. bandwidth */
+ <81 512 1338562 4096000>,
+ <1 608 1338562 4096000>;
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+ 100000000 200000000 4294967295>;
+
+ qcom,sdr104-wa;
+
+ qcom,devfreq,freq-table = <50000000 200000000>;
+ clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
+ <&clock_gcc GCC_SDCC2_APPS_CLK>;
+ clock-names = "iface_clk", "core_clk";
+
+ status = "disabled";
+ };
+
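Note: sdhc_2 lands with status = "disabled"; a board dts is expected to enable it and add the board wiring. The eight bus-scale cases pair one-to-one with the qcom,bus-bw-vectors-bps thresholds, so the driver can vote the row matching the current transfer rate. A minimal board-side enable, with the supplies left out as board-specific assumptions:

	&sdhc_2 {
		status = "ok";
		/* board-specific: vdd-supply, vdd-io-supply, pinctrl-* */
	};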
pil_modem: qcom,mss@4080000 {
compatible = "qcom,pil-q6v55-mss";
reg = <0x4080000 0x100>,
@@ -1119,9 +1315,12 @@
<0x1f65000 0x008>,
<0x1f64000 0x008>,
<0x4180000 0x020>,
- <0xc2b0000 0x004>;
+ <0xc2b0000 0x004>,
+ <0xb2e0100 0x004>,
+ <0x4180044 0x004>;
reg-names = "qdsp6_base", "halt_q6", "halt_modem",
- "halt_nc", "rmb_base", "restart_reg";
+ "halt_nc", "rmb_base", "restart_reg",
+ "pdc_sync", "alt_reset";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_MSS_CFG_AHB_CLK>,
@@ -1129,11 +1328,12 @@
<&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
<&clock_gcc GCC_MSS_GPLL0_DIV_CLK_SRC>,
<&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
- <&clock_gcc GCC_MSS_MFAB_AXIS_CLK>;
+ <&clock_gcc GCC_MSS_MFAB_AXIS_CLK>,
+ <&clock_gcc GCC_PRNG_AHB_CLK>;
clock-names = "xo", "iface_clk", "bus_clk",
"mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
- "mnoc_axi_clk";
- qcom,proxy-clock-names = "xo";
+ "mnoc_axi_clk", "prng_clk";
+ qcom,proxy-clock-names = "xo", "prng_clk";
qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
"gpll0_mss_clk", "snoc_axi_clk",
"mnoc_axi_clk";
@@ -1162,6 +1362,10 @@
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ qcom,mba-mem@0 {
+ compatible = "qcom,pil-mba-mem";
+ memory-region = <&pil_mba_mem>;
+ };
};
qcom,lpass@17300000 {
@@ -1199,7 +1403,7 @@
qcom,ssc@5c00000 {
compatible = "qcom,pil-tz-generic";
reg = <0x5c00000 0x4000>;
- interrupts = <0 494 1>;
+ interrupts = <0 377 1>;
vdd_cx-supply = <&pm8998_l27_level>;
vdd_px-supply = <&pm8998_lvs2>;
@@ -1242,6 +1446,25 @@
qcom,ea-pc = <0x270>;
};
+ slim_qca: slim@17240000 {
+ status = "ok";
+ cell-index = <3>;
+ compatible = "qcom,slim-ngd";
+ reg = <0x17240000 0x2c000>,
+ <0x17204000 0x20000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 291 0>, <0 292 0>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+
+ /* Slimbus Slave DT for WCN3990 */
+ btfmslim_codec: wcn3990 {
+ compatible = "qcom,btfmslim_slave";
+ elemental-addr = [00 01 20 02 17 02];
+ qcom,btfm-slim-ifd = "btfmslim_slave_ifd";
+ qcom,btfm-slim-ifd-elemental-addr = [00 00 20 02 17 02];
+ };
+ };
+
eud: qcom,msm-eud@88e0000 {
compatible = "qcom,msm-eud";
interrupt-names = "eud_irq";
@@ -1285,7 +1508,7 @@
compatible = "qcom,msm-watchdog";
reg = <0x17980000 0x1000>;
reg-names = "wdt-base";
- interrupts = <0 3 0>, <0 4 0>;
+ interrupts = <0 0 0>, <0 1 0>;
qcom,bark-time = <11000>;
qcom,pet-time = <10000>;
qcom,ipi-ping;
@@ -1307,7 +1530,7 @@
qcom,pas-id = <18>;
qcom,proxy-timeout-ms = <10000>;
- qcom,smem-id = <423>;
+ qcom,smem-id = <601>;
qcom,sysmon-id = <7>;
qcom,ssctl-instance-id = <0x17>;
qcom,firmware-name = "cdsp";
@@ -1329,56 +1552,98 @@
qcom,rtb-size = <0x100000>;
};
+	qcom,mpm2-sleep-counter@c221000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x0c221000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
+ qcom,msm-cdsp-loader {
+ compatible = "qcom,cdsp-loader";
+ qcom,proc-img-to-load = "cdsp";
+ };
+
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-compute";
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1401>,
- <&apps_smmu 0x1421>;
+ iommus = <&apps_smmu 0x1401 0x0>,
+ <&apps_smmu 0x1421 0x0>;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1402>,
- <&apps_smmu 0x1422>;
+ iommus = <&apps_smmu 0x1402 0x0>,
+ <&apps_smmu 0x1422 0x0>;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1403>,
- <&apps_smmu 0x1423>;
+ iommus = <&apps_smmu 0x1403 0x0>,
+ <&apps_smmu 0x1423 0x0>;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1404>,
- <&apps_smmu 0x1424>;
+ iommus = <&apps_smmu 0x1404 0x0>,
+ <&apps_smmu 0x1424 0x0>;
};
qcom,msm_fastrpc_compute_cb5 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1405>,
- <&apps_smmu 0x1425>;
+ iommus = <&apps_smmu 0x1405 0x0>,
+ <&apps_smmu 0x1425 0x0>;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1406>,
- <&apps_smmu 0x1426>;
+ iommus = <&apps_smmu 0x1406 0x0>,
+ <&apps_smmu 0x1426 0x0>;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1407>,
- <&apps_smmu 0x1427>;
+ iommus = <&apps_smmu 0x1407 0x0>,
+ <&apps_smmu 0x1427 0x0>;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
- iommus = <&apps_smmu 0x1408>,
- <&apps_smmu 0x1428>;
+ iommus = <&apps_smmu 0x1408 0x0>,
+ <&apps_smmu 0x1428 0x0>;
+ };
+ qcom,msm_fastrpc_compute_cb9 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ qcom,secure-context-bank;
+ iommus = <&apps_smmu 0x1409 0x0>,
+ <&apps_smmu 0x1419 0x0>,
+ <&apps_smmu 0x1429 0x0>;
+ };
+ qcom,msm_fastrpc_compute_cb10 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ qcom,secure-context-bank;
+ iommus = <&apps_smmu 0x140A 0x0>,
+ <&apps_smmu 0x141A 0x0>,
+ <&apps_smmu 0x142A 0x0>;
+ };
+ qcom,msm_fastrpc_compute_cb11 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1823 0x0>;
+ };
+ qcom,msm_fastrpc_compute_cb12 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1824 0x0>;
};
};
@@ -1399,10 +1664,20 @@
reg = <0x65c 4>;
};
+ boot_stats@6b0 {
+ compatible = "qcom,msm-imem-boot_stats";
+ reg = <0x6b0 32>;
+ };
+
pil@94c {
compatible = "qcom,msm-imem-pil";
reg = <0x94c 200>;
};
+
+ kaslr_offset@6d0 {
+ compatible = "qcom,msm-imem-kaslr_offset";
+ reg = <0x6d0 12>;
+ };
};
qcom,venus@aae0000 {
@@ -1438,31 +1713,31 @@
qcom,dump-id = <0x60>;
};
qcom,l1_i_cache1 {
- qcom,dump-node = <&L1_I_1>;
+ qcom,dump-node = <&L1_I_100>;
qcom,dump-id = <0x61>;
};
qcom,l1_i_cache2 {
- qcom,dump-node = <&L1_I_2>;
+ qcom,dump-node = <&L1_I_200>;
qcom,dump-id = <0x62>;
};
qcom,l1_i_cache3 {
- qcom,dump-node = <&L1_I_3>;
+ qcom,dump-node = <&L1_I_300>;
qcom,dump-id = <0x63>;
};
qcom,l1_i_cache100 {
- qcom,dump-node = <&L1_I_100>;
+ qcom,dump-node = <&L1_I_400>;
qcom,dump-id = <0x64>;
};
qcom,l1_i_cache101 {
- qcom,dump-node = <&L1_I_101>;
+ qcom,dump-node = <&L1_I_500>;
qcom,dump-id = <0x65>;
};
qcom,l1_i_cache102 {
- qcom,dump-node = <&L1_I_102>;
+ qcom,dump-node = <&L1_I_600>;
qcom,dump-id = <0x66>;
};
qcom,l1_i_cache103 {
- qcom,dump-node = <&L1_I_103>;
+ qcom,dump-node = <&L1_I_700>;
qcom,dump-id = <0x67>;
};
qcom,l1_d_cache0 {
@@ -1470,31 +1745,31 @@
qcom,dump-id = <0x80>;
};
qcom,l1_d_cache1 {
- qcom,dump-node = <&L1_D_1>;
+ qcom,dump-node = <&L1_D_100>;
qcom,dump-id = <0x81>;
};
qcom,l1_d_cache2 {
- qcom,dump-node = <&L1_D_2>;
+ qcom,dump-node = <&L1_D_200>;
qcom,dump-id = <0x82>;
};
qcom,l1_d_cache3 {
- qcom,dump-node = <&L1_D_3>;
+ qcom,dump-node = <&L1_D_300>;
qcom,dump-id = <0x83>;
};
qcom,l1_d_cache100 {
- qcom,dump-node = <&L1_D_100>;
+ qcom,dump-node = <&L1_D_400>;
qcom,dump-id = <0x84>;
};
qcom,l1_d_cache101 {
- qcom,dump-node = <&L1_D_101>;
+ qcom,dump-node = <&L1_D_500>;
qcom,dump-id = <0x85>;
};
qcom,l1_d_cache102 {
- qcom,dump-node = <&L1_D_102>;
+ qcom,dump-node = <&L1_D_600>;
qcom,dump-id = <0x86>;
};
qcom,l1_d_cache103 {
- qcom,dump-node = <&L1_D_103>;
+ qcom,dump-node = <&L1_D_700>;
qcom,dump-id = <0x87>;
};
qcom,llcc1_d_cache {
@@ -1609,7 +1884,7 @@
reg-names = "msgram", "irq-reg-base";
qcom,irq-mask = <0x1>;
interrupts = <0 389 1>;
- mbox_desc_offset = <0x0>;
+ mbox-desc-offset = <0x0>;
#mbox-cells = <1>;
};
@@ -1780,6 +2055,20 @@
status = "ok";
};
+ spss_utils: qcom,spss_utils {
+ compatible = "qcom,spss-utils";
+ /* spss fuses physical address */
+ qcom,spss-fuse1-addr = <0x007841c4>;
+ qcom,spss-fuse1-bit = <27>;
+ qcom,spss-fuse2-addr = <0x007841c4>;
+ qcom,spss-fuse2-bit = <26>;
+ qcom,spss-dev-firmware-name = "spss1d"; /* 8 chars max */
+ qcom,spss-test-firmware-name = "spss1t"; /* 8 chars max */
+ qcom,spss-prod-firmware-name = "spss1p"; /* 8 chars max */
+ qcom,spss-debug-reg-addr = <0x01886020>;
+ status = "ok";
+ };
+
qcom,glink_pkt {
compatible = "qcom,glinkpkt";
@@ -1845,15 +2134,133 @@
qcom,pipe-attr-ee;
};
+ qcom_seecom: qseecom@86d00000 {
+ compatible = "qcom,qseecom";
+ reg = <0x86d00000 0x2200000>;
+ reg-names = "secapp-region";
+ qcom,hlos-num-ce-hw-instances = <1>;
+ qcom,hlos-ce-hw-instance = <0>;
+ qcom,qsee-ce-hw-instance = <0>;
+ qcom,disk-encrypt-pipe-pair = <2>;
+ qcom,support-fde;
+ qcom,no-clock-support;
+ qcom,msm-bus,name = "qseecom-noc";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <125 512 0 0>,
+ <125 512 200000 400000>,
+ <125 512 300000 800000>,
+ <125 512 400000 1000000>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ clocks = <&clock_gcc GCC_CE1_CLK>,
+ <&clock_gcc GCC_CE1_CLK>,
+ <&clock_gcc GCC_CE1_AHB_CLK>,
+ <&clock_gcc GCC_CE1_AXI_CLK>;
+ qcom,ce-opp-freq = <171430000>;
+ qcom,qsee-reentrancy-support = <2>;
+ };
+
+ qcom_rng: qrng@793000 {
+ compatible = "qcom,msm-rng";
+ reg = <0x793000 0x1000>;
+ qcom,msm-rng-iface-clk;
+ qcom,no-qrng-config;
+ qcom,msm-bus,name = "msm-rng-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 618 0 0>, /* No vote */
+ <1 618 0 800>; /* 100 KHz */
+ clocks = <&clock_gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "iface_clk";
+ };
+
+ qcom_tzlog: tz-log@146bf720 {
+ compatible = "qcom,tz-log";
+ reg = <0x146bf720 0x3000>;
+ qcom,hyplog-enabled;
+ hyplog-address-offset = <0x410>;
+ hyplog-size-offset = <0x414>;
+ };
+
+ qcom_cedev: qcedev@1de0000 {
+ compatible = "qcom,qcedev";
+ reg = <0x1de0000 0x20000>,
+ <0x1dc4000 0x24000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 272 0>;
+ qcom,bam-pipe-pair = <1>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-device = <0>;
+ qcom,ce-hw-shared;
+ qcom,bam-ee = <0>;
+ qcom,msm-bus,name = "qcedev-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <125 512 0 0>,
+ <125 512 393600 393600>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ clocks = <&clock_gcc GCC_CE1_CLK>,
+ <&clock_gcc GCC_CE1_CLK>,
+ <&clock_gcc GCC_CE1_AHB_CLK>,
+ <&clock_gcc GCC_CE1_AXI_CLK>;
+ qcom,ce-opp-freq = <171430000>;
+ };
+
+ qcom_crypto: qcrypto@1de0000 {
+ compatible = "qcom,qcrypto";
+ reg = <0x1de0000 0x20000>,
+ <0x1dc4000 0x24000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 272 0>;
+ qcom,bam-pipe-pair = <2>;
+ qcom,ce-hw-instance = <0>;
+ qcom,ce-device = <0>;
+ qcom,bam-ee = <0>;
+ qcom,ce-hw-shared;
+ qcom,clk-mgmt-sus-res;
+ qcom,msm-bus,name = "qcrypto-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <125 512 0 0>,
+ <125 512 393600 393600>;
+ clock-names = "core_clk_src", "core_clk",
+ "iface_clk", "bus_clk";
+ clocks = <&clock_gcc GCC_CE1_CLK>,
+ <&clock_gcc GCC_CE1_CLK>,
+ <&clock_gcc GCC_CE1_AHB_CLK>,
+ <&clock_gcc GCC_CE1_AXI_CLK>;
+ qcom,ce-opp-freq = <171430000>;
+ qcom,use-sw-aes-cbc-ecb-ctr-algo;
+ qcom,use-sw-aes-xts-algo;
+ qcom,use-sw-aes-ccm-algo;
+ qcom,use-sw-ahash-algo;
+ qcom,use-sw-aead-algo;
+ qcom,use-sw-hmac-algo;
+ };
+
qcom,msm_gsi {
compatible = "qcom,msm_gsi";
};
+ qcom,rmtfs_sharedmem@0 {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x0 0x200000>;
+ reg-names = "rmtfs";
+ qcom,client-id = <0x00000001>;
+ };
+
qcom,rmnet-ipa {
compatible = "qcom,rmnet-ipa3";
qcom,rmnet-ipa-ssr;
qcom,ipa-loaduC;
qcom,ipa-advertise-sg-support;
+ qcom,ipa-napi-enable;
};
ipa_hw: qcom,ipa@01e00000 {
@@ -1872,6 +2279,8 @@
qcom,modem-cfg-emb-pipe-flt;
qcom,ipa-wdi2;
qcom,use-64-bit-dma-mask;
+ qcom,arm-smmu;
+ qcom,smmu-s1-bypass;
qcom,bandwidth-vote-for-ipa;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <4>;
@@ -1951,27 +2360,57 @@
0x0 /* modem_comp_decomp_ofst; diff */
0x0 /* modem_comp_decomp_size; diff */
0xbd8 /* modem_ofst; */
- 0x1424 /* modem_size; */
- 0x1ffc /* apps_v4_flt_hash_ofst; */
+ 0x1024 /* modem_size; */
+ 0x2000 /* apps_v4_flt_hash_ofst; */
0x0 /* apps_v4_flt_hash_size; */
- 0x1ffc /* apps_v4_flt_nhash_ofst; */
+ 0x2000 /* apps_v4_flt_nhash_ofst; */
0x0 /* apps_v4_flt_nhash_size; */
- 0x1ffc /* apps_v6_flt_hash_ofst; */
+ 0x2000 /* apps_v6_flt_hash_ofst; */
0x0 /* apps_v6_flt_hash_size; */
- 0x1ffc /* apps_v6_flt_nhash_ofst; */
+ 0x2000 /* apps_v6_flt_nhash_ofst; */
0x0 /* apps_v6_flt_nhash_size; */
0x80 /* uc_info_ofst; */
0x200 /* uc_info_size; */
0x2000 /* end_ofst; */
- 0x1ffc /* apps_v4_rt_hash_ofst; */
+ 0x2000 /* apps_v4_rt_hash_ofst; */
0x0 /* apps_v4_rt_hash_size; */
- 0x1ffc /* apps_v4_rt_nhash_ofst; */
+ 0x2000 /* apps_v4_rt_nhash_ofst; */
0x0 /* apps_v4_rt_nhash_size; */
- 0x1ffc /* apps_v6_rt_hash_ofst; */
+ 0x2000 /* apps_v6_rt_hash_ofst; */
0x0 /* apps_v6_rt_hash_size; */
- 0x1ffc /* apps_v6_rt_nhash_ofst; */
+ 0x2000 /* apps_v6_rt_nhash_ofst; */
0x0 /* apps_v6_rt_nhash_size; */
+ 0x1c00 /* uc_event_ring_ofst; */
+ 0x400 /* uc_event_ring_size; */
>;
+
+ /* smp2p gpio information */
+ qcom,smp2pgpio_map_ipa_1_out {
+ compatible = "qcom,smp2pgpio-map-ipa-1-out";
+ gpios = <&smp2pgpio_ipa_1_out 0 0>;
+ };
+
+ qcom,smp2pgpio_map_ipa_1_in {
+ compatible = "qcom,smp2pgpio-map-ipa-1-in";
+ gpios = <&smp2pgpio_ipa_1_in 0 0>;
+ };
+
+ ipa_smmu_ap: ipa_smmu_ap {
+ compatible = "qcom,ipa-smmu-ap-cb";
+ iommus = <&apps_smmu 0x720 0x0>;
+ qcom,iova-mapping = <0x20000000 0x40000000>;
+ };
+
+ ipa_smmu_wlan: ipa_smmu_wlan {
+ compatible = "qcom,ipa-smmu-wlan-cb";
+ iommus = <&apps_smmu 0x721 0x0>;
+ };
+
+ ipa_smmu_uc: ipa_smmu_uc {
+ compatible = "qcom,ipa-smmu-uc-cb";
+ iommus = <&apps_smmu 0x722 0x0>;
+ qcom,iova-mapping = <0x40000000 0x20000000>;
+ };
};
qcom,ipa_fws {
@@ -2013,7 +2452,7 @@
cmd_db: qcom,cmd-db@861e0000 {
compatible = "qcom,cmd-db";
- reg = <0x861e0000 0x4000>;
+ reg = <0xc3f000c 8>;
};
dcc: dcc_v2@10a2000 {
@@ -2021,12 +2460,1076 @@
reg = <0x10a2000 0x1000>,
<0x10ae000 0x2000>;
reg-names = "dcc-base", "dcc-ram-base";
+
+ dcc-ram-offset = <0x6000>;
};
qcom,msm-core@780000 {
compatible = "qcom,apss-core-ea";
reg = <0x780000 0x1000>;
};
+
+ qcom,icnss@18800000 {
+ compatible = "qcom,icnss";
+ reg = <0x18800000 0x800000>,
+ <0xa0000000 0x10000000>,
+ <0xb0000000 0x10000>;
+ reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
+ iommus = <&apps_smmu 0x0040 0x0>,
+ <&apps_smmu 0x0041 0x0>;
+ interrupts = <0 414 0 /* CE0 */ >,
+ <0 415 0 /* CE1 */ >,
+ <0 416 0 /* CE2 */ >,
+ <0 417 0 /* CE3 */ >,
+ <0 418 0 /* CE4 */ >,
+ <0 419 0 /* CE5 */ >,
+ <0 420 0 /* CE6 */ >,
+ <0 421 0 /* CE7 */ >,
+ <0 422 0 /* CE8 */ >,
+ <0 423 0 /* CE9 */ >,
+ <0 424 0 /* CE10 */ >,
+ <0 425 0 /* CE11 */ >;
+ qcom,wlan-msa-memory = <0x100000>;
+ };
+
+ thermal_zones: thermal-zones {
+ aoss0-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 0>;
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu0-silver-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-silver-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu2-silver-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu3-silver-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ kryo-l3-0-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ kryo-l3-1-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu0-gold-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-gold-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 8>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu2-gold-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 9>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu3-gold-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 10>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu0-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 11>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu1-ts0-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 12>;
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ aoss1-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 0>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-dsp-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 1>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+
+ ddr-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 2>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ wlan-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 3>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ compute-hvx-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ camera-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmss-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-core-ts1-h {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu0 {
+ polling-delay-passive = <10>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 11>;
+ thermal-governor = "step_wise";
+ trips {
+ gpu0_trip: gpu0-trip {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ gpu0_cdev {
+ trip = <&gpu0_trip>;
+ cooling-device =
+ <&msm_gpu 1 THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ gpu1 {
+ polling-delay-passive = <10>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 12>;
+ thermal-governor = "step_wise";
+ trips {
+ gpu1_trip: gpu1-trip {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ gpu1_cdev {
+ trip = <&gpu1_trip>;
+ cooling-device =
+ <&msm_gpu 1 THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ pop-mem {
+ polling-delay-passive = <10>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 2>;
+ thermal-governor = "step_wise";
+ trips {
+ pop_trip: pop-trip {
+ temperature = <95000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ pop_cdev {
+ trip = <&pop_trip>;
+ cooling-device =
+ <&CPU4 1 THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ aoss0-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 0>;
+ tracks-low;
+ trips {
+ aoss0_trip: aoss0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu0-silver-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 1>;
+ tracks-low;
+ trips {
+ cpu0_trip: cpu0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu1-silver-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 2>;
+ tracks-low;
+ trips {
+ cpu1_trip: cpu1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu2-silver-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 3>;
+ tracks-low;
+ trips {
+ cpu2_trip: cpu2-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu3-silver-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 4>;
+ tracks-low;
+ trips {
+ cpu3_trip: cpu3-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ kryo-l3-0-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 5>;
+ tracks-low;
+ trips {
+ l3_0_trip: l3-0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ kryo-l3-1-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 6>;
+ tracks-low;
+ trips {
+ l3_1_trip: l3-1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu0-gold-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 7>;
+ tracks-low;
+ trips {
+ cpug0_trip: cpug0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu1-gold-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 8>;
+ tracks-low;
+ trips {
+ cpug1_trip: cpug1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu2-gold-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 9>;
+ tracks-low;
+ trips {
+ cpug2_trip: cpug2-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ cpu3-gold-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 10>;
+ tracks-low;
+ trips {
+ cpug3_trip: cpug3-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ gpu0-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 11>;
+ tracks-low;
+ trips {
+ gpu0_trip_l: gpu0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ gpu1-ts0-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 12>;
+ tracks-low;
+ trips {
+				gpu1_trip_l: gpu1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ aoss1-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 0>;
+ tracks-low;
+ trips {
+ aoss1_trip: aoss1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ mdm-dsp-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 1>;
+ tracks-low;
+ trips {
+ dsp_trip: dsp-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ ddr-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 2>;
+ tracks-low;
+ trips {
+ ddr_trip: ddr-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ wlan-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 3>;
+ tracks-low;
+ trips {
+ wlan_trip: wlan-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ compute-hvx-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 4>;
+ tracks-low;
+ trips {
+ hvx_trip: hvx-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ camera-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 5>;
+ tracks-low;
+ trips {
+ camera_trip: camera-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ mmss-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 6>;
+ tracks-low;
+ trips {
+ mmss_trip: mmss-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ mdm-core-ts1-l {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 7>;
+ tracks-low;
+ trips {
+ mdm_trip: mdm-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&CPU0 12 12>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&CPU4 12 12>;
+ };
+ gpu_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&msm_gpu 4 4>;
+ };
+ };
+ };
+
+ lmh-dcvs-01 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&lmh_dcvs1>;
+
+ trips {
+ active-config {
+ temperature = <95000>;
+ hysteresis = <30000>;
+ type = "passive";
+ };
+ };
+ };
+
+ lmh-dcvs-00 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&lmh_dcvs0>;
+
+ trips {
+ active-config {
+ temperature = <95000>;
+ hysteresis = <30000>;
+ type = "passive";
+ };
+ };
+ };
+
+ };
+
+ tsens0: tsens@c222000 {
+ compatible = "qcom,sdm845-tsens";
+ reg = <0xc222000 0x4>,
+ <0xc263000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 506 0>, <0 508 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens1: tsens@c223000 {
+ compatible = "qcom,sdm845-tsens";
+ reg = <0xc223000 0x4>,
+ <0xc265000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 507 0>, <0 509 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+};
+
+&clock_cpucc {
+ lmh_dcvs0: qcom,limits-dcvs@0 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,affinity = <0>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ lmh_dcvs1: qcom,limits-dcvs@1 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,affinity = <1>;
+ #thermal-sensor-cells = <0>;
+ };
};
&pcie_0_gdsc {
@@ -2114,6 +3617,9 @@
};
&gpu_gx_gdsc {
+ clock-names = "core_root_clk";
+ clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
+ qcom,force-enable-root-clk;
parent-supply = <&pm8005_s1_level>;
status = "ok";
};
@@ -2143,4 +3649,48 @@
#include "sdm845-vidc.dtsi"
#include "sdm845-pm.dtsi"
#include "sdm845-pinctrl.dtsi"
+#include "sdm845-pcie.dtsi"
#include "sdm845-audio.dtsi"
+#include "sdm845-gpu.dtsi"
+#include "sdm845-usb.dtsi"
+
+&pm8998_temp_alarm {
+ cooling-maps {
+ trip0_cpu0 {
+ trip = <&pm8998_trip0>;
+ cooling-device = <&CPU0 21 21>;
+ };
+ trip0_cpu4 {
+ trip = <&pm8998_trip0>;
+ cooling-device = <&CPU4 21 21>;
+ };
+ trip1_cpu1 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU1 22 22>;
+ };
+ trip1_cpu2 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU2 22 22>;
+ };
+ trip1_cpu3 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU3 22 22>;
+ };
+ trip1_cpu4 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU4 22 22>;
+ };
+ trip1_cpu5 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU5 22 22>;
+ };
+ trip1_cpu6 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU6 22 22>;
+ };
+ trip1_cpu7 {
+ trip = <&pm8998_trip1>;
+ cooling-device = <&CPU7 22 22>;
+ };
+ };
+};
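
The thermal zones above pair the user_space governor with passive trips at 65 °C (65000 millidegrees), so trip handling is deferred to a userspace thermal daemon reading the standard Linux thermal sysfs interface. A minimal sketch, assuming the usual /sys/class/thermal layout; the zone index is illustrative, since real daemons match zones by their "type" attribute:

    /* Read a zone temperature in millidegrees C, the unit the dtsi trip
     * values above use. Returns a negative value on error.
     */
    #include <stdio.h>

    static int read_zone_temp(int zone)
    {
            char path[64];
            FILE *f;
            int mdegc;

            snprintf(path, sizeof(path),
                     "/sys/class/thermal/thermal_zone%d/temp", zone);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &mdegc) != 1)
                    mdegc = -1;
            fclose(f);
            return mdegc;
    }
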
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 4a13b7a..f989858 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -1,5 +1,6 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -48,12 +49,14 @@
CONFIG_ARCH_SDM845=y
CONFIG_ARCH_SDM830=y
CONFIG_PCI=y
+CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -224,6 +227,7 @@
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -235,6 +239,7 @@
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -246,6 +251,7 @@
CONFIG_DUMMY=y
CONFIG_TUN=y
CONFIG_SKY2=y
+CONFIG_RNDIS_IPA=y
CONFIG_SMSC911X=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
@@ -261,6 +267,7 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
@@ -268,8 +275,10 @@
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
@@ -280,6 +289,7 @@
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SDM830=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
@@ -295,8 +305,17 @@
CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD934X_CODEC=y
@@ -319,6 +338,7 @@
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
CONFIG_DRM_SDE_RSC=y
@@ -375,7 +395,9 @@
CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
@@ -390,16 +412,22 @@
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
+CONFIG_SEEMP_CORE=y
CONFIG_MSM_GCC_SDM845=y
CONFIG_MSM_VIDEOCC_SDM845=y
CONFIG_MSM_CAMCC_SDM845=y
@@ -408,6 +436,8 @@
CONFIG_MSM_CLK_RPMH=y
CONFIG_CLOCK_CPU_OSM=y
CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MSM_QMP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
@@ -415,8 +445,12 @@
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_EUD=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -430,6 +464,7 @@
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
CONFIG_TRACER_PKT=y
CONFIG_QTI_RPMH_API=y
CONFIG_MSM_SMP2P=y
@@ -443,10 +478,13 @@
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_APSS_CORE_EA=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -457,6 +495,7 @@
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_EXTCON=y
+CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
@@ -464,7 +503,7 @@
CONFIG_ARM_GIC_V3_ACL=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
+CONFIG_MSM_TZ_LOG=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
@@ -505,7 +544,10 @@
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index f0f6cd9..7136ca8 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -53,13 +54,16 @@
CONFIG_ARCH_SDM845=y
CONFIG_ARCH_SDM830=y
CONFIG_PCI=y
+CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -233,6 +237,7 @@
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
@@ -245,6 +250,7 @@
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -255,6 +261,7 @@
CONFIG_BONDING=y
CONFIG_DUMMY=y
CONFIG_TUN=y
+CONFIG_RNDIS_IPA=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -270,17 +277,17 @@
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_DIAG_CHAR=y
-CONFIG_HVC_DCC=y
-CONFIG_HVC_DCC_SERIALIZE_SMP=y
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
@@ -291,6 +298,7 @@
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SDM830=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
@@ -306,8 +314,17 @@
CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD934X_CODEC=y
@@ -386,7 +403,9 @@
CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
@@ -409,16 +428,22 @@
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
+CONFIG_SEEMP_CORE=y
CONFIG_MSM_GCC_SDM845=y
CONFIG_MSM_VIDEOCC_SDM845=y
CONFIG_MSM_CAMCC_SDM845=y
@@ -427,6 +452,8 @@
CONFIG_MSM_CLK_RPMH=y
CONFIG_CLOCK_CPU_OSM=y
CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MSM_QMP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
@@ -435,8 +462,12 @@
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_EUD=y
@@ -452,6 +483,7 @@
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
CONFIG_TRACER_PKT=y
CONFIG_QTI_RPMH_API=y
CONFIG_MSM_SMP2P=y
@@ -466,11 +498,14 @@
CONFIG_ICNSS=y
CONFIG_ICNSS_DEBUG=y
CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_APSS_CORE_EA=y
CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -481,6 +516,7 @@
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_EXTCON=y
+CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
@@ -489,7 +525,7 @@
CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
+CONFIG_MSM_TZ_LOG=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
@@ -510,10 +546,12 @@
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
@@ -528,7 +566,6 @@
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
@@ -541,7 +578,6 @@
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
-CONFIG_RCU_PANIC_ON_STALL=1
CONFIG_FAULT_INJECTION=y
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
@@ -568,6 +604,7 @@
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
@@ -577,7 +614,10 @@
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index da095e8..dd918d0 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -74,8 +74,8 @@
#endif
/* arm-smccc */
-EXPORT_SYMBOL(arm_smccc_smc);
-EXPORT_SYMBOL(arm_smccc_hvc);
+EXPORT_SYMBOL(__arm_smccc_smc);
+EXPORT_SYMBOL(__arm_smccc_hvc);
/* caching functions */
EXPORT_SYMBOL(__dma_inv_area);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index d42e61c..5cdbc55 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -146,8 +146,11 @@
DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
#endif
- DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
- DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
+ DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
+ DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
+
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index acf3872..409abc4 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -121,6 +121,7 @@
static struct pci_config_window *
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
{
+ struct device *dev = &root->device->dev;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
struct pci_config_window *cfg;
@@ -132,8 +133,7 @@
root->mcfg_addr = pci_mcfg_lookup(seg, bus_res);
if (!root->mcfg_addr) {
- dev_err(&root->device->dev, "%04x:%pR ECAM region not found\n",
- seg, bus_res);
+ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
return NULL;
}
@@ -141,11 +141,10 @@
cfgres.start = root->mcfg_addr + bus_res->start * bsz;
cfgres.end = cfgres.start + resource_size(bus_res) * bsz - 1;
cfgres.flags = IORESOURCE_MEM;
- cfg = pci_ecam_create(&root->device->dev, &cfgres, bus_res,
- &pci_generic_ecam_ops);
+ cfg = pci_ecam_create(dev, &cfgres, bus_res, &pci_generic_ecam_ops);
if (IS_ERR(cfg)) {
- dev_err(&root->device->dev, "%04x:%pR error %ld mapping ECAM\n",
- seg, bus_res, PTR_ERR(cfg));
+ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+ PTR_ERR(cfg));
return NULL;
}
@@ -159,33 +158,36 @@
ri = container_of(ci, struct acpi_pci_generic_root_info, common);
pci_ecam_free(ri->cfg);
+ kfree(ci->ops);
kfree(ri);
}
-static struct acpi_pci_root_ops acpi_pci_root_ops = {
- .release_info = pci_acpi_generic_release_info,
-};
-
/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
int node = acpi_get_node(root->device->handle);
struct acpi_pci_generic_root_info *ri;
struct pci_bus *bus, *child;
+ struct acpi_pci_root_ops *root_ops;
ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
if (!ri)
return NULL;
+ root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
+	if (!root_ops) {
+		kfree(ri);
+		return NULL;
+	}
+
ri->cfg = pci_acpi_setup_ecam_mapping(root);
if (!ri->cfg) {
kfree(ri);
+ kfree(root_ops);
return NULL;
}
- acpi_pci_root_ops.pci_ops = &ri->cfg->ops->pci_ops;
- bus = acpi_pci_root_create(root, &acpi_pci_root_ops, &ri->common,
- ri->cfg);
+ root_ops->release_info = pci_acpi_generic_release_info;
+ root_ops->pci_ops = &ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
if (!bus)
return NULL;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 2f8d275..852548c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -867,8 +867,6 @@
{
unsigned long config_base = 0;
- if (attr->exclude_idle)
- return -EPERM;
if (is_kernel_in_hyp_mode() &&
attr->exclude_kernel != attr->exclude_hv)
return -EINVAL;
@@ -975,11 +973,74 @@
ARRAY_SIZE(pmceid));
}
+static void armv8pmu_idle_update(struct arm_pmu *cpu_pmu)
+{
+ struct pmu_hw_events *hw_events;
+ struct perf_event *event;
+ int idx;
+
+ if (!cpu_pmu)
+ return;
+
+ hw_events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ if (!hw_events)
+ return;
+
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+
+ if (!test_bit(idx, hw_events->used_mask))
+ continue;
+
+ event = hw_events->events[idx];
+
+ if (!event || !event->attr.exclude_idle ||
+ event->state != PERF_EVENT_STATE_ACTIVE)
+ continue;
+
+ cpu_pmu->pmu.read(event);
+ }
+}
+
+struct arm_pmu_and_idle_nb {
+ struct arm_pmu *cpu_pmu;
+ struct notifier_block perf_cpu_idle_nb;
+};
+
+static int perf_cpu_idle_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct arm_pmu_and_idle_nb *pmu_nb = container_of(nb,
+ struct arm_pmu_and_idle_nb, perf_cpu_idle_nb);
+
+ if (action == IDLE_START)
+ armv8pmu_idle_update(pmu_nb->cpu_pmu);
+
+ return NOTIFY_OK;
+}
+
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
- return smp_call_function_any(&cpu_pmu->supported_cpus,
+ int ret;
+ struct arm_pmu_and_idle_nb *pmu_idle_nb;
+
+ pmu_idle_nb = devm_kzalloc(&cpu_pmu->plat_device->dev,
+ sizeof(*pmu_idle_nb), GFP_KERNEL);
+ if (!pmu_idle_nb)
+ return -ENOMEM;
+
+ pmu_idle_nb->cpu_pmu = cpu_pmu;
+ pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;
+ idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+ ret = smp_call_function_any(&cpu_pmu->supported_cpus,
__armv8pmu_probe_pmu,
cpu_pmu, 1);
+
+ if (ret)
+ idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
+
+ return ret;
}
static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 8eb0d14..0c4a5ee 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,6 +84,16 @@
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
+void arch_cpu_idle_enter(void)
+{
+ idle_notifier_call_chain(IDLE_START);
+}
+
+void arch_cpu_idle_exit(void)
+{
+ idle_notifier_call_chain(IDLE_END);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
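
Together with the perf_event.c hunk above, these arch_cpu_idle_enter()/exit() hooks publish idle transitions on the MSM idle notifier chain, which is why the exclude_idle rejection could be dropped: exclude_idle events are now drained on IDLE_START instead of being refused with -EPERM. A minimal sketch of a notifier client, assuming the msm-4.9 idle_notifier_register() API used above (this chain is not in mainline):

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/smp.h>
    #include <linux/cpu.h>   /* IDLE_START/IDLE_END on msm kernels */

    static int my_idle_cb(struct notifier_block *nb, unsigned long action,
                          void *data)
    {
            if (action == IDLE_START)
                    pr_debug("cpu%d entering idle\n", smp_processor_id());
            return NOTIFY_OK;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_cb,
    };

    static int __init my_idle_init(void)
    {
            idle_notifier_register(&my_idle_nb);
            return 0;
    }
    module_init(my_idle_init);
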
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index ae0496f..6252234 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -12,6 +12,7 @@
*
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
.macro SMCCC instr
@@ -20,24 +21,32 @@
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
- ret
+ ldr x4, [sp, #8]
+ cbz x4, 1f /* no quirk structure */
+ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
+ b.ne 1f
+ str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
+1: ret
.cfi_endproc
.endm
/*
* void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
*/
-ENTRY(arm_smccc_smc)
+ENTRY(__arm_smccc_smc)
SMCCC smc
-ENDPROC(arm_smccc_smc)
+ENDPROC(__arm_smccc_smc)
/*
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
*/
-ENTRY(arm_smccc_hvc)
+ENTRY(__arm_smccc_hvc)
SMCCC hvc
-ENDPROC(arm_smccc_hvc)
+ENDPROC(__arm_smccc_hvc)
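
The rename mirrors the upstream SMCCC quirk rework: the assembly entry points gain a struct arm_smccc_quirk * as a tenth argument (read from the stack above, with x6 written back for the QCOM_A6 quirk), and include/linux/arm-smccc.h hides it from ordinary callers. Roughly, per the corresponding upstream header change (not part of this hunk):

    /* Plain callers keep the old nine-argument form; the wrapper passes
     * a NULL quirk. Quirk-aware callers use the _quirk variants.
     */
    asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
                                    unsigned long a2, unsigned long a3,
                                    unsigned long a4, unsigned long a5,
                                    unsigned long a6, unsigned long a7,
                                    struct arm_smccc_res *res,
                                    struct arm_smccc_quirk *quirk);

    #define arm_smccc_smc(...)       __arm_smccc_smc(__VA_ARGS__, NULL)
    #define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
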
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 3b40f26..aaf4bd7 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -394,7 +394,7 @@
{
struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
- if (!sge) {
+ if (sched_is_energy_aware() && !sge) {
pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
return NULL;
}
@@ -407,7 +407,7 @@
{
struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];
- if (!sge) {
+ if (sched_is_energy_aware() && !sge) {
pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
return NULL;
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 860c3b6..40e775a 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1743,7 +1743,11 @@
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
dma_addr_t dma_addr;
- int ret, prot, len = PAGE_ALIGN(size + offset);
+ int ret, prot, len, start_offset, map_offset;
+
+ map_offset = offset & ~PAGE_MASK;
+ start_offset = offset & PAGE_MASK;
+ len = PAGE_ALIGN(map_offset + size);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_ERROR_CODE)
@@ -1753,12 +1757,12 @@
prot = __get_iommu_pgprot(attrs, prot,
is_dma_coherent(dev, attrs));
- ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
- prot);
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+ start_offset, len, prot);
if (ret < 0)
goto fail;
- return dma_addr + offset;
+ return dma_addr + map_offset;
fail:
__free_iova(mapping, dma_addr, len);
return DMA_ERROR_CODE;
@@ -1897,7 +1901,11 @@
if (!mapping)
goto err;
- mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
+ if (!mapping->bitmap)
+ mapping->bitmap = vzalloc(bitmap_size);
+
if (!mapping->bitmap)
goto err2;
@@ -1912,7 +1920,7 @@
kref_init(&mapping->kref);
return mapping;
err3:
- kfree(mapping->bitmap);
+ kvfree(mapping->bitmap);
err2:
kfree(mapping);
err:
@@ -1926,7 +1934,7 @@
container_of(kref, struct dma_iommu_mapping, kref);
iommu_domain_free(mapping->domain);
- kfree(mapping->bitmap);
+ kvfree(mapping->bitmap);
kfree(mapping);
}
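
The bitmap change above is the classic try-kmalloc-then-fall-back-to-vmalloc pattern (what later kernels spell kvzalloc()), paired with kvfree() so either allocation is released correctly. Factored out as a sketch:

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Equivalent to kvzalloc(size, GFP_KERNEL) on kernels that have it. */
    static void *zalloc_large(size_t size)
    {
            /* Try physically contiguous memory first, quietly. */
            void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN |
                              __GFP_NORETRY);

            if (!p)
                    p = vzalloc(size); /* fall back to vmalloc space */
            return p;                  /* free with kvfree() either way */
    }
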
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5fc1112..0a34644 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -43,7 +43,20 @@
#include <asm/kryo3xx-arm64-edac.h>
#include <soc/qcom/scm.h>
-static const char *fault_name(unsigned int esr);
+struct fault_info {
+ int (*fn)(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+};
+
+static const struct fault_info fault_info[];
+
+static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+{
+ return fault_info + (esr & 63);
+}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -198,10 +211,12 @@
struct pt_regs *regs)
{
struct siginfo si;
+ const struct fault_info *inf;
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+ inf = esr_to_fault_info(esr);
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
- tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+ tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
show_pte(tsk->mm, addr);
show_regs(regs);
@@ -220,14 +235,16 @@
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
+ const struct fault_info *inf;
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
*/
- if (user_mode(regs))
- __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
- else
+ if (user_mode(regs)) {
+ inf = esr_to_fault_info(esr);
+ __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+ } else
__do_kernel_fault(mm, addr, esr, regs);
}
@@ -318,7 +335,8 @@
if (is_el0_instruction_abort(esr)) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
+ } else if (((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) ||
+ ((esr & ESR_ELx_CM) && !(mm_flags & FAULT_FLAG_USER))) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
@@ -507,12 +525,7 @@
return 1;
}
-static const struct fault_info {
- int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
- int sig;
- int code;
- const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
@@ -579,19 +592,13 @@
{ do_bad, SIGBUS, 0, "unknown 63" },
};
-static const char *fault_name(unsigned int esr)
-{
- const struct fault_info *inf = fault_info + (esr & 63);
- return inf->name;
-}
-
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- const struct fault_info *inf = fault_info + (esr & 63);
+ const struct fault_info *inf = esr_to_fault_info(esr);
struct siginfo info;
if (!inf->fn(addr, esr, regs))
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e612..07238b3 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long __must_check __copy_user_zeroing(void *to,
- const void __user *from,
- unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_user_zeroing(to, from, n);
- memset(to, 0, n);
- return n;
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __must_check __copy_user(void __user *to,
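
With this rework the fixup code no longer stores into the destination after a fault; raw_copy_from_user() returns the number of uncopied bytes and copy_from_user() zeroes exactly that tail, which is the contract callers depend on. Caller-side view as a sketch (fetch() and its arguments are illustrative):

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    static int fetch(void *kbuf, const void __user *ubuf, unsigned long len)
    {
            unsigned long left = copy_from_user(kbuf, ubuf, len);

            if (left) {
                    /* bytes [len - left, len) of kbuf now read as zero */
                    return -EFAULT;
            }
            return 0;
    }
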
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9..2792fc6 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"23:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #32\n" \
"24:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "25:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %0, %0, #8\n" \
"29:\n" \
+ "SUB %3, %3, #32\n" \
+ "30:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
+ "SUB %0, %0, #8\n" \
+ "33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
@@ -312,11 +315,15 @@
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
- " .long 29b,4b\n" \
+ " .long 29b,3b\n" \
+ " .long 30b,3b\n" \
+ " .long 31b,3b\n" \
+ " .long 32b,3b\n" \
+ " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -342,7 +349,7 @@
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"23:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "24:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
- "25:\n" \
+ "24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
+ "SUB %3, %3, #16\n" \
+ "30:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "29:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "30:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
- "31:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "32:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"35:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #16\n" \
"36:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %0, %0, #4\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"37:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38:\n" \
+ "SUB %3, %3, #16\n" \
+ "39:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "40:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41:\n" \
+ "SUB %3, %3, #16\n" \
+ "42:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "43:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44:\n" \
+ "SUB %0, %0, #4\n" \
+ "45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
@@ -483,11 +498,19 @@
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
- " .long 37b,4b\n" \
+ " .long 37b,3b\n" \
+ " .long 38b,3b\n" \
+ " .long 39b,3b\n" \
+ " .long 40b,3b\n" \
+ " .long 41b,3b\n" \
+ " .long 42b,3b\n" \
+ " .long 43b,3b\n" \
+ " .long 44b,3b\n" \
+ " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -513,7 +536,7 @@
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
}
@@ -569,6 +600,8 @@
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
#endif
@@ -588,11 +623,15 @@
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
+ if (retn)
+ return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
+ if (retn)
+ return retn + n;
}
switch (n) {
@@ -609,6 +648,10 @@
break;
}
+ /*
+ * If we get here, retn correctly reflects the number of failing
+ * bytes.
+ */
return retn;
}
EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
- "3: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
-#define __asm_copy_from_user_5(to, from, ret) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "4: SETW [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "4: SETD [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "6: SETW [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "6: SETD [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "8: SETW [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "10: SETB [%0++],D1Ar1\n", \
- "11: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "8: SETD [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
- " MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
- " SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@@ -789,36 +711,57 @@
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 8 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*8 bytes.
+ * LSM_STEP is bits 10:8 of TXSTATUS, which has already been read
+ * and stored in D0Ar2.
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
+ * LSM_STEP will be 0, i.e. we do 4 transfers per block, so if
+ * a fault happens at the 4th transfer, LSM_STEP reads 0
+ * instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #8\n")
+ "LSR D0Ar2, D0Ar2, #5\n" \
+ "ANDS D0Ar2, D0Ar2, #0x38\n" \
+ "ADDZ D0Ar2, D0Ar2, #32\n" \
+ "SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 4 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*4 bytes.
+ * LSM_STEP is bits 10:8 of TXSTATUS, which has already been read
+ * and stored in D0Ar2.
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETD,
+ * LSM_STEP will be 0, i.e. we do 4 transfers per block, so if
+ * a fault happens at the 4th transfer, LSM_STEP reads 0
+ * instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #4\n")
+ "LSR D0Ar2, D0Ar2, #6\n" \
+ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
+ "ADDZ D0Ar2, D0Ar2, #16\n" \
+ "SUB %1, %1, D0Ar2\n")
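The 64-bit and 32-bit fixup sequences above implement the same rewind arithmetic for different element sizes (8 and 4 bytes per transfer). A C sketch of that arithmetic, assuming TXSTATUS has already been captured (hypothetical helper, not kernel API):

	static unsigned int rapf_rewind_bytes(unsigned int txstatus,
					      unsigned int opsize /* 8 or 4 */)
	{
		unsigned int lsm_step = (txstatus >> 8) & 0x7;	/* bits 10:8 */
		unsigned int rewind = lsm_step * opsize;

		/* A fault on the last of the 4 transfers reports LSM_STEP == 0,
		 * so rewind a whole block in that case (the ADDZ above). */
		if (rewind == 0)
			rewind = 4 * opsize;
		return rewind;
	}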
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
- inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
- unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+ unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@@ -837,12 +782,14 @@
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@@ -850,16 +797,10 @@
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
- /* We only need one check after the unalignment-adjustments,
- because if both adjustments were done, either both or
- neither reference had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
-
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
@@ -888,7 +829,7 @@
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
#endif
@@ -898,7 +839,7 @@
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
/* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
-
- copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
- return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
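With the destination zeroing dropped from the fixup paths, clearing the uncopied tail becomes the caller's job. A sketch of the new contract, mirroring what the generic copy_from_user() wrapper does (assumes <linux/string.h>):

	static unsigned long copy_from_user_sketch(void *dst,
						   const void __user *src,
						   unsigned long n)
	{
		unsigned long left = raw_copy_from_user(dst, src, n);

		if (left)	/* fault: zero the bytes we could not copy */
			memset(dst + n - left, 0, left);
		return left;
	}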
#define __asm_clear_8x64(to, ret) \
asm volatile ( \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b3c5bde..5a4f2eb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -9,6 +9,7 @@
select HAVE_CONTEXT_TRACKING
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
@@ -1526,7 +1527,7 @@
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
- select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 6bf10e7..956db6e 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -17,6 +17,18 @@
#include <irq.h>
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+extern void *irq_stack[NR_CPUS];
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+ unsigned long low = (unsigned long)irq_stack[cpu];
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ return (low <= sp && sp <= high);
+}
+
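on_irq_stack() is a plain bounds check against this CPU's IRQ stack. A usage sketch (hypothetical helper; assumes smp_processor_id() and the GCC frame-address builtin are available):

	static inline bool running_on_irq_stack(void)
	{
		unsigned long sp = (unsigned long)__builtin_frame_address(0);

		return on_irq_stack(smp_processor_id(), sp);
	}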
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index f485afe..a8df44d 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -127,7 +127,7 @@
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
@@ -382,7 +382,7 @@
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
@@ -422,7 +422,7 @@
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index eebf395..2f182bd 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -216,12 +216,19 @@
LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp)
+
+ /* Set thread_info if we're coming from user mode */
+ mfc0 k0, CP0_STATUS
+ sll k0, 3 /* extract cu0 bit */
+ bltz k0, 9f
+
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
.set mips64
pref 0, 0($28) /* Prefetch the current pointer */
#endif
+9:
.set pop
.endm
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index fae2f94..4be2763 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
BLANK();
}
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 804d2a2..dd6a18b 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -80,7 +80,7 @@
}
/* Sorted insert of 75th percentile into buf2 */
- for (k = 0; k < i; ++k) {
+ for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
l = min_t(unsigned int,
i, ARRAY_SIZE(buf2) - 1);
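The added "k < ARRAY_SIZE(buf2)" bound stops the sorted insert from scanning past the destination buffer once i exceeds its size. The same pattern in isolation (a minimal sketch with assumed sizes, not the kernel code; memmove() from <string.h>):

	#define DST_LEN 4
	static void bounded_sorted_insert(unsigned int v,
					  unsigned int dst[DST_LEN], int used)
	{
		int k;

		for (k = 0; k < used && k < DST_LEN; ++k)
			if (v < dst[k])
				break;
		if (k >= DST_LEN)	/* value would fall off the end */
			return;
		memmove(&dst[k + 1], &dst[k],
			(DST_LEN - k - 1) * sizeof(dst[0]));
		dst[k] = v;
	}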
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dd31754..921211b 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1824,7 +1824,7 @@
}
decode_configs(c);
- c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 6430bff..5c429d7 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -257,7 +257,7 @@
else if ((prog_req.fr1 && prog_req.frdefault) ||
(prog_req.single && !prog_req.frdefault))
/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
- state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
cpu_has_mips_r2_r6) ?
FP_FR1 : FP_FR0;
else if (prog_req.fr1)
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index dc0b296..2ac6c26 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -187,9 +187,44 @@
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- PTR_LA v0, plat_irq_dispatch
- jr v0
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_SIZE
+ PTR_ADD sp, t0, t1
+
+2:
+ jal plat_irq_dispatch
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
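This dispatch path (and the vectored-interrupt path in the next hunk) compares the task-stack base derived from sp against this CPU's irq_stack pointer and switches only when they differ. The logic, as a C sketch of the assembly above (assumed types; the real code is the asm):

	static unsigned long pick_irq_sp(unsigned long sp, int cpu)
	{
		unsigned long task_base = sp & ~(THREAD_SIZE - 1);

		if ((unsigned long)irq_stack[cpu] == task_base)
			return sp;	/* already on the IRQ stack */
		return (unsigned long)irq_stack[cpu] + IRQ_STACK_SIZE;
	}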
@@ -262,8 +297,44 @@
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- jr v0
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_SIZE
+ PTR_ADD sp, t0, t1
+
+2:
+ jalr v0
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
END(except_vec_vi_handler)
/*
@@ -448,7 +519,7 @@
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
- LEAF(handle_ri_rdhwr_vivt)
+ LEAF(handle_ri_rdhwr_tlbp)
.set push
.set noat
.set noreorder
@@ -467,7 +538,7 @@
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
- END(handle_ri_rdhwr_vivt)
+ END(handle_ri_rdhwr_tlbp)
LEAF(handle_ri_rdhwr)
.set push
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index f25f7ea..2b0a371 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -25,6 +25,8 @@
#include <linux/atomic.h>
#include <asm/uaccess.h>
+void *irq_stack[NR_CPUS];
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -58,6 +60,15 @@
clear_c0_status(ST0_IM);
arch_init_irq();
+
+ for_each_possible_cpu(i) {
+ int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+ void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+
+ irq_stack[i] = s;
+ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+ }
}
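One thing worth noting about the allocation loop: __get_free_pages() takes an allocation order rather than a page count, so a size-derived allocation is more commonly spelled with get_order(). A sketch of that spelling (an aside, not part of this patch; assumes <linux/gfp.h>):

	static void *alloc_one_irq_stack(void)
	{
		/* get_order() returns the smallest order whose allocation
		 * covers IRQ_STACK_SIZE bytes */
		return (void *)__get_free_pages(GFP_KERNEL,
						get_order(IRQ_STACK_SIZE));
	}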
#ifdef CONFIG_DEBUG_STACKOVERFLOW
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index de63d36..732d617 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -244,9 +244,6 @@
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
- struct thread_info *ti = task_thread_info(p);
- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
- struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
@@ -254,25 +251,46 @@
#endif
for (reg = 0; reg < 16; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = 0;
/* S0 - S7 */
- for (reg = 16; reg < 24; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = p->thread.reg16;
+ *(ptr++) = p->thread.reg17;
+ *(ptr++) = p->thread.reg18;
+ *(ptr++) = p->thread.reg19;
+ *(ptr++) = p->thread.reg20;
+ *(ptr++) = p->thread.reg21;
+ *(ptr++) = p->thread.reg22;
+ *(ptr++) = p->thread.reg23;
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
- for (reg = 28; reg < 32; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = (long)p;
+ *(ptr++) = p->thread.reg29;
+ *(ptr++) = p->thread.reg30;
+ *(ptr++) = p->thread.reg31;
- *(ptr++) = regs->cp0_status;
- *(ptr++) = regs->lo;
- *(ptr++) = regs->hi;
- *(ptr++) = regs->cp0_badvaddr;
- *(ptr++) = regs->cp0_cause;
- *(ptr++) = regs->cp0_epc;
+ *(ptr++) = p->thread.cp0_status;
+
+ /* lo, hi */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * BadVAddr, Cause
+ * Ideally these would come from the last exception frame up the stack,
+ * but that requires unwinding; without it we can't know much for sure.
+ */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * PC
+ * use return address (RA), i.e. the moment after return from resume()
+ */
+ *(ptr++) = p->thread.reg31;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 1652f36..fbbf5fc 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -33,6 +33,7 @@
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -556,7 +557,19 @@
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
- unsigned long stack_page = (unsigned long)task_stack_page(task);
+ unsigned long stack_page = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (on_irq_stack(cpu, *sp)) {
+ stack_page = (unsigned long)irq_stack[cpu];
+ break;
+ }
+ }
+
+ if (!stack_page)
+ stack_page = (unsigned long)task_stack_page(task);
+
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3905003..ec87ef9 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -81,7 +81,7 @@
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_ri_rdhwr_vivt(void);
+extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
@@ -2352,9 +2352,18 @@
set_except_vector(EXCCODE_SYS, handle_sys);
set_except_vector(EXCCODE_BP, handle_bp);
- set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
- (cpu_has_vtag_icache ?
- handle_ri_rdhwr_vivt : handle_ri_rdhwr));
+
+ if (rdhwr_noopt)
+ set_except_vector(EXCCODE_RI, handle_ri);
+ else {
+ if (cpu_has_vtag_icache)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else if (current_cpu_type() == CPU_LOONGSON3)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
+ }
+
set_except_vector(EXCCODE_CPU, handle_cpu);
set_except_vector(EXCCODE_OV, handle_ov);
set_except_vector(EXCCODE_TR, handle_tr);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 9a61671..9056547 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@
if (!np_xbar)
panic("Failed to load xbar nodes from devicetree");
- if (of_address_to_resource(np_pmu, 0, &res_xbar))
+ if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name) < 0)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 88cfaf8..9d0107f 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1558,6 +1558,7 @@
vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
c->vcache.waybit = 0;
+ c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1660,6 +1661,7 @@
/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
scache_size *= 4;
c->scache.waybit = 0;
+ c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
if (scache_size)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 55ce396..2da5649 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -762,7 +762,8 @@
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr,
+ unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -771,6 +772,22 @@
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
+ if (cpu_has_ftlb && flush) {
+ BUG_ON(!cpu_has_tlbinv);
+
+ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+
+ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+ return;
+ }
+
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
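At runtime the uasm sequence added above emits roughly: flag the indexed entry invalid via EntryHi.EHINV, rewrite it in place, then restore EntryHi before the new huge entry is installed via a random slot. The same steps in direct C, as a sketch using the usual mipsregs.h accessors (the indexed rewrite is written as inline asm):

	static void ehinv_invalidate_indexed(void)
	{
		unsigned long hi = read_c0_entryhi();

		write_c0_entryhi(hi | MIPS_ENTRYHI_EHINV); /* flag invalid */
		__asm__ __volatile__("tlbwi");		/* rewrite in place */
		write_c0_entryhi(hi);			/* restore EntryHi */
	}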
@@ -2197,7 +2214,7 @@
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
@@ -2252,7 +2269,7 @@
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
@@ -2308,7 +2325,7 @@
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index 3e0aa09..9e4631a 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -36,7 +36,7 @@
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
@@ -44,7 +44,7 @@
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 367c542..3901b80 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@
return alloc_bootmem_align(size, align);
}
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+ bool nomap)
+{
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+ return 0;
+}
+
void __init early_init_devtree(void *params)
{
__be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index a4ff86d..6c4e351 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -195,6 +195,9 @@
}
#endif /* CONFIG_BLK_DEV_INITRD */
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
unflatten_and_copy_device_tree();
setup_cpuinfo();
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 9a2aee1..0497cec 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -42,10 +42,10 @@
#define get_user __get_user
#if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr) __get_user_asm64(ptr)
+#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#else
-#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#endif
@@ -68,6 +68,15 @@
".previous\n"
/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with the lowest bit set) for which the fault handler in fixup_exception()
+ * loads -EFAULT into %r8 on a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
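Fixup addresses are at least word-aligned, so bit 0 is free to carry the -EFAULT flag. A sketch of the resulting encode/decode pair (hypothetical struct and field names, modeled on the fault.c hunk later in this patch):

	struct fixup_entry { int fixup; };	/* relative fixup offset */

	static int fixup_wants_efault(const struct fixup_entry *fix)
	{
		return fix->fixup & 1;		/* tagged by ..._EFAULT() */
	}

	static unsigned long fixup_target(const struct fixup_entry *fix)
	{
		/* strip the tag bits, as fixup_exception() does */
		return ((unsigned long)&fix->fixup + fix->fixup) & ~3UL;
	}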
+/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
*/
@@ -91,92 +100,116 @@
" mtsp %0,%%sr2\n\t" \
: : "r"(get_fs()) : )
-#define __get_user(x, ptr) \
-({ \
- register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val __asm__ ("r9") = 0; \
- \
- load_sr2(); \
- switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm("ldb", ptr); break; \
- case 2: __get_user_asm("ldh", ptr); break; \
- case 4: __get_user_asm("ldw", ptr); break; \
- case 8: LDD_USER(ptr); break; \
- default: BUILD_BUG(); break; \
- } \
- \
- (x) = (__force __typeof__(*(ptr))) __gu_val; \
- __gu_err; \
+#define __get_user_internal(val, ptr) \
+({ \
+ register long __gu_err __asm__ ("r8") = 0; \
+ \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_asm(val, "ldb", ptr); break; \
+ case 2: __get_user_asm(val, "ldh", ptr); break; \
+ case 4: __get_user_asm(val, "ldw", ptr); break; \
+ case 8: LDD_USER(val, ptr); break; \
+ default: BUILD_BUG(); \
+ } \
+ \
+ __gu_err; \
})
-#define __get_user_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+#define __get_user(val, ptr) \
+({ \
+ load_sr2(); \
+ __get_user_internal(val, ptr); \
+})
+
+#define __get_user_asm(val, ldx, ptr) \
+{ \
+ register long __gu_val; \
+ \
+ __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err)); \
+ \
+ (val) = (__force __typeof__(*(ptr))) __gu_val; \
+}
#if !defined(CONFIG_64BIT)
-#define __get_user_asm64(ptr) \
- __asm__("\n1:\tldw 0(%%sr2,%2),%0" \
- "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+#define __get_user_asm64(val, ptr) \
+{ \
+ union { \
+ unsigned long long l; \
+ __typeof__(*(ptr)) t; \
+ } __gu_tmp; \
+ \
+ __asm__(" copy %%r0,%R0\n" \
+ "1: ldw 0(%%sr2,%2),%0\n" \
+ "2: ldw 4(%%sr2,%2),%R0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+ : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
+ : "r"(ptr), "1"(__gu_err)); \
+ \
+ (val) = __gu_tmp.t; \
+}
#endif /* !defined(CONFIG_64BIT) */
-#define __put_user(x, ptr) \
+#define __put_user_internal(x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
- load_sr2(); \
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm("stb", __x, ptr); break; \
- case 2: __put_user_asm("sth", __x, ptr); break; \
- case 4: __put_user_asm("stw", __x, ptr); break; \
- case 8: STD_USER(__x, ptr); break; \
- default: BUILD_BUG(); break; \
- } \
+ case 1: __put_user_asm("stb", __x, ptr); break; \
+ case 2: __put_user_asm("sth", __x, ptr); break; \
+ case 4: __put_user_asm("stw", __x, ptr); break; \
+ case 8: STD_USER(__x, ptr); break; \
+ default: BUILD_BUG(); \
+ } \
\
__pu_err; \
})
+#define __put_user(x, ptr) \
+({ \
+ load_sr2(); \
+ __put_user_internal(x, ptr); \
+})
+
+
/*
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+ "1: " stx " %2,0(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err) \
- : "r1")
+ : "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%%sr2,%1)" \
- "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+ "1: stw %2,0(%%sr2,%1)\n" \
+ "2: stw %R2,4(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err) \
- : "r1"); \
+ : "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 3cad8aa..4e6f0d9 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index e81afc37..e7ffde2 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -140,6 +140,8 @@
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
+	/* Prevent soft lockup/stalled CPU messages during the endless loop. */
+ rcu_sysrq_start();
for (;;);
}
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8..f2dac4d 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o
obj-y := iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f2..0000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Fixup routines for kernel exception handling.
- */
-#include <asm/asm-offsets.h>
-#include <asm/assembly.h>
-#include <asm/errno.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_SMP
- .macro get_fault_ip t1 t2
- loadgp
- addil LT%__per_cpu_offset,%r27
- LDREG RT%__per_cpu_offset(%r1),\t1
- /* t2 = smp_processor_id() */
- mfctl 30,\t2
- ldw TI_CPU(\t2),\t2
-#ifdef CONFIG_64BIT
- extrd,u \t2,63,32,\t2
-#endif
- /* t2 = &__per_cpu_offset[smp_processor_id()]; */
- LDREGX \t2(\t1),\t2
- addil LT%exception_data,%r27
- LDREG RT%exception_data(%r1),\t1
- /* t1 = this_cpu_ptr(&exception_data) */
- add,l \t1,\t2,\t1
- /* %r27 = t1->fault_gp - restore gp */
- LDREG EXCDATA_GP(\t1), %r27
- /* t1 = t1->fault_ip */
- LDREG EXCDATA_IP(\t1), \t1
- .endm
-#else
- .macro get_fault_ip t1 t2
- loadgp
- /* t1 = this_cpu_ptr(&exception_data) */
- addil LT%exception_data,%r27
- LDREG RT%exception_data(%r1),\t2
- /* %r27 = t2->fault_gp - restore gp */
- LDREG EXCDATA_GP(\t2), %r27
- /* t1 = t2->fault_ip */
- LDREG EXCDATA_IP(\t2), \t1
- .endm
-#endif
-
- .level LEVEL
-
- .text
- .section .fixup, "ax"
-
- /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY_CFI(fixup_get_user_skip_1)
- get_fault_ip %r1,%r8
- ldo 4(%r1), %r1
- ldi -EFAULT, %r8
- bv %r0(%r1)
- copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_1)
-
-ENTRY_CFI(fixup_get_user_skip_2)
- get_fault_ip %r1,%r8
- ldo 8(%r1), %r1
- ldi -EFAULT, %r8
- bv %r0(%r1)
- copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_2)
-
- /* put_user() fixups, store -EFAULT in r8 */
-ENTRY_CFI(fixup_put_user_skip_1)
- get_fault_ip %r1,%r8
- ldo 4(%r1), %r1
- bv %r0(%r1)
- ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_1)
-
-ENTRY_CFI(fixup_put_user_skip_2)
- get_fault_ip %r1,%r8
- ldo 8(%r1), %r1
- bv %r0(%r1)
- ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_2)
-
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de..85c28bb 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,321 @@
.procend
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * This code is based on a C implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from glibc.
+ *
+ * Several strategies are tried to get the best performance for various
+ * conditions. In the optimal case, we copy in loops that move 32 or 16 bytes
+ * at a time using general registers. Unaligned copies are handled either by
+ * aligning the destination and then using a shift-and-write method, or in a
+ * few cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends credibility
+ * to the idea that gcc can generate very good code as long as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ * additional interlocks. The assumption is that these were only efficient
+ * on old machines (pre-PA8000 processors)
+ */
+
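The shift-and-write method mentioned above funnels two aligned source words into one aligned destination word; that is what the shrpw instructions in the .Ldo0-.Ldo4 block compute. The same merge in C, as a sketch (assumes big-endian 32-bit words and sh = 8 * (src % 4) with 0 < sh < 32, which holds on the unaligned path):

	static unsigned int merge_words(unsigned int w0, unsigned int w1,
					unsigned int sh)
	{
		/* leading bytes come from w0, the remainder from w1 */
		return (w0 << sh) | (w1 >> (32 - sh));
	}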
+ dst = arg0
+ src = arg1
+ len = arg2
+ end = arg3
+ t1 = r19
+ t2 = r20
+ t3 = r21
+ t4 = r22
+ srcspc = sr1
+ dstspc = sr2
+
+ t0 = r1
+ a1 = t1
+ a2 = t2
+ a3 = t3
+ a0 = t4
+
+ save_src = ret0
+ save_dst = ret1
+ save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /* Last destination address */
+ add dst,len,end
+
+ /* short copy with less than 16 bytes? */
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+ /* same alignment? */
+ xor src,dst,t0
+ extru t0,31,2,t1
+ cmpib,<>,n 0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+ /* only do 64-bit copies if we can get aligned. */
+ extru t0,31,3,t1
+ cmpib,<>,n 0,t1,.Lalign_loop32
+
+ /* loop until we are 64-bit aligned */
+.Lalign_loop64:
+ extru dst,31,3,t1
+ cmpib,=,n 0,t1,.Lcopy_loop_16_start
+20: ldb,ma 1(srcspc,src),t1
+21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop64
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_loop_16_start:
+ ldi 31,t0
+.Lcopy_loop_16:
+ cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10: ldd 0(srcspc,src),t1
+11: ldd 8(srcspc,src),t2
+ ldo 16(src),src
+12: std,ma t1,8(dstspc,dst)
+13: std,ma t2,8(dstspc,dst)
+14: ldd 0(srcspc,src),t1
+15: ldd 8(srcspc,src),t2
+ ldo 16(src),src
+16: std,ma t1,8(dstspc,dst)
+17: std,ma t2,8(dstspc,dst)
+
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+ b .Lcopy_loop_16
+ ldo -32(len),len
+
+.Lword_loop:
+ cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20: ldw,ma 4(srcspc,src),t1
+21: stw,ma t1,4(dstspc,dst)
+ b .Lword_loop
+ ldo -4(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+ /* loop until we are 32-bit aligned */
+.Lalign_loop32:
+ extru dst,31,2,t1
+ cmpib,=,n 0,t1,.Lcopy_loop_8
+20: ldb,ma 1(srcspc,src),t1
+21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop32
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_8:
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10: ldw 0(srcspc,src),t1
+11: ldw 4(srcspc,src),t2
+12: stw,ma t1,4(dstspc,dst)
+13: stw,ma t2,4(dstspc,dst)
+14: ldw 8(srcspc,src),t1
+15: ldw 12(srcspc,src),t2
+ ldo 16(src),src
+16: stw,ma t1,4(dstspc,dst)
+17: stw,ma t2,4(dstspc,dst)
+
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+ b .Lcopy_loop_8
+ ldo -16(len),len
+
+.Lbyte_loop:
+ cmpclr,COND(<>) len,%r0,%r0
+ b,n .Lcopy_done
+20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+21: stb,ma t1,1(dstspc,dst)
+ b .Lbyte_loop
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+ bv %r0(%r2)
+ sub end,dst,ret0
+
+
+ /* src and dst are not aligned the same way. */
+ /* need to go the hard way */
+.Lunaligned_copy:
+ /* align until dst is 32bit-word-aligned */
+ extru dst,31,2,t1
+ cmpib,=,n 0,t1,.Lcopy_dstaligned
+20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+21: stb,ma t1,1(dstspc,dst)
+ b .Lunaligned_copy
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+ /* store src, dst and len in safe place */
+ copy src,save_src
+ copy dst,save_dst
+ copy len,save_len
+
+	/* len now needs to hold the number of words to copy */
+ SHRREG len,2,len
+
+ /*
+ * Copy from a not-aligned src to an aligned dst using shifts.
+ * Handles 4 words per loop.
+ */
+
+ depw,z src,28,2,t0
+ subi 32,t0,t0
+ mtsar t0
+ extru len,31,2,t0
+ cmpib,= 2,t0,.Lcase2
+ /* Make src aligned by rounding it down. */
+ depi 0,31,2,src
+
+ cmpiclr,<> 3,t0,%r0
+ b,n .Lcase3
+ cmpiclr,<> 1,t0,%r0
+ b,n .Lcase1
+.Lcase0:
+ cmpb,COND(=) %r0,len,.Lcda_finish
+ nop
+
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b,n .Ldo3
+.Lcase1:
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ ldo -1(len),len
+ cmpb,COND(=),n %r0,len,.Ldo0
+.Ldo4:
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a2, a3, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a3, a0, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a0, a1, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a1, a2, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+ ldo -4(len),len
+ cmpb,COND(<>) %r0,len,.Ldo4
+ nop
+.Ldo0:
+ shrpw a2, a3, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+ /* calculate new src, dst and len and jump to byte-copy loop */
+ sub dst,save_dst,t0
+ add save_src,t0,src
+ b .Lbyte_loop
+ sub save_len,t0,len
+
+.Lcase3:
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b .Ldo2
+ ldo 1(len),len
+.Lcase2:
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b .Ldo1
+ ldo 2(len),len
+
+
+ /* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+ b .Lcopy_done
+10: std,ma t1,8(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+ b .Lcopy_done
+10: stw,ma t1,4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+ .exit
+ENDPROC_CFI(pa_memcpy)
+ .procend
+
.end
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index f82ff10..b3d47ec 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
* Portions derived from the GNU C Library
* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
*
- * Several strategies are tried to try to get the best performance for various
- * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
- * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
- * general registers. Unaligned copies are handled either by aligning the
- * destination and then using shift-and-write method, or in a few cases by
- * falling back to a byte-at-a-time copy.
- *
- * I chose to implement this in C because it is easier to maintain and debug,
- * and in my experiments it appears that the C code generated by gcc (3.3/3.4
- * at the time of writing) is fairly optimal. Unfortunately some of the
- * semantics of the copy routine (exception handling) is difficult to express
- * in C, so we have to play some tricks to get it to work.
- *
- * All the loads and stores are done via explicit asm() code in order to use
- * the right space registers.
- *
- * Testing with various alignments and buffer sizes shows that this code is
- * often >10x faster than a simple byte-at-a-time copy, even for strangely
- * aligned operands. It is interesting to note that the glibc version
- * of memcpy (written in C) is actually quite fast already. This routine is
- * able to beat it by 30-40% for aligned copies because of the loop unrolling,
- * but in some cases the glibc version is still slightly faster. This lends
- * more credibility that gcc can generate very good code as long as we are
- * careful.
- *
- * TODO:
- * - cache prefetching needs more experimentation to get optimal settings
- * - try not to use the post-increment address modifiers; they create additional
- * interlocks
- * - replace byte-copy loops with stybs sequences
*/
-#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
-#define s_space "%%sr1"
-#define d_space "%%sr2"
-#else
-#include "memcpy.h"
-#define s_space "%%sr0"
-#define d_space "%%sr0"
-#define pa_memcpy new2_copy
-#endif
DECLARE_PER_CPU(struct exception_data, exception_data);
-#define preserve_branch(label) do { \
- volatile int dummy = 0; \
- /* The following branch is never taken, it's just here to */ \
- /* prevent gcc from optimizing away our exception code. */ \
- if (unlikely(dummy != dummy)) \
- goto label; \
-} while (0)
-
#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
#define get_kernel_space() (0)
-#define MERGE(w0, sh_1, w1, sh_2) ({ \
- unsigned int _r; \
- asm volatile ( \
- "mtsar %3\n" \
- "shrpw %1, %2, %%sar, %0\n" \
- : "=r"(_r) \
- : "r"(w0), "r"(w1), "r"(sh_2) \
- ); \
- _r; \
-})
-#define THRESHOLD 16
-
-#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
-#else
-#define DPRINTF(fmt, args...)
-#endif
-
-#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : _tt(_t), "+r"(_a) \
- : \
- : "r8")
-
-#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : "+r"(_a) \
- : _tt(_t) \
- : "r8")
-
-#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
-#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
-#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
-#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
-#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
-#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
-
-#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : _tt(_t) \
- : "r"(_a) \
- : "r8")
-
-#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : \
- : _tt(_t), "r"(_a) \
- : "r8")
-
-#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
-#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
-
-#ifdef CONFIG_PREFETCH
-static inline void prefetch_src(const void *addr)
-{
- __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
-}
-
-static inline void prefetch_dst(const void *addr)
-{
- __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
-}
-#else
-#define prefetch_src(addr) do { } while(0)
-#define prefetch_dst(addr) do { } while(0)
-#endif
-
-#define PA_MEMCPY_OK 0
-#define PA_MEMCPY_LOAD_ERROR 1
-#define PA_MEMCPY_STORE_ERROR 2
-
-/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
- * per loop. This code is derived from glibc.
- */
-static noinline unsigned long copy_dstaligned(unsigned long dst,
- unsigned long src, unsigned long len)
-{
- /* gcc complains that a2 and a3 may be uninitialized, but actually
- * they cannot be. Initialize a2/a3 to shut gcc up.
- */
- register unsigned int a0, a1, a2 = 0, a3 = 0;
- int sh_1, sh_2;
-
- /* prefetch_src((const void *)src); */
-
- /* Calculate how to shift a word read at the memory operation
- aligned srcp to make it aligned for copy. */
- sh_1 = 8 * (src % sizeof(unsigned int));
- sh_2 = 8 * sizeof(unsigned int) - sh_1;
-
- /* Make src aligned by rounding it down. */
- src &= -sizeof(unsigned int);
-
- switch (len % 4)
- {
- case 2:
- /* a1 = ((unsigned int *) src)[0];
- a2 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a1, cda_ldw_exc);
- ldw(s_space, 4, src, a2, cda_ldw_exc);
- src -= 1 * sizeof(unsigned int);
- dst -= 3 * sizeof(unsigned int);
- len += 2;
- goto do1;
- case 3:
- /* a0 = ((unsigned int *) src)[0];
- a1 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a0, cda_ldw_exc);
- ldw(s_space, 4, src, a1, cda_ldw_exc);
- src -= 0 * sizeof(unsigned int);
- dst -= 2 * sizeof(unsigned int);
- len += 1;
- goto do2;
- case 0:
- if (len == 0)
- return PA_MEMCPY_OK;
- /* a3 = ((unsigned int *) src)[0];
- a0 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a3, cda_ldw_exc);
- ldw(s_space, 4, src, a0, cda_ldw_exc);
- src -=-1 * sizeof(unsigned int);
- dst -= 1 * sizeof(unsigned int);
- len += 0;
- goto do3;
- case 1:
- /* a2 = ((unsigned int *) src)[0];
- a3 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a2, cda_ldw_exc);
- ldw(s_space, 4, src, a3, cda_ldw_exc);
- src -=-2 * sizeof(unsigned int);
- dst -= 0 * sizeof(unsigned int);
- len -= 1;
- if (len == 0)
- goto do0;
- goto do4; /* No-op. */
- }
-
- do
- {
- /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
-do4:
- /* a0 = ((unsigned int *) src)[0]; */
- ldw(s_space, 0, src, a0, cda_ldw_exc);
- /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
- stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-do3:
- /* a1 = ((unsigned int *) src)[1]; */
- ldw(s_space, 4, src, a1, cda_ldw_exc);
- /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
- stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
-do2:
- /* a2 = ((unsigned int *) src)[2]; */
- ldw(s_space, 8, src, a2, cda_ldw_exc);
- /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
- stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
-do1:
- /* a3 = ((unsigned int *) src)[3]; */
- ldw(s_space, 12, src, a3, cda_ldw_exc);
- /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
- stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
-
- src += 4 * sizeof(unsigned int);
- dst += 4 * sizeof(unsigned int);
- len -= 4;
- }
- while (len != 0);
-
-do0:
- /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
- stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-
- preserve_branch(handle_load_error);
- preserve_branch(handle_store_error);
-
- return PA_MEMCPY_OK;
-
-handle_load_error:
- __asm__ __volatile__ ("cda_ldw_exc:\n");
- return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
- __asm__ __volatile__ ("cda_stw_exc:\n");
- return PA_MEMCPY_STORE_ERROR;
-}
-
-
-/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
- * In case of an access fault the faulty address can be read from the per_cpu
- * exception data struct. */
-static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
- unsigned long len)
-{
- register unsigned long src, dst, t1, t2, t3;
- register unsigned char *pcs, *pcd;
- register unsigned int *pws, *pwd;
- register double *pds, *pdd;
- unsigned long ret;
-
- src = (unsigned long)srcp;
- dst = (unsigned long)dstp;
- pcs = (unsigned char *)srcp;
- pcd = (unsigned char *)dstp;
-
- /* prefetch_src((const void *)srcp); */
-
- if (len < THRESHOLD)
- goto byte_copy;
-
- /* Check alignment */
- t1 = (src ^ dst);
- if (unlikely(t1 & (sizeof(double)-1)))
- goto unaligned_copy;
-
- /* src and dst have same alignment. */
-
- /* Copy bytes till we are double-aligned. */
- t2 = src & (sizeof(double) - 1);
- if (unlikely(t2 != 0)) {
- t2 = sizeof(double) - t2;
- while (t2 && len) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- len--;
- stbma(d_space, t3, pcd, pmc_store_exc);
- t2--;
- }
- }
-
- pds = (double *)pcs;
- pdd = (double *)pcd;
-
-#if 0
- /* Copy 8 doubles at a time */
- while (len >= 8*sizeof(double)) {
- register double r1, r2, r3, r4, r5, r6, r7, r8;
- /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
- flddma(s_space, pds, r1, pmc_load_exc);
- flddma(s_space, pds, r2, pmc_load_exc);
- flddma(s_space, pds, r3, pmc_load_exc);
- flddma(s_space, pds, r4, pmc_load_exc);
- fstdma(d_space, r1, pdd, pmc_store_exc);
- fstdma(d_space, r2, pdd, pmc_store_exc);
- fstdma(d_space, r3, pdd, pmc_store_exc);
- fstdma(d_space, r4, pdd, pmc_store_exc);
-
-#if 0
- if (L1_CACHE_BYTES <= 32)
- prefetch_src((char *)pds + L1_CACHE_BYTES);
-#endif
- flddma(s_space, pds, r5, pmc_load_exc);
- flddma(s_space, pds, r6, pmc_load_exc);
- flddma(s_space, pds, r7, pmc_load_exc);
- flddma(s_space, pds, r8, pmc_load_exc);
- fstdma(d_space, r5, pdd, pmc_store_exc);
- fstdma(d_space, r6, pdd, pmc_store_exc);
- fstdma(d_space, r7, pdd, pmc_store_exc);
- fstdma(d_space, r8, pdd, pmc_store_exc);
- len -= 8*sizeof(double);
- }
-#endif
-
- pws = (unsigned int *)pds;
- pwd = (unsigned int *)pdd;
-
-word_copy:
- while (len >= 8*sizeof(unsigned int)) {
- register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
- /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
- ldwma(s_space, pws, r1, pmc_load_exc);
- ldwma(s_space, pws, r2, pmc_load_exc);
- ldwma(s_space, pws, r3, pmc_load_exc);
- ldwma(s_space, pws, r4, pmc_load_exc);
- stwma(d_space, r1, pwd, pmc_store_exc);
- stwma(d_space, r2, pwd, pmc_store_exc);
- stwma(d_space, r3, pwd, pmc_store_exc);
- stwma(d_space, r4, pwd, pmc_store_exc);
-
- ldwma(s_space, pws, r5, pmc_load_exc);
- ldwma(s_space, pws, r6, pmc_load_exc);
- ldwma(s_space, pws, r7, pmc_load_exc);
- ldwma(s_space, pws, r8, pmc_load_exc);
- stwma(d_space, r5, pwd, pmc_store_exc);
- stwma(d_space, r6, pwd, pmc_store_exc);
- stwma(d_space, r7, pwd, pmc_store_exc);
- stwma(d_space, r8, pwd, pmc_store_exc);
- len -= 8*sizeof(unsigned int);
- }
-
- while (len >= 4*sizeof(unsigned int)) {
- register unsigned int r1,r2,r3,r4;
- ldwma(s_space, pws, r1, pmc_load_exc);
- ldwma(s_space, pws, r2, pmc_load_exc);
- ldwma(s_space, pws, r3, pmc_load_exc);
- ldwma(s_space, pws, r4, pmc_load_exc);
- stwma(d_space, r1, pwd, pmc_store_exc);
- stwma(d_space, r2, pwd, pmc_store_exc);
- stwma(d_space, r3, pwd, pmc_store_exc);
- stwma(d_space, r4, pwd, pmc_store_exc);
- len -= 4*sizeof(unsigned int);
- }
-
- pcs = (unsigned char *)pws;
- pcd = (unsigned char *)pwd;
-
-byte_copy:
- while (len) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- }
-
- return PA_MEMCPY_OK;
-
-unaligned_copy:
- /* possibly we are aligned on a word, but not on a double... */
- if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
- t2 = src & (sizeof(unsigned int) - 1);
-
- if (unlikely(t2 != 0)) {
- t2 = sizeof(unsigned int) - t2;
- while (t2) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- t2--;
- }
- }
-
- pws = (unsigned int *)pcs;
- pwd = (unsigned int *)pcd;
- goto word_copy;
- }
-
- /* Align the destination. */
- if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
- t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
- while (t2) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- t2--;
- }
- dst = (unsigned long)pcd;
- src = (unsigned long)pcs;
- }
-
- ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
- if (ret)
- return ret;
-
- pcs += (len & -sizeof(unsigned int));
- pcd += (len & -sizeof(unsigned int));
- len %= sizeof(unsigned int);
-
- preserve_branch(handle_load_error);
- preserve_branch(handle_store_error);
-
- goto byte_copy;
-
-handle_load_error:
- __asm__ __volatile__ ("pmc_load_exc:\n");
- return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
- __asm__ __volatile__ ("pmc_store_exc:\n");
- return PA_MEMCPY_STORE_ERROR;
-}
-
-
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
-{
- unsigned long ret, fault_addr, reference;
- struct exception_data *d;
+extern unsigned long pa_memcpy(void *dst, const void *src,
+ unsigned long len);
- ret = pa_memcpy_internal(dstp, srcp, len);
- if (likely(ret == PA_MEMCPY_OK))
- return 0;
-
- /* if a load or store fault occured we can get the faulty addr */
- d = this_cpu_ptr(&exception_data);
- fault_addr = d->fault_addr;
-
- /* error in load or store? */
- if (ret == PA_MEMCPY_LOAD_ERROR)
- reference = (unsigned long) srcp;
- else
- reference = (unsigned long) dstp;
-
- DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
- ret, len, fault_addr, reference);
-
- if (fault_addr >= reference)
- return len - (fault_addr - reference);
- else
- return len;
-}
-
-#ifdef __KERNEL__
unsigned long __copy_to_user(void __user *dst, const void *src,
unsigned long len)
{
@@ -537,5 +84,3 @@
return __probe_kernel_read(dst, src, size);
}
-
-#endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 1a0b4f6..040c48f 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -149,6 +149,23 @@
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
+ /*
+ * Fix up get_user() and put_user().
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+ * bit in the relative address of the fixup routine to indicate
+ * that %r8 should be loaded with -EFAULT to report a userspace
+ * access error.
+ */
+ if (fix->fixup & 1) {
+ regs->gr[8] = -EFAULT;
+
+ /* zero target register for get_user() */
+ if (parisc_acctyp(0, regs->iir) == VM_READ) {
+ int treg = regs->iir & 0x1f;
+ regs->gr[treg] = 0;
+ }
+ }
+
regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
regs->iaoq[0] &= ~3;
/*
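
A note on the fixup encoding in the hunk above: parisc exception-table
entries hold a relative offset to the fixup routine, and because that
offset is always instruction-aligned, its least-significant bit is free
to carry the "load -EFAULT" flag. A minimal standalone sketch of the
trick (hypothetical names, not the kernel's actual macros):

    /* Sketch: using the low bit of an aligned offset as a flag. */
    #include <stdio.h>

    #define FIXUP_EFAULT_FLAG 1UL

    static unsigned long encode_fixup(unsigned long rel_off, int want_efault)
    {
            /* rel_off is instruction-aligned, so bit 0 is always free */
            return rel_off | (want_efault ? FIXUP_EFAULT_FLAG : 0);
    }

    static void handle_fault(unsigned long fixup)
    {
            if (fixup & FIXUP_EFAULT_FLAG)
                    printf("load -EFAULT into the error register\n");
            /* strip the flag bits before branching, as iaoq[0] &= ~3 does */
            printf("branch to fixup at offset %#lx\n", fixup & ~3UL);
    }

    int main(void)
    {
            handle_fault(encode_fixup(0x40, 1));
            return 0;
    }
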
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 4119945..f058e0c 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@
}
if (len & ~VMX_ALIGN_MASK) {
+ preempt_disable();
pagefault_disable();
enable_kernel_altivec();
crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+ disable_kernel_altivec();
pagefault_enable();
+ preempt_enable();
}
tail = len & VMX_ALIGN_MASK;
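
The pattern this hunk completes is worth spelling out: kernel code that
touches VMX state must be non-preemptible for the whole region, because
enable_kernel_altivec() only prepares the unit on the local CPU, and the
unit must be handed back before preemption is re-enabled. A sketch of the
required bracketing using the same helpers as the hunk (kernel context,
not a standalone program):

    static u32 checksum_with_vmx(u32 crc, const u8 *p, size_t len)
    {
            preempt_disable();       /* stay on this CPU */
            pagefault_disable();     /* keep out of the fault path */
            enable_kernel_altivec(); /* make VMX usable in kernel mode */

            crc = __crc32c_vpmsum(crc, p, len);

            disable_kernel_altivec(); /* hand the unit back */
            pagefault_enable();
            preempt_enable();
            return crc;
    }
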
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 033f338..b2da7c8 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch (get_xop(instruction)) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}
/* Byteswap little endian loads and stores */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6432d4b..767ef6d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -689,7 +689,7 @@
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
- lwz r3,GPR1(r1)
+ ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@
addi r6,r6,8
bdnz 2b
- /* Do real store operation to complete stwu */
- lwz r5,GPR1(r1)
+ /* Do real store operation to complete stdu */
+ ld r5,GPR1(r1)
std r8,0(r5)
/* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4f17867..4cefe688 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@
* flush all bytes from start through stop-1 inclusive
*/
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr
@@ -120,7 +120,7 @@
*
* flush all bytes from start to stop-1 inclusive
*/
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
/*
* Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8d586cf..a12be60 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -245,6 +245,15 @@
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
+ /*
+ * Fixup HFSCR:TM based on CPU features. The bit is set by our
+ * early asm init because at that point we haven't updated our
+ * CPU features from firmware and device-tree. Here we have,
+ * so let's do it.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
}
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ad9fd52..197f0a6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -636,6 +636,10 @@
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
+ unsigned int use_local;
+
+ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -665,8 +669,7 @@
} pte_iterate_hashed_end();
}
- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && local) {
+ if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 4da604e..ca15613 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@
unsigned long decompress_kernel(void)
{
- unsigned long output_addr;
- unsigned char *output;
+ void *output, *kernel_end;
- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
- memset(&_bss, 0, &_ebss - &_bss);
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+ kernel_end = output + SZ__bss_start;
+ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
- * kernel image.
+ * kernel image. This also prevents initrd corruption caused by
+ * bss clearing since kernel_end will always be located behind the
+ * current bss section.

*/
- if (INITRD_START && INITRD_SIZE &&
- INITRD_START < (unsigned long) output + SZ__bss_start) {
- check_ipl_parmblock(output + SZ__bss_start,
- INITRD_START + INITRD_SIZE);
- memmove(output + SZ__bss_start,
- (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) output + SZ__bss_start;
+ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+ check_ipl_parmblock(kernel_end, INITRD_SIZE);
+ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) kernel_end;
}
#endif
+ /*
+ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+ * initialized afterwards since they reside in bss.
+ */
+ memset(&_bss, 0, &_ebss - &_bss);
+ free_mem_ptr = (unsigned long) &_end;
+ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0362cd5..0cea702 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1029,6 +1029,8 @@
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
+ if (pte_present(entry))
+ pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry);
else
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 52d7c87..a7ef702 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -144,7 +144,7 @@
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : "=d" (__rc), "=Q" (*(to)) \
+ : "=d" (__rc), "+Q" (*(to)) \
: "d" (size), "Q" (*(from)), \
"d" (__reg0), "K" (-EFAULT) \
: "cc"); \
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1fb317f..b6802b9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -673,6 +673,14 @@
return pte_pfn(pte);
}
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+ pte_t pte = __pte(pmd_val(pmd));
+
+ return pte_write(pte);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
@@ -688,13 +696,6 @@
return pte_young(pte);
}
-static inline unsigned long pmd_write(pmd_t pmd)
-{
- pte_t pte = __pte(pmd_val(pmd));
-
- return pte_write(pte);
-}
-
static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 37aa537..bd7e2aa 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1495,7 +1495,7 @@
if ((long)addr < 0L) {
unsigned long pa = __pa(addr);
- if ((addr >> max_phys_bits) != 0UL)
+ if ((pa >> max_phys_bits) != 0UL)
return false;
return pfn_valid(pa >> PAGE_SHIFT);
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 7853b53..3f9d1a8 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -30,8 +30,10 @@
{
vdso32_enabled = simple_strtoul(s, NULL, 0);
- if (vdso32_enabled > 1)
+ if (vdso32_enabled > 1) {
pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+ vdso32_enabled = 0;
+ }
return 1;
}
@@ -62,13 +64,18 @@
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
+static const int zero;
+static const int one = 1;
+
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (int *)&zero,
+ .extra2 = (int *)&one,
},
{}
};
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 81b321a..f924629 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -507,6 +507,9 @@
cpuc->lbr_entries[i].to = msr_lastbranch.to;
cpuc->lbr_entries[i].mispred = 0;
cpuc->lbr_entries[i].predicted = 0;
+ cpuc->lbr_entries[i].in_tx = 0;
+ cpuc->lbr_entries[i].abort = 0;
+ cpuc->lbr_entries[i].cycles = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index e7f155c..94aad63 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -278,7 +278,7 @@
#define ARCH_DLINFO_IA32 \
do { \
- if (vdso32_enabled) { \
+ if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb..529bb4a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
@@ -69,15 +70,6 @@
clwb(p);
}
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
- return iter_is_iovec(i) == false;
-}
-
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
@@ -94,7 +86,35 @@
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
- if (__iter_needs_pmem_wb(i))
+ /*
+ * In the iovec case on x86_64 copy_from_iter_nocache() uses
+ * non-temporal stores for the bulk of the transfer, but we need
+ * to manually flush if the transfer is unaligned. A cached
+ * memory copy is used when destination or size is not naturally
+ * aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ *
+ * In the non-iovec case the entire destination needs to be
+ * flushed.
+ */
+ if (iter_is_iovec(i)) {
+ unsigned long flushed, dest = (unsigned long) addr;
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+ arch_wb_cache_pmem(addr, 1);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ arch_wb_cache_pmem(addr, 1);
+ }
+
+ flushed = dest - (unsigned long) addr;
+ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+ arch_wb_cache_pmem(addr + bytes - 1, 1);
+ }
+ } else
arch_wb_cache_pmem(addr, bytes);
return len;
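
As a worked example of the flushing rules in the comment above: with
dest = 0x1003 and bytes = 16, the head is not 8-byte aligned, so the
first cache line must be written back even though non-temporal stores
cover the bulk. The head predicate can be checked in isolation (a
userspace sketch with IS_ALIGNED re-derived locally):

    #include <stdbool.h>
    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* true when the head of the copy needs an explicit write-back */
    static bool head_needs_flush(unsigned long dest, unsigned long bytes)
    {
            if (bytes < 8)
                    return !IS_ALIGNED(dest, 4) || bytes != 4;
            return !IS_ALIGNED(dest, 8);
    }

    int main(void)
    {
            printf("%d\n", head_needs_flush(0x1003, 16)); /* 1: unaligned head */
            printf("%d\n", head_needs_flush(0x1000, 16)); /* 0: nt stores suffice */
            return 0;
    }
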
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 93d824e..040af19 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -85,7 +85,7 @@
head = llist_reverse_order(head);
llist_for_each_entry_safe(node, tmp, head, llnode) {
mce = &node->mce;
- atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
}
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index cd74a3f..de20902 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -13,7 +13,7 @@
MCE_PANIC_SEVERITY,
};
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern struct blocking_notifier_head x86_mce_decoder_chain;
#define ATTR_LEN 16
#define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a7fdf45..22cda29 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -120,7 +120,7 @@
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
*/
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
@@ -213,13 +213,13 @@
if (nb != &mce_srao_nb && nb->priority == INT_MAX)
nb->priority -= 1;
- atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+ blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);
void mce_unregister_decode_chain(struct notifier_block *nb)
{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+ blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
@@ -272,8 +272,6 @@
static void print_mce(struct mce *m)
{
- int ret = 0;
-
pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
m->extcpu, m->mcgstatus, m->bank, m->status);
@@ -309,14 +307,6 @@
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
cpu_data(m->extcpu).microcode);
- /*
- * Print out human-readable details about the MCE error,
- * (if the CPU has an implementation for that)
- */
- ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
- if (ret == NOTIFY_STOP)
- return;
-
pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
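
The motivation for switching the decoder chain above: atomic notifier
chains run their callbacks in a context that must not sleep, while MCE
decoders may need to take mutexes; a blocking chain runs callbacks in
process context under an rwsem. A minimal sketch of registering against
a blocking chain with the standard notifier API (hypothetical chain and
handler names):

    #include <linux/notifier.h>

    static BLOCKING_NOTIFIER_HEAD(example_chain);

    static int example_decode(struct notifier_block *nb, unsigned long val,
                              void *data)
    {
            /* may sleep: we are called from process context */
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_decode,
    };

    static int __init example_init(void)
    {
            blocking_notifier_chain_register(&example_chain, &example_nb);
            return 0;
    }
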
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9b54034..3dfca7b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -59,7 +59,7 @@
"load_store",
"insn_fetch",
"combined_unit",
- "",
+ "decode_unit",
"northbridge",
"execution_unit",
};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8639bb2..6bf09f5 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -983,6 +983,18 @@
unsigned long return_hooker = (unsigned long)
&return_to_handler;
+ /*
+ * When resuming from suspend-to-ram, this function can be indirectly
+ * called from early CPU startup code while the CPU is in real mode,
+ * which would fail miserably. Make sure the stack pointer is a
+ * virtual address.
+ *
+ * This check isn't as accurate as virt_addr_valid(), but it should be
+ * good enough for this purpose, and it's fast.
+ */
+ if (unlikely((long)__builtin_frame_address(0) >= 0))
+ return;
+
if (unlikely(ftrace_graph_is_dead()))
return;
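
The sign test added above works because on x86_64 every kernel virtual
address sits in the upper half of the canonical space, so it is negative
when viewed as a signed long, while a real-mode stack pointer is a small
positive number. The predicate in isolation (userspace sketch for an
LP64 host):

    #include <stdio.h>

    static int looks_like_kernel_va(unsigned long addr)
    {
            /* upper-half canonical addresses are negative as signed longs */
            return (long)addr < 0;
    }

    int main(void)
    {
            printf("%d\n", looks_like_kernel_va(0xffffffff81000000UL)); /* 1 */
            printf("%d\n", looks_like_kernel_va(0x9f000UL));            /* 0 */
            return 0;
    }
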
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e244c19..067f981 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -223,6 +223,22 @@
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
+ { /* Handle problems with rebooting on ASUS EeeBook X205TA */
+ .callback = set_acpi_reboot,
+ .ident = "ASUS EeeBook X205TA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"),
+ },
+ },
+ { /* Handle problems with rebooting on ASUS EeeBook X205TAW */
+ .callback = set_acpi_reboot,
+ .ident = "ASUS EeeBook X205TAW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
+ },
+ },
/* Certec */
{ /* Handle problems with rebooting on Certec BPC600 */
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ec1f756..71beb28 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -151,8 +151,8 @@
if (from->si_signo == SIGSEGV) {
if (from->si_code == SEGV_BNDERR) {
- compat_uptr_t lower = (unsigned long)&to->si_lower;
- compat_uptr_t upper = (unsigned long)&to->si_upper;
+ compat_uptr_t lower = (unsigned long)from->si_lower;
+ compat_uptr_t upper = (unsigned long)from->si_upper;
put_user_ex(lower, &to->si_lower);
put_user_ex(upper, &to->si_upper);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 69b8f8a..43b55ef 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6925,14 +6925,20 @@
}
page = nested_get_page(vcpu, vmptr);
- if (page == NULL ||
- *(u32 *)kmap(page) != VMCS12_REVISION) {
+ if (page == NULL) {
nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
+ nested_release_page_clean(page);
+ nested_vmx_failInvalid(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
kunmap(page);
+ nested_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
break;
case EXIT_REASON_VMCLEAR:
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f..9a53a06 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -290,7 +290,7 @@
_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
- _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912..889e761 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr < 256)
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ if (page_is_ram(pagenr)) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+ */
+ if (pagenr < 256)
+ return 2;
+
return 0;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
+ }
+
+ /*
+ * This must follow RAM test, since System RAM is considered a
+ * restricted resource under CONFIG_STRICT_IOMEM.
+ */
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+ /* Low 1MB bypasses iomem restrictions. */
+ if (pagenr < 256)
+ return 1;
+
+ return 0;
+ }
+
+ return 1;
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
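
After this change devmem_is_allowed() is effectively tri-state: 0 rejects
the access, 1 allows it, and 2 allows the mapping but asks that reads be
shown as zeros, so legacy tools keep working without exposing RAM
contents. A caller-side sketch of how such a result might be consumed
(hypothetical helper names, not the actual /dev/mem code):

    static ssize_t read_devmem_page(unsigned long pagenr, char *buf, size_t len)
    {
            switch (devmem_is_allowed(pagenr)) {
            case 0:
                    return -EPERM;       /* rejected outright */
            case 2:
                    memset(buf, 0, len); /* mapped, but reads as zeros */
                    return len;
            default:
                    /* copy_real_page() is a stand-in for the real copy path */
                    return copy_real_page(pagenr, buf, len);
            }
    }
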
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e571..aed2064 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -48,7 +48,7 @@
#if defined(CONFIG_X86_ESPFIX64)
static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_START;
+static const unsigned long vaddr_end = EFI_VA_END;
#else
static const unsigned long vaddr_end = __START_KERNEL_map;
#endif
@@ -105,7 +105,7 @@
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
- vaddr_end >= EFI_VA_START);
+ vaddr_end >= EFI_VA_END);
BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 30031d5..cdfe8c6 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -201,6 +201,10 @@
return;
}
+ /* No need to reserve regions that will never be freed. */
+ if (md.attribute & EFI_MEMORY_RUNTIME)
+ return;
+
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 44c88ad..bcea81f 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -145,7 +145,7 @@
static int xen_cpu_present_to_apicid(int cpu)
{
if (cpu_present(cpu))
- return xen_get_apic_id(xen_apic_read(APIC_ID));
+ return cpu_data(cpu).apicid;
else
return BAD_APICID;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fc..9f21b0c 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -713,10 +713,9 @@
size = PFN_PHYS(xen_start_info->nr_p2m_frames);
}
- if (!xen_is_e820_reserved(start, size)) {
- memblock_reserve(start, size);
+ memblock_reserve(start, size);
+ if (!xen_is_e820_reserved(start, size))
return;
- }
#ifdef CONFIG_X86_32
/*
@@ -727,6 +726,7 @@
BUG();
#else
xen_relocate_p2m();
+ memblock_free(start, size);
#endif
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 976b1d7..4ddbfd5 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -164,8 +164,21 @@
#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+ unsigned long off = va - PAGE_OFFSET;
+
+ if (off >= XCHAL_KSEG_SIZE)
+ off -= XCHAL_KSEG_SIZE;
+
+ return off + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
+#else
#define __pa(x) \
((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
#define __va(x) \
((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
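
The new ___pa() above handles configurations where the kernel has two
KSEG windows (cached and uncached aliases) onto the same physical
memory: a virtual address falling in the second window is folded back by
subtracting XCHAL_KSEG_SIZE before PHYS_OFFSET is applied. A standalone
model of the arithmetic (constants invented for illustration):

    #include <stdio.h>

    #define PAGE_OFFSET 0xd0000000UL /* illustrative */
    #define PHYS_OFFSET 0x00000000UL
    #define KSEG_SIZE   0x08000000UL /* 128 MiB window */

    static unsigned long model_pa(unsigned long va)
    {
            unsigned long off = va - PAGE_OFFSET;

            if (off >= KSEG_SIZE) /* address in the alias window */
                    off -= KSEG_SIZE;
            return off + PHYS_OFFSET;
    }

    int main(void)
    {
            /* both windows resolve to the same physical address */
            printf("%#lx\n", model_pa(0xd0100000UL));
            printf("%#lx\n", model_pa(0xd8100000UL));
            return 0;
    }
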
diff --git a/block/bio.c b/block/bio.c
index db85c57..655c901 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -372,10 +372,14 @@
bio_list_init(&punt);
bio_list_init(&nopunt);
- while ((bio = bio_list_pop(current->bio_list)))
+ while ((bio = bio_list_pop(&current->bio_list[0])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+ current->bio_list[0] = nopunt;
- *current->bio_list = nopunt;
+ bio_list_init(&nopunt);
+ while ((bio = bio_list_pop(&current->bio_list[1])))
+ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+ current->bio_list[1] = nopunt;
spin_lock(&bs->rescue_lock);
bio_list_merge(&bs->rescue_list, &punt);
@@ -462,7 +466,9 @@
* we retry with the original gfp_flags.
*/
- if (current->bio_list && !bio_list_empty(current->bio_list))
+ if (current->bio_list &&
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1])))
gfp_mask &= ~__GFP_DIRECT_RECLAIM;
p = mempool_alloc(bs->bio_pool, gfp_mask);
diff --git a/block/blk-core.c b/block/blk-core.c
index df9e160..710c93b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1996,7 +1996,14 @@
*/
blk_qc_t generic_make_request(struct bio *bio)
{
- struct bio_list bio_list_on_stack;
+ /*
+ * bio_list_on_stack[0] contains bios submitted by the current
+ * make_request_fn.
+ * bio_list_on_stack[1] contains bios that were submitted before
+ * the current make_request_fn, but that haven't been processed
+ * yet.
+ */
+ struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
if (!generic_make_request_checks(bio))
@@ -2013,7 +2020,7 @@
* should be added at the tail
*/
if (current->bio_list) {
- bio_list_add(current->bio_list, bio);
+ bio_list_add(&current->bio_list[0], bio);
goto out;
}
@@ -2032,23 +2039,39 @@
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
- bio_list_init(&bio_list_on_stack);
- current->bio_list = &bio_list_on_stack;
+ bio_list_init(&bio_list_on_stack[0]);
+ current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
if (likely(blk_queue_enter(q, false) == 0)) {
+ struct bio_list lower, same;
+
+ /* Create a fresh bio_list for all subordinate requests */
+ bio_list_on_stack[1] = bio_list_on_stack[0];
+ bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
blk_queue_exit(q);
- bio = bio_list_pop(current->bio_list);
+ /* sort new bios into those for a lower level
+ * and those for the same level
+ */
+ bio_list_init(&lower);
+ bio_list_init(&same);
+ while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+ if (q == bdev_get_queue(bio->bi_bdev))
+ bio_list_add(&same, bio);
+ else
+ bio_list_add(&lower, bio);
+ /* now assemble so we handle the lowest level first */
+ bio_list_merge(&bio_list_on_stack[0], &lower);
+ bio_list_merge(&bio_list_on_stack[0], &same);
+ bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
- struct bio *bio_next = bio_list_pop(current->bio_list);
-
bio_io_error(bio);
- bio = bio_next;
}
+ bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
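
The restructuring above replaces recursive bio submission with an
explicit worklist: bios generated by a make_request_fn are collected,
sorted so lower-level devices are serviced first, and processed in a
flat loop, keeping stack depth bounded no matter how deeply devices are
stacked. A toy model of the worklist idea only (not the kernel's
bio_list types):

    /* Toy: iterative expansion with an explicit worklist, no recursion. */
    #include <stdio.h>

    #define MAX 64

    static int work[MAX];
    static int top;

    static void submit(int depth)
    {
            if (top < MAX)
                    work[top++] = depth;
    }

    int main(void)
    {
            submit(3);
            while (top) {
                    int d = work[--top];

                    printf("handle depth %d\n", d);
                    if (d > 0) { /* a split produces two lower-level items */
                            submit(d - 1);
                            submit(d - 1);
                    }
            }
            return 0;
    }
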
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ee54ad0..7b597ec 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1474,7 +1474,7 @@
INIT_LIST_HEAD(&tags->page_list);
tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
@@ -1500,7 +1500,7 @@
do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1521,7 +1521,7 @@
* Allow kmemleak to scan these pages as they contain pointers
* to additional allocations like via ops->init_request().
*/
- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, set->queue_depth - i);
left -= to_do * rq_size;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 2ce8bcb..cce0268 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -31,6 +31,7 @@
crypto_completion_t complete;
void *data;
u8 *result;
+ u32 flags;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
@@ -252,6 +253,8 @@
priv->result = req->result;
priv->complete = req->base.complete;
priv->data = req->base.data;
+ priv->flags = req->base.flags;
+
/*
* WARNING: We do not backup req->priv here! The req->priv
* is for internal use of the Crypto API and the
@@ -266,38 +269,44 @@
return 0;
}
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
/* Restore the original crypto request. */
req->result = priv->result;
- req->base.complete = priv->complete;
- req->base.data = priv->data;
+
+ ahash_request_set_callback(req, priv->flags,
+ priv->complete, priv->data);
req->priv = NULL;
/* Free the req->priv.priv from the ADJUSTED request. */
kzfree(priv);
}
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
{
struct ahash_request_priv *priv = req->priv;
+ struct crypto_async_request oreq;
- if (err == -EINPROGRESS)
- return;
+ oreq.data = priv->data;
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
- ahash_restore_req(req);
+ priv->complete(&oreq, -EINPROGRESS);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
/*
* Restore the original request, see ahash_op_unaligned() for what
* goes where.
@@ -308,7 +317,7 @@
*/
/* First copy req->result into req->priv.result */
- ahash_op_unaligned_finish(areq, err);
+ ahash_restore_req(areq, err);
/* Complete the ORIGINAL request. */
areq->base.complete(&areq->base, err);
@@ -324,7 +333,12 @@
return err;
err = op(req);
- ahash_op_unaligned_finish(req, err);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
+ ahash_restore_req(req, err);
return err;
}
@@ -359,25 +373,14 @@
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
-{
- struct ahash_request_priv *priv = req->priv;
-
- if (err == -EINPROGRESS)
- return;
-
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
- ahash_restore_req(req);
-}
-
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- ahash_def_finup_finish2(areq, err);
+ if (err == -EINPROGRESS)
+ return;
+
+ ahash_restore_req(areq, err);
areq->base.complete(&areq->base, err);
}
@@ -388,11 +391,15 @@
goto out;
req->base.complete = ahash_def_finup_done2;
- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = crypto_ahash_reqtfm(req)->final(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
out:
- ahash_def_finup_finish2(req, err);
+ ahash_restore_req(req, err);
return err;
}
@@ -400,7 +407,16 @@
{
struct ahash_request *areq = req->data;
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = ahash_def_finup_finish1(areq, err);
+ if (areq->priv)
+ return;
areq->base.complete(&areq->base, err);
}
@@ -415,6 +431,11 @@
return err;
err = tfm->update(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
return ahash_def_finup_finish1(req, err);
}
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e8817e2..fde8d88 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -39,6 +39,7 @@
struct aead_async_rsgl first_rsgl;
struct list_head list;
struct kiocb *iocb;
+ struct sock *sk;
unsigned int tsgls;
char iv[];
};
@@ -379,12 +380,10 @@
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
- struct sock *sk = _req->data;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
- struct aead_request *req = aead_request_cast(_req);
+ struct aead_request *req = _req->data;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+ struct sock *sk = areq->sk;
struct scatterlist *sg = areq->tsgl;
struct aead_async_rsgl *rsgl;
struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@
memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
INIT_LIST_HEAD(&areq->list);
areq->iocb = msg->msg_iocb;
+ areq->sk = sk;
memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
aead_request_set_tfm(req, tfm);
aead_request_set_ad(req, ctx->aead_assoclen);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- aead_async_cb, sk);
+ aead_async_cb, req);
used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 9ed0878..4c5678c 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Linux ACPI interpreter
#
-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b4c1a6a..03250e1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -25,9 +25,11 @@
ACPI_MODULE_NAME("platform");
static const struct acpi_device_id forbidden_id_list[] = {
- {"PNP0000", 0}, /* PIC */
- {"PNP0100", 0}, /* Timer */
- {"PNP0200", 0}, /* AT DMA Controller */
+ {"PNP0000", 0}, /* PIC */
+ {"PNP0100", 0}, /* Timer */
+ {"PNP0200", 0}, /* AT DMA Controller */
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
{"", 0},
};
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e19f530..6d5a8c1 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -113,7 +113,7 @@
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 48e19d0..22ca892 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -729,12 +729,12 @@
static int ec_guard(struct acpi_ec *ec)
{
- unsigned long guard = usecs_to_jiffies(ec_polling_guard);
+ unsigned long guard = usecs_to_jiffies(ec->polling_guard);
unsigned long timeout = ec->timestamp + guard;
/* Ensure guarding period before polling EC status */
do {
- if (ec_busy_polling) {
+ if (ec->busy_polling) {
/* Perform busy polling */
if (ec_transaction_completed(ec))
return 0;
@@ -998,6 +998,28 @@
spin_unlock_irqrestore(&ec->lock, flags);
}
+static void acpi_ec_enter_noirq(struct acpi_ec *ec)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ ec->busy_polling = true;
+ ec->polling_guard = 0;
+ ec_log_drv("interrupt blocked");
+ spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static void acpi_ec_leave_noirq(struct acpi_ec *ec)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ ec->busy_polling = ec_busy_polling;
+ ec->polling_guard = ec_polling_guard;
+ ec_log_drv("interrupt unblocked");
+ spin_unlock_irqrestore(&ec->lock, flags);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -1278,7 +1300,7 @@
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
- if (ec_busy_polling || bits > 8)
+ if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
for (i = 0; i < bytes; ++i, ++address, ++value)
@@ -1286,7 +1308,7 @@
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
- if (ec_busy_polling || bits > 8)
+ if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
switch (result) {
@@ -1329,6 +1351,8 @@
spin_lock_init(&ec->lock);
INIT_WORK(&ec->work, acpi_ec_event_handler);
ec->timestamp = jiffies;
+ ec->busy_polling = true;
+ ec->polling_guard = 0;
return ec;
}
@@ -1390,6 +1414,7 @@
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ acpi_ec_enter_noirq(ec);
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
@@ -1429,6 +1454,7 @@
/* This is not fatal as we can poll EC events */
if (ACPI_SUCCESS(status)) {
set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
acpi_ec_enable_gpe(ec, true);
@@ -1839,34 +1865,6 @@
}
#ifdef CONFIG_PM_SLEEP
-static void acpi_ec_enter_noirq(struct acpi_ec *ec)
-{
- unsigned long flags;
-
- if (ec == first_ec) {
- spin_lock_irqsave(&ec->lock, flags);
- ec->saved_busy_polling = ec_busy_polling;
- ec->saved_polling_guard = ec_polling_guard;
- ec_busy_polling = true;
- ec_polling_guard = 0;
- ec_log_drv("interrupt blocked");
- spin_unlock_irqrestore(&ec->lock, flags);
- }
-}
-
-static void acpi_ec_leave_noirq(struct acpi_ec *ec)
-{
- unsigned long flags;
-
- if (ec == first_ec) {
- spin_lock_irqsave(&ec->lock, flags);
- ec_busy_polling = ec->saved_busy_polling;
- ec_polling_guard = ec->saved_polling_guard;
- ec_log_drv("interrupt unblocked");
- spin_unlock_irqrestore(&ec->lock, flags);
- }
-}
-
static int acpi_ec_suspend_noirq(struct device *dev)
{
struct acpi_ec *ec =
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1b41a27..219b90b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -37,6 +37,7 @@
static inline void acpi_amba_init(void) {}
#endif
int acpi_sysfs_init(void);
+void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -171,8 +172,8 @@
struct work_struct work;
unsigned long timestamp;
unsigned long nr_pending_queries;
- bool saved_busy_polling;
- unsigned int saved_polling_guard;
+ bool busy_polling;
+ unsigned int polling_guard;
};
extern struct acpi_ec *first_ec;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d1664df..9ef3941 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
- return map0->region_offset - map1->region_offset;
+ if (map0->region_offset < map1->region_offset)
+ return -1;
+ else if (map0->region_offset > map1->region_offset)
+ return 1;
+ return 0;
}
/* Retrieve the nth entry referencing this spa */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6..1c2b846 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
+ cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3d1856f..dd3786a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1827,15 +1827,20 @@
return;
device->flags.match_driver = true;
- if (!ret) {
- ret = device_attach(&device->dev);
- if (ret < 0)
- return;
-
- if (!ret && device->pnp.type.platform_id)
- acpi_default_enumeration(device);
+ if (ret > 0) {
+ acpi_device_set_enumerated(device);
+ goto ok;
}
+ ret = device_attach(&device->dev);
+ if (ret < 0)
+ return;
+
+ if (ret > 0 || !device->pnp.type.platform_id)
+ acpi_device_set_enumerated(device);
+ else
+ acpi_default_enumeration(device);
+
ok:
list_for_each_entry(child, &device->children, node)
acpi_bus_attach(child);
@@ -2044,6 +2049,7 @@
}
}
+ acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
acpi_ec_ecdt_start();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 54abb26..a4327af 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -130,6 +130,12 @@
nvs_nosave_s3 = true;
}
+static int __init init_nvs_save_s3(const struct dmi_system_id *d)
+{
+ nvs_nosave_s3 = false;
+ return 0;
+}
+
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -324,6 +330,19 @@
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189431
+ * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
+ * saving during S3.
+ */
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G50-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
+ },
+ },
{},
};
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 703c26e..cf05ae9 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -708,6 +708,62 @@
return result ? result : size;
}
+/*
+ * A Quirk Mechanism for GPE Flooding Prevention:
+ *
+ * Quirks may be needed to prevent GPE flooding on a specific GPE. The
+ * flooding typically cannot be detected and automatically prevented by
+ * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
+ * the AML tables. This normally indicates a feature gap in Linux, thus
+ * instead of providing endless quirk tables, we provide a boot parameter
+ * for those who want this quirk. For example, a user who wants to prevent
+ * GPE flooding for GPE 00 can specify the following boot
+ * parameter:
+ * acpi_mask_gpe=0x00
+ * The masking status can be modified by the following runtime controlling
+ * interface:
+ * echo unmask > /sys/firmware/acpi/interrupts/gpe00
+ */
+
+/*
+ * Currently, the GPE flooding prevention only supports masking GPEs
+ * numbered from 00 to 7f.
+ */
+#define ACPI_MASKABLE_GPE_MAX 0x80
+
+static u64 __initdata acpi_masked_gpes;
+
+static int __init acpi_gpe_set_masked_gpes(char *val)
+{
+ u8 gpe;
+
+ if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
+ return -EINVAL;
+ acpi_masked_gpes |= ((u64)1<<gpe);
+
+ return 1;
+}
+__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
+
+void __init acpi_gpe_apply_masked_gpes(void)
+{
+ acpi_handle handle;
+ acpi_status status;
+ u8 gpe;
+
+ for (gpe = 0;
+ gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
+ gpe++) {
+ if (acpi_masked_gpes & ((u64)1<<gpe)) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
+ }
+ }
+ }
+}
+
void acpi_irq_stats_init(void)
{
acpi_status status;
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index a82fc02..4d4cdc1 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
- default "binder"
+ default "binder,hwbinder,vndbinder"
---help---
Default value for the binder.devices parameter.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d7eb419..6485c77 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1765,7 +1765,9 @@
ret = -EBADF;
goto err_fget;
}
+ preempt_enable_no_resched();
ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ preempt_disable();
if (ret < 0) {
ret = -EPERM;
goto err_security;
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 267a3d3..52f2674 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -54,11 +54,42 @@
writel(val, ahci_base + SATA_P0PHYCR_REG);
}
+static int ahci_da850_softreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ int pmp, ret;
+
+ pmp = sata_srst_pmp(link);
+
+ /*
+ * There's an issue with the SATA controller on da850 SoCs: if we
+ * enable Port Multiplier support, but the drive is connected directly
+ * to the board, it can't be detected. As a workaround: if PMP is
+ * enabled, we first call ahci_do_softreset() and pass it the result of
+ * sata_srst_pmp(). If this call fails, we retry with pmp = 0.
+ */
+ ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+ if (pmp && ret == -EBUSY)
+ return ahci_do_softreset(link, class, 0,
+ deadline, ahci_check_ready);
+
+ return ret;
+}
+
+static struct ata_port_operations ahci_da850_port_ops = {
+ .inherits = &ahci_platform_ops,
+ .softreset = ahci_da850_softreset,
+ /*
+ * No need to override .pmp_softreset - it's only used for actual
+ * PMP-enabled ports.
+ */
+};
+
static const struct ata_port_info ahci_da850_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
+ .port_ops = &ahci_da850_port_ops,
};
static struct scsi_host_template ahci_platform_sht = {
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 4281801..09e77d5 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2000-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
@@ -294,6 +294,7 @@
bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
struct removed_region *dma_mem = dev->removed_mem;
+ size = PAGE_ALIGN(size);
if (!no_kernel_mapping)
iounmap(cpu_addr);
mutex_lock(&dma_mem->lock);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index a95e1e5..f18ae62 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -229,20 +229,22 @@
static int fw_lookup_and_allocate_buf(const char *fw_name,
struct firmware_cache *fwc,
struct firmware_buf **buf, void *dbuf,
- size_t size)
+ size_t size, unsigned int opt_flags)
{
struct firmware_buf *tmp;
spin_lock(&fwc->lock);
- tmp = __fw_lookup_buf(fw_name);
- if (tmp) {
- kref_get(&tmp->ref);
- spin_unlock(&fwc->lock);
- *buf = tmp;
- return 1;
+ if (!(opt_flags & FW_OPT_NOCACHE)) {
+ tmp = __fw_lookup_buf(fw_name);
+ if (tmp) {
+ kref_get(&tmp->ref);
+ spin_unlock(&fwc->lock);
+ *buf = tmp;
+ return 1;
+ }
}
tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
- if (tmp)
+ if (tmp && !(opt_flags & FW_OPT_NOCACHE))
list_add(&tmp->list, &fwc->head);
spin_unlock(&fwc->lock);
@@ -1051,7 +1053,8 @@
*/
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
- struct device *device, void *dbuf, size_t size)
+ struct device *device, void *dbuf, size_t size,
+ unsigned int opt_flags)
{
struct firmware *firmware;
struct firmware_buf *buf;
@@ -1069,7 +1072,8 @@
return 0; /* assigned */
}
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
+ ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size,
+ opt_flags);
/*
* bind with 'buf' now to avoid warning in failure path
@@ -1147,7 +1151,8 @@
goto out;
}
- ret = _request_firmware_prepare(&fw, name, device, buf, size);
+ ret = _request_firmware_prepare(&fw, name, device, buf, size,
+ opt_flags);
if (ret <= 0) /* error or already assigned */
goto out;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4e58256..da97163 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -731,6 +731,53 @@
return ret;
}
+static int regcache_sync_block_raw_multi_reg(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base,
+ unsigned int start,
+ unsigned int end)
+{
+ unsigned int i, val;
+ unsigned int regtmp = 0;
+ int ret = 0;
+ struct reg_sequence *regs;
+ size_t num_regs = ((end - start) + 1);
+
+ regs = kcalloc(num_regs, sizeof(struct reg_sequence), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ num_regs = 0;
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ /* skip registers that are not defined/available */
+ if (!regcache_reg_present(cache_present, i))
+ continue;
+
+ val = regcache_get_val(map, block, i);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, regtmp);
+ if (ret >= 0 && val == map->reg_defaults[ret].def) {
+ continue;
+ } else {
+ regs[num_regs].reg = regtmp;
+ regs[num_regs].def = val;
+ regs[num_regs].delay_us = 0;
+ num_regs += 1;
+ }
+ }
+ ret = 0;
+ if (num_regs) {
+ dev_dbg(map->dev, "%s: start: 0x%x - end: 0x%x\n",
+ __func__, regs[0].reg, regs[num_regs-1].reg);
+ ret = _regmap_raw_multi_reg_write(map, regs, num_regs);
+ }
+ kfree(regs);
+ return ret;
+}
+
static int regcache_sync_block_raw(struct regmap *map, void *block,
unsigned long *cache_present,
unsigned int block_base, unsigned int start,
@@ -778,7 +825,12 @@
unsigned int block_base, unsigned int start,
unsigned int end)
{
- if (regmap_can_raw_write(map) && !map->use_single_write)
+ if (regmap_can_raw_write(map) && map->can_multi_write)
+ return regcache_sync_block_raw_multi_reg(map, block,
+ cache_present,
+ block_base, start,
+ end);
+ else if (regmap_can_raw_write(map) && !map->use_single_write)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
diff --git a/drivers/base/regmap/regmap-swr.c b/drivers/base/regmap/regmap-swr.c
index 1a2e09e..be1eb00 100644
--- a/drivers/base/regmap/regmap-swr.c
+++ b/drivers/base/regmap/regmap-swr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,9 @@
* GNU General Public License for more details.
*/
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/soundwire/soundwire.h>
#include <linux/module.h>
@@ -20,16 +23,22 @@
static int regmap_swr_gather_write(void *context,
const void *reg, size_t reg_size,
- const void *val, size_t val_size)
+ const void *val, size_t val_len)
{
struct device *dev = context;
struct swr_device *swr = to_swr_device(dev);
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
- int ret = 0;
- int i;
- u32 reg_addr = 0;
+ size_t addr_bytes;
+ size_t val_bytes;
+ int i, ret = 0;
+ u16 reg_addr = 0;
+ u8 *value;
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
if (swr == NULL) {
dev_err(dev, "%s: swr device is NULL\n", __func__);
return -EINVAL;
@@ -40,29 +49,107 @@
return -EINVAL;
}
reg_addr = *(u16 *)reg;
- for (i = 0; i < val_size; i++) {
- ret = swr_write(swr, swr->dev_num, (reg_addr+i),
- (u32 *)(val+i));
+ val_bytes = map->format.val_bytes;
+ /* val_len = val_bytes * val_count */
+ for (i = 0; i < (val_len / val_bytes); i++) {
+ value = (u8 *)val + (val_bytes * i);
+ ret = swr_write(swr, swr->dev_num, (reg_addr + i), value);
if (ret < 0) {
dev_err(dev, "%s: write reg 0x%x failed, err %d\n",
- __func__, (reg_addr+i), ret);
+ __func__, (reg_addr + i), ret);
break;
}
}
return ret;
}
+static int regmap_swr_raw_multi_reg_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct swr_device *swr = to_swr_device(dev);
+ struct regmap *map = dev_get_regmap(dev, NULL);
+ size_t addr_bytes;
+ size_t val_bytes;
+ size_t pad_bytes;
+ size_t num_regs;
+ int i = 0;
+ int ret = 0;
+ u16 *reg;
+ u8 *val;
+ u8 *buf;
+
+ if (swr == NULL) {
+ dev_err(dev, "%s: swr device is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ addr_bytes = map->format.reg_bytes;
+ val_bytes = map->format.val_bytes;
+ pad_bytes = map->format.pad_bytes;
+
+ if (addr_bytes + val_bytes + pad_bytes == 0) {
+ dev_err(dev, "%s: sum of addr, value and pad is 0\n", __func__);
+ return -EINVAL;
+ }
+ num_regs = count / (addr_bytes + val_bytes + pad_bytes);
+
+ reg = kcalloc(num_regs, sizeof(u16), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ val = kcalloc(num_regs, sizeof(u8), GFP_KERNEL);
+ if (!val) {
+ ret = -ENOMEM;
+ goto mem_fail;
+ }
+
+ buf = (u8 *)data;
+ for (i = 0; i < num_regs; i++) {
+ reg[i] = *(u16 *)buf;
+ buf += (map->format.reg_bytes + map->format.pad_bytes);
+ val[i] = *buf;
+ buf += map->format.val_bytes;
+ }
+ ret = swr_bulk_write(swr, swr->dev_num, reg, val, num_regs);
+ if (ret)
+ dev_err(dev, "%s: multi reg write failed\n", __func__);
+
+ kfree(val);
+mem_fail:
+ kfree(reg);
+ return ret;
+}
+
static int regmap_swr_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
+ size_t addr_bytes;
+ size_t val_bytes;
+ size_t pad_bytes;
+
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
+ val_bytes = map->format.val_bytes;
+ pad_bytes = map->format.pad_bytes;
WARN_ON(count < addr_bytes);
- return regmap_swr_gather_write(context, data, addr_bytes,
- (data + addr_bytes),
- (count - addr_bytes));
+ if (count > (addr_bytes + val_bytes + pad_bytes))
+ return regmap_swr_raw_multi_reg_write(context, data, count);
+ else
+ return regmap_swr_gather_write(context, data, addr_bytes,
+ (data + addr_bytes),
+ (count - addr_bytes));
}
static int regmap_swr_read(void *context,
@@ -72,10 +159,15 @@
struct device *dev = context;
struct swr_device *swr = to_swr_device(dev);
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
+ size_t addr_bytes;
int ret = 0;
- u32 reg_addr = 0;
+ u16 reg_addr = 0;
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
if (swr == NULL) {
dev_err(dev, "%s: swr is NULL\n", __func__);
return -EINVAL;
@@ -85,7 +177,7 @@
__func__, reg_size);
return -EINVAL;
}
- reg_addr = *(u32 *)reg;
+ reg_addr = *(u16 *)reg;
ret = swr_read(swr, swr->dev_num, reg_addr, val, val_size);
if (ret < 0)
dev_err(dev, "%s: codec reg 0x%x read failed %d\n",
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7a10487..c9441f9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,7 +54,7 @@
struct mutex tx_lock;
struct gendisk *disk;
- int blksize;
+ loff_t blksize;
loff_t bytesize;
/* protects initialization and shutdown of the socket */
@@ -126,7 +126,7 @@
}
static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
- int blocksize, int nr_blocks)
+ loff_t blocksize, loff_t nr_blocks)
{
int ret;
@@ -135,7 +135,7 @@
return ret;
nbd->blksize = blocksize;
- nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+ nbd->bytesize = blocksize * nr_blocks;
nbd_size_update(nbd, bdev);
@@ -648,7 +648,7 @@
case NBD_SET_SIZE:
return nbd_size_set(nbd, bdev, nbd->blksize,
- arg / nbd->blksize);
+ div_s64(arg, nbd->blksize));
case NBD_SET_SIZE_BLOCKS:
return nbd_size_set(nbd, bdev, nbd->blksize, arg);
@@ -817,7 +817,7 @@
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
- debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
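
Two things drive the loff_t conversion here: with int fields the bytesize product could be computed (and overflow) in 32-bit arithmetic, and once blksize is 64-bit a plain arg / nbd->blksize would make 32-bit kernels emit a call to a libgcc division helper the kernel does not provide, hence div_s64() from <linux/math64.h>. A small user-space sketch of the overflow half:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t blocksize = 4096;
            int32_t nr_blocks = 1 << 21;    /* an 8 GiB device */

            /* 32-bit multiply overflows (undefined behaviour) before
             * the widening assignment can save it.
             */
            int64_t wrong = blocksize * nr_blocks;
            /* widen first, as the loff_t fields now guarantee */
            int64_t right = (int64_t)blocksize * nr_blocks;

            printf("wrong=%lld right=%lld\n",
                   (long long)wrong, (long long)right);
            return 0;
    }
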
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d2ef51c..c9914d65 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -582,13 +582,13 @@
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- clear_page(mem);
+ memset(mem, 0, PAGE_SIZE);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
- copy_page(mem, cmem);
+ memcpy(mem, cmem, PAGE_SIZE);
} else {
struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
@@ -780,7 +780,7 @@
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
- copy_page(cmem, src);
+ memcpy(cmem, src, PAGE_SIZE);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
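
clear_page() and copy_page() are arch-optimized fast paths that require page-aligned pointers, while the mappings returned by zs_map_object() (and partial-I/O buffers) need not be aligned, so the open-coded memset()/memcpy() of PAGE_SIZE bytes drops that requirement. A sketch of the precondition the patch stops relying on:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* copy_page()-style helper: valid only for page-aligned buffers. */
    static void copy_page_strict(void *dst, const void *src)
    {
            assert(((uintptr_t)dst & (PAGE_SIZE - 1)) == 0);
            assert(((uintptr_t)src & (PAGE_SIZE - 1)) == 0);
            memcpy(dst, src, PAGE_SIZE);
    }
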
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index bc48a84..3e1367a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -573,9 +573,12 @@
controlling the behavior of this hardware.
config DEVPORT
- bool
+ bool "/dev/port character device"
depends on ISA || PCI
default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 9c26f87..e907d0d 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -773,6 +773,7 @@
if (*buf != 0x80) {
list_del(&entry->track);
kfree(entry);
+ entry = NULL;
return 1;
}
@@ -790,6 +791,7 @@
if (delayed_rsp_id == 0) {
list_del(&entry->track);
kfree(entry);
+ entry = NULL;
return 1;
}
@@ -803,6 +805,7 @@
if (rsp_count > 0 && rsp_count < 0x1000) {
list_del(&entry->track);
kfree(entry);
+ entry = NULL;
return 1;
}
@@ -1447,6 +1450,7 @@
dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
/* Notify the DCI process that the peripheral DCI Channel is up */
+ mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
if (entry->client_info.token != proc)
@@ -1469,6 +1473,7 @@
info.si_int, stat);
}
}
+ mutex_unlock(&driver->dci_mutex);
}
static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
@@ -1942,6 +1947,7 @@
reg_entry.cmd_code_hi = header->subsys_cmd_code;
reg_entry.cmd_code_lo = header->subsys_cmd_code;
+ mutex_lock(&driver->cmd_reg_mutex);
temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
if (temp_entry) {
reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
@@ -1953,6 +1959,7 @@
reg_entry.cmd_code, reg_entry.subsys_id,
reg_entry.cmd_code_hi);
}
+ mutex_unlock(&driver->cmd_reg_mutex);
return ret;
}
@@ -2684,10 +2691,12 @@
err:
pr_err("diag: Could not initialize diag DCI buffers");
kfree(driver->apps_dci_buf);
+ driver->apps_dci_buf = NULL;
if (driver->diag_dci_wq)
destroy_workqueue(driver->diag_dci_wq);
kfree(partial_pkt.data);
+ partial_pkt.data = NULL;
mutex_destroy(&driver->dci_mutex);
mutex_destroy(&dci_log_mask_mutex);
mutex_destroy(&dci_event_mask_mutex);
@@ -2707,7 +2716,9 @@
void diag_dci_exit(void)
{
kfree(partial_pkt.data);
+ partial_pkt.data = NULL;
kfree(driver->apps_dci_buf);
+ driver->apps_dci_buf = NULL;
mutex_destroy(&driver->dci_mutex);
mutex_destroy(&dci_log_mask_mutex);
mutex_destroy(&dci_event_mask_mutex);
@@ -2914,22 +2925,30 @@
mutex_destroy(&proc_buf->health_mutex);
if (proc_buf->buf_primary) {
kfree(proc_buf->buf_primary->data);
+ proc_buf->buf_primary->data = NULL;
mutex_destroy(
&proc_buf->buf_primary->data_mutex);
}
kfree(proc_buf->buf_primary);
+ proc_buf->buf_primary = NULL;
if (proc_buf->buf_cmd) {
kfree(proc_buf->buf_cmd->data);
+ proc_buf->buf_cmd->data = NULL;
mutex_destroy(
&proc_buf->buf_cmd->data_mutex);
}
kfree(proc_buf->buf_cmd);
+ proc_buf->buf_cmd = NULL;
}
}
kfree(new_entry->dci_event_mask);
+ new_entry->dci_event_mask = NULL;
kfree(new_entry->dci_log_mask);
+ new_entry->dci_log_mask = NULL;
kfree(new_entry->buffers);
+ new_entry->buffers = NULL;
kfree(new_entry);
+ new_entry = NULL;
}
mutex_unlock(&driver->dci_mutex);
return DIAG_DCI_NO_REG;
@@ -2960,6 +2979,7 @@
* masks and send the masks to peripherals
*/
kfree(entry->dci_log_mask);
+ entry->dci_log_mask = NULL;
diag_dci_invalidate_cumulative_log_mask(token);
if (token == DCI_LOCAL_PROC)
diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
@@ -2967,6 +2987,7 @@
if (ret != DIAG_DCI_NO_ERROR)
return ret;
kfree(entry->dci_event_mask);
+ entry->dci_event_mask = NULL;
diag_dci_invalidate_cumulative_event_mask(token);
if (token == DCI_LOCAL_PROC)
diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
@@ -2981,6 +3002,7 @@
if (!list_empty(&req_entry->track))
list_del(&req_entry->track);
kfree(req_entry);
+ req_entry = NULL;
}
}
@@ -2996,6 +3018,7 @@
buf_entry->data = NULL;
mutex_unlock(&buf_entry->data_mutex);
kfree(buf_entry);
+ buf_entry = NULL;
} else if (buf_entry->buf_type == DCI_BUF_CMD) {
peripheral = buf_entry->data_source;
if (peripheral == APPS_DATA)
@@ -3022,14 +3045,17 @@
mutex_unlock(&buf_entry->data_mutex);
mutex_destroy(&buf_entry->data_mutex);
kfree(buf_entry);
+ buf_entry = NULL;
}
mutex_lock(&proc_buf->buf_primary->data_mutex);
kfree(proc_buf->buf_primary->data);
+ proc_buf->buf_primary->data = NULL;
mutex_unlock(&proc_buf->buf_primary->data_mutex);
mutex_lock(&proc_buf->buf_cmd->data_mutex);
kfree(proc_buf->buf_cmd->data);
+ proc_buf->buf_cmd->data = NULL;
mutex_unlock(&proc_buf->buf_cmd->data_mutex);
mutex_destroy(&proc_buf->health_mutex);
@@ -3037,13 +3063,17 @@
mutex_destroy(&proc_buf->buf_cmd->data_mutex);
kfree(proc_buf->buf_primary);
+ proc_buf->buf_primary = NULL;
kfree(proc_buf->buf_cmd);
+ proc_buf->buf_cmd = NULL;
mutex_unlock(&proc_buf->buf_mutex);
}
mutex_destroy(&entry->write_buf_mutex);
kfree(entry->buffers);
+ entry->buffers = NULL;
kfree(entry);
+ entry = NULL;
if (driver->num_dci_client == 0) {
diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 558e362..13ad402 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -354,8 +354,8 @@
ch->tbl[j].buf = NULL;
ch->tbl[j].len = 0;
ch->tbl[j].ctx = 0;
- spin_lock_init(&(ch->lock));
}
+ spin_lock_init(&(ch->lock));
}
return 0;
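
Moving spin_lock_init() out of the table loop matters because the old placement re-initialized ch->lock once per table entry; re-running the init on a lock that another context may already hold resets the lock word and can admit a second owner. The rule, sketched with a pthread stand-in for the spinlock:

    #include <pthread.h>

    struct channel {
            pthread_mutex_t lock;   /* stands in for the spinlock */
            int tbl[16];
    };

    static void channel_init(struct channel *ch)
    {
            int j;

            for (j = 0; j < 16; j++)
                    ch->tbl[j] = 0;

            /* Initialize the lock exactly once per object, after (not
             * inside) the loop that resets the table it protects.
             */
            pthread_mutex_init(&ch->lock, NULL);
    }
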
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index ac8a6d0..1cf7f52 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/ratelimit.h>
@@ -218,7 +219,8 @@
if (!ch)
return;
- if (!atomic_read(&ch->connected) && driver->usb_connected)
+ if (!atomic_read(&ch->connected) &&
+ driver->usb_connected && diag_mask_param())
diag_clear_masks(NULL);
if (ch && ch->ops && ch->ops->close)
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index ea380fb..d3dde50 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -505,6 +505,7 @@
int ref_count;
int mask_clear;
struct mutex diag_maskclear_mutex;
+ struct mutex diag_notifier_mutex;
struct mutex diagchar_mutex;
struct mutex diag_file_mutex;
wait_queue_head_t wait_q;
@@ -547,7 +548,7 @@
struct mutex diag_id_mutex;
struct mutex cmd_reg_mutex;
uint32_t cmd_reg_count;
- struct mutex diagfwd_channel_mutex;
+ struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
/* Sizes that reflect memory pool sizes */
unsigned int poolsize;
unsigned int poolsize_hdlc;
@@ -666,6 +667,7 @@
void diag_cmd_remove_reg_by_pid(int pid);
void diag_cmd_remove_reg_by_proc(int proc);
int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+int diag_mask_param(void);
void diag_clear_masks(struct diag_md_session_t *info);
void diag_record_stats(int type, int flag);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index ac777b0..128d6ce 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -143,6 +143,14 @@
static struct timer_list drain_timer;
static int timer_in_progress;
+/*
+ * Diag mask clear parameter: controls whether masks are
+ * cleared on USB disconnection and when ODL stops.
+ */
+static int diag_mask_clear_param = 1;
+module_param(diag_mask_clear_param, int, 0644);
+
struct diag_apps_data_t {
void *buf;
uint32_t len;
@@ -388,7 +396,10 @@
return ret;
}
-
+int diag_mask_param(void)
+{
+ return diag_mask_clear_param;
+}
void diag_clear_masks(struct diag_md_session_t *info)
{
int ret;
@@ -421,14 +432,17 @@
if (!session_info)
return;
- diag_clear_masks(session_info);
+ if (diag_mask_clear_param)
+ diag_clear_masks(session_info);
mutex_lock(&driver->diag_maskclear_mutex);
driver->mask_clear = 1;
mutex_unlock(&driver->diag_maskclear_mutex);
+ mutex_lock(&driver->diagchar_mutex);
session_peripheral_mask = session_info->peripheral_mask;
diag_md_session_close(session_info);
+ mutex_unlock(&driver->diagchar_mutex);
for (i = 0; i < NUM_MD_SESSIONS; i++)
if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
@@ -701,6 +715,11 @@
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (!item) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ return NULL;
+ }
temp_entry = &item->entry;
if (temp_entry->cmd_code == entry->cmd_code &&
temp_entry->subsys_id == entry->subsys_id &&
@@ -3397,7 +3416,7 @@
static int __init diagchar_init(void)
{
dev_t dev;
- int ret;
+ int ret, i;
pr_debug("diagfwd initializing ..\n");
ret = 0;
@@ -3440,10 +3459,12 @@
mutex_init(&driver->hdlc_disable_mutex);
mutex_init(&driver->diagchar_mutex);
mutex_init(&driver->diag_maskclear_mutex);
+ mutex_init(&driver->diag_notifier_mutex);
mutex_init(&driver->diag_file_mutex);
mutex_init(&driver->delayed_rsp_mutex);
mutex_init(&apps_data_mutex);
- mutex_init(&driver->diagfwd_channel_mutex);
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ mutex_init(&driver->diagfwd_channel_mutex[i]);
init_waitqueue_head(&driver->wait_q);
INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
INIT_WORK(&(driver->update_user_clients),
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 3fce72f7..cd49f00 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1303,6 +1303,8 @@
static int diagfwd_mux_close(int id, int mode)
{
+ uint8_t i;
+
switch (mode) {
case DIAG_USB_MODE:
driver->usb_connected = 0;
@@ -1323,10 +1325,23 @@
*/
} else {
/*
- * With clearing of masks on ODL exit and
- * USB disconnection, closing of the channel is
- * not needed.This enables read and drop of stale packets.
+ * With sysfs parameter to clear masks set,
+ * peripheral masks are cleared on ODL exit and
+ * USB disconnection and buffers are not marked busy.
+ * This enables read and drop of stale packets.
+ *
+ * With the sysfs parameter to clear masks unset,
+ * masks are not cleared and buffers are marked busy
+ * so that traffic generated by the peripherals
+ * is not read.
*/
+ if (!(diag_mask_param())) {
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ diagfwd_close(i, TYPE_DATA);
+ diagfwd_close(i, TYPE_CMD);
+ }
+ }
+ /* Re-enable HDLC encoding */
pr_debug("diag: In %s, re-enabling HDLC encoding\n",
__func__);
mutex_lock(&driver->hdlc_disable_mutex);
@@ -1345,6 +1360,7 @@
static void hdlc_reset_timer_start(struct diag_md_session_t *info)
{
+ mutex_lock(&driver->md_session_lock);
if (!hdlc_timer_in_progress) {
hdlc_timer_in_progress = 1;
if (info)
@@ -1354,6 +1370,7 @@
mod_timer(&driver->hdlc_reset_timer,
jiffies + msecs_to_jiffies(200));
}
+ mutex_unlock(&driver->md_session_lock);
}
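
The new md_session_lock scope closes a check-then-act race: two callers could both observe hdlc_timer_in_progress == 0 and both arm the timer against different session pointers. The pattern, sketched with pthread stand-ins:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool timer_in_progress;

    static void arm_timer_once(void (*arm)(void))
    {
            /* The flag test and the arming must sit under one lock so
             * that only the first caller starts the timer.
             */
            pthread_mutex_lock(&session_lock);
            if (!timer_in_progress) {
                    timer_in_progress = true;
                    arm();
            }
            pthread_mutex_unlock(&session_lock);
    }
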
static void hdlc_reset_timer_func(unsigned long data)
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index b262897..e13871e 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -359,6 +359,8 @@
feature_mask_len = FEATURE_MASK_LEN;
}
+ diag_cmd_remove_reg_by_proc(peripheral);
+
driver->feature[peripheral].rcvd_feature_mask = 1;
for (i = 0; i < feature_mask_len && read_len < len; i++) {
@@ -660,7 +662,7 @@
if (!new_item)
return -ENOMEM;
kmemleak_not_leak(new_item);
- new_item->process_name = kzalloc(strlen(process_name), GFP_KERNEL);
+ new_item->process_name = kzalloc(strlen(process_name) + 1, GFP_KERNEL);
if (!new_item->process_name) {
kfree(new_item);
new_item = NULL;
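
The +1 fixes a classic off-by-one: strlen() counts characters only, so kzalloc(strlen(process_name)) leaves no room for the terminating NUL and the subsequent string copy writes one byte past the allocation. The corrected pattern in a user-space sketch:

    #include <stdlib.h>
    #include <string.h>

    static char *dup_name(const char *name)
    {
            size_t len = strlen(name) + 1;  /* +1 for the '\0' */
            char *copy = calloc(1, len);    /* calloc zeroes, like kzalloc */

            if (copy)
                    memcpy(copy, name, len); /* copies the terminator too */
            return copy;
    }
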
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 4f7c1e0..5a8ef04 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -436,9 +436,9 @@
fwd_info->inited = 1;
fwd_info->read_bytes = 0;
fwd_info->write_bytes = 0;
- spin_lock_init(&fwd_info->buf_lock);
- spin_lock_init(&fwd_info->write_buf_lock);
+ mutex_init(&fwd_info->buf_mutex);
mutex_init(&fwd_info->data_mutex);
+ spin_lock_init(&fwd_info->write_buf_lock);
}
}
@@ -452,8 +452,8 @@
fwd_info->ch_open = 0;
fwd_info->read_bytes = 0;
fwd_info->write_bytes = 0;
- spin_lock_init(&fwd_info->buf_lock);
spin_lock_init(&fwd_info->write_buf_lock);
+ mutex_init(&fwd_info->buf_mutex);
mutex_init(&fwd_info->data_mutex);
/*
* This state shouldn't be set for Control channels
@@ -646,7 +646,7 @@
}
- mutex_lock(&driver->diagfwd_channel_mutex);
+ mutex_lock(&driver->diagfwd_channel_mutex[peripheral]);
fwd_info = &early_init_info[transport][peripheral];
if (fwd_info->p_ops && fwd_info->p_ops->close)
fwd_info->p_ops->close(fwd_info->ctxt);
@@ -670,7 +670,7 @@
diagfwd_late_open(dest_info);
diagfwd_cntl_open(dest_info);
init_fn(peripheral);
- mutex_unlock(&driver->diagfwd_channel_mutex);
+ mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
}
@@ -983,8 +983,6 @@
}
if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
- temp_buf = fwd_info->buf_1;
- atomic_set(&temp_buf->in_busy, 1);
if (driver->feature[fwd_info->peripheral].encode_hdlc &&
(fwd_info->type == TYPE_DATA ||
fwd_info->type == TYPE_CMD)) {
@@ -994,9 +992,11 @@
read_buf = fwd_info->buf_1->data;
read_len = fwd_info->buf_1->len;
}
+ if (read_buf) {
+ temp_buf = fwd_info->buf_1;
+ atomic_set(&temp_buf->in_busy, 1);
+ }
} else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
- temp_buf = fwd_info->buf_2;
- atomic_set(&temp_buf->in_busy, 1);
if (driver->feature[fwd_info->peripheral].encode_hdlc &&
(fwd_info->type == TYPE_DATA ||
fwd_info->type == TYPE_CMD)) {
@@ -1006,6 +1006,10 @@
read_buf = fwd_info->buf_2->data;
read_len = fwd_info->buf_2->len;
}
+ if (read_buf) {
+ temp_buf = fwd_info->buf_2;
+ atomic_set(&temp_buf->in_busy, 1);
+ }
} else {
pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
__func__, fwd_info->peripheral, fwd_info->type);
@@ -1061,7 +1065,6 @@
void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
{
- unsigned long flags;
if (!fwd_info)
return;
@@ -1072,10 +1075,10 @@
return;
}
- spin_lock_irqsave(&fwd_info->buf_lock, flags);
+ mutex_lock(&fwd_info->buf_mutex);
if (!fwd_info->buf_1) {
fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_1)
goto err;
kmemleak_not_leak(fwd_info->buf_1);
@@ -1083,7 +1086,7 @@
if (!fwd_info->buf_1->data) {
fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_1->data)
goto err;
fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
@@ -1095,7 +1098,7 @@
if (fwd_info->type == TYPE_DATA) {
if (!fwd_info->buf_2) {
fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_2)
goto err;
kmemleak_not_leak(fwd_info->buf_2);
@@ -1104,7 +1107,7 @@
if (!fwd_info->buf_2->data) {
fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_2->data)
goto err;
fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
@@ -1120,7 +1123,7 @@
fwd_info->buf_1->data_raw =
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_1->data_raw)
goto err;
fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
@@ -1130,7 +1133,7 @@
fwd_info->buf_2->data_raw =
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_2->data_raw)
goto err;
fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
@@ -1144,7 +1147,7 @@
if (!fwd_info->buf_1->data_raw) {
fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fwd_info->buf_1->data_raw)
goto err;
fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
@@ -1152,22 +1155,21 @@
}
}
- spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ mutex_unlock(&fwd_info->buf_mutex);
return;
err:
- spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ mutex_unlock(&fwd_info->buf_mutex);
diagfwd_buffers_exit(fwd_info);
}
static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
{
- unsigned long flags;
if (!fwd_info)
return;
- spin_lock_irqsave(&fwd_info->buf_lock, flags);
+ mutex_lock(&fwd_info->buf_mutex);
if (fwd_info->buf_1) {
kfree(fwd_info->buf_1->data);
fwd_info->buf_1->data = NULL;
@@ -1184,7 +1186,7 @@
kfree(fwd_info->buf_2);
fwd_info->buf_2 = NULL;
}
- spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ mutex_unlock(&fwd_info->buf_mutex);
}
void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index b8deb38..5884a12 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -71,8 +71,8 @@
atomic_t opened;
unsigned long read_bytes;
unsigned long write_bytes;
- spinlock_t buf_lock;
spinlock_t write_buf_lock;
+ struct mutex buf_mutex;
struct mutex data_mutex;
void *ctxt;
struct diagfwd_buf_t *buf_1;
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 6403abc..af8bf00 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,14 +34,17 @@
#include "diagfwd_socket.h"
#include "diag_ipc_logging.h"
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
#define DIAG_SVC_ID 0x1001
#define MODEM_INST_BASE 0
#define LPASS_INST_BASE 64
#define WCNSS_INST_BASE 128
#define SENSORS_INST_BASE 192
-#define WDSP_INST_BASE 256
-#define CDSP_INST_BASE 320
+#define CDSP_INST_BASE 256
+#define WDSP_INST_BASE 320
#define INST_ID_CNTL 0
#define INST_ID_CMD 1
@@ -50,6 +53,7 @@
#define INST_ID_DCI 4
struct diag_cntl_socket_info *cntl_socket;
+static uint64_t bootup_req[NUM_SOCKET_SUBSYSTEMS];
struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
{
@@ -287,13 +291,6 @@
spin_unlock_irqrestore(&info->lock, flags);
diag_ws_on_notify();
- /*
- * Initialize read buffers for the servers. The servers must read data
- * first to get the address of its clients.
- */
- if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
- diagfwd_buffers_init(info->fwd_ctxt);
-
queue_work(info->wq, &(info->read_work));
wake_up_interruptible(&info->read_wait_q);
}
@@ -422,7 +419,7 @@
return;
}
__socket_open_channel(info);
- DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened client\n", info->name);
}
static void socket_open_server(struct diag_socket_info *info)
@@ -498,6 +495,13 @@
if (!atomic_read(&info->opened))
return;
+ if (bootup_req[info->peripheral] == PEPIPHERAL_SSR_UP) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s is up, stopping cleanup: bootup_req = %d\n",
+ info->name, (int)bootup_req[info->peripheral]);
+ return;
+ }
+
memset(&info->remote_addr, 0, sizeof(struct sockaddr_msm_ipc));
diagfwd_channel_close(info->fwd_ctxt);
@@ -614,7 +618,9 @@
case CNTL_CMD_REMOVE_CLIENT:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove client\n",
info->name);
+ mutex_lock(&driver->diag_notifier_mutex);
socket_close_channel(info);
+ mutex_unlock(&driver->diag_notifier_mutex);
break;
default:
return -EINVAL;
@@ -623,6 +629,25 @@
return 0;
}
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+struct restart_notifier_block {
+ unsigned int processor;
+ char *name;
+ struct notifier_block nb;
+};
+
+static struct restart_notifier_block restart_notifiers[] = {
+ {SOCKET_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_ADSP, "adsp", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_SLPI, "slpi", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_CDSP, "cdsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+
static void cntl_socket_read_work_fn(struct work_struct *work)
{
union cntl_port_msg msg;
@@ -630,7 +655,6 @@
struct kvec iov = { 0 };
struct msghdr read_msg = { 0 };
-
if (!cntl_socket)
return;
@@ -679,6 +703,9 @@
if (!info)
return;
+ if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+ diagfwd_buffers_init(info->fwd_ctxt);
+
diagfwd_channel_read(info->fwd_ctxt);
}
@@ -847,8 +874,11 @@
int diag_socket_init(void)
{
int err = 0;
+ int i;
int peripheral = 0;
+ void *handle;
struct diag_socket_info *info = NULL;
+ struct restart_notifier_block *nb;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
info = &socket_cntl[peripheral];
@@ -869,6 +899,17 @@
goto fail;
}
+ for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+ nb = &restart_notifiers[i];
+ if (nb) {
+ handle = subsys_notif_register_notifier(nb->name,
+ &nb->nb);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s: registering notifier for '%s', handle=%p\n",
+ __func__, nb->name, handle);
+ }
+ }
+
register_ipcrtr_af_init_notifier(&socket_notify);
fail:
return err;
@@ -904,6 +945,65 @@
return 0;
}
+static int restart_notifier_cb(struct notifier_block *this, unsigned long code,
+ void *_cmd)
+{
+ struct restart_notifier_block *notifier;
+
+ notifier = container_of(this,
+ struct restart_notifier_block, nb);
+ if (!notifier) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: invalid notifier block\n", __func__);
+ return NOTIFY_DONE;
+ }
+
+ mutex_lock(&driver->diag_notifier_mutex);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s: ssr for processor %d ('%s')\n",
+ __func__, notifier->processor, notifier->name);
+
+ switch (code) {
+
+ case SUBSYS_BEFORE_SHUTDOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
+ bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+ break;
+
+ case SUBSYS_AFTER_SHUTDOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_AFTER_SHUTDOWN\n", __func__);
+ break;
+
+ case SUBSYS_BEFORE_POWERUP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_BEFORE_POWERUP\n", __func__);
+ break;
+
+ case SUBSYS_AFTER_POWERUP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: SUBSYS_AFTER_POWERUP\n", __func__);
+ if (!bootup_req[notifier->processor]) {
+ bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+ break;
+ }
+ bootup_req[notifier->processor] = PEPIPHERAL_SSR_UP;
+ break;
+
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: code: %lu\n", code);
+ break;
+ }
+ mutex_unlock(&driver->diag_notifier_mutex);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: bootup_req[%s] = %d\n",
+ notifier->name, (int)bootup_req[notifier->processor]);
+
+ return NOTIFY_DONE;
+}
+
int diag_socket_init_peripheral(uint8_t peripheral)
{
struct diag_socket_info *info = NULL;
@@ -986,9 +1086,9 @@
(info->data_ready > 0) || (!info->hdl) ||
(atomic_read(&info->diag_state) == 0));
if (err) {
- mutex_lock(&driver->diagfwd_channel_mutex);
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
- mutex_unlock(&driver->diagfwd_channel_mutex);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
return -ERESTARTSYS;
}
@@ -1000,9 +1100,9 @@
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s closing read thread. diag state is closed\n",
info->name);
- mutex_lock(&driver->diagfwd_channel_mutex);
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
- mutex_unlock(&driver->diagfwd_channel_mutex);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
return 0;
}
@@ -1069,10 +1169,10 @@
if (total_recd > 0) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
info->name, total_recd);
- mutex_lock(&driver->diagfwd_channel_mutex);
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
err = diagfwd_channel_read_done(info->fwd_ctxt,
buf, total_recd);
- mutex_unlock(&driver->diagfwd_channel_mutex);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
if (err)
goto fail;
} else {
@@ -1085,9 +1185,9 @@
return 0;
fail:
- mutex_lock(&driver->diagfwd_channel_mutex);
+ mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
- mutex_unlock(&driver->diagfwd_channel_mutex);
+ mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
return -EIO;
}
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
index a2b922a..a9487b1 100644
--- a/drivers/char/diag/diagfwd_socket.h
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,10 +24,24 @@
#define PORT_TYPE_SERVER 0
#define PORT_TYPE_CLIENT 1
+#define PEPIPHERAL_AFTER_BOOT 0
+#define PEPIPHERAL_SSR_DOWN 1
+#define PEPIPHERAL_SSR_UP 2
+
#define CNTL_CMD_NEW_SERVER 4
#define CNTL_CMD_REMOVE_SERVER 5
#define CNTL_CMD_REMOVE_CLIENT 6
+enum {
+ SOCKET_MODEM,
+ SOCKET_ADSP,
+ SOCKET_WCNSS,
+ SOCKET_SLPI,
+ SOCKET_CDSP,
+ SOCKET_APPS,
+ NUM_SOCKET_SUBSYSTEMS,
+};
+
struct diag_socket_info {
uint8_t peripheral;
uint8_t type;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5..18849f4 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -337,7 +337,6 @@
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Qualcomm SoCs.
@@ -347,6 +346,20 @@
If unsure, say Y.
+config HW_RANDOM_MSM_LEGACY
+ tristate "QTI MSM Random Number Generator support (LEGACY)"
+ depends on HW_RANDOM && ARCH_QCOM
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on QTI MSM SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called msm_rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5f52b1e..637adb5 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -30,6 +30,7 @@
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986..18cd3e9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,7 @@
rng->hwrng.init = msm_rng_init,
rng->hwrng.cleanup = msm_rng_cleanup,
rng->hwrng.read = msm_rng_read,
+ rng->hwrng.quality = 700;
ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
if (ret) {
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
new file mode 100644
index 0000000..7641a6a
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2011-2013, 2015, 2017 The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+#include <linux/platform_data/qcom_crypto_device.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET 0x0000
+#define PRNG_STATUS_OFFSET 0x0004
+#define PRNG_LFSR_CFG_OFFSET 0x0100
+#define PRNG_CONFIG_OFFSET 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS 0x0000DDDD
+#define PRNG_CONFIG_MASK 0xFFFFFFFD
+#define PRNG_HW_ENABLE 0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */
+
+struct msm_rng_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *prng_clk;
+ uint32_t qrng_perf_client;
+ struct mutex rng_lock;
+};
+
+struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+struct mutex cached_rng_lock;
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+ pr_info("calling msm_rng_bus_scale(LOW)\n");
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_device_info.qrng_perf_client, 0);
+ if (ret)
+ pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+ break;
+ default:
+ pr_err("Unsupported IOCTL call");
+ break;
+ }
+ return ret;
+}
+
+/*
+ * This function reads from the hardware random bit generator directly and
+ * returns the data to the caller.
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+ void *data, size_t max)
+{
+ struct platform_device *pdev;
+ void __iomem *base;
+ size_t currsize = 0;
+ u32 val;
+ u32 *retdata = data;
+ int ret;
+ int failed = 0;
+
+ pdev = msm_rng_dev->pdev;
+ base = msm_rng_dev->base;
+
+ /* no room for word data */
+ if (max < 4)
+ return 0;
+
+ mutex_lock(&msm_rng_dev->rng_lock);
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 1);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ /* enable PRNG clock */
+ ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock in callback\n");
+ goto err;
+ }
+ /* read random data from h/w */
+ do {
+ /* check status bit if data is available */
+ while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+ & 0x00000001)) {
+ if (failed == 10) {
+ pr_err("Data not available after retry\n");
+ break;
+ }
+ pr_err("msm_rng:Data not available!\n");
+ msleep_interruptible(10);
+ failed++;
+ }
+
+ /* read FIFO */
+ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+ if (!val)
+ break; /* no data to read so just bail */
+
+ /* write data back to callers pointer */
+ *(retdata++) = val;
+ currsize += 4;
+ /* make sure we stay on 32bit boundary */
+ if ((max - currsize) < 4)
+ break;
+
+ } while (currsize < max);
+
+ /* vote to turn off clock */
+ clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 0);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ mutex_unlock(&msm_rng_dev->rng_lock);
+
+ val = 0L;
+ return currsize;
+}
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct msm_rng_device *msm_rng_dev;
+ int rv = 0;
+
+ msm_rng_dev = (struct msm_rng_device *)rng->priv;
+ rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+ return rv;
+}
+
+
+static struct hwrng msm_rng = {
+ .name = DRIVER_NAME,
+ .read = msm_rng_read,
+ .quality = 700,
+};
+
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+ unsigned long val = 0;
+ unsigned long reg_val = 0;
+ int ret = 0;
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 1);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+ /* Enable the PRNG CLK */
+ ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+ if (ret) {
+ dev_err(&(msm_rng_dev->pdev)->dev,
+ "failed to enable clock in probe\n");
+ return -EPERM;
+ }
+
+ /* Enable PRNG h/w only if it is NOT ON */
+ val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+ PRNG_HW_ENABLE;
+ /* PRNG H/W is not ON */
+ if (val != PRNG_HW_ENABLE) {
+ val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+ val &= PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+ /* The PRNG CONFIG register should be first written */
+ mb();
+
+ reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+ & PRNG_CONFIG_MASK;
+ reg_val |= PRNG_HW_ENABLE;
+ writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+ /* The PRNG clk should be disabled only after we enable the
+ * PRNG h/w by writing to the PRNG CONFIG register.
+ */
+ mb();
+ }
+ clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+ if (msm_rng_dev->qrng_perf_client) {
+ ret = msm_bus_scale_client_update_request(
+ msm_rng_dev->qrng_perf_client, 0);
+ if (ret)
+ pr_err("bus_scale_client_update_req failed!\n");
+ }
+
+ return 0;
+}
+
+static const struct file_operations msm_rng_fops = {
+ .unlocked_ioctl = msm_rng_ioctl,
+};
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct msm_rng_device *msm_rng_dev = NULL;
+ void __iomem *base = NULL;
+ bool configure_qrng = true;
+ int error = 0;
+ int ret = 0;
+ struct device *dev;
+
+ struct msm_bus_scale_pdata *qrng_platform_support = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "invalid address\n");
+ error = -EFAULT;
+ goto err_exit;
+ }
+
+ msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+ if (!msm_rng_dev) {
+ error = -ENOMEM;
+ goto err_exit;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ error = -ENOMEM;
+ goto err_iomap;
+ }
+ msm_rng_dev->base = base;
+
+ /* create a handle for clock control */
+ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+ "qcom,msm-rng-iface-clk")))
+ msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+ "iface_clk");
+ else
+ msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(msm_rng_dev->prng_clk)) {
+ dev_err(&pdev->dev, "failed to register clock source\n");
+ error = -EPERM;
+ goto err_clk_get;
+ }
+
+ /* save away pdev and register driver data */
+ msm_rng_dev->pdev = pdev;
+ platform_set_drvdata(pdev, msm_rng_dev);
+
+ if (pdev->dev.of_node) {
+ /* Register bus client */
+ qrng_platform_support = msm_bus_cl_get_pdata(pdev);
+ msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
+ qrng_platform_support);
+ msm_rng_device_info.qrng_perf_client =
+ msm_rng_dev->qrng_perf_client;
+ if (!msm_rng_dev->qrng_perf_client)
+ pr_err("Unable to register bus client\n");
+ }
+
+ /* Enable rng h/w for the targets which can access the entire
+ * address space of PRNG.
+ */
+ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+ "qcom,no-qrng-config")))
+ configure_qrng = false;
+ if (configure_qrng) {
+ error = msm_rng_enable_hw(msm_rng_dev);
+ if (error)
+ goto rollback_clk;
+ }
+
+ mutex_init(&msm_rng_dev->rng_lock);
+ mutex_init(&cached_rng_lock);
+
+ /* register with hwrng framework */
+ msm_rng.priv = (unsigned long) msm_rng_dev;
+ error = hwrng_register(&msm_rng);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ error = -EPERM;
+ goto rollback_clk;
+ }
+ ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+
+ msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+ if (IS_ERR(msm_rng_class)) {
+ pr_err("class_create failed\n");
+ return PTR_ERR(msm_rng_class);
+ }
+
+ dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+ NULL, "msm-rng");
+ if (IS_ERR(dev)) {
+ pr_err("Device create failed\n");
+ error = PTR_ERR(dev);
+ goto unregister_chrdev;
+ }
+ cdev_init(&msm_rng_cdev, &msm_rng_fops);
+ msm_rng_dev_cached = msm_rng_dev;
+ return error;
+
+unregister_chrdev:
+ unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+rollback_clk:
+ clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+ iounmap(msm_rng_dev->base);
+err_iomap:
+ kzfree(msm_rng_dev);
+err_exit:
+ return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+ struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+ unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+ hwrng_unregister(&msm_rng);
+ clk_put(msm_rng_dev->prng_clk);
+ iounmap(msm_rng_dev->base);
+ platform_set_drvdata(pdev, NULL);
+ if (msm_rng_dev->qrng_perf_client)
+ msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
+
+ kzfree(msm_rng_dev);
+ msm_rng_dev_cached = NULL;
+ return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *rdata,
+ unsigned int dlen)
+{
+ int sizeread = 0;
+ int rv = -EFAULT;
+
+ if (!msm_rng_dev_cached) {
+ pr_err("%s: msm_rng_dev is not initialized.\n", __func__);
+ rv = -ENODEV;
+ goto err_exit;
+ }
+
+ if (!rdata) {
+ pr_err("%s: data buffer is null!\n", __func__);
+ rv = -EINVAL;
+ goto err_exit;
+ }
+
+ if (signal_pending(current) ||
+ mutex_lock_interruptible(&cached_rng_lock)) {
+ pr_err("%s: mutex lock interrupted!\n", __func__);
+ rv = -ERESTARTSYS;
+ goto err_exit;
+ }
+ sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+ if (sizeread == dlen)
+ rv = 0;
+
+ mutex_unlock(&cached_rng_lock);
+err_exit:
+ return rv;
+
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+ .generate = qrng_get_random,
+ .seed = qrng_reset,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "qrng",
+ .cra_driver_name = "fips_hw_qrng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static const struct of_device_id qrng_match[] = {
+ { .compatible = "qcom,msm-rng",
+ },
+ {}
+};
+
+static struct platform_driver rng_driver = {
+ .probe = msm_rng_probe,
+ .remove = msm_rng_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qrng_match,
+ }
+};
+
+static int __init msm_rng_init(void)
+{
+ int ret;
+
+ msm_rng_dev_cached = NULL;
+ ret = platform_driver_register(&rng_driver);
+ if (ret) {
+ pr_err("%s: platform_driver_register error:%d\n",
+ __func__, ret);
+ goto err_exit;
+ }
+ ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+ if (ret) {
+ pr_err("%s: crypto_register_algs error:%d\n",
+ __func__, ret);
+ goto err_exit;
+ }
+
+err_exit:
+ return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+ crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+ platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_DESCRIPTION("QTI MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d..7e4a9d1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@
#endif
#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -122,23 +130,31 @@
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
+ unxlate_dev_mem_ptr(p, ptr);
+ }
+
if (remaining)
return -EFAULT;
@@ -181,30 +197,36 @@
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
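
page_is_allowed() is effectively tri-state through devmem_is_allowed(): 0 rejects the access with -EPERM, 1 passes it through, and 2 means allowed but filtered, so reads return zeros via clear_user() and writes are silently skipped, keeping tools that probe /dev/mem working against restricted pages. A compact sketch of the read-side dispatch:

    #include <string.h>

    enum devmem_access { DEV_DENY = 0, DEV_ALLOW = 1, DEV_FILTER = 2 };

    /* Filtered pages read back as zeros instead of failing outright. */
    static long read_page(enum devmem_access allowed, char *dst,
                          const char *src, unsigned long sz)
    {
            if (allowed == DEV_DENY)
                    return -1;              /* -EPERM in the kernel */
            if (allowed == DEV_FILTER)
                    memset(dst, 0, sz);     /* clear_user() equivalent */
            else
                    memcpy(dst, src, sz);   /* copy_to_user() equivalent */
            return (long)sz;
    }
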
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 6af1ce0..336d02a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -84,8 +84,14 @@
struct ieee1284_info state;
struct ieee1284_info saved_state;
long default_inactivity;
+ int index;
};
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
+static DEFINE_IDA(ida_index);
+
/* pp_struct.flags bitfields */
#define PP_CLAIMED (1<<0)
#define PP_EXCL (1<<1)
@@ -287,6 +293,7 @@
struct pardevice *pdev = NULL;
char *name;
struct pardev_cb ppdev_cb;
+ int index;
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
@@ -299,20 +306,23 @@
return -ENXIO;
}
+ index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
ppdev_cb.irq_func = pp_irq;
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
ppdev_cb.private = pp;
- pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+ pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
parport_put_port(port);
if (!pdev) {
printk(KERN_WARNING "%s: failed to register device!\n", name);
+ ida_simple_remove(&ida_index, index);
kfree(name);
return -ENXIO;
}
pp->pdev = pdev;
+ pp->index = index;
dev_dbg(&pdev->dev, "registered pardevice\n");
return 0;
}
@@ -749,6 +759,7 @@
if (pp->pdev) {
parport_unregister_device(pp->pdev);
+ ida_simple_remove(&ida_index, pp->index);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
@@ -789,13 +800,29 @@
static void pp_attach(struct parport *port)
{
- device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number),
- NULL, "parport%d", port->number);
+ struct device *ret;
+
+ if (devices[port->number])
+ return;
+
+ ret = device_create(ppdev_class, port->dev,
+ MKDEV(PP_MAJOR, port->number), NULL,
+ "parport%d", port->number);
+ if (IS_ERR(ret)) {
+ pr_err("Failed to create device parport%d\n",
+ port->number);
+ return;
+ }
+ devices[port->number] = ret;
}
static void pp_detach(struct parport *port)
{
+ if (!devices[port->number])
+ return;
+
device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+ devices[port->number] = NULL;
}
static int pp_probe(struct pardevice *par_dev)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d6876d5..08d1dd5 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2042,64 +2042,66 @@
};
#endif /* CONFIG_SYSCTL */
-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
-int random_int_secret_init(void)
-{
- get_random_bytes(random_int_secret, sizeof(random_int_secret));
- return 0;
-}
-
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
- __aligned(sizeof(unsigned long));
+struct batched_entropy {
+ union {
+ unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)];
+ unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)];
+ };
+ unsigned int position;
+};
/*
- * Get a random word for internal kernel use only. Similar to urandom but
- * with the goal of minimal entropy pool depletion. As a result, the random
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
*/
-unsigned int get_random_int(void)
-{
- __u32 *hash;
- unsigned int ret;
-
- if (arch_get_random_int(&ret))
- return ret;
-
- hash = get_cpu_var(get_random_int_hash);
-
- hash[0] += current->pid + jiffies + random_get_entropy();
- md5_transform(hash, random_int_secret);
- ret = hash[0];
- put_cpu_var(get_random_int_hash);
-
- return ret;
-}
-EXPORT_SYMBOL(get_random_int);
-
-/*
- * Same as get_random_int(), but returns unsigned long.
- */
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
unsigned long get_random_long(void)
{
- __u32 *hash;
unsigned long ret;
+ struct batched_entropy *batch;
if (arch_get_random_long(&ret))
return ret;
- hash = get_cpu_var(get_random_int_hash);
-
- hash[0] += current->pid + jiffies + random_get_entropy();
- md5_transform(hash, random_int_secret);
- ret = *(unsigned long *)hash;
- put_cpu_var(get_random_int_hash);
-
+ batch = &get_cpu_var(batched_entropy_long);
+ if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
+ extract_crng((u8 *)batch->entropy_long);
+ batch->position = 0;
+ }
+ ret = batch->entropy_long[batch->position++];
+ put_cpu_var(batched_entropy_long);
return ret;
}
EXPORT_SYMBOL(get_random_long);
+#if BITS_PER_LONG == 32
+unsigned int get_random_int(void)
+{
+ return get_random_long();
+}
+#else
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
+unsigned int get_random_int(void)
+{
+ unsigned int ret;
+ struct batched_entropy *batch;
+
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ batch = &get_cpu_var(batched_entropy_int);
+ if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
+ extract_crng((u8 *)batch->entropy_int);
+ batch->position = 0;
+ }
+ ret = batch->entropy_int[batch->position++];
+ put_cpu_var(batched_entropy_int);
+ return ret;
+}
+#endif
+EXPORT_SYMBOL(get_random_int);
+
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.
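
The rework trades the per-CPU MD5 construction for per-CPU batches refilled from the ChaCha20 CRNG: one extract_crng() call produces a CHACHA20_BLOCK_SIZE-byte block, and subsequent get_random_int()/get_random_long() calls hand out words until the batch runs dry. The shape of the pattern, sketched in user space with a toy (non-cryptographic) filler standing in for extract_crng():

    #include <stddef.h>
    #include <stdint.h>

    #define BATCH_BYTES 64  /* CHACHA20_BLOCK_SIZE */
    #define BATCH_WORDS (BATCH_BYTES / sizeof(uint64_t))

    struct batched_entropy {
            uint64_t words[BATCH_WORDS];
            unsigned int position;
    };

    /* Toy stand-in for extract_crng(); NOT cryptographically secure. */
    static void crng_fill(uint8_t *buf, size_t len)
    {
            static uint64_t s = 0x9e3779b97f4a7c15ULL;
            size_t i;

            for (i = 0; i < len; i++) {
                    s = s * 6364136223846793005ULL + 1442695040888963407ULL;
                    buf[i] = (uint8_t)(s >> 56);
            }
    }

    static uint64_t get_random_u64_batched(struct batched_entropy *batch)
    {
            /* Refill only when the previous block is fully consumed, so
             * the expensive generator runs once per BATCH_WORDS calls.
             */
            if (batch->position % BATCH_WORDS == 0) {
                    crng_fill((uint8_t *)batch->words, BATCH_BYTES);
                    batch->position = 0;
            }
            return batch->words[batch->position++];
    }
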
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5649234..471a301 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1136,6 +1136,8 @@
{
struct port *port;
struct scatterlist sg[1];
+ void *data;
+ int ret;
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1144,8 +1146,14 @@
if (!port)
return -EPIPE;
- sg_init_one(sg, buf, count);
- return __send_to_port(port, sg, 1, count, (void *)buf, false);
+ data = kmemdup(buf, count, GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+
+ sg_init_one(sg, data, count);
+ ret = __send_to_port(port, sg, 1, count, data, false);
+ kfree(data);
+ return ret;
}
/*
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ece2f00..1b545d6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -71,6 +71,8 @@
bool orphan;
unsigned int enable_count;
unsigned int prepare_count;
+ bool need_handoff_enable;
+ bool need_handoff_prepare;
unsigned long min_rate;
unsigned long max_rate;
unsigned long accuracy;
@@ -997,6 +999,19 @@
hlist_for_each_entry(child, &core->children, child_node)
clk_unprepare_unused_subtree(child);
+ /*
+ * setting CLK_ENABLE_HAND_OFF flag triggers this conditional
+ *
+ * need_handoff_prepare implies this clk was already prepared by
+ * __clk_init. now we have a proper user, so unset the flag in our
+ * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
+ * for details.
+ */
+ if (core->need_handoff_prepare) {
+ core->need_handoff_prepare = false;
+ clk_core_unprepare(core);
+ }
+
if (core->prepare_count)
return;
@@ -1023,6 +1038,21 @@
hlist_for_each_entry(child, &core->children, child_node)
clk_disable_unused_subtree(child);
+ /*
+ * setting CLK_ENABLE_HAND_OFF flag triggers this conditional
+ *
+ * need_handoff_enable implies this clk was already enabled by
+ * __clk_init. now we have a proper user, so unset the flag in our
+ * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
+ * for details.
+ */
+ if (core->need_handoff_enable) {
+ core->need_handoff_enable = false;
+ flags = clk_enable_lock();
+ clk_core_disable(core);
+ clk_enable_unlock(flags);
+ }
+
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
@@ -1679,8 +1709,14 @@
}
}
+ /*
+ * The Fabia PLLs only have 16 bits to program the fractional divider.
+ * Hence the programmed rate might be slightly different than the
+ * requested one.
+ */
if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
- best_parent_rate != parent->rate)
+ (DIV_ROUND_CLOSEST(best_parent_rate, 1000) !=
+ DIV_ROUND_CLOSEST(parent->rate, 1000)))
top = clk_calc_new_rates(parent, best_parent_rate);
out:
@@ -2315,6 +2351,56 @@
NULL,
};
+static void clk_state_subtree(struct clk_core *c)
+{
+ int vdd_level = 0;
+ struct clk_core *child;
+
+ if (!c)
+ return;
+
+ if (c->vdd_class) {
+ vdd_level = clk_find_vdd_level(c, c->rate);
+ if (vdd_level < 0)
+ vdd_level = 0;
+ }
+
+ trace_clk_state(c->name, c->prepare_count, c->enable_count,
+ c->rate, vdd_level);
+
+ hlist_for_each_entry(child, &c->children, child_node)
+ clk_state_subtree(child);
+}
+
+static int clk_state_show(struct seq_file *s, void *data)
+{
+ struct clk_core *c;
+ struct hlist_head **lists = (struct hlist_head **)s->private;
+
+ clk_prepare_lock();
+
+ for (; *lists; lists++)
+ hlist_for_each_entry(c, *lists, child_node)
+ clk_state_subtree(c);
+
+ clk_prepare_unlock();
+
+ return 0;
+}
+
+
+static int clk_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_state_show, inode->i_private);
+}
+
+static const struct file_operations clk_state_fops = {
+ .open = clk_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
@@ -2950,6 +3036,11 @@
if (!d)
return -ENOMEM;
+ d = debugfs_create_file("trace_clocks", 0444, rootdir, &all_lists,
+ &clk_state_fops);
+ if (!d)
+ return -ENOMEM;
+
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(core, &clk_debug_list, debug_node)
clk_debug_create_one(core, rootdir);
@@ -3140,6 +3231,37 @@
clk_enable_unlock(flags);
}
+ /*
+ * enable clocks with the CLK_ENABLE_HAND_OFF flag set
+ *
+ * This flag causes the framework to enable the clock at registration
+ * time, which is sometimes necessary for clocks that would cause a
+ * system crash when gated (e.g. cpu, memory, etc). The prepare_count
+ * is migrated over to the first clk consumer to call clk_prepare().
+ * Similarly the clk's enable_count is migrated to the first consumer
+ * to call clk_enable().
+ */
+ if (core->flags & CLK_ENABLE_HAND_OFF) {
+ unsigned long flags;
+
+ /*
+ * Some clocks have hardware gating that must already be ON
+ * before they can be prepared/enabled, so hand off only the
+ * clocks that the hardware reports as currently running.
+ */
+ if (clk_core_is_enabled(core)) {
+ core->need_handoff_prepare = true;
+ core->need_handoff_enable = true;
+ ret = clk_core_prepare(core);
+ if (ret)
+ goto out;
+ flags = clk_enable_lock();
+ clk_core_enable(core);
+ clk_enable_unlock(flags);
+ }
+ }
+
kref_init(&core->ref);
out:
clk_prepare_unlock();
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 34c9735..5b98ff9 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -1282,13 +1282,13 @@
LPC32XX_DEFINE_MUX(PWM1_MUX, PWMCLK_CTRL, 1, 0x1, NULL, 0),
LPC32XX_DEFINE_DIV(PWM1_DIV, PWMCLK_CTRL, 4, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_GATE(PWM1_GATE, PWMCLK_CTRL, 0, 0),
LPC32XX_DEFINE_COMPOSITE(PWM1, PWM1_MUX, PWM1_DIV, PWM1_GATE),
LPC32XX_DEFINE_MUX(PWM2_MUX, PWMCLK_CTRL, 3, 0x1, NULL, 0),
LPC32XX_DEFINE_DIV(PWM2_DIV, PWMCLK_CTRL, 8, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_GATE(PWM2_GATE, PWMCLK_CTRL, 2, 0),
LPC32XX_DEFINE_COMPOSITE(PWM2, PWM2_MUX, PWM2_DIV, PWM2_GATE),
@@ -1335,8 +1335,7 @@
LPC32XX_DEFINE_GATE(USB_DIV_GATE, USB_CTRL, 17, 0),
LPC32XX_DEFINE_COMPOSITE(USB_DIV, _NULL, USB_DIV_DIV, USB_DIV_GATE),
- LPC32XX_DEFINE_DIV(SD_DIV, MS_CTRL, 0, 4, NULL,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
+ LPC32XX_DEFINE_DIV(SD_DIV, MS_CTRL, 0, 4, NULL, CLK_DIVIDER_ONE_BASED),
LPC32XX_DEFINE_CLK(SD_GATE, MS_CTRL, BIT(5) | BIT(9), BIT(5) | BIT(9),
0x0, BIT(5) | BIT(9), 0x0, 0x0, clk_mask_ops),
LPC32XX_DEFINE_COMPOSITE(SD, _NULL, SD_DIV, SD_GATE),
@@ -1478,6 +1477,20 @@
return clk;
}
+static void __init lpc32xx_clk_div_quirk(u32 reg, u32 div_mask, u32 gate)
+{
+ u32 val;
+
+ regmap_read(clk_regmap, reg, &val);
+
+ if (!(val & div_mask)) {
+ val &= ~gate;
+ val |= BIT(__ffs(div_mask));
+ }
+
+ regmap_update_bits(clk_regmap, reg, gate | div_mask, val);
+}
+
static void __init lpc32xx_clk_init(struct device_node *np)
{
unsigned int i;
@@ -1517,6 +1530,17 @@
return;
}
+ /*
+ * The divider part of the PWM and MS clocks requires a quirk to
+ * avoid misinterpreting a formally valid zero value in the register
+ * bitfield, which actually indicates another clock gate. Instead of
+ * adding complexity to the gate clock, ensure that a zero divider
+ * value is never seen at runtime.
+ */
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_PWMCLK_CTRL, 0xf0, BIT(0));
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_PWMCLK_CTRL, 0xf00, BIT(2));
+ lpc32xx_clk_div_quirk(LPC32XX_CLKPWR_MS_CTRL, 0xf, BIT(5) | BIT(9));
+
for (i = 1; i < LPC32XX_CLK_MAX; i++) {
clk[i] = lpc32xx_clk_register(i);
if (IS_ERR(clk[i])) {
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index cf874a1..d47b66e 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,6 +1,7 @@
config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
+ depends on REGULATOR
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
@@ -224,3 +225,14 @@
Support for the graphics clock controller on Qualcomm Technologies, Inc.
sdm845 devices.
Say Y if you want to support graphics controller devices.
+
+config MSM_CLK_AOP_QMP
+ tristate "AOP QMP Clock Driver"
+ depends on COMMON_CLK_QCOM && MSM_QMP
+ help
+ The Always On Processor (AOP) manages a few shared clocks on some
+ Qualcomm Technologies, Inc. SoCs. It accepts requests from other
+ hardware subsystems via QMP mailboxes.
+ Say Y to support the clocks managed by the AOP on platforms such as
+ sdm845.
+
+source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 6e13562..930e281 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -24,6 +24,7 @@
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
+obj-$(CONFIG_MSM_CLK_AOP_QMP) += clk-aop-qmp.o
obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
@@ -38,3 +39,5 @@
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
+
+obj-y += mdss/
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index a274975..6296c40 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -764,6 +764,7 @@
};
static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(24000000, P_CAM_CC_PLL3_OUT_EVEN, 16, 0, 0),
F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0),
{ }
@@ -1114,19 +1115,6 @@
},
};
-static struct clk_branch cam_cc_debug_clk = {
- .halt_reg = 0xc008,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xc008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "cam_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch cam_cc_fd_core_clk = {
.halt_reg = 0xb0c8,
.halt_check = BRANCH_HALT,
@@ -1763,7 +1751,6 @@
[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
- [CAM_CC_DEBUG_CLK] = &cam_cc_debug_clk.clkr,
[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 6ff621d..d15d1bb 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -307,6 +307,15 @@
u64 quotient;
int alpha_bw = ALPHA_BITWIDTH;
+ /*
+ * The PLL's parent rate is zero, most likely because the parent
+ * hasn't registered yet. Return early with the requested rate.
+ */
+ if (!prate) {
+ pr_debug("PLL's parent rate hasn't been initialized\n");
+ return rate;
+ }
+
quotient = rate;
remainder = do_div(quotient, prate);
*l = quotient;
diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c
new file mode 100644
index 0000000..f698a55
--- /dev/null
+++ b/drivers/clk/qcom/clk-aop-qmp.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mailbox_client.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
+
+#define MAX_LEN 96
+#define MBOX_TOUT_MS 1000
+
+struct qmp_pkt {
+ u32 size;
+ void *data;
+};
+
+#define DEFINE_CLK_AOP_QMP(_name, _class, _res, _estate, _dstate) \
+ static struct clk_aop_qmp _name = { \
+ .msg.class = #_class, \
+ .msg.res = #_res, \
+ .enable_state = _estate, \
+ .disable_state = _dstate, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &aop_qmp_clk_ops, \
+ .name = #_name, \
+ .num_parents = 0, \
+ .flags = CLK_ENABLE_HAND_OFF, \
+ }, \
+ }
+
+#define to_aop_qmp_clk(hw) container_of(hw, struct clk_aop_qmp, hw)
+
+/*
+ * struct qmp_mbox_msg - mailbox data to QMP
+ * @class: identifies the class.
+ * @res: identifies the resource in the class
+ * @level: identifies the level for the resource.
+ */
+struct qmp_mbox_msg {
+ char class[MAX_LEN];
+ char res[MAX_LEN];
+ int level;
+};
+
+/*
+ * struct clk_aop_qmp - AOP clock
+ * @dev: The device that corresponds to this clock.
+ * @hw: The clock hardware for this clock.
+ * @cl: The client mailbox for this clock.
+ * @mbox: The mbox controller for this clock.
+ * @level: The clock level for this clock.
+ * @enable_state: The clock state when this clock is prepared.
+ * @disable_state: The clock state when this clock is unprepared.
+ * @msg: QMP data associated with this clock.
+ * @enabled: Status of the clock enable.
+ */
+struct clk_aop_qmp {
+ struct device *dev;
+ struct clk_hw hw;
+ struct mbox_client cl;
+ struct mbox_chan *mbox;
+ int level;
+ int enable_state;
+ int disable_state;
+ struct qmp_mbox_msg msg;
+ bool enabled;
+};
+
+static DEFINE_MUTEX(clk_aop_lock);
+
+static unsigned long clk_aop_qmp_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+ return clk->level;
+}
+
+static long clk_aop_qmp_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static int clk_aop_qmp_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ char mbox_msg[MAX_LEN];
+ struct qmp_pkt pkt;
+ struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+ int ret = 0;
+
+ mutex_lock(&clk_aop_lock);
+
+ snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
+ clk->msg.class, clk->msg.res, rate);
+ pkt.size = MAX_LEN;
+ pkt.data = mbox_msg;
+
+ ret = mbox_send_message(clk->mbox, &pkt);
+ if (ret < 0) {
+ pr_err("Failed to send set rate request of %lu for %s, ret %d\n",
+ rate, clk_hw_get_name(hw), ret);
+ goto err;
+ } else {
+ /* Success: update the return value */
+ ret = 0;
+ }
+
+ /* update the current clock level once the mailbox message is sent */
+ clk->level = rate;
+err:
+ mutex_unlock(&clk_aop_lock);
+
+ return ret;
+}
+
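The request to the AOP is a plain-text key/value string rather than a binary structure. A userspace sketch of the formatting is below; the level value here is illustrative (the real values come from dt-bindings/clock/qcom,aop-qmp.h):

	#include <stdio.h>

	#define MAX_LEN 96

	int main(void)
	{
		char mbox_msg[MAX_LEN];
		const char *class = "clock", *res = "qdss";
		long rate = 1;	/* illustrative clock level */

		snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
			 class, res, rate);
		puts(mbox_msg);	/* {class: clock, res: qdss, val: 1} */
		return 0;
	}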
+static int clk_aop_qmp_prepare(struct clk_hw *hw)
+{
+ char mbox_msg[MAX_LEN];
+ unsigned long rate;
+ int ret = 0;
+ struct qmp_pkt pkt;
+ struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+ mutex_lock(&clk_aop_lock);
+
+ if (clk->level)
+ rate = clk->level;
+ else
+ rate = clk->enable_state;
+
+ snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
+ clk->msg.class, clk->msg.res, rate);
+ pkt.size = MAX_LEN;
+ pkt.data = mbox_msg;
+
+ ret = mbox_send_message(clk->mbox, &pkt);
+ if (ret < 0) {
+ pr_err("Failed to send clk prepare request for %s, ret %d\n",
+ clk_hw_get_name(hw), ret);
+ goto err;
+ } else {
+ /* Success: update the return value */
+ ret = 0;
+ }
+
+ /* update the current clock level once the mailbox message is sent */
+ clk->level = rate;
+
+ clk->enabled = true;
+err:
+ mutex_unlock(&clk_aop_lock);
+
+ return ret;
+}
+
+static void clk_aop_qmp_unprepare(struct clk_hw *hw)
+{
+ char mbox_msg[MAX_LEN];
+ unsigned long rate;
+ int ret = 0;
+ struct qmp_pkt pkt;
+ struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+ mutex_lock(&clk_aop_lock);
+
+ rate = clk->disable_state;
+
+ snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
+ clk->msg.class, clk->msg.res, rate);
+ pkt.size = MAX_LEN;
+ pkt.data = mbox_msg;
+
+ ret = mbox_send_message(clk->mbox, &pkt);
+ if (ret < 0) {
+ pr_err("Failed to send clk unprepare request for %s, ret %d\n",
+ clk_hw_get_name(hw), ret);
+ goto err;
+ }
+
+ clk->enabled = false;
+err:
+ mutex_unlock(&clk_aop_lock);
+}
+
+static int clk_aop_qmp_is_enabled(struct clk_hw *hw)
+{
+ struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+ return clk->enabled;
+}
+
+static const struct clk_ops aop_qmp_clk_ops = {
+ .prepare = clk_aop_qmp_prepare,
+ .unprepare = clk_aop_qmp_unprepare,
+ .recalc_rate = clk_aop_qmp_recalc_rate,
+ .set_rate = clk_aop_qmp_set_rate,
+ .round_rate = clk_aop_qmp_round_rate,
+ .is_enabled = clk_aop_qmp_is_enabled,
+};
+
+DEFINE_CLK_AOP_QMP(qdss_qmp_clk, clock, qdss,
+ QDSS_CLK_LEVEL_DYNAMIC, QDSS_CLK_LEVEL_OFF);
+
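For reference, the DEFINE_CLK_AOP_QMP() invocation above expands, by hand, to roughly the following; note how the #_class and #_res stringifications become the QMP message fields used by the ops:

	static struct clk_aop_qmp qdss_qmp_clk = {
		.msg.class = "clock",
		.msg.res = "qdss",
		.enable_state = QDSS_CLK_LEVEL_DYNAMIC,
		.disable_state = QDSS_CLK_LEVEL_OFF,
		.hw.init = &(struct clk_init_data){
			.ops = &aop_qmp_clk_ops,
			.name = "qdss_qmp_clk",
			.num_parents = 0,
			.flags = CLK_ENABLE_HAND_OFF,
		},
	};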
+static struct clk_hw *aop_qmp_clk_hws[] = {
+ [QDSS_CLK] = &qdss_qmp_clk.hw,
+};
+
+static int qmp_update_client(struct clk_hw *hw, struct device *dev,
+ struct mbox_chan *mbox)
+{
+ struct clk_aop_qmp *clk_aop = to_aop_qmp_clk(hw);
+
+ /* Use mailbox client with blocking mode */
+ clk_aop->cl.dev = dev;
+ clk_aop->cl.tx_block = true;
+ clk_aop->cl.tx_tout = MBOX_TOUT_MS;
+ clk_aop->cl.knows_txdone = false;
+
+ if (mbox) {
+ clk_aop->mbox = mbox;
+ return 0;
+ }
+
+ /* Allocate mailbox channel */
+ mbox = clk_aop->mbox = mbox_request_channel(&clk_aop->cl, 0);
+ if (IS_ERR(clk_aop->mbox) && PTR_ERR(clk_aop->mbox) != -EPROBE_DEFER) {
+ dev_err(dev, "Failed to get mailbox channel %pK %ld\n",
+ mbox, PTR_ERR(mbox));
+ return PTR_ERR(clk_aop->mbox);
+ }
+
+ return 0;
+}
+
+static int aop_qmp_clk_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct device_node *np = pdev->dev.of_node;
+ struct mbox_chan *mbox = NULL;
+ int num_clks = ARRAY_SIZE(aop_qmp_clk_hws);
+ int ret = 0, i = 0;
+
+ /*
+ * Allocate mbox channel for the first clock client. The same channel
+ * would be used for the rest of the clock clients.
+ */
+ ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
+ if (ret < 0)
+ return ret;
+
+ for (i = 1; i < num_clks; i++) {
+ ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to update QMP client %d\n",
+ ret);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ ret = clk_aop_qmp_prepare(aop_qmp_clk_hws[i]);
+ if (ret < 0)
+ goto fail;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ clk = devm_clk_register(&pdev->dev, aop_qmp_clk_hws[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail;
+ }
+ }
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register clock provider\n");
+ goto fail;
+ }
+
+ dev_info(&pdev->dev, "Registered clocks with AOP\n");
+
+ return ret;
+fail:
+ mbox_free_channel(mbox);
+
+ return ret;
+}
+
+static const struct of_device_id aop_qmp_clk_of_match[] = {
+ { .compatible = "qcom,aop-qmp-clk", },
+ {}
+};
+
+static struct platform_driver aop_qmp_clk_driver = {
+ .driver = {
+ .name = "qmp-aop-clk",
+ .of_match_table = aop_qmp_clk_of_match,
+ },
+ .probe = aop_qmp_clk_probe,
+};
+
+static int __init aop_qmp_clk_init(void)
+{
+ return platform_driver_register(&aop_qmp_clk_driver);
+}
+subsys_initcall(aop_qmp_clk_init);
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index d5e2be6..035d337 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -224,6 +224,7 @@
u32 osm_clk_rate;
u32 xo_clk_rate;
bool secure_init;
+ bool per_core_dcvs;
bool red_fsm_en;
bool boost_fsm_en;
bool safe_fsm_en;
@@ -449,6 +450,17 @@
return 0;
}
+static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ if (!parent_hw)
+ return -EINVAL;
+
+ return clk_hw_round_rate(parent_hw, rate);
+}
+
static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -497,34 +509,22 @@
.set_rate = l3_clk_set_rate,
};
-enum {
- P_XO,
-};
-
-static const struct parent_map gcc_parent_map_1[] = {
- { P_XO, 0 },
-};
-
-static const char * const gcc_parent_names_1[] = {
- "xo",
-};
-
static struct clk_init_data osm_clks_init[] = {
[0] = {
.name = "l3_clk",
- .parent_names = (const char *[]){ "bi_tcxo" },
+ .parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_l3_osm,
},
[1] = {
.name = "pwrcl_clk",
- .parent_names = (const char *[]){ "bi_tcxo" },
+ .parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_cpu_osm,
},
[2] = {
.name = "perfcl_clk",
- .parent_names = (const char *[]){ "bi_tcxo" },
+ .parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_cpu_osm,
},
@@ -1536,8 +1536,16 @@
parent = to_clk_osm(clk_hw_get_parent(&c->hw));
spin_lock_irqsave(&parent->lock, flags);
- val = clk_osm_read_reg_no_log(parent,
+ /*
+ * Use core 0's copy as proxy for the whole cluster when per
+ * core DCVS is disabled.
+ */
+ if (parent->per_core_dcvs)
+ val = clk_osm_read_reg_no_log(parent,
OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+ else
+ val = clk_osm_read_reg_no_log(parent,
+ OSM_CYCLE_COUNTER_STATUS_REG(0));
if (val < c->prev_cycle_counter) {
/* Handle counter overflow */
@@ -2246,8 +2254,6 @@
struct clk_osm *c;
struct device *dev = &pdev->dev;
struct clk_onecell_data *clk_data;
- struct resource *res;
- void *vbase;
char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
@@ -2455,30 +2461,6 @@
(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "apps_itm_ctl");
- if (!res) {
- dev_err(&pdev->dev,
- "Unable to get platform resource for apps_itm_ctl\n");
- return -ENOMEM;
- }
-
- vbase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!vbase) {
- dev_err(&pdev->dev,
- "Unable to map in apps_itm_ctl base\n");
- return -ENOMEM;
- }
-
- val = readl_relaxed(vbase + 0x0);
- val &= ~BIT(0);
- writel_relaxed(val, vbase + 0x0);
-
- val = readl_relaxed(vbase + 0x4);
- val &= ~BIT(0);
- writel_relaxed(val, vbase + 0x4);
-
/*
* Perform typical secure-world HW initialization
* as necessary.
@@ -2503,8 +2485,10 @@
clk_osm_misc_programming(&pwrcl_clk);
clk_osm_misc_programming(&perfcl_clk);
- if (of_property_read_bool(pdev->dev.of_node,
- "qcom,enable-per-core-dcvs")) {
+ pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,enable-per-core-dcvs");
+ if (pwrcl_clk.per_core_dcvs) {
val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
val |= BIT(0);
clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
@@ -2516,6 +2500,7 @@
clk_ops_core = clk_dummy_ops;
clk_ops_core.set_rate = cpu_clk_set_rate;
+ clk_ops_core.round_rate = cpu_clk_round_rate;
clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
spin_lock_init(&l3_clk.lock);
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 0c0ddf9..3a38d37 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -161,7 +161,7 @@
* @current_freq: last cached frequency when using branches with shared RCGs
* @enable_safe_config: When set, the RCG is parked at CXO when it's disabled
* @clkr: regmap clock handle
- *
+ * @flags: additional flag parameters for the RCG
*/
struct clk_rcg2 {
u32 cmd_rcgr;
@@ -172,6 +172,8 @@
unsigned long current_freq;
bool enable_safe_config;
struct clk_regmap clkr;
+ u8 flags;
+#define FORCE_ENABLE_RCG BIT(0)
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a13a45e..2f9cfdf 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -164,6 +164,47 @@
CMD_ROOT_EN, 0);
}
+static int prepare_enable_rcg_srcs(struct clk *curr, struct clk *new)
+{
+ int rc = 0;
+
+ rc = clk_prepare(curr);
+ if (rc)
+ return rc;
+
+ rc = clk_prepare(new);
+ if (rc)
+ goto err_new_src_prepare;
+
+ rc = clk_enable(curr);
+ if (rc)
+ goto err_curr_src_enable;
+
+ rc = clk_enable(new);
+ if (rc)
+ goto err_new_src_enable;
+
+ return rc;
+
+err_new_src_enable:
+ clk_disable(curr);
+err_curr_src_enable:
+ clk_unprepare(new);
+err_new_src_prepare:
+ clk_unprepare(curr);
+
+ return rc;
+}
+
+static void disable_unprepare_rcg_srcs(struct clk *curr, struct clk *new)
+{
+ clk_disable(new);
+ clk_disable(curr);
+
+ clk_unprepare(new);
+ clk_unprepare(curr);
+}
+
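clk_prepare() may sleep while clk_enable() must be callable from atomic context, so the helper above prepares both parents before enabling either, and unwinds in strict reverse order on failure. A standalone sketch of that goto-unwind pattern, with the clk calls replaced by prints:

	#include <stdio.h>

	static int op(const char *what, int fail)
	{
		printf("%s%s\n", what, fail ? " -> fails" : "");
		return fail ? -1 : 0;
	}

	static int pairwise_setup(int fail_step)
	{
		int rc;

		rc = op("prepare(curr)", fail_step == 0);
		if (rc)
			return rc;
		rc = op("prepare(new)", fail_step == 1);
		if (rc)
			goto err_new_prepare;
		rc = op("enable(curr)", fail_step == 2);
		if (rc)
			goto err_curr_enable;
		rc = op("enable(new)", fail_step == 3);
		if (rc)
			goto err_new_enable;
		return 0;

	err_new_enable:
		op("disable(curr)", 0);
	err_curr_enable:
		op("unprepare(new)", 0);
	err_new_prepare:
		op("unprepare(curr)", 0);
		return rc;
	}

	int main(void)
	{
		pairwise_setup(3);	/* fail at enable(new) to show the unwind */
		return 0;
	}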
/*
* Calculate m/n:d rate
*
@@ -377,8 +418,9 @@
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- const struct freq_tbl *f;
- int ret;
+ const struct freq_tbl *f, *f_curr;
+ int ret, curr_src_index, new_src_index;
+ struct clk_hw *curr_src = NULL, *new_src = NULL;
f = qcom_find_freq(rcg->freq_tbl, rate);
if (!f)
@@ -393,10 +435,40 @@
return 0;
}
+ if (rcg->flags & FORCE_ENABLE_RCG) {
+ rcg->current_freq = clk_get_rate(hw->clk);
+ if (rcg->current_freq == cxo_f.freq)
+ curr_src_index = 0;
+ else {
+ f_curr = qcom_find_freq(rcg->freq_tbl,
+ rcg->current_freq);
+ if (!f_curr)
+ return -EINVAL;
+
+ curr_src_index = qcom_find_src_index(hw,
+ rcg->parent_map, f_curr->src);
+ }
+
+ new_src_index = qcom_find_src_index(hw, rcg->parent_map,
+ f->src);
+
+ curr_src = clk_hw_get_parent_by_index(hw, curr_src_index);
+ new_src = clk_hw_get_parent_by_index(hw, new_src_index);
+
+ /* The RCG could currently be disabled. Enable its parents. */
+ ret = prepare_enable_rcg_srcs(curr_src->clk, new_src->clk);
+ if (ret)
+ return ret;
+
+ clk_rcg2_set_force_enable(hw);
+ }
+
ret = clk_rcg2_configure(rcg, f);
if (ret)
return ret;
+ if (rcg->flags & FORCE_ENABLE_RCG) {
+ clk_rcg2_clear_force_enable(hw);
+ disable_unprepare_rcg_srcs(curr_src->clk, new_src->clk);
+ }
+
/* Update current frequency with the requested frequency. */
rcg->current_freq = rate;
return ret;
@@ -420,6 +492,11 @@
unsigned long rate;
const struct freq_tbl *f;
+ if (rcg->flags & FORCE_ENABLE_RCG) {
+ clk_rcg2_set_force_enable(hw);
+ return 0;
+ }
+
if (!rcg->enable_safe_config)
return 0;
@@ -456,6 +533,11 @@
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ if (rcg->flags & FORCE_ENABLE_RCG) {
+ clk_rcg2_clear_force_enable(hw);
+ return;
+ }
+
if (!rcg->enable_safe_config)
return;
/*
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index bcee3ee..5e11485 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -277,9 +277,6 @@
DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 0x0,
&apps_rsc, 19200000, CLK_RPMH_APPS_RSC_STATE_MASK,
CLK_RPMH_APPS_RSC_AO_STATE_MASK);
-DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", &apps_rsc,
- 19200000, CLK_RPMH_APPS_RSC_STATE_MASK,
- CLK_RPMH_APPS_RSC_AO_STATE_MASK);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", &apps_rsc,
19200000, CLK_RPMH_APPS_RSC_STATE_MASK,
CLK_RPMH_APPS_RSC_AO_STATE_MASK);
@@ -299,8 +296,6 @@
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
[RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
- [RPMH_LN_BB_CLK1] = &sdm845_ln_bb_clk1.hw,
- [RPMH_LN_BB_CLK1_A] = &sdm845_ln_bb_clk1_ao.hw,
[RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
[RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
[RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index d74db61..d30675c 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -113,6 +113,10 @@
"disp_cc_mdss_spdm_pclk1_clk",
"disp_cc_mdss_spdm_rot_clk",
"disp_cc_mdss_vsync_clk",
+ "measure_only_snoc_clk",
+ "measure_only_cnoc_clk",
+ "measure_only_bimc_clk",
+ "measure_only_ipa_2x_clk",
"gcc_aggre_noc_pcie_tbu_clk",
"gcc_aggre_ufs_card_axi_clk",
"gcc_aggre_ufs_phy_axi_clk",
@@ -444,6 +448,14 @@
0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+ { "measure_only_snoc_clk", 0x7, 4, GCC,
+ 0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "measure_only_cnoc_clk", 0x15, 4, GCC,
+ 0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "measure_only_bimc_clk", 0xc2, 4, GCC,
+ 0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+ { "measure_only_ipa_2x_clk", 0x128, 4, GCC,
+ 0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
{ "gcc_aggre_noc_pcie_tbu_clk", 0x2D, 4, GCC,
0x2D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
{ "gcc_aggre_ufs_card_axi_clk", 0x11E, 4, GCC,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 6b1eca8..3b56fa1 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -106,8 +106,8 @@
static const char * const disp_cc_parent_names_3[] = {
"bi_tcxo",
"disp_cc_pll0",
- "gpll0",
- "gpll0",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
"core_bi_pll_test_se",
};
@@ -478,19 +478,6 @@
},
};
-static struct clk_branch disp_cc_debug_clk = {
- .halt_reg = 0x600c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x600c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "disp_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch disp_cc_mdss_ahb_clk = {
.halt_reg = 0x4004,
.halt_check = BRANCH_HALT,
@@ -546,7 +533,7 @@
"disp_cc_mdss_byte0_clk_src",
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
@@ -599,7 +586,7 @@
"disp_cc_mdss_byte1_clk_src",
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
@@ -949,7 +936,6 @@
};
static struct clk_regmap *disp_cc_sdm845_clocks[] = {
- [DISP_CC_DEBUG_CLK] = &disp_cc_debug_clk.clkr,
[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
[DISP_CC_MDSS_AXI_CLK] = &disp_cc_mdss_axi_clk.clkr,
[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 08dce3f..678dd10 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -36,6 +36,7 @@
#define GCC_APCS_CLOCK_SLEEP_ENA_VOTE_OFFSET 0x52008
#define CPUSS_AHB_CLK_SLEEP_ENA BIT(21)
+#define SYS_NOC_CPUSS_AHB_CLK_SLEEP_ENA BIT(0)
#define GCC_MMSS_MISC 0x09FFC
#define GCC_GPU_MISC 0x71028
@@ -150,6 +151,45 @@
"core_bi_pll_test_se",
};
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo_ao",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static struct clk_dummy measure_only_snoc_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_snoc_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+static struct clk_dummy measure_only_cnoc_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_cnoc_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+static struct clk_dummy measure_only_bimc_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_bimc_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+static struct clk_dummy measure_only_ipa_2x_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_ipa_2x_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
static struct pll_vco fabia_vco[] = {
{ 250000000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
@@ -223,8 +263,6 @@
static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
{ }
};
@@ -236,7 +274,7 @@
.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
- .parent_names = gcc_parent_names_0,
+ .parent_names = gcc_parent_names_7,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
@@ -763,7 +801,10 @@
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
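Each F() row encodes rate = parent / div * m / n, with n == 0 meaning the M/N stage is unused. A quick userspace check of the new rows, assuming the usual sdm845 parent rates of 19.2 MHz for bi_tcxo and 600 MHz for GPLL0:

	#include <stdio.h>

	static unsigned long f_row(unsigned long parent, unsigned long div,
				   unsigned long m, unsigned long n)
	{
		unsigned long rate = parent / div;

		return n ? rate * m / n : rate;
	}

	int main(void)
	{
		printf("%lu\n", f_row(19200000, 12, 1, 4));	/* 400000 */
		printf("%lu\n", f_row(600000000, 12, 1, 2));	/* 25000000 */
		printf("%lu\n", f_row(600000000, 12, 0, 0));	/* 50000000 */
		return 0;
	}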
@@ -791,8 +832,9 @@
};
static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
- F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(400000, P_BI_TCXO, 12, 1, 4),
F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
{ }
@@ -1426,22 +1468,9 @@
},
};
-static struct clk_branch gcc_cxo_tx1_clkref_clk = {
- .halt_reg = 0x8c020,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c020,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cxo_tx1_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_ddrss_gpu_axi_clk = {
.halt_reg = 0x44038,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x44038,
.enable_mask = BIT(0),
@@ -1628,7 +1657,7 @@
static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
.halt_reg = 0x7100c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x7100c,
.enable_mask = BIT(0),
@@ -2051,32 +2080,6 @@
},
};
-static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
- .halt_reg = 0x17014,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x5200c,
- .enable_mask = BIT(9),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_core_2x_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_qupv3_wrap0_core_clk = {
- .halt_reg = 0x1700c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x5200c,
- .enable_mask = BIT(8),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_core_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
.halt_reg = 0x17030,
.halt_check = BRANCH_HALT_VOTED,
@@ -2221,32 +2224,6 @@
},
};
-static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
- .halt_reg = 0x18004,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x5200c,
- .enable_mask = BIT(18),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_core_2x_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_qupv3_wrap1_core_clk = {
- .halt_reg = 0x18008,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x5200c,
- .enable_mask = BIT(19),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_core_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
.halt_reg = 0x18014,
.halt_check = BRANCH_HALT_VOTED,
@@ -2443,45 +2420,6 @@
},
};
-static struct clk_branch gcc_rx1_usb2_clkref_clk = {
- .halt_reg = 0x8c014,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c014,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_rx1_usb2_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_rx2_qlink_clkref_clk = {
- .halt_reg = 0x8c018,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c018,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_rx2_qlink_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_rx3_modem_clkref_clk = {
- .halt_reg = 0x8c01c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8c01c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_rx3_modem_clkref_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_sdcc2_ahb_clk = {
.halt_reg = 0x14008,
.halt_check = BRANCH_HALT,
@@ -3146,6 +3084,13 @@
},
};
+struct clk_hw *gcc_sdm845_hws[] = {
+ [MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw,
+ [MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
+ [MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw,
+ [MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw,
+};
+
static struct clk_regmap *gcc_sdm845_clocks[] = {
[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3167,7 +3112,6 @@
[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
[GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
- [GCC_CXO_TX1_CLKREF_CLK] = &gcc_cxo_tx1_clkref_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
[GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
@@ -3218,8 +3162,6 @@
[GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
[GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
- [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
- [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
@@ -3236,8 +3178,6 @@
[GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
[GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
[GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
- [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
- [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
[GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
[GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
[GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
@@ -3258,9 +3198,6 @@
[GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
[GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
[GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
- [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
- [GCC_RX2_QLINK_CLKREF_CLK] = &gcc_rx2_qlink_clkref_clk.clkr,
- [GCC_RX3_MODEM_CLKREF_CLK] = &gcc_rx3_modem_clkref_clk.clkr,
[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
[GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
@@ -3358,6 +3295,8 @@
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
[GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
};
static const struct regmap_config gcc_sdm845_regmap_config = {
@@ -3384,19 +3323,21 @@
static int gcc_sdm845_probe(struct platform_device *pdev)
{
+ struct clk *clk;
struct regmap *regmap;
- int ret = 0;
+ int i, ret = 0;
regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/*
- * Set the CPUSS_AHB_CLK_SLEEP_ENA bit to allow the cpuss_ahb_clk to be
+ * Set the *_SLEEP_ENA bits to allow certain cpuss* clocks to be
* turned off by hardware during certain apps low power modes.
*/
regmap_update_bits(regmap, GCC_APCS_CLOCK_SLEEP_ENA_VOTE_OFFSET,
- CPUSS_AHB_CLK_SLEEP_ENA, CPUSS_AHB_CLK_SLEEP_ENA);
+ CPUSS_AHB_CLK_SLEEP_ENA | SYS_NOC_CPUSS_AHB_CLK_SLEEP_ENA,
+ CPUSS_AHB_CLK_SLEEP_ENA | SYS_NOC_CPUSS_AHB_CLK_SLEEP_ENA);
vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
if (IS_ERR(vdd_cx.regulator[0])) {
@@ -3414,6 +3355,13 @@
return PTR_ERR(vdd_cx_ao.regulator[0]);
}
+ /* Register the dummy measurement clocks */
+ for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+ clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register GCC clocks\n");
@@ -3424,8 +3372,9 @@
regmap_update_bits(regmap, GCC_MMSS_MISC, 0x3, 0x3);
regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3);
- /* Keep these HMSS clocks enabled always */
+ /* Keep these CPUSS clocks enabled always */
clk_prepare_enable(gcc_cpuss_ahb_clk.clkr.hw.clk);
+ clk_prepare_enable(gcc_sys_noc_cpuss_ahb_clk.clkr.hw.clk);
clk_prepare_enable(gcc_cpuss_dvm_bus_clk.clkr.hw.clk);
clk_prepare_enable(gcc_cpuss_gnoc_clk.clkr.hw.clk);
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index a5a7488..0115bb1 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -80,8 +80,8 @@
"bi_tcxo",
"gpu_cc_pll0",
"gpu_cc_pll1",
- "gpll0",
- "gpll0_out_even",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src",
"core_bi_pll_test_se",
};
@@ -101,7 +101,7 @@
"gpu_cc_pll0_out_odd",
"gpu_cc_pll1_out_even",
"gpu_cc_pll1_out_odd",
- "gpll0",
+ "gcc_gpu_gpll0_clk_src",
"core_bi_pll_test_se",
};
@@ -114,8 +114,8 @@
static const char * const gpu_cc_parent_names_2[] = {
"bi_tcxo",
- "gpll0",
- "gpll0",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src",
"core_bi_pll_test_se",
};
@@ -211,9 +211,9 @@
.cmd_rcgr = 0x101c,
.mnd_width = 0,
.hid_width = 5,
- .enable_safe_config = true,
.parent_map = gpu_cc_parent_map_1,
.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .flags = FORCE_ENABLE_RCG,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpu_cc_gx_gfx3d_clk_src",
.parent_names = gpu_cc_parent_names_1,
@@ -413,19 +413,6 @@
},
};
-static struct clk_branch gpu_cc_debug_clk = {
- .halt_reg = 0x1100,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1100,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gpu_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gpu_cc_gx_cxo_clk = {
.halt_reg = 0x1060,
.halt_check = BRANCH_HALT,
@@ -544,7 +531,6 @@
[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
- [GPU_CC_DEBUG_CLK] = &gpu_cc_debug_clk.clkr,
[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
[GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
index 229780e..7213e37 100644
--- a/drivers/clk/qcom/mdss/Kconfig
+++ b/drivers/clk/qcom/mdss/Kconfig
@@ -1,5 +1,6 @@
-config MSM_MDSS_PLL
+config QCOM_MDSS_PLL
bool "MDSS pll programming"
+ depends on COMMON_CLK_QCOM
---help---
It provides support for DSI, eDP and HDMI interface pll programming on MDSS
hardware. It also handles the pll specific resources and turn them on/off when
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index 64c7609..d183393 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,9 +1,3 @@
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8998.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
new file mode 100644
index 0000000..2cb9d05
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -0,0 +1,1523 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include "mdss-dsi-pll.h"
+#include "mdss-pll.h"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#define VCO_DELAY_USEC 1
+
+#define MHZ_250 250000000UL
+#define MHZ_500 500000000UL
+#define MHZ_1000 1000000000UL
+#define MHZ_1100 1100000000UL
+#define MHZ_1900 1900000000UL
+#define MHZ_3000 3000000000UL
+
+/* Register Offsets from PLL base address */
+#define PLL_ANALOG_CONTROLS_ONE 0x000
+#define PLL_ANALOG_CONTROLS_TWO 0x004
+#define PLL_ANALOG_CONTROLS_THREE 0x010
+#define PLL_DSM_DIVIDER 0x01c
+#define PLL_FEEDBACK_DIVIDER 0x020
+#define PLL_SYSTEM_MUXES 0x024
+#define PLL_CMODE 0x02c
+#define PLL_CALIBRATION_SETTINGS 0x030
+#define PLL_BAND_SEL_CAL_SETTINGS_THREE 0x054
+#define PLL_FREQ_DETECT_SETTINGS_ONE 0x064
+#define PLL_PFILT 0x07c
+#define PLL_IFILT 0x080
+#define PLL_OUTDIV 0x094
+#define PLL_CORE_OVERRIDE 0x0a4
+#define PLL_CORE_INPUT_OVERRIDE 0x0a8
+#define PLL_PLL_DIGITAL_TIMERS_TWO 0x0b4
+#define PLL_DECIMAL_DIV_START_1 0x0cc
+#define PLL_FRAC_DIV_START_LOW_1 0x0d0
+#define PLL_FRAC_DIV_START_MID_1 0x0d4
+#define PLL_FRAC_DIV_START_HIGH_1 0x0d8
+#define PLL_SSC_STEPSIZE_LOW_1 0x10c
+#define PLL_SSC_STEPSIZE_HIGH_1 0x110
+#define PLL_SSC_DIV_PER_LOW_1 0x114
+#define PLL_SSC_DIV_PER_HIGH_1 0x118
+#define PLL_SSC_DIV_ADJPER_LOW_1 0x11c
+#define PLL_SSC_DIV_ADJPER_HIGH_1 0x120
+#define PLL_SSC_CONTROL 0x13c
+#define PLL_PLL_OUTDIV_RATE 0x140
+#define PLL_PLL_LOCKDET_RATE_1 0x144
+#define PLL_PLL_PROP_GAIN_RATE_1 0x14c
+#define PLL_PLL_BAND_SET_RATE_1 0x154
+#define PLL_PLL_INT_GAIN_IFILT_BAND_1 0x15c
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x164
+#define PLL_PLL_LOCK_OVERRIDE 0x180
+#define PLL_PLL_LOCK_DELAY 0x184
+#define PLL_CLOCK_INVERTERS 0x18c
+#define PLL_COMMON_STATUS_ONE 0x1a0
+
+/* Register Offsets from PHY base address */
+#define PHY_CMN_CLK_CFG0 0x010
+#define PHY_CMN_CLK_CFG1 0x014
+#define PHY_CMN_RBUF_CTRL 0x01c
+#define PHY_CMN_PLL_CNTRL 0x038
+#define PHY_CMN_CTRL_0 0x024
+
+/* Bit definition of SSC control registers */
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+#define SSC_FREQ_UPDATE BIT(2)
+#define SSC_FREQ_UPDATE_MUX BIT(3)
+#define SSC_UPDATE_SSC BIT(4)
+#define SSC_UPDATE_SSC_MUX BIT(5)
+#define SSC_START BIT(6)
+#define SSC_START_MUX BIT(7)
+
+enum {
+ DSI_PLL_0,
+ DSI_PLL_1,
+ DSI_PLL_MAX
+};
+
+struct dsi_pll_regs {
+ u32 pll_prop_gain_rate;
+ u32 pll_outdiv_rate;
+ u32 pll_lockdet_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start_low;
+ u32 frac_div_start_mid;
+ u32 frac_div_start_high;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize_low;
+ u32 ssc_stepsize_high;
+ u32 ssc_div_per_low;
+ u32 ssc_div_per_high;
+ u32 ssc_adjper_low;
+ u32 ssc_adjper_high;
+ u32 ssc_control;
+};
+
+struct dsi_pll_config {
+ u32 ref_freq;
+ bool div_override;
+ u32 output_div;
+ bool ignore_frac;
+ bool disable_prescaler;
+ bool enable_ssc;
+ bool ssc_center;
+ u32 dec_bits;
+ u32 frac_bits;
+ u32 lock_timer;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+ u32 thresh_cycles;
+ u32 refclk_cycles;
+};
+
+struct dsi_pll_10nm {
+ struct mdss_pll_resources *rsc;
+ struct dsi_pll_config pll_configuration;
+ struct dsi_pll_regs reg_setup;
+};
+
+static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
+static struct dsi_pll_10nm plls[DSI_PLL_MAX];
+
+static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
+{
+ u32 reg;
+ struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
+
+ if (!rsc)
+ return;
+
+ /* Only DSI PLL0 can act as a master */
+ if (rsc->index != DSI_PLL_0)
+ return;
+
+ /* default configuration: source is either internal or ref clock */
+ rsc->slave = NULL;
+
+ if (!orsc) {
+ pr_warn("slave PLL unavilable, assuming standalone config\n");
+ return;
+ }
+
+ /* check to see if the source of DSI1 PLL bitclk is set to external */
+ reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
+ reg &= (BIT(2) | BIT(3));
+ if (reg == 0x04)
+ rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
+
+ pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
+}
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+
+ config->ref_freq = 19200000;
+ config->output_div = 1;
+ config->dec_bits = 8;
+ config->frac_bits = 18;
+ config->lock_timer = 64;
+ config->ssc_freq = 31500;
+ config->ssc_offset = 5000;
+ config->ssc_adj_per = 2;
+ config->thresh_cycles = 32;
+ config->refclk_cycles = 256;
+
+ config->div_override = false;
+ config->ignore_frac = false;
+ config->disable_prescaler = false;
+ config->enable_ssc = rsc->ssc_en;
+ config->ssc_center = rsc->ssc_center;
+
+ if (config->enable_ssc) {
+ if (rsc->ssc_freq)
+ config->ssc_freq = rsc->ssc_freq;
+ if (rsc->ssc_ppm)
+ config->ssc_offset = rsc->ssc_ppm;
+ }
+
+ dsi_pll_config_slave(rsc);
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u64 target_freq;
+ u64 fref = rsc->vco_ref_clk_rate;
+ u32 computed_output_div, div_log = 0;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+ u32 i;
+
+ target_freq = rsc->vco_current_rate;
+ pr_debug("target_freq = %llu\n", target_freq);
+
+ if (config->div_override) {
+ computed_output_div = config->output_div;
+
+ /*
+ * computed_output_div = 2 ^ div_log
+ * To get div_log from the output divider, find the index of
+ * the set bit in the value. div_log ranges from 0-3, so check
+ * the 4 LSBs.
+ */
+
+ for (i = 0; i < 4; i++) {
+ if (computed_output_div & (1 << i)) {
+ div_log = i;
+ break;
+ }
+ }
+
+ } else {
+ if (target_freq < MHZ_250) {
+ computed_output_div = 8;
+ div_log = 3;
+ } else if (target_freq < MHZ_500) {
+ computed_output_div = 4;
+ div_log = 2;
+ } else if (target_freq < MHZ_1000) {
+ computed_output_div = 2;
+ div_log = 1;
+ } else {
+ computed_output_div = 1;
+ div_log = 0;
+ }
+ }
+ pr_debug("computed_output_div = %d\n", computed_output_div);
+
+ pll_freq = target_freq * computed_output_div;
+
+ if (config->disable_prescaler)
+ divider = fref;
+ else
+ divider = fref * 2;
+
+ multiplier = 1 << config->frac_bits;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ div_u64_rem(dec_multiple, multiplier, &frac);
+
+ dec = div_u64(dec_multiple, multiplier);
+
+ if (pll_freq <= MHZ_1900)
+ regs->pll_prop_gain_rate = 8;
+ else if (pll_freq <= MHZ_3000)
+ regs->pll_prop_gain_rate = 10;
+ else
+ regs->pll_prop_gain_rate = 12;
+ if (pll_freq < MHZ_1100)
+ regs->pll_clock_inverters = 8;
+ else
+ regs->pll_clock_inverters = 0;
+
+ regs->pll_outdiv_rate = div_log;
+ regs->pll_lockdet_rate = config->lock_timer;
+ regs->decimal_div_start = dec;
+ regs->frac_div_start_low = (frac & 0xff);
+ regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+ regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
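A userspace walk-through of the decimal/fractional math above for an assumed 850 MHz VCO target (below 1 GHz, so the output divider is 2), with the 19.2 MHz reference and the prescaler enabled (divider = 2 * fref):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t target = 850000000ULL;		/* < 1 GHz -> outdiv 2 */
		uint64_t outdiv = 2, div_log = 1;
		uint64_t fref = 19200000ULL;
		uint64_t divider = 2 * fref;
		uint64_t multiplier = 1ULL << 18;	/* frac_bits = 18 */
		uint64_t pll_freq = target * outdiv;
		uint64_t dec_multiple = pll_freq * multiplier / divider;
		uint64_t dec = dec_multiple / multiplier;
		uint64_t frac = dec_multiple % multiplier;

		printf("pll_freq=%llu dec=%llu frac=%llu div_log=%llu\n",
		       (unsigned long long)pll_freq, (unsigned long long)dec,
		       (unsigned long long)frac, (unsigned long long)div_log);
		return 0;
	}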
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ pr_debug("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = regs->frac_div_start_low |
+ (regs->frac_div_start_mid << 8) |
+ (regs->frac_div_start_high << 16);
+ ssc_step_size = regs->decimal_div_start;
+ ssc_step_size *= (1 << config->frac_bits);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ regs->ssc_div_per_low = ssc_per & 0xFF;
+ regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+ regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+ regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+ regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+ regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+ regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ regs->decimal_div_start, frac, config->frac_bits);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
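With the defaults set in dsi_pll_setup_config() (ref 19.2 MHz, ssc_freq 31.5 kHz, ssc_adj_per 2), the period math above works out as in this small standalone check:

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	int main(void)
	{
		unsigned int ref_freq = 19200000, ssc_freq = 31500, adj_per = 2;
		unsigned int ssc_per, ssc_mod;

		ssc_per = DIV_ROUND_CLOSEST(ref_freq, ssc_freq) / 2 - 1;
		ssc_mod = (ssc_per + 1) % (adj_per + 1);
		ssc_per -= ssc_mod;	/* align period to the adjust period */

		printf("ssc_per=%u\n", ssc_per);	/* 302 */
		return 0;
	}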
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ void __iomem *pll_base = rsc->pll_base;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ if (pll->pll_configuration.enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
+ regs->ssc_stepsize_low);
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
+ regs->ssc_stepsize_high);
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
+ regs->ssc_div_per_low);
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
+ regs->ssc_div_per_high);
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_LOW_1,
+ regs->ssc_adjper_low);
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_HIGH_1,
+ regs->ssc_adjper_high);
+ MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
+ SSC_EN | regs->ssc_control);
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ void __iomem *pll_base = rsc->pll_base;
+
+ MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x80);
+ MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
+ MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
+ MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
+ MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+ MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
+ MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
+ MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ void __iomem *pll_base = rsc->pll_base;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
+ MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
+ reg->decimal_div_start);
+ MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
+ reg->frac_div_start_low);
+ MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
+ reg->frac_div_start_mid);
+ MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
+ reg->frac_div_start_high);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
+ MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
+ MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+
+}
+
+static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+ struct mdss_pll_resources *rsc = vco->priv;
+ struct dsi_pll_10nm *pll;
+
+ if (!rsc) {
+ pr_err("pll resource not found\n");
+ return -EINVAL;
+ }
+
+ if (rsc->pll_on)
+ return 0;
+
+ pll = rsc->priv;
+ if (!pll) {
+ pr_err("pll configuration not found\n");
+ return -EINVAL;
+ }
+
+ pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+ rsc->vco_current_rate = rate;
+ rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+ rsc->index, rc);
+ return rc;
+ }
+
+ dsi_pll_setup_config(pll, rsc);
+
+ dsi_pll_calc_dec_frac(pll, rsc);
+
+ dsi_pll_calc_ssc(pll, rsc);
+
+ dsi_pll_commit(pll, rsc);
+
+ dsi_pll_config_hzindep_reg(pll, rsc);
+
+ dsi_pll_ssc_commit(pll, rsc);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ mdss_pll_resource_enable(rsc, false);
+
+ return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
+{
+ int rc;
+ u32 status;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->index, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
+{
+ u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+ MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
+{
+ u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+ MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
+{
+ u32 data;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
+{
+ u32 data;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5)));
+}
+
+static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
+{
+ int rc;
+ struct mdss_pll_resources *rsc = vco->priv;
+
+ dsi_pll_enable_pll_bias(rsc);
+ if (rsc->slave)
+ dsi_pll_enable_pll_bias(rsc->slave);
+
+ /* Start PLL */
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_10nm_lock_status(rsc);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", rsc->index);
+ goto error;
+ }
+
+ rsc->pll_on = true;
+
+ dsi_pll_enable_global_clk(rsc);
+ if (rsc->slave)
+ dsi_pll_enable_global_clk(rsc->slave);
+
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+ if (rsc->slave)
+ MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
+{
+ dsi_pll_disable_global_clk(rsc);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(rsc);
+}
+
+static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
+{
+ struct mdss_pll_resources *rsc = vco->priv;
+
+ if (!rsc->pll_on &&
+ mdss_pll_resource_enable(rsc, true)) {
+ pr_err("failed to enable pll (%d) resources\n", rsc->index);
+ return;
+ }
+
+ rsc->handoff_resources = false;
+
+ pr_debug("stop PLL (%d)\n", rsc->index);
+
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(rsc);
+ if (rsc->slave)
+ dsi_pll_disable_sub(rsc->slave);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+ rsc->pll_on = false;
+}
+
+long vco_10nm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long rrate = rate;
+ struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+ if (rate < vco->min_rate)
+ rrate = vco->min_rate;
+ if (rate > vco->max_rate)
+ rrate = vco->max_rate;
+
+ *parent_rate = rrate;
+
+ return rrate;
+}
+
+static void vco_10nm_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (!pll) {
+ pr_err("dsi pll resources not available\n");
+ return;
+ }
+
+ pll->vco_cached_rate = clk_hw_get_rate(hw);
+ dsi_pll_disable(vco);
+ mdss_pll_resource_enable(pll, false);
+}
+
+static int vco_10nm_prepare(struct clk_hw *hw)
+{
+ int rc = 0;
+ struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (!pll) {
+ pr_err("dsi pll resources are not available\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("failed to enable pll (%d) resource, rc=%d\n",
+ pll->index, rc);
+ return rc;
+ }
+
+ if ((pll->vco_cached_rate != 0) &&
+ (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+ rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+ pll->vco_cached_rate);
+ if (rc) {
+ pr_err("pll(%d) set_rate failed, rc=%d\n",
+ pll->index, rc);
+ mdss_pll_resource_enable(pll, false);
+ return rc;
+ }
+ }
+
+ rc = dsi_pll_enable(vco);
+ if (rc) {
+ mdss_pll_resource_enable(pll, false);
+ pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+ struct mdss_pll_resources *pll = vco->priv;
+ int rc;
+ u64 ref_clk = vco->ref_clk_rate;
+ u64 vco_rate;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u32 outdiv;
+ u64 pll_freq, tmp64;
+
+ if (!vco->priv)
+ pr_err("vco priv is null\n");
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("failed to enable pll(%d) resource, rc=%d\n",
+ pll->index, rc);
+ return 0;
+ }
+
+ dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
+ dec &= 0xFF;
+
+ frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
+ 0xFF) <<
+ 8);
+ frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) <<
+ 16);
+
+ /* OUTDIV_1:0 field is (log(outdiv, 2)) */
+ outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+ outdiv &= 0x3;
+ outdiv = 1 << outdiv;
+
+ /*
+ * TODO:
+ * 1. Assumes the prescaler is disabled
+ * 2. Multiplier is 2^18; it should be 2^(num_of_frac_bits)
+ */
+ multiplier = 1 << 18;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = div_u64(pll_freq, outdiv);
+
+ pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
+ dec, frac, outdiv, vco_rate);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return (unsigned long)vco_rate;
+}
+
+static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+ int rc;
+ struct mdss_pll_resources *pll = context;
+ u32 reg_val;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ *div = (reg_val & 0xF0) >> 4;
+
+ /*
+ * In the common clock framework the divider value is interpreted
+ * as one less than the hardware value, hence we return one less
+ * for all dividers except when zero.
+ */
+ if (*div != 0)
+ *div -= 1;
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+ u32 reg_val;
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ reg_val &= ~0xF0;
+ reg_val |= (div << 4);
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
+{
+ int rc;
+ struct mdss_pll_resources *pll = context;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+ /*
+ * In the common clock framework the divider value provided is one
+ * less than the hardware value, hence adjust the divider by one
+ * prior to writing it to hardware.
+ */
+ div++;
+ pixel_clk_set_div_sub(pll, div);
+ if (pll->slave)
+ pixel_clk_set_div_sub(pll->slave, div);
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return 0;
+}
+
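The same one-less convention appears in every divider helper in this file. A standalone sketch of the mapping; note the asymmetry when the hardware field reads zero (read back as 0, but written back as 1):

	#include <stdio.h>

	static unsigned int hw_to_ccf(unsigned int hw)
	{
		return hw ? hw - 1 : 0;
	}

	static unsigned int ccf_to_hw(unsigned int ccf)
	{
		return ccf + 1;
	}

	int main(void)
	{
		unsigned int hw;

		for (hw = 0; hw <= 3; hw++)
			printf("hw=%u -> ccf=%u -> hw=%u\n",
			       hw, hw_to_ccf(hw), ccf_to_hw(hw_to_ccf(hw)));
		return 0;
	}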
+static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+ int rc;
+ struct mdss_pll_resources *pll = context;
+ u32 reg_val;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ *div = (reg_val & 0x0F);
+
+ /*
+ * In the common clock framework the divider value is interpreted
+ * as one less than the hardware value, hence we return one less
+ * for all dividers except when zero.
+ */
+ if (*div != 0)
+ *div -= 1;
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
+{
+ u32 reg_val;
+
+ reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+ reg_val &= ~0x0F;
+ reg_val |= div;
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
+{
+ int rc;
+ struct mdss_pll_resources *rsc = context;
+ struct dsi_pll_8998 *pll;
+
+ if (!rsc) {
+ pr_err("pll resource not found\n");
+ return -EINVAL;
+ }
+
+ pll = rsc->priv;
+ if (!pll) {
+ pr_err("pll configuration not found\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+	/*
+	 * The common clock framework provides the divider value as one
+	 * less than the actual divider, so add one before writing it to
+	 * hardware.
+	 */
+ div++;
+
+ bit_clk_set_div_sub(rsc, div);
+ /* For slave PLL, this divider always should be set to 1 */
+ if (rsc->slave)
+ bit_clk_set_div_sub(rsc->slave, 1);
+
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
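+/*
+ * PHY_CMN_CLK_CFG1[1:0] selects the pclk source divider. Reading the
+ * get/set pairs below together, the encoding is: 0 -> post_bit div-1,
+ * 1 -> post_bit div-2, 2 -> post_vco div-1, 3 -> post_vco div-4.
+ */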
+static int post_vco_clk_get_div(void *context, unsigned int reg,
+ unsigned int *div)
+{
+ int rc;
+ struct mdss_pll_resources *pll = context;
+ u32 reg_val;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= 0x3;
+
+ if (reg_val == 2)
+ *div = 1;
+ else if (reg_val == 3)
+ *div = 4;
+ else
+ *div = 1;
+
+	/*
+	 * The common clock framework interprets the divider value as one
+	 * less than the actual divider, so return one less for all
+	 * non-zero dividers.
+	 */
+ if (*div != 0)
+ *div -= 1;
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+ u32 reg_val;
+ int rc = 0;
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= ~0x03;
+ if (div == 1) {
+ reg_val |= 0x2;
+ } else if (div == 4) {
+ reg_val |= 0x3;
+ } else {
+ rc = -EINVAL;
+ pr_err("unsupported divider %d\n", div);
+ goto error;
+ }
+
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
+
+error:
+ return rc;
+}
+
+static int post_vco_clk_set_div(void *context, unsigned int reg,
+ unsigned int div)
+{
+ int rc = 0;
+ struct mdss_pll_resources *pll = context;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+	/*
+	 * The common clock framework provides the divider value as one
+	 * less than the actual divider, so add one before writing it to
+	 * hardware.
+	 */
+ div++;
+ rc = post_vco_clk_set_div_sub(pll, div);
+ if (!rc && pll->slave)
+ rc = post_vco_clk_set_div_sub(pll->slave, div);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static int post_bit_clk_get_div(void *context, unsigned int reg,
+ unsigned int *div)
+{
+ int rc;
+ struct mdss_pll_resources *pll = context;
+ u32 reg_val;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= 0x3;
+
+ if (reg_val == 0)
+ *div = 1;
+ else if (reg_val == 1)
+ *div = 2;
+ else
+ *div = 1;
+
+	/*
+	 * The common clock framework interprets the divider value as one
+	 * less than the actual divider, so return one less for all
+	 * non-zero dividers.
+	 */
+ if (*div != 0)
+ *div -= 1;
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+ int rc = 0;
+ u32 reg_val;
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= ~0x03;
+ if (div == 1) {
+ reg_val |= 0x0;
+ } else if (div == 2) {
+ reg_val |= 0x1;
+ } else {
+ rc = -EINVAL;
+ pr_err("unsupported divider %d\n", div);
+ goto error;
+ }
+
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
+
+error:
+ return rc;
+}
+
+static int post_bit_clk_set_div(void *context, unsigned int reg,
+ unsigned int div)
+{
+ int rc = 0;
+ struct mdss_pll_resources *pll = context;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+	/*
+	 * The common clock framework provides the divider value as one
+	 * less than the actual divider, so add one before writing it to
+	 * hardware.
+	 */
+ div++;
+ rc = post_bit_clk_set_div_sub(pll, div);
+ if (!rc && pll->slave)
+ rc = post_bit_clk_set_div_sub(pll->slave, div);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+static struct regmap_config dsi_pll_10nm_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7c0,
+};
+
+static struct regmap_bus post_vco_regmap_bus = {
+ .reg_write = post_vco_clk_set_div,
+ .reg_read = post_vco_clk_get_div,
+};
+
+static struct regmap_bus post_bit_regmap_bus = {
+ .reg_write = post_bit_clk_set_div,
+ .reg_read = post_bit_clk_get_div,
+};
+
+static struct regmap_bus pclk_src_regmap_bus = {
+ .reg_write = pixel_clk_set_div,
+ .reg_read = pixel_clk_get_div,
+};
+
+static struct regmap_bus bitclk_src_regmap_bus = {
+ .reg_write = bit_clk_set_div,
+ .reg_read = bit_clk_get_div,
+};
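+/*
+ * Note: these regmap buses do not describe a real bus. The
+ * reg_read/reg_write hooks are repurposed as divider/mux get and set
+ * callbacks so the generic clk_regmap_div and clk_regmap_mux ops can
+ * drive the PHY register fields handled above.
+ */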
+
+static const struct clk_ops clk_ops_vco_10nm = {
+ .recalc_rate = vco_10nm_recalc_rate,
+ .set_rate = vco_10nm_set_rate,
+ .round_rate = vco_10nm_round_rate,
+ .prepare = vco_10nm_prepare,
+ .unprepare = vco_10nm_unprepare,
+};
+
+static struct regmap_bus mdss_mux_regmap_bus = {
+ .reg_write = mdss_set_mux_sel,
+ .reg_read = mdss_get_mux_sel,
+};
+
+/*
+ * Clock tree for generating DSI byte and pixel clocks.
+ *
+ *
+ * +---------------+
+ * | vco_clk |
+ * +-------+-------+
+ * |
+ * +--------------------------------------+
+ * | |
+ * +-------v-------+ |
+ * | bitclk_src | |
+ * | DIV(1..15) | |
+ * +-------+-------+ |
+ * | |
+ * +--------------------+ |
+ * Shadow Path | | |
+ * + +-------v-------+ +------v------+ +------v-------+
+ * | | byteclk_src | |post_bit_div | |post_vco_div |
+ * | | DIV(8) | |DIV(1,2) | |DIV(1,4) |
+ * | +-------+-------+ +------+------+ +------+-------+
+ * | | | |
+ * | | +------+ +----+
+ * | +--------+ | |
+ * | | +----v-----v------+
+ * +-v---------v----+ \ pclk_src_mux /
+ * \ byteclk_mux / \ /
+ * \ / +-----+-----+
+ * +----+-----+ | Shadow Path
+ * | | +
+ * v +-----v------+ |
+ * dsi_byte_clk | pclk_src | |
+ * | DIV(1..15) | |
+ * +-----+------+ |
+ * | |
+ * | |
+ * +--------+ |
+ * | |
+ * +---v----v----+
+ * \ pclk_mux /
+ * \ /
+ * +---+---+
+ * |
+ * |
+ * v
+ * dsi_pclk
+ *
+ */
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1000000000UL,
+ .max_rate = 3500000000UL,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_vco_clk",
+ .parent_names = (const char *[]){"xo_board"},
+ .num_parents = 1,
+ .ops = &clk_ops_vco_10nm,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1000000000UL,
+ .max_rate = 3500000000UL,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_vco_clk",
+ .parent_names = (const char *[]){"xo_board"},
+ .num_parents = 1,
+ .ops = &clk_ops_vco_10nm,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap_div dsi0pll_bitclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_bitclk_src",
+ .parent_names = (const char *[]){"dsi0pll_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_bitclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_bitclk_src",
+ .parent_names = (const char *[]){"dsi1pll_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi0pll_post_vco_div = {
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_vco_div",
+ .parent_names = (const char *[]){"dsi0pll_vco_clk"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_post_vco_div = {
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_vco_div",
+ .parent_names = (const char *[]){"dsi1pll_vco_clk"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
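+/*
+ * The DSI byte clock is a fixed divide-by-8 of the bit clock (one byte
+ * per eight serial bits), so it is modelled as a fixed-factor clock.
+ */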
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+ .div = 8,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_byteclk_src",
+ .parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+ .div = 8,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_byteclk_src",
+ .parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_regmap_div dsi0pll_post_bit_div = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_bit_div",
+ .parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_post_bit_div = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_bit_div",
+ .parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_mux = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0_phy_pll_out_byteclk",
+ .parent_names = (const char *[]){"dsi0pll_byteclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_mux = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_phy_pll_out_byteclk",
+ .parent_names = (const char *[]){"dsi1pll_byteclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_pclk_src_mux",
+ .parent_names = (const char *[]){"dsi0pll_post_bit_div",
+ "dsi0pll_post_vco_div"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_pclk_src_mux",
+ .parent_names = (const char *[]){"dsi1pll_post_bit_div",
+ "dsi1pll_post_vco_div"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_pclk_src",
+ .parent_names = (const char *[]){
+ "dsi0pll_pclk_src_mux"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_pclk_src",
+ .parent_names = (const char *[]){
+ "dsi1pll_pclk_src_mux"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_mux = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0_phy_pll_out_dsiclk",
+ .parent_names = (const char *[]){"dsi0pll_pclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_mux = {
+ .shift = 0,
+ .width = 1,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1_phy_pll_out_dsiclk",
+ .parent_names = (const char *[]){"dsi1pll_pclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
+ [VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+ [BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
+ [BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+ [POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
+ [POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+ [BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
+ [PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
+ [PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+ [PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+ [VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+ [BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
+ [BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+ [POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
+ [POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+ [BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
+ [PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
+ [PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+ [PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+};
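+/*
+ * Illustrative (hypothetical) consumer binding, assuming the one-cell
+ * clock specifier exported via of_clk_src_onecell_get below:
+ *
+ *	clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ *		 <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+ */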
+
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = 0, ndx, i;
+ struct clk *clk;
+ struct clk_onecell_data *clk_data;
+ int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_10nm);
+ struct regmap *rmap;
+
+ if (!pdev || !pdev->dev.of_node ||
+ !pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ ndx = pll_res->index;
+
+ if (ndx >= DSI_PLL_MAX) {
+ pr_err("pll index(%d) NOT supported\n", ndx);
+ return -EINVAL;
+ }
+
+ pll_rsc_db[ndx] = pll_res;
+ plls[ndx].rsc = pll_res;
+ pll_res->priv = &plls[ndx];
+ pll_res->vco_delay = VCO_DELAY_USEC;
+
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+ sizeof(struct clk *)), GFP_KERNEL);
+ if (!clk_data->clks) {
+ devm_kfree(&pdev->dev, clk_data);
+ return -ENOMEM;
+ }
+ clk_data->clk_num = num_clks;
+
+ /* Establish client data */
+ if (ndx == 0) {
+ rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_post_vco_div.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_post_bit_div.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_bitclk_src.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_pclk_src.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_pclk_mux.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi0pll_byteclk_mux.clkr.regmap = rmap;
+
+ dsi0pll_vco_clk.priv = pll_res;
+ for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+ clk = devm_clk_register(&pdev->dev,
+ mdss_dsi_pllcc_10nm[i]);
+ if (IS_ERR(clk)) {
+ pr_err("clk registration failed for DSI clock:%d\n",
+ pll_res->index);
+ rc = -EINVAL;
+ goto clk_register_fail;
+ }
+ clk_data->clks[i] = clk;
+ }
+
+ rc = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, clk_data);
+
+ } else {
+ rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_post_vco_div.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_post_bit_div.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_bitclk_src.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_pclk_src.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_pclk_mux.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+
+ rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ pll_res, &dsi_pll_10nm_config);
+ dsi1pll_byteclk_mux.clkr.regmap = rmap;
+ dsi1pll_vco_clk.priv = pll_res;
+
+ for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+ clk = devm_clk_register(&pdev->dev,
+ mdss_dsi_pllcc_10nm[i]);
+ if (IS_ERR(clk)) {
+ pr_err("clk registration failed for DSI clock:%d\n",
+ pll_res->index);
+ rc = -EINVAL;
+ goto clk_register_fail;
+ }
+ clk_data->clks[i] = clk;
+ }
+
+ rc = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, clk_data);
+ }
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+			ndx);
+		return rc;
+	}
+clk_register_fail:
+ devm_kfree(&pdev->dev, clk_data->clks);
+ devm_kfree(&pdev->dev, clk_data);
+ return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
deleted file mode 100644
index 8c6bc2c..0000000
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
+++ /dev/null
@@ -1,1414 +0,0 @@
-/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
-#include "mdss-dsi-pll.h"
-#include "mdss-pll.h"
-
-#define VCO_DELAY_USEC 1
-
-#define MHZ_375 375000000UL
-#define MHZ_750 750000000UL
-#define MHZ_1500 1500000000UL
-#define MHZ_1900 1900000000UL
-#define MHZ_3000 3000000000UL
-
-/* Register Offsets from PLL base address */
-#define PLL_ANALOG_CONTROLS_ONE 0x000
-#define PLL_ANALOG_CONTROLS_TWO 0x004
-#define PLL_ANALOG_CONTROLS_THREE 0x010
-#define PLL_DSM_DIVIDER 0x01c
-#define PLL_FEEDBACK_DIVIDER 0x020
-#define PLL_SYSTEM_MUXES 0x024
-#define PLL_CMODE 0x02c
-#define PLL_CALIBRATION_SETTINGS 0x030
-#define PLL_BAND_SEL_CAL_SETTINGS_THREE 0x054
-#define PLL_FREQ_DETECT_SETTINGS_ONE 0x064
-#define PLL_OUTDIV 0x094
-#define PLL_CORE_OVERRIDE 0x0a4
-#define PLL_CORE_INPUT_OVERRIDE 0x0a8
-#define PLL_PLL_DIGITAL_TIMERS_TWO 0x0b4
-#define PLL_DECIMAL_DIV_START_1 0x0cc
-#define PLL_FRAC_DIV_START_LOW_1 0x0d0
-#define PLL_FRAC_DIV_START_MID_1 0x0d4
-#define PLL_FRAC_DIV_START_HIGH_1 0x0d8
-#define PLL_SSC_STEPSIZE_LOW_1 0x10c
-#define PLL_SSC_STEPSIZE_HIGH_1 0x110
-#define PLL_SSC_DIV_PER_LOW_1 0x114
-#define PLL_SSC_DIV_PER_HIGH_1 0x118
-#define PLL_SSC_DIV_ADJPER_LOW_1 0x11c
-#define PLL_SSC_DIV_ADJPER_HIGH_1 0x120
-#define PLL_SSC_CONTROL 0x13c
-#define PLL_PLL_OUTDIV_RATE 0x140
-#define PLL_PLL_LOCKDET_RATE_1 0x144
-#define PLL_PLL_PROP_GAIN_RATE_1 0x14c
-#define PLL_PLL_BAND_SET_RATE_1 0x154
-#define PLL_PLL_INT_GAIN_IFILT_BAND_1 0x15c
-#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x164
-#define PLL_PLL_LOCK_OVERRIDE 0x180
-#define PLL_PLL_LOCK_DELAY 0x184
-#define PLL_COMMON_STATUS_ONE 0x1a0
-
-/* Register Offsets from PHY base address */
-#define PHY_CMN_CLK_CFG0 0x010
-#define PHY_CMN_CLK_CFG1 0x014
-#define PHY_CMN_RBUF_CTRL 0x01c
-#define PHY_CMN_PLL_CNTRL 0x038
-#define PHY_CMN_CTRL_0 0x024
-
-/* Bit definition of SSC control registers */
-#define SSC_CENTER BIT(0)
-#define SSC_EN BIT(1)
-#define SSC_FREQ_UPDATE BIT(2)
-#define SSC_FREQ_UPDATE_MUX BIT(3)
-#define SSC_UPDATE_SSC BIT(4)
-#define SSC_UPDATE_SSC_MUX BIT(5)
-#define SSC_START BIT(6)
-#define SSC_START_MUX BIT(7)
-
-enum {
- DSI_PLL_0,
- DSI_PLL_1,
- DSI_PLL_MAX
-};
-
-struct dsi_pll_regs {
- u32 pll_prop_gain_rate;
- u32 pll_outdiv_rate;
- u32 pll_lockdet_rate;
- u32 decimal_div_start;
- u32 frac_div_start_low;
- u32 frac_div_start_mid;
- u32 frac_div_start_high;
- u32 ssc_stepsize_low;
- u32 ssc_stepsize_high;
- u32 ssc_div_per_low;
- u32 ssc_div_per_high;
- u32 ssc_adjper_low;
- u32 ssc_adjper_high;
- u32 ssc_control;
-};
-
-struct dsi_pll_config {
- u32 ref_freq;
- bool div_override;
- u32 output_div;
- bool ignore_frac;
- bool disable_prescaler;
- bool enable_ssc;
- bool ssc_center;
- u32 dec_bits;
- u32 frac_bits;
- u32 lock_timer;
- u32 ssc_freq;
- u32 ssc_offset;
- u32 ssc_adj_per;
- u32 thresh_cycles;
- u32 refclk_cycles;
-};
-
-struct dsi_pll_8998 {
- struct mdss_pll_resources *rsc;
- struct dsi_pll_config pll_configuration;
- struct dsi_pll_regs reg_setup;
-};
-
-static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
-static struct dsi_pll_8998 plls[DSI_PLL_MAX];
-
-static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
-{
- u32 reg;
- struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
-
- if (!rsc)
- return;
-
- /* Only DSI PLL0 can act as a master */
- if (rsc->index != DSI_PLL_0)
- return;
-
- /* default configuration: source is either internal or ref clock */
- rsc->slave = NULL;
-
- if (!orsc) {
- pr_warn("slave PLL unavilable, assuming standalone config\n");
- return;
- }
-
- /* check to see if the source of DSI1 PLL bitclk is set to external */
- reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
- reg &= (BIT(2) | BIT(3));
- if (reg == 0x04)
- rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
-
- pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
-}
-
-static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
- struct mdss_pll_resources *rsc)
-{
- struct dsi_pll_config *config = &pll->pll_configuration;
-
- config->ref_freq = 19200000;
- config->output_div = 1;
- config->dec_bits = 8;
- config->frac_bits = 18;
- config->lock_timer = 64;
- config->ssc_freq = 31500;
- config->ssc_offset = 5000;
- config->ssc_adj_per = 2;
- config->thresh_cycles = 32;
- config->refclk_cycles = 256;
-
- config->div_override = false;
- config->ignore_frac = false;
- config->disable_prescaler = false;
- config->enable_ssc = rsc->ssc_en;
- config->ssc_center = rsc->ssc_center;
-
- if (config->enable_ssc) {
- if (rsc->ssc_freq)
- config->ssc_freq = rsc->ssc_freq;
- if (rsc->ssc_ppm)
- config->ssc_offset = rsc->ssc_ppm;
- }
-
- dsi_pll_config_slave(rsc);
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
- struct mdss_pll_resources *rsc)
-{
- struct dsi_pll_config *config = &pll->pll_configuration;
- struct dsi_pll_regs *regs = &pll->reg_setup;
- u64 target_freq;
- u64 fref = rsc->vco_ref_clk_rate;
- u32 computed_output_div, div_log;
- u64 pll_freq;
- u64 divider;
- u64 dec, dec_multiple;
- u32 frac;
- u64 multiplier;
-
- target_freq = rsc->vco_current_rate;
- pr_debug("target_freq = %llu\n", target_freq);
-
- if (config->div_override) {
- computed_output_div = config->output_div;
- } else {
- if (target_freq < MHZ_375) {
- computed_output_div = 8;
- div_log = 3;
- } else if (target_freq < MHZ_750) {
- computed_output_div = 4;
- div_log = 2;
- } else if (target_freq < MHZ_1500) {
- computed_output_div = 2;
- div_log = 1;
- } else {
- computed_output_div = 1;
- div_log = 0;
- }
- }
- pr_debug("computed_output_div = %d\n", computed_output_div);
-
- pll_freq = target_freq * computed_output_div;
-
- if (config->disable_prescaler)
- divider = fref;
- else
- divider = fref * 2;
-
- multiplier = 1 << config->frac_bits;
- dec_multiple = div_u64(pll_freq * multiplier, divider);
- div_u64_rem(dec_multiple, multiplier, &frac);
-
- dec = div_u64(dec_multiple, multiplier);
-
- if (pll_freq <= MHZ_1900)
- regs->pll_prop_gain_rate = 8;
- else if (pll_freq <= MHZ_3000)
- regs->pll_prop_gain_rate = 10;
- else
- regs->pll_prop_gain_rate = 12;
-
- regs->pll_outdiv_rate = div_log;
- regs->pll_lockdet_rate = config->lock_timer;
- regs->decimal_div_start = dec;
- regs->frac_div_start_low = (frac & 0xff);
- regs->frac_div_start_mid = (frac & 0xff00) >> 8;
- regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
-
-static void dsi_pll_calc_ssc(struct dsi_pll_8998 *pll,
- struct mdss_pll_resources *rsc)
-{
- struct dsi_pll_config *config = &pll->pll_configuration;
- struct dsi_pll_regs *regs = &pll->reg_setup;
- u32 ssc_per;
- u32 ssc_mod;
- u64 ssc_step_size;
- u64 frac;
-
- if (!config->enable_ssc) {
- pr_debug("SSC not enabled\n");
- return;
- }
-
- ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
- ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
- ssc_per -= ssc_mod;
-
- frac = regs->frac_div_start_low |
- (regs->frac_div_start_mid << 8) |
- (regs->frac_div_start_high << 16);
- ssc_step_size = regs->decimal_div_start;
- ssc_step_size *= (1 << config->frac_bits);
- ssc_step_size += frac;
- ssc_step_size *= config->ssc_offset;
- ssc_step_size *= (config->ssc_adj_per + 1);
- ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
- ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
- regs->ssc_div_per_low = ssc_per & 0xFF;
- regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
- regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
- regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
- regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
- regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
- regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
- pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
- regs->decimal_div_start, frac, config->frac_bits);
- pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
- ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
-
-static void dsi_pll_ssc_commit(struct dsi_pll_8998 *pll,
- struct mdss_pll_resources *rsc)
-{
- void __iomem *pll_base = rsc->pll_base;
- struct dsi_pll_regs *regs = &pll->reg_setup;
-
- if (pll->pll_configuration.enable_ssc) {
- pr_debug("SSC is enabled\n");
-
- MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
- regs->ssc_stepsize_low);
- MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
- regs->ssc_stepsize_high);
- MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
- regs->ssc_div_per_low);
- MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
- regs->ssc_div_per_high);
- MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_LOW_1,
- regs->ssc_adjper_low);
- MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_HIGH_1,
- regs->ssc_adjper_high);
- MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
- SSC_EN | regs->ssc_control);
- }
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll,
- struct mdss_pll_resources *rsc)
-{
- void __iomem *pll_base = rsc->pll_base;
-
- MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x80);
- MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
- MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
- MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
- MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
- MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x00);
- MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
- MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
- MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
- MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
- MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
-}
-
-static void dsi_pll_commit(struct dsi_pll_8998 *pll,
- struct mdss_pll_resources *rsc)
-{
- void __iomem *pll_base = rsc->pll_base;
- struct dsi_pll_regs *reg = &pll->reg_setup;
-
- MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
- MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
- reg->decimal_div_start);
- MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
- reg->frac_div_start_low);
- MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
- reg->frac_div_start_mid);
- MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
- reg->frac_div_start_high);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0xc8);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x0a);
-
-}
-
-static int vco_8998_set_rate(struct clk *c, unsigned long rate)
-{
- int rc;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *rsc = vco->priv;
- struct dsi_pll_8998 *pll;
-
- if (!rsc) {
- pr_err("pll resource not found\n");
- return -EINVAL;
- }
-
- if (rsc->pll_on)
- return 0;
-
- pll = rsc->priv;
- if (!pll) {
- pr_err("pll configuration not found\n");
- return -EINVAL;
- }
-
- pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
-
- rsc->vco_current_rate = rate;
- rsc->vco_ref_clk_rate = vco->ref_clk_rate;
-
- rc = mdss_pll_resource_enable(rsc, true);
- if (rc) {
- pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
- rsc->index, rc);
- return rc;
- }
-
- dsi_pll_setup_config(pll, rsc);
-
- dsi_pll_calc_dec_frac(pll, rsc);
-
- dsi_pll_calc_ssc(pll, rsc);
-
- dsi_pll_commit(pll, rsc);
-
- dsi_pll_config_hzindep_reg(pll, rsc);
-
- dsi_pll_ssc_commit(pll, rsc);
-
- /* flush, ensure all register writes are done*/
- wmb();
-
- mdss_pll_resource_enable(rsc, false);
-
- return 0;
-}
-
-static int dsi_pll_8998_lock_status(struct mdss_pll_resources *pll)
-{
- int rc;
- u32 status;
- u32 const delay_us = 100;
- u32 const timeout_us = 5000;
-
- rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
- status,
- ((status & BIT(0)) > 0),
- delay_us,
- timeout_us);
- if (rc)
- pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
- pll->index, status);
-
- return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
-{
- u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
- MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
- ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
-{
- u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
-
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
- MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
- ndelay(250);
-}
-
-static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
-{
- int rc;
- struct mdss_pll_resources *rsc = vco->priv;
-
- dsi_pll_enable_pll_bias(rsc);
- if (rsc->slave)
- dsi_pll_enable_pll_bias(rsc->slave);
-
- /* Start PLL */
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
-
- /*
- * ensure all PLL configurations are written prior to checking
- * for PLL lock.
- */
- wmb();
-
- /* Check for PLL lock */
- rc = dsi_pll_8998_lock_status(rsc);
- if (rc) {
- pr_err("PLL(%d) lock failed\n", rsc->index);
- goto error;
- }
-
- rsc->pll_on = true;
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
- if (rsc->slave)
- MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
-
-error:
- return rc;
-}
-
-static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
-{
- dsi_pll_disable_pll_bias(rsc);
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
-}
-
-static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
-{
- struct mdss_pll_resources *rsc = vco->priv;
-
- if (!rsc->pll_on &&
- mdss_pll_resource_enable(rsc, true)) {
- pr_err("failed to enable pll (%d) resources\n", rsc->index);
- return;
- }
-
- rsc->handoff_resources = false;
-
- pr_debug("stop PLL (%d)\n", rsc->index);
-
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
- dsi_pll_disable_sub(rsc);
- if (rsc->slave)
- dsi_pll_disable_sub(rsc->slave);
-
- /* flush, ensure all register writes are done*/
- wmb();
- rsc->pll_on = false;
-}
-
-static void vco_8998_unprepare(struct clk *c)
-{
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *pll = vco->priv;
-
- if (!pll) {
- pr_err("dsi pll resources not available\n");
- return;
- }
-
- pll->vco_cached_rate = c->rate;
- dsi_pll_disable(vco);
- mdss_pll_resource_enable(pll, false);
-}
-
-static int vco_8998_prepare(struct clk *c)
-{
- int rc = 0;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *pll = vco->priv;
-
- if (!pll) {
- pr_err("dsi pll resources are not available\n");
- return -EINVAL;
- }
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("failed to enable pll (%d) resource, rc=%d\n",
- pll->index, rc);
- return rc;
- }
-
- if ((pll->vco_cached_rate != 0) &&
- (pll->vco_cached_rate == c->rate)) {
- rc = c->ops->set_rate(c, pll->vco_cached_rate);
- if (rc) {
- pr_err("pll(%d) set_rate failed, rc=%d\n",
- pll->index, rc);
- mdss_pll_resource_enable(pll, false);
- return rc;
- }
- }
-
- rc = dsi_pll_enable(vco);
- if (rc) {
- mdss_pll_resource_enable(pll, false);
- pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
- return rc;
- }
-
- return rc;
-}
-
-static unsigned long dsi_pll_get_vco_rate(struct clk *c)
-{
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *pll = vco->priv;
- int rc;
- u64 ref_clk = vco->ref_clk_rate;
- u64 vco_rate;
- u64 multiplier;
- u32 frac;
- u32 dec;
- u32 outdiv;
- u64 pll_freq, tmp64;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("failed to enable pll(%d) resource, rc=%d\n",
- pll->index, rc);
- return 0;
- }
-
- dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
- dec &= 0xFF;
-
- frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
- frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
- 0xFF) <<
- 8);
- frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
- 0x3) <<
- 16);
-
- /* OUTDIV_1:0 field is (log(outdiv, 2)) */
- outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
- outdiv &= 0x3;
- outdiv = 1 << outdiv;
-
- /*
- * TODO:
- * 1. Assumes prescaler is disabled
- * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
- **/
- multiplier = 1 << 18;
- pll_freq = dec * (ref_clk * 2);
- tmp64 = (ref_clk * 2 * frac);
- pll_freq += div_u64(tmp64, multiplier);
-
- vco_rate = div_u64(pll_freq, outdiv);
-
- pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
- dec, frac, outdiv, vco_rate);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return (unsigned long)vco_rate;
-}
-
-enum handoff vco_8998_handoff(struct clk *c)
-{
- enum handoff ret = HANDOFF_DISABLED_CLK;
- int rc;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
- struct mdss_pll_resources *pll = vco->priv;
- u32 status;
-
- if (!pll) {
- pr_err("Unable to find pll resource\n");
- return HANDOFF_DISABLED_CLK;
- }
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("failed to enable pll(%d) resources, rc=%d\n",
- pll->index, rc);
- return ret;
- }
-
- status = MDSS_PLL_REG_R(pll->pll_base, PLL_COMMON_STATUS_ONE);
- if (status & BIT(0)) {
- pll->handoff_resources = true;
- pll->pll_on = true;
- c->rate = dsi_pll_get_vco_rate(c);
- ret = HANDOFF_ENABLED_CLK;
- } else {
- (void)mdss_pll_resource_enable(pll, false);
- ret = HANDOFF_DISABLED_CLK;
- }
-
- return ret;
-}
-
-static int pixel_clk_get_div(struct div_clk *clk)
-{
- int rc;
- struct mdss_pll_resources *pll = clk->priv;
- u32 reg_val;
- int div;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
- div = (reg_val & 0xF0) >> 4;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return div;
-}
-
-static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- u32 reg_val;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
- reg_val &= ~0xF0;
- reg_val |= (div << 4);
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
-}
-
-static int pixel_clk_set_div(struct div_clk *clk, int div)
-{
- int rc;
- struct mdss_pll_resources *pll = clk->priv;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- pixel_clk_set_div_sub(pll, div);
- if (pll->slave)
- pixel_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return 0;
-}
-
-static int bit_clk_get_div(struct div_clk *clk)
-{
- int rc;
- struct mdss_pll_resources *pll = clk->priv;
- u32 reg_val;
- int div;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
- div = (reg_val & 0x0F);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return div;
-}
-
-static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
-{
- u32 reg_val;
-
- reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
- reg_val &= ~0x0F;
- reg_val |= div;
- MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
-}
-
-static int bit_clk_set_div(struct div_clk *clk, int div)
-{
- int rc;
- struct mdss_pll_resources *rsc = clk->priv;
- struct dsi_pll_8998 *pll;
-
- if (!rsc) {
- pr_err("pll resource not found\n");
- return -EINVAL;
- }
-
- pll = rsc->priv;
- if (!pll) {
- pr_err("pll configuration not found\n");
- return -EINVAL;
- }
-
- rc = mdss_pll_resource_enable(rsc, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- bit_clk_set_div_sub(rsc, div);
- /* For slave PLL, this divider always should be set to 1 */
- if (rsc->slave)
- bit_clk_set_div_sub(rsc->slave, 1);
-
- (void)mdss_pll_resource_enable(rsc, false);
-
- return rc;
-}
-
-static int post_vco_clk_get_div(struct div_clk *clk)
-{
- int rc;
- struct mdss_pll_resources *pll = clk->priv;
- u32 reg_val;
- int div;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= 0x3;
-
- if (reg_val == 2)
- div = 1;
- else if (reg_val == 3)
- div = 4;
- else
- div = 1;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return div;
-}
-
-static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- u32 reg_val;
- int rc = 0;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= ~0x03;
- if (div == 1) {
- reg_val |= 0x2;
- } else if (div == 4) {
- reg_val |= 0x3;
- } else {
- rc = -EINVAL;
- pr_err("unsupported divider %d\n", div);
- goto error;
- }
-
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
- return rc;
-}
-
-static int post_vco_clk_set_div(struct div_clk *clk, int div)
-{
- int rc = 0;
- struct mdss_pll_resources *pll = clk->priv;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- rc = post_vco_clk_set_div_sub(pll, div);
- if (!rc && pll->slave)
- rc = post_vco_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_bit_clk_get_div(struct div_clk *clk)
-{
- int rc;
- struct mdss_pll_resources *pll = clk->priv;
- u32 reg_val;
- int div;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= 0x3;
-
- if (reg_val == 0)
- div = 1;
- else if (reg_val == 1)
- div = 2;
- else
- div = 1;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return div;
-}
-
-static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- int rc = 0;
- u32 reg_val;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= ~0x03;
- if (div == 1) {
- reg_val |= 0x0;
- } else if (div == 2) {
- reg_val |= 0x1;
- } else {
- rc = -EINVAL;
- pr_err("unsupported divider %d\n", div);
- goto error;
- }
-
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
- return rc;
-}
-
-static int post_bit_clk_set_div(struct div_clk *clk, int div)
-{
- int rc = 0;
- struct mdss_pll_resources *pll = clk->priv;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- rc = post_bit_clk_set_div_sub(pll, div);
- if (!rc && pll->slave)
- rc = post_bit_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-long vco_8998_round_rate(struct clk *c, unsigned long rate)
-{
- unsigned long rrate = rate;
- struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
- if (rate < vco->min_rate)
- rrate = vco->min_rate;
- if (rate > vco->max_rate)
- rrate = vco->max_rate;
-
- return rrate;
-}
-
-/* clk ops that require runtime fixup */
-static const struct clk_ops clk_ops_gen_mux_dsi;
-static const struct clk_ops clk_ops_bitclk_src_c;
-static const struct clk_ops clk_ops_post_vco_div_c;
-static const struct clk_ops clk_ops_post_bit_div_c;
-static const struct clk_ops clk_ops_pclk_src_c;
-
-static struct clk_div_ops clk_post_vco_div_ops = {
- .set_div = post_vco_clk_set_div,
- .get_div = post_vco_clk_get_div,
-};
-
-static struct clk_div_ops clk_post_bit_div_ops = {
- .set_div = post_bit_clk_set_div,
- .get_div = post_bit_clk_get_div,
-};
-
-static struct clk_div_ops pixel_clk_div_ops = {
- .set_div = pixel_clk_set_div,
- .get_div = pixel_clk_get_div,
-};
-
-static struct clk_div_ops clk_bitclk_src_ops = {
- .set_div = bit_clk_set_div,
- .get_div = bit_clk_get_div,
-};
-
-static const struct clk_ops clk_ops_vco_8998 = {
- .set_rate = vco_8998_set_rate,
- .round_rate = vco_8998_round_rate,
- .handoff = vco_8998_handoff,
- .prepare = vco_8998_prepare,
- .unprepare = vco_8998_unprepare,
-};
-
-static struct clk_mux_ops mdss_mux_ops = {
- .set_mux_sel = mdss_set_mux_sel,
- .get_mux_sel = mdss_get_mux_sel,
-};
-
-/*
- * Clock tree for generating DSI byte and pixel clocks.
- *
- *
- * +---------------+
- * | vco_clk |
- * +-------+-------+
- * |
- * +--------------------------------------+
- * | |
- * +-------v-------+ |
- * | bitclk_src | |
- * | DIV(1..15) | |
- * +-------+-------+ |
- * | |
- * +--------------------+ |
- * Shadow Path | | |
- * + +-------v-------+ +------v------+ +------v-------+
- * | | byteclk_src | |post_bit_div | |post_vco_div |
- * | | DIV(8) | |DIV(1,2) | |DIV(1,4) |
- * | +-------+-------+ +------+------+ +------+-------+
- * | | | |
- * | | +------+ +----+
- * | +--------+ | |
- * | | +----v-----v------+
- * +-v---------v----+ \ pclk_src_mux /
- * \ byteclk_mux / \ /
- * \ / +-----+-----+
- * +----+-----+ | Shadow Path
- * | | +
- * v +-----v------+ |
- * dsi_byte_clk | pclk_src | |
- * | DIV(1..15) | |
- * +-----+------+ |
- * | |
- * | |
- * +--------+ |
- * | |
- * +---v----v----+
- * \ pclk_mux /
- * \ /
- * +---+---+
- * |
- * |
- * v
- * dsi_pclk
- *
- */
-
-static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
- .ref_clk_rate = 19200000UL,
- .min_rate = 1500000000UL,
- .max_rate = 3500000000UL,
- .c = {
- .dbg_name = "dsi0pll_vco_clk",
- .ops = &clk_ops_vco_8998,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_vco_clk.c),
- },
-};
-
-static struct div_clk dsi0pll_bitclk_src = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 15,
- },
- .ops = &clk_bitclk_src_ops,
- .c = {
- .parent = &dsi0pll_vco_clk.c,
- .dbg_name = "dsi0pll_bitclk_src",
- .ops = &clk_ops_bitclk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_bitclk_src.c),
- }
-};
-
-static struct div_clk dsi0pll_post_vco_div = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 4,
- },
- .ops = &clk_post_vco_div_ops,
- .c = {
- .parent = &dsi0pll_vco_clk.c,
- .dbg_name = "dsi0pll_post_vco_div",
- .ops = &clk_ops_post_vco_div_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_post_vco_div.c),
- }
-};
-
-static struct div_clk dsi0pll_post_bit_div = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 2,
- },
- .ops = &clk_post_bit_div_ops,
- .c = {
- .parent = &dsi0pll_bitclk_src.c,
- .dbg_name = "dsi0pll_post_bit_div",
- .ops = &clk_ops_post_bit_div_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_post_bit_div.c),
- }
-};
-
-static struct mux_clk dsi0pll_pclk_src_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dsi0pll_post_bit_div.c, 0},
- {&dsi0pll_post_vco_div.c, 1},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &dsi0pll_post_bit_div.c,
- .dbg_name = "dsi0pll_pclk_src_mux",
- .ops = &clk_ops_gen_mux,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_pclk_src_mux.c),
- }
-};
-
-static struct div_clk dsi0pll_pclk_src = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 15,
- },
- .ops = &pixel_clk_div_ops,
- .c = {
- .parent = &dsi0pll_pclk_src_mux.c,
- .dbg_name = "dsi0pll_pclk_src",
- .ops = &clk_ops_pclk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_pclk_src.c),
- },
-};
-
-static struct mux_clk dsi0pll_pclk_mux = {
- .num_parents = 1,
- .parents = (struct clk_src[]) {
- {&dsi0pll_pclk_src.c, 0},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &dsi0pll_pclk_src.c,
- .dbg_name = "dsi0pll_pclk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_pclk_mux.c),
- }
-};
-
-static struct div_clk dsi0pll_byteclk_src = {
- .data = {
- .div = 8,
- .min_div = 8,
- .max_div = 8,
- },
- .c = {
- .parent = &dsi0pll_bitclk_src.c,
- .dbg_name = "dsi0pll_byteclk_src",
- .ops = &clk_ops_div,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_byteclk_src.c),
- },
-};
-
-static struct mux_clk dsi0pll_byteclk_mux = {
- .num_parents = 1,
- .parents = (struct clk_src[]) {
- {&dsi0pll_byteclk_src.c, 0},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &dsi0pll_byteclk_src.c,
- .dbg_name = "dsi0pll_byteclk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_byteclk_mux.c),
- }
-};
-
-static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
- .ref_clk_rate = 19200000UL,
- .min_rate = 1500000000UL,
- .max_rate = 3500000000UL,
- .c = {
- .dbg_name = "dsi1pll_vco_clk",
- .ops = &clk_ops_vco_8998,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_vco_clk.c),
- },
-};
-
-static struct div_clk dsi1pll_bitclk_src = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 15,
- },
- .ops = &clk_bitclk_src_ops,
- .c = {
- .parent = &dsi1pll_vco_clk.c,
- .dbg_name = "dsi1pll_bitclk_src",
- .ops = &clk_ops_bitclk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_bitclk_src.c),
- }
-};
-
-static struct div_clk dsi1pll_post_vco_div = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 4,
- },
- .ops = &clk_post_vco_div_ops,
- .c = {
- .parent = &dsi1pll_vco_clk.c,
- .dbg_name = "dsi1pll_post_vco_div",
- .ops = &clk_ops_post_vco_div_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_post_vco_div.c),
- }
-};
-
-static struct div_clk dsi1pll_post_bit_div = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 2,
- },
- .ops = &clk_post_bit_div_ops,
- .c = {
- .parent = &dsi1pll_bitclk_src.c,
- .dbg_name = "dsi1pll_post_bit_div",
- .ops = &clk_ops_post_bit_div_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_post_bit_div.c),
- }
-};
-
-static struct mux_clk dsi1pll_pclk_src_mux = {
- .num_parents = 2,
- .parents = (struct clk_src[]) {
- {&dsi1pll_post_bit_div.c, 0},
- {&dsi1pll_post_vco_div.c, 1},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &dsi1pll_post_bit_div.c,
- .dbg_name = "dsi1pll_pclk_src_mux",
- .ops = &clk_ops_gen_mux,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_pclk_src_mux.c),
- }
-};
-
-static struct div_clk dsi1pll_pclk_src = {
- .data = {
- .div = 1,
- .min_div = 1,
- .max_div = 15,
- },
- .ops = &pixel_clk_div_ops,
- .c = {
- .parent = &dsi1pll_pclk_src_mux.c,
- .dbg_name = "dsi1pll_pclk_src",
- .ops = &clk_ops_pclk_src_c,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_pclk_src.c),
- },
-};
-
-static struct mux_clk dsi1pll_pclk_mux = {
- .num_parents = 1,
- .parents = (struct clk_src[]) {
- {&dsi1pll_pclk_src.c, 0},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &dsi1pll_pclk_src.c,
- .dbg_name = "dsi1pll_pclk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_pclk_mux.c),
- }
-};
-
-static struct div_clk dsi1pll_byteclk_src = {
- .data = {
- .div = 8,
- .min_div = 8,
- .max_div = 8,
- },
- .c = {
- .parent = &dsi1pll_bitclk_src.c,
- .dbg_name = "dsi1pll_byteclk_src",
- .ops = &clk_ops_div,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_byteclk_src.c),
- },
-};
-
-static struct mux_clk dsi1pll_byteclk_mux = {
- .num_parents = 1,
- .parents = (struct clk_src[]) {
- {&dsi1pll_byteclk_src.c, 0},
- },
- .ops = &mdss_mux_ops,
- .c = {
- .parent = &dsi1pll_byteclk_src.c,
- .dbg_name = "dsi1pll_byteclk_mux",
- .ops = &clk_ops_gen_mux_dsi,
- .flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_byteclk_mux.c),
- }
-};
-
-static struct clk_lookup mdss_dsi_pll0cc_8998[] = {
- CLK_LIST(dsi0pll_byteclk_mux),
- CLK_LIST(dsi0pll_byteclk_src),
- CLK_LIST(dsi0pll_pclk_mux),
- CLK_LIST(dsi0pll_pclk_src),
- CLK_LIST(dsi0pll_pclk_src_mux),
- CLK_LIST(dsi0pll_post_bit_div),
- CLK_LIST(dsi0pll_post_vco_div),
- CLK_LIST(dsi0pll_bitclk_src),
- CLK_LIST(dsi0pll_vco_clk),
-};
-static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
- CLK_LIST(dsi1pll_byteclk_mux),
- CLK_LIST(dsi1pll_byteclk_src),
- CLK_LIST(dsi1pll_pclk_mux),
- CLK_LIST(dsi1pll_pclk_src),
- CLK_LIST(dsi1pll_pclk_src_mux),
- CLK_LIST(dsi1pll_post_bit_div),
- CLK_LIST(dsi1pll_post_vco_div),
- CLK_LIST(dsi1pll_bitclk_src),
- CLK_LIST(dsi1pll_vco_clk),
-};
-
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res)
-{
- int rc = 0, ndx;
-
- if (!pdev || !pdev->dev.of_node ||
- !pll_res || !pll_res->pll_base || !pll_res->phy_base) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- ndx = pll_res->index;
-
- if (ndx >= DSI_PLL_MAX) {
- pr_err("pll index(%d) NOT supported\n", ndx);
- return -EINVAL;
- }
-
- pll_rsc_db[ndx] = pll_res;
- pll_res->priv = &plls[ndx];
- plls[ndx].rsc = pll_res;
-
- /* runtime fixup of all div and mux clock ops */
- clk_ops_gen_mux_dsi = clk_ops_gen_mux;
- clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
- clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
- clk_ops_bitclk_src_c = clk_ops_div;
- clk_ops_bitclk_src_c.prepare = mdss_pll_div_prepare;
-
- /*
- * Set the ops for the two dividers in the pixel clock tree to the
- * slave_div to ensure that a set rate on this divider clock will not
- * be propagated to it's parent. This is needed ensure that when we set
- * the rate for pixel clock, the vco is not reconfigured
- */
- clk_ops_post_vco_div_c = clk_ops_slave_div;
- clk_ops_post_vco_div_c.prepare = mdss_pll_div_prepare;
-
- clk_ops_post_bit_div_c = clk_ops_slave_div;
- clk_ops_post_bit_div_c.prepare = mdss_pll_div_prepare;
-
- clk_ops_pclk_src_c = clk_ops_div;
- clk_ops_pclk_src_c.prepare = mdss_pll_div_prepare;
-
- pll_res->vco_delay = VCO_DELAY_USEC;
- if (ndx == 0) {
- dsi0pll_byteclk_mux.priv = pll_res;
- dsi0pll_byteclk_src.priv = pll_res;
- dsi0pll_pclk_mux.priv = pll_res;
- dsi0pll_pclk_src.priv = pll_res;
- dsi0pll_pclk_src_mux.priv = pll_res;
- dsi0pll_post_bit_div.priv = pll_res;
- dsi0pll_post_vco_div.priv = pll_res;
- dsi0pll_bitclk_src.priv = pll_res;
- dsi0pll_vco_clk.priv = pll_res;
-
- rc = of_msm_clock_register(pdev->dev.of_node,
- mdss_dsi_pll0cc_8998,
- ARRAY_SIZE(mdss_dsi_pll0cc_8998));
- } else {
- dsi1pll_byteclk_mux.priv = pll_res;
- dsi1pll_byteclk_src.priv = pll_res;
- dsi1pll_pclk_mux.priv = pll_res;
- dsi1pll_pclk_src.priv = pll_res;
- dsi1pll_pclk_src_mux.priv = pll_res;
- dsi1pll_post_bit_div.priv = pll_res;
- dsi1pll_post_vco_div.priv = pll_res;
- dsi1pll_bitclk_src.priv = pll_res;
- dsi1pll_vco_clk.priv = pll_res;
-
- rc = of_msm_clock_register(pdev->dev.of_node,
- mdss_dsi_pll1cc_8998,
- ARRAY_SIZE(mdss_dsi_pll1cc_8998));
- }
- if (rc)
- pr_err("dsi%dpll clock register failed, rc=%d\n", ndx, rc);
-
- return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
index 286c99e..7fc38a2 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
#ifndef __MDSS_DSI_PLL_H
#define __MDSS_DSI_PLL_H
+#include <linux/clk-provider.h>
+#include "mdss-pll.h"
#define MAX_DSI_PLL_EN_SEQS 10
#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG (0x0020)
@@ -31,6 +33,7 @@
};
struct dsi_pll_vco_clk {
+ struct clk_hw hw;
unsigned long ref_clk_rate;
unsigned long min_rate;
unsigned long max_rate;
@@ -38,73 +41,16 @@
struct lpfr_cfg *lpfr_lut;
u32 lpfr_lut_size;
void *priv;
-
- struct clk c;
-
int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
(struct mdss_pll_resources *dsi_pll_Res);
};
-static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
{
- return container_of(clk, struct dsi_pll_vco_clk, c);
+ return container_of(hw, struct dsi_pll_vco_clk, hw);
}
-int dsi_pll_clock_register_hpm(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_20nm(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_lpm(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
- struct mdss_pll_resources *pll_res);
-
-int set_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_byte_mux_sel(struct mux_clk *clk);
-int dsi_pll_mux_prepare(struct clk *c);
-int fixed_4div_set_div(struct div_clk *clk, int div);
-int fixed_4div_get_div(struct div_clk *clk);
-int digital_set_div(struct div_clk *clk, int div);
-int digital_get_div(struct div_clk *clk);
-int analog_set_div(struct div_clk *clk, int div);
-int analog_get_div(struct div_clk *clk);
-int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
-int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-unsigned long vco_get_rate(struct clk *c);
-long vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff vco_handoff(struct clk *c);
-int vco_prepare(struct clk *c);
-void vco_unprepare(struct clk *c);
-
-/* APIs for 20nm PHY PLL */
-int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
- unsigned long rate);
-long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff pll_20nm_vco_handoff(struct clk *c);
-int pll_20nm_vco_prepare(struct clk *c);
-void pll_20nm_vco_unprepare(struct clk *c);
-int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
-
-int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
-int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int fixed_hr_oclk2_get_div(struct div_clk *clk);
-int hr_oclk3_set_div(struct div_clk *clk, int div);
-int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
-int hr_oclk3_get_div(struct div_clk *clk);
-int ndiv_set_div(struct div_clk *clk, int div);
-int shadow_ndiv_set_div(struct div_clk *clk, int div);
-int ndiv_get_div(struct div_clk *clk);
-void __dsi_pll_disable(void __iomem *pll_base);
-
-int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel(struct mux_clk *clk);
-int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel(struct mux_clk *clk);
-
#endif
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
index 690c53f..4d79772 100644
--- a/drivers/clk/qcom/mdss/mdss-pll-util.c
+++ b/drivers/clk/qcom/mdss/mdss-pll-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
-#include <linux/clk/msm-clock-generic.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index c22fa80..7f82fda 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -19,12 +19,8 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
-#include <linux/clk/msm-clock-generic.h>
-
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
-#include "mdss-hdmi-pll.h"
-#include "mdss-dp-pll.h"
int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
{
@@ -128,32 +124,10 @@
goto err;
}
- if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
- pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
- pll_res->target_id = MDSS_PLL_TARGET_8996;
- pll_res->revision = 1;
- } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
- pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
- pll_res->target_id = MDSS_PLL_TARGET_8996;
- pll_res->revision = 2;
- } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8998")) {
- pll_res->pll_interface_type = MDSS_DSI_PLL_8998;
- } else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_8998")) {
- pll_res->pll_interface_type = MDSS_DP_PLL_8998;
- } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
- pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
- } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
- pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
- } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
- pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
- } else if (!strcmp(compatible_stream,
- "qcom,mdss_hdmi_pll_8996_v3_1p8")) {
- pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
- } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8998")) {
- pll_res->pll_interface_type = MDSS_HDMI_PLL_8998;
- } else {
+ if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
+ pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+ else
goto err;
- }
return rc;
@@ -174,28 +148,8 @@
}
switch (pll_res->pll_interface_type) {
- case MDSS_DSI_PLL_8996:
- rc = dsi_pll_clock_register_8996(pdev, pll_res);
- break;
- case MDSS_DSI_PLL_8998:
- rc = dsi_pll_clock_register_8998(pdev, pll_res);
- case MDSS_DP_PLL_8998:
- rc = dp_pll_clock_register_8998(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996:
- rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996_V2:
- rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996_V3:
- rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8996_V3_1_8:
- rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
- break;
- case MDSS_HDMI_PLL_8998:
- rc = hdmi_8998_pll_clock_register(pdev, pll_res);
+ case MDSS_DSI_PLL_10NM:
+ rc = dsi_pll_clock_register_10nm(pdev, pll_res);
break;
case MDSS_UNKNOWN_PLL:
default:
@@ -392,15 +346,7 @@
}
static const struct of_device_id mdss_pll_dt_match[] = {
- {.compatible = "qcom,mdss_dsi_pll_8996"},
- {.compatible = "qcom,mdss_dsi_pll_8996_v2"},
- {.compatible = "qcom,mdss_dsi_pll_8998"},
- {.compatible = "qcom,mdss_hdmi_pll_8996"},
- {.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
- {.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
- {.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
- {.compatible = "qcom,mdss_dp_pll_8998"},
- {.compatible = "qcom,mdss_hdmi_pll_8998"},
+ {.compatible = "qcom,mdss_dsi_pll_10nm"},
{}
};
@@ -425,7 +371,7 @@
return rc;
}
-subsys_initcall(mdss_pll_driver_init);
+fs_initcall(mdss_pll_driver_init);
static void __exit mdss_pll_driver_deinit(void)
{
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 48dddf6..eccfcea 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -12,10 +12,16 @@
#ifndef __MDSS_PLL_H
#define __MDSS_PLL_H
-
-#include <linux/mdss_io_util.h>
-#include <linux/clk/msm-clock-generic.h>
+#include <linux/sde_io_util.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include "../clk-regmap.h"
+#include "../clk-regmap-divider.h"
+#include "../clk-regmap-mux.h"
+
#define MDSS_PLL_REG_W(base, offset, data) \
writel_relaxed((data), (base) + (offset))
@@ -30,14 +36,7 @@
(base) + (offset))
enum {
- MDSS_DSI_PLL_8996,
- MDSS_DSI_PLL_8998,
- MDSS_DP_PLL_8998,
- MDSS_HDMI_PLL_8996,
- MDSS_HDMI_PLL_8996_V2,
- MDSS_HDMI_PLL_8996_V3,
- MDSS_HDMI_PLL_8996_V3_1_8,
- MDSS_HDMI_PLL_8998,
+ MDSS_DSI_PLL_10NM,
MDSS_UNKNOWN_PLL,
};
@@ -195,25 +194,27 @@
WARN(1, "gdsc_base register is not defined\n");
return true;
}
-
- return ((readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)) &&
- (!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
+ return readl_relaxed(pll_res->gdsc_base) & BIT(31) ? false : true;
}
-static inline int mdss_pll_div_prepare(struct clk *c)
+static inline int mdss_pll_div_prepare(struct clk_hw *hw)
{
- struct div_clk *div = to_div_clk(c);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
/* Restore the divider's value */
- return div->ops->set_div(div, div->data.div);
+ return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
+ clk_hw_get_rate(parent_hw));
}
-static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
+static inline int mdss_set_mux_sel(void *context, unsigned int reg,
+ unsigned int val)
{
return 0;
}
-static inline int mdss_get_mux_sel(struct mux_clk *clk)
+static inline int mdss_get_mux_sel(void *context, unsigned int reg,
+ unsigned int *val)
{
+ *val = 0;
return 0;
}
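
These stubs now match the regmap context-callback signatures, so a mux that needs no real register access can sit behind a dummy regmap. One plausible wiring, as a sketch (the bus name is illustrative, not taken from this patch):

static const struct regmap_bus mdss_mux_regmap_bus = {
	.reg_write = mdss_set_mux_sel,	/* selection is fixed: a no-op */
	.reg_read = mdss_get_mux_sel,	/* always reports select 0 */
};

A regmap created from this bus can then back a clk_regmap_mux whose select never changes.
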
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 8b63979..4eb8a04 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -151,19 +151,6 @@
},
};
-static struct clk_branch video_cc_debug_clk = {
- .halt_reg = 0xa58,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa58,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "video_cc_debug_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch video_cc_qdss_trig_clk = {
.halt_reg = 0x970,
.halt_check = BRANCH_HALT,
@@ -299,7 +286,6 @@
static struct clk_regmap *video_cc_sdm845_clocks[] = {
[VIDEO_CC_APB_CLK] = &video_cc_apb_clk.clkr,
[VIDEO_CC_AT_CLK] = &video_cc_at_clk.clkr,
- [VIDEO_CC_DEBUG_CLK] = &video_cc_debug_clk.clkr,
[VIDEO_CC_QDSS_TRIG_CLK] = &video_cc_qdss_trig_clk.clkr,
[VIDEO_CC_QDSS_TSCTR_DIV8_CLK] = &video_cc_qdss_tsctr_div8_clk.clkr,
[VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 82e62c5..5db1897 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -81,6 +81,7 @@
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool arch_counter_suspend_stop;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
@@ -440,15 +441,14 @@
{
u32 cntkctl = arch_timer_get_cntkctl();
- /* Disable user access to the timers and the physical counter */
+ /* Disable user access to the timers */
/* Also disable virtual event stream */
cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
| ARCH_TIMER_USR_VT_ACCESS_EN
- | ARCH_TIMER_VIRT_EVT_EN
- | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ | ARCH_TIMER_VIRT_EVT_EN);
- /* Enable user access to the virtual counter */
- cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+ /* Enable user access to the virtual and physical counters */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN | ARCH_TIMER_USR_PCT_ACCESS_EN;
arch_timer_set_cntkctl(cntkctl);
}
@@ -577,7 +577,7 @@
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter = {
@@ -617,6 +617,8 @@
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ if (!arch_counter_suspend_stop)
+ clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
@@ -909,6 +911,10 @@
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ /* On some systems, the counter stops ticking when in suspend. */
+ arch_counter_suspend_stop = of_property_read_bool(np,
+ "arm,no-tick-in-suspend");
+
return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66e604e..b315236 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2480,6 +2480,20 @@
*********************************************************************/
static enum cpuhp_state hp_online;
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+ cpufreq_online(cpu);
+
+ return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+ cpufreq_offline(cpu);
+
+ return 0;
+}
+
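
The wrappers pin down the cpuhp callback contract: any error from cpufreq_online()/cpufreq_offline() is deliberately dropped and the state machine always sees success, so a cpufreq failure cannot fail or roll back the CPU online/offline operation itself.
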
/**
* cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
@@ -2542,8 +2556,8 @@
}
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
- cpufreq_online,
- cpufreq_offline);
+ cpuhp_cpufreq_online,
+ cpuhp_cpufreq_offline);
if (ret < 0)
goto err_if_unreg;
hp_online = ret;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f..f61b78a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -498,7 +498,49 @@
help
This driver supports Qualcomm crypto engine accelerator
hardware. To compile this driver as a module, choose M here. The
- module will be called qcrypto.
+ module will be called qcrypt.
+
+config CRYPTO_DEV_QCOM_MSM_QCE
+ tristate "QTI Crypto Engine (QCE) module"
+ depends on ARCH_QCOM
+ help
+ This driver supports QTI Crypto Engine accelerator hardware, which
+ is present on SDM845. This is the core crypto driver which adds
+ CE5.0 functionalities. To compile this driver as a module, choose
+	  M here. The module will be called qce50.
+
+config CRYPTO_DEV_QCRYPTO
+ tristate "QTI Crypto accelerator"
+ depends on ARCH_QCOM
+ select CRYPTO_DES
+ select CRYPTO_ALGAPI
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ help
+	  This driver supports QTI crypto acceleration for kernel
+	  clients. To compile this driver as a module, choose M here:
+	  the module will be called qcrypto. Select Y to build it
+	  into the kernel.
+
+config CRYPTO_DEV_QCEDEV
+ tristate "QCEDEV Interface to CE module"
+ depends on ARCH_QCOM
+ help
+ This driver supports QTI QCEDEV Crypto Engine 5.0.
+ This exposes the interface to the QCE hardware accelerator
+ via IOCTLs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+ tristate "OTA Crypto module"
+ depends on ARCH_QCOM
+ help
+ This driver supports QTI OTA Crypto in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ota_crypto. Select Y to build it
+	  into the kernel.
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
@@ -555,4 +597,8 @@
source "drivers/crypto/chelsio/Kconfig"
+if ARCH_QCOM
+source "drivers/crypto/msm/Kconfig"
+endif # ARCH_QCOM
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250f..5f7b988 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -28,6 +28,7 @@
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e..354a16a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@
ctx->dev = caam_jr_alloc();
if (IS_ERR(ctx->dev)) {
- dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+ pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->dev);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78..98468b9 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -282,7 +282,8 @@
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
- if (ret || status) {
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
diff --git a/drivers/crypto/msm/Kconfig b/drivers/crypto/msm/Kconfig
new file mode 100644
index 0000000..3011aa6
--- /dev/null
+++ b/drivers/crypto/msm/Kconfig
@@ -0,0 +1,10 @@
+
+config CRYPTO_DEV_QCOM_ICE
+ tristate "Inline Crypto Module"
+ default n
+ depends on BLK_DEV_DM
+ help
+ This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
+ and later, to accelerate crypto operations for storage needs.
+ To compile this driver as a module, choose M here: the
+ module will be called ice.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
new file mode 100644
index 0000000..9ecb646
--- /dev/null
+++ b/drivers/crypto/msm/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff --git a/drivers/crypto/msm/compat_qcedev.c b/drivers/crypto/msm/compat_qcedev.c
new file mode 100644
index 0000000..0ca28be
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.c
@@ -0,0 +1,431 @@
+/*
+ * QTI CE 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/qcedev.h>
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+static int compat_get_qcedev_pmem_info(
+ struct compat_qcedev_pmem_info __user *pmem32,
+ struct qcedev_pmem_info __user *pmem)
+{
+ compat_ulong_t offset;
+ compat_int_t fd_src;
+ compat_int_t fd_dst;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ err |= get_user(fd_src, &pmem32->fd_src);
+ err |= put_user(fd_src, &pmem->fd_src);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem32->src[i].offset);
+ err |= put_user(offset, &pmem->src[i].offset);
+ err |= get_user(len, &pmem32->src[i].len);
+ err |= put_user(len, &pmem->src[i].len);
+ }
+
+ err |= get_user(fd_dst, &pmem32->fd_dst);
+ err |= put_user(fd_dst, &pmem->fd_dst);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem32->dst[i].offset);
+ err |= put_user(offset, &pmem->dst[i].offset);
+ err |= get_user(len, &pmem32->dst[i].len);
+ err |= put_user(len, &pmem->dst[i].len);
+ }
+
+ return err;
+}
+
+static int compat_put_qcedev_pmem_info(
+ struct compat_qcedev_pmem_info __user *pmem32,
+ struct qcedev_pmem_info __user *pmem)
+{
+ compat_ulong_t offset;
+ compat_int_t fd_src;
+ compat_int_t fd_dst;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ err |= get_user(fd_src, &pmem->fd_src);
+ err |= put_user(fd_src, &pmem32->fd_src);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem->src[i].offset);
+ err |= put_user(offset, &pmem32->src[i].offset);
+ err |= get_user(len, &pmem->src[i].len);
+ err |= put_user(len, &pmem32->src[i].len);
+ }
+
+ err |= get_user(fd_dst, &pmem->fd_dst);
+ err |= put_user(fd_dst, &pmem32->fd_dst);
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(offset, &pmem->dst[i].offset);
+ err |= put_user(offset, &pmem32->dst[i].offset);
+ err |= get_user(len, &pmem->dst[i].len);
+ err |= put_user(len, &pmem32->dst[i].len);
+ }
+
+ return err;
+}
+
+static int compat_get_qcedev_vbuf_info(
+ struct compat_qcedev_vbuf_info __user *vbuf32,
+ struct qcedev_vbuf_info __user *vbuf)
+{
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, &vbuf32->src[i].vaddr);
+ vbuf->src[i].vaddr = NULL;
+ err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+ err |= get_user(len, &vbuf32->src[i].len);
+ err |= put_user(len, &vbuf->src[i].len);
+ }
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
+ vbuf->dst[i].vaddr = NULL;
+ err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+ err |= get_user(len, &vbuf32->dst[i].len);
+ err |= put_user(len, &vbuf->dst[i].len);
+ }
+ return err;
+}
+
+static int compat_put_qcedev_vbuf_info(
+ struct compat_qcedev_vbuf_info __user *vbuf32,
+ struct qcedev_vbuf_info __user *vbuf)
+{
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+ vbuf32->src[i].vaddr = 0;
+ err |= put_user(vaddr, &vbuf32->src[i].vaddr);
+ err |= get_user(len, &vbuf->src[i].len);
+ err |= put_user(len, &vbuf32->src[i].len);
+ }
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+ vbuf32->dst[i].vaddr = 0;
+ err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
+ err |= get_user(len, &vbuf->dst[i].len);
+ err |= put_user(len, &vbuf32->dst[i].len);
+ }
+ return err;
+}
+
+static int compat_get_qcedev_cipher_op_req(
+ struct compat_qcedev_cipher_op_req __user *data32,
+ struct qcedev_cipher_op_req __user *data)
+{
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_cipher_alg_enum alg;
+ compat_ulong_t byteoffset;
+ enum qcedev_oper_enum op;
+ compat_ulong_t data_len;
+ compat_ulong_t encklen;
+ compat_ulong_t entries;
+ compat_ulong_t ivlen;
+ uint8_t in_place_op;
+ int err = 0, i = 0;
+ uint8_t use_pmem;
+ uint8_t enckey;
+ uint8_t iv;
+
+ err |= get_user(use_pmem, &data32->use_pmem);
+ err |= put_user(use_pmem, &data->use_pmem);
+
+ if (use_pmem)
+ err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem);
+ else
+ err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+ err |= get_user(entries, &data32->entries);
+ err |= put_user(entries, &data->entries);
+ err |= get_user(data_len, &data32->data_len);
+ err |= put_user(data_len, &data->data_len);
+ err |= get_user(in_place_op, &data32->in_place_op);
+ err |= put_user(in_place_op, &data->in_place_op);
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ err |= get_user(enckey, &(data32->enckey[i]));
+ err |= put_user(enckey, &(data->enckey[i]));
+ }
+
+ err |= get_user(encklen, &data32->encklen);
+ err |= put_user(encklen, &data->encklen);
+
+ for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+ err |= get_user(iv, &(data32->iv[i]));
+ err |= put_user(iv, &(data->iv[i]));
+ }
+
+ err |= get_user(ivlen, &data32->ivlen);
+ err |= put_user(ivlen, &data->ivlen);
+ err |= get_user(byteoffset, &data32->byteoffset);
+ err |= put_user(byteoffset, &data->byteoffset);
+ err |= get_user(alg, &data32->alg);
+ err |= put_user(alg, &data->alg);
+ err |= get_user(mode, &data32->mode);
+ err |= put_user(mode, &data->mode);
+ err |= get_user(op, &data32->op);
+ err |= put_user(op, &data->op);
+
+ return err;
+}
+
+static int compat_put_qcedev_cipher_op_req(
+ struct compat_qcedev_cipher_op_req __user *data32,
+ struct qcedev_cipher_op_req __user *data)
+{
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_cipher_alg_enum alg;
+ compat_ulong_t byteoffset;
+ enum qcedev_oper_enum op;
+ compat_ulong_t data_len;
+ compat_ulong_t encklen;
+ compat_ulong_t entries;
+ compat_ulong_t ivlen;
+ uint8_t in_place_op;
+ int err = 0, i = 0;
+ uint8_t use_pmem;
+ uint8_t enckey;
+ uint8_t iv;
+
+ err |= get_user(use_pmem, &data->use_pmem);
+ err |= put_user(use_pmem, &data32->use_pmem);
+
+ if (use_pmem)
+ err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem);
+ else
+ err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+ err |= get_user(entries, &data->entries);
+ err |= put_user(entries, &data32->entries);
+ err |= get_user(data_len, &data->data_len);
+ err |= put_user(data_len, &data32->data_len);
+ err |= get_user(in_place_op, &data->in_place_op);
+ err |= put_user(in_place_op, &data32->in_place_op);
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ err |= get_user(enckey, &(data->enckey[i]));
+ err |= put_user(enckey, &(data32->enckey[i]));
+ }
+
+ err |= get_user(encklen, &data->encklen);
+ err |= put_user(encklen, &data32->encklen);
+
+ for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+ err |= get_user(iv, &(data->iv[i]));
+ err |= put_user(iv, &(data32->iv[i]));
+ }
+
+ err |= get_user(ivlen, &data->ivlen);
+ err |= put_user(ivlen, &data32->ivlen);
+ err |= get_user(byteoffset, &data->byteoffset);
+ err |= put_user(byteoffset, &data32->byteoffset);
+ err |= get_user(alg, &data->alg);
+ err |= put_user(alg, &data32->alg);
+ err |= get_user(mode, &data->mode);
+ err |= put_user(mode, &data32->mode);
+ err |= get_user(op, &data->op);
+ err |= put_user(op, &data32->op);
+
+ return err;
+}
+
+static int compat_get_qcedev_sha_op_req(
+ struct compat_qcedev_sha_op_req __user *data32,
+ struct qcedev_sha_op_req __user *data)
+{
+ enum qcedev_sha_alg_enum alg;
+ compat_ulong_t authklen;
+ compat_ulong_t data_len;
+ compat_ulong_t entries;
+ compat_ulong_t diglen;
+ compat_uptr_t authkey;
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint8_t digest;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, &data32->data[i].vaddr);
+ data->data[i].vaddr = 0;
+ err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+ err |= get_user(len, &data32->data[i].len);
+ err |= put_user(len, &data->data[i].len);
+ }
+
+ err |= get_user(entries, &data32->entries);
+ err |= put_user(entries, &data->entries);
+ err |= get_user(data_len, &data32->data_len);
+ err |= put_user(data_len, &data->data_len);
+
+ for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+ err |= get_user(digest, &(data32->digest[i]));
+ err |= put_user(digest, &(data->digest[i]));
+ }
+
+ err |= get_user(diglen, &data32->diglen);
+ err |= put_user(diglen, &data->diglen);
+ err |= get_user(authkey, &data32->authkey);
+ data->authkey = NULL;
+ err |= put_user(authkey, (compat_uptr_t *)&data->authkey);
+ err |= get_user(authklen, &data32->authklen);
+ err |= put_user(authklen, &data->authklen);
+ err |= get_user(alg, &data32->alg);
+ err |= put_user(alg, &data->alg);
+
+ return err;
+}
+
+static int compat_put_qcedev_sha_op_req(
+ struct compat_qcedev_sha_op_req __user *data32,
+ struct qcedev_sha_op_req __user *data)
+{
+ enum qcedev_sha_alg_enum alg;
+ compat_ulong_t authklen;
+ compat_ulong_t data_len;
+ compat_ulong_t entries;
+ compat_ulong_t diglen;
+ compat_uptr_t authkey;
+ compat_uptr_t vaddr;
+ int err = 0, i = 0;
+ uint8_t digest;
+ uint32_t len;
+
+ for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+ data32->data[i].vaddr = 0;
+ err |= put_user(vaddr, &data32->data[i].vaddr);
+ err |= get_user(len, &data->data[i].len);
+ err |= put_user(len, &data32->data[i].len);
+ }
+
+ err |= get_user(entries, &data->entries);
+ err |= put_user(entries, &data32->entries);
+ err |= get_user(data_len, &data->data_len);
+ err |= put_user(data_len, &data32->data_len);
+
+ for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+ err |= get_user(digest, &(data->digest[i]));
+ err |= put_user(digest, &(data32->digest[i]));
+ }
+
+ err |= get_user(diglen, &data->diglen);
+ err |= put_user(diglen, &data32->diglen);
+ err |= get_user(authkey, (compat_uptr_t *)&data->authkey);
+ data32->authkey = 0;
+ err |= put_user(authkey, &data32->authkey);
+ err |= get_user(authklen, &data->authklen);
+ err |= put_user(authklen, &data32->authklen);
+ err |= get_user(alg, &data->alg);
+ err |= put_user(alg, &data32->alg);
+
+ return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+ switch (cmd) {
+ case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+ return QCEDEV_IOCTL_ENC_REQ;
+ case COMPAT_QCEDEV_IOCTL_DEC_REQ:
+ return QCEDEV_IOCTL_DEC_REQ;
+ case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+ return QCEDEV_IOCTL_SHA_INIT_REQ;
+ case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+ return QCEDEV_IOCTL_SHA_UPDATE_REQ;
+ case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+ return QCEDEV_IOCTL_SHA_FINAL_REQ;
+ case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ:
+ return QCEDEV_IOCTL_GET_SHA_REQ;
+ case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+ return QCEDEV_IOCTL_GET_CMAC_REQ;
+ default:
+ return cmd;
+ }
+
+}
+
+long compat_qcedev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+ case COMPAT_QCEDEV_IOCTL_DEC_REQ: {
+ struct compat_qcedev_cipher_op_req __user *data32;
+ struct qcedev_cipher_op_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!data)
+ return -EFAULT;
+
+ err = compat_get_qcedev_cipher_op_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+ err = compat_put_qcedev_cipher_op_req(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+ case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+ case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+ case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+ case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: {
+ struct compat_qcedev_sha_op_req __user *data32;
+ struct qcedev_sha_op_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (!data)
+ return -EFAULT;
+
+ err = compat_get_qcedev_sha_op_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+ err = compat_put_qcedev_sha_op_req(data32, data);
+ return ret ? ret : err;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(compat_qcedev_ioctl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI 32-64 Compatibility for Crypto driver");
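
The exported handler is meant to sit next to the native 64-bit ioctl in the driver's file_operations; a sketch of that wiring (qcedev_fops here is illustrative, the native driver defines its own):

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_qcedev_ioctl,
#endif
};
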
diff --git a/drivers/crypto/msm/compat_qcedev.h b/drivers/crypto/msm/compat_qcedev.h
new file mode 100644
index 0000000..4cc3933
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.h
@@ -0,0 +1,165 @@
+#ifndef _UAPI_COMPAT_QCEDEV__H
+#define _UAPI_COMPAT_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/**
+ * struct compat_buf_info - Buffer information
+ * @offset: Offset from the base address of the buffer
+ * (Used when buffer is allocated using PMEM)
+ * @vaddr: Virtual buffer address pointer
+ * @len: Size of the buffer
+ */
+struct compat_buf_info {
+ union {
+ compat_ulong_t offset;
+ compat_uptr_t vaddr;
+ };
+ compat_ulong_t len;
+};
+
+/**
+ * struct compat_qcedev_vbuf_info - Source and destination Buffer information
+ * @src: Array of buf_info for input/source
+ * @dst: Array of buf_info for output/destination
+ */
+struct compat_qcedev_vbuf_info {
+ struct compat_buf_info src[QCEDEV_MAX_BUFFERS];
+ struct compat_buf_info dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct compat_qcedev_pmem_info - Stores PMEM buffer information
+ * @fd_src: Handle to /dev/adsp_pmem used to allocate
+ * memory for input/src buffer
+ * @src: Array of buf_info for input/source
+ * @fd_dst: Handle to /dev/adsp_pmem used to allocate
+ * memory for output/dst buffer
+ * @dst: Array of buf_info for output/destination
+ */
+struct compat_qcedev_pmem_info {
+ compat_int_t fd_src;
+ struct compat_buf_info src[QCEDEV_MAX_BUFFERS];
+ compat_int_t fd_dst;
+ struct compat_buf_info dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct compat_qcedev_cipher_op_req - Holds the ciphering request information
+ * @use_pmem (IN): Flag to indicate if buffer source is PMEM
+ * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+ * @pmem (IN):		Stores PMEM buffer information.
+ *			Refer to struct compat_qcedev_pmem_info
+ * @vbuf (IN/OUT):	Stores source and destination buffer information.
+ *			Refer to struct compat_qcedev_vbuf_info
+ * @data_len (IN): Total Length of input/src and output/dst in bytes
+ * @in_place_op (IN): Indicates whether the operation is inplace where
+ * source == destination
+ * When using PMEM allocated memory, must set this to 1
+ * @enckey (IN): 128 bits of confidentiality key
+ * enckey[0] bit 127-120, enckey[1] bit 119-112,..
+ * enckey[15] bit 7-0
+ * @encklen (IN): Length of the encryption key(set to 128 bits/16
+ * bytes in the driver)
+ * @iv (IN/OUT): Initialization vector data
+ * This is updated by the driver, incremented by
+ * number of blocks encrypted/decrypted.
+ * @ivlen (IN): Length of the IV
+ * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
+ * for AES-128 CTR mode only)
+ * @alg (IN): Type of ciphering algorithm: AES/DES/3DES
+ * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR
+ * Applicable when using AES algorithm only
+ * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+ * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+ *
+ * If use_pmem is set to 0, the driver assumes that memory was not allocated
+ * via PMEM; the kernel will allocate memory, copy data in from the user
+ * space buffer (data_src/data_dst), process it, and copy the data back
+ * to the user space buffer.
+ *
+ * If use_pmem is set to 1, the driver assumes that memory was allocated via
+ * PMEM.
+ * The kernel driver will use the fd_src to determine the kernel virtual address
+ * base that maps to the user space virtual address base for the buffer
+ * allocated in user space.
+ * The final input/src and output/dst buffer pointer will be determined
+ * by adding the offsets to the kernel virtual addr.
+ *
+ * If use of hardware key is supported in the target, user can configure the
+ * key parameters (encklen, enckey) to use the hardware key.
+ * In order to use the hardware key, set encklen to 0 and set the enckey
+ * data array to 0.
+ */
+struct compat_qcedev_cipher_op_req {
+ uint8_t use_pmem;
+ union {
+ struct compat_qcedev_pmem_info pmem;
+ struct compat_qcedev_vbuf_info vbuf;
+ };
+ compat_ulong_t entries;
+ compat_ulong_t data_len;
+ uint8_t in_place_op;
+ uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
+ compat_ulong_t encklen;
+ uint8_t iv[QCEDEV_MAX_IV_SIZE];
+ compat_ulong_t ivlen;
+ compat_ulong_t byteoffset;
+ enum qcedev_cipher_alg_enum alg;
+ enum qcedev_cipher_mode_enum mode;
+ enum qcedev_oper_enum op;
+};
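
To make the IN/OUT annotations concrete, here is a hypothetical 32-bit userspace snippet that fills this layout for an in-place AES-CBC encryption over virtual buffers; device setup, key handling, and error checking are elided, and nothing below is defined by this patch. On a 64-bit kernel the call lands in compat_qcedev_ioctl(), which converts it to the native request:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/qcedev.h>

static int qce_encrypt_inplace(int fd, uint8_t *buf, uint32_t len)
{
	struct qcedev_cipher_op_req req;

	memset(&req, 0, sizeof(req));
	req.use_pmem = 0;		/* virtual buffers, not PMEM */
	req.entries = 1;
	req.vbuf.src[0].vaddr = buf;
	req.vbuf.src[0].len = len;
	req.vbuf.dst[0].vaddr = buf;	/* in-place: src == dst */
	req.vbuf.dst[0].len = len;
	req.in_place_op = 1;
	req.data_len = len;
	req.encklen = 0;		/* zero length + zeroed enckey selects the HW key */
	req.ivlen = 16;			/* AES block-sized IV, left all-zero here */
	req.alg = QCEDEV_ALG_AES;
	req.mode = QCEDEV_AES_MODE_CBC;
	req.op = QCEDEV_OPER_ENC;

	/* a 32-bit build computes the compat ioctl number automatically */
	return ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
}
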
+
+/**
+ * struct qcedev_sha_op_req - Holds the hashing request information
+ * @data (IN): Array of pointers to the data to be hashed
+ * @entries (IN): Number of buf_info entries in the data array
+ * @data_len (IN): Length of data to be hashed
+ * @digest (IN/OUT): Returns the hashed data information
+ * @diglen (OUT): Size of the hashed/digest data
+ * @authkey (IN): Pointer to authentication key for HMAC
+ * @authklen (IN): Size of the authentication key
+ * @alg (IN): Secure Hash algorithm
+ */
+struct compat_qcedev_sha_op_req {
+ struct compat_buf_info data[QCEDEV_MAX_BUFFERS];
+ compat_ulong_t entries;
+ compat_ulong_t data_len;
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ compat_ulong_t diglen;
+ compat_uptr_t authkey;
+ compat_ulong_t authklen;
+ enum qcedev_sha_alg_enum alg;
+};
+
+struct file;
+extern long compat_qcedev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QCEDEV_IOCTL_ENC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_DEC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_LOCK_CE \
+ _IO(QCEDEV_IOC_MAGIC, 7)
+#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE \
+ _IO(QCEDEV_IOC_MAGIC, 8)
+#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ \
+ _IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QCEDEV__H */
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
new file mode 100644
index 0000000..b411726
--- /dev/null
+++ b/drivers/crypto/msm/ice.c
@@ -0,0 +1,1780 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/device-mapper.h>
+#include <linux/clk.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus.h>
+#include <crypto/ice.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+#include "iceregs.h"
+
+#ifdef CONFIG_PFK
+#include <linux/pfk.h>
+#else
+#include <linux/bio.h>
+static inline int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+ return 0;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ return 0;
+}
+
+static inline void pfk_clear_on_reset(void)
+{
+}
+#endif
+
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+ ((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
+#define TZ_OWNER_QSEE_OS 50
+#define TZ_SVC_KEYSTORE 5 /* Keystore management */
+
+#define TZ_OS_KS_RESTORE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+
+#define TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
+
+#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
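
As a worked example of the packing above, the restore-key SMC ID resolves to a single 32-bit value:

/*
 * TZ_OS_KS_RESTORE_KEY_ID = ((50 & 0x3f) << 24) | ((5 & 0xff) << 8) | 0x06
 *                         = 0x32000000 | 0x0500 | 0x06
 *                         = 0x32000506
 */
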
+
+#define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
+#define QCOM_UFS_ICE_DEV "iceufs"
+#define QCOM_SDCC_ICE_DEV "icesdcc"
+#define QCOM_ICE_TYPE_NAME_LEN 8
+#define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
+#define QCOM_ICE_UFS 10
+#define QCOM_ICE_SDCC 20
+
+struct ice_clk_info {
+ struct list_head list;
+ struct clk *clk;
+ const char *name;
+ u32 max_freq;
+ u32 min_freq;
+ u32 curr_freq;
+ bool enabled;
+};
+
+struct qcom_ice_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ int saved_vote;
+ bool is_max_bw_needed;
+ struct device_attribute max_bus_bw;
+};
+
+static LIST_HEAD(ice_devices);
+/*
+ * ICE HW device structure.
+ */
+struct ice_device {
+ struct list_head list;
+ struct device *pdev;
+ struct cdev cdev;
+ dev_t device_no;
+ struct class *driver_class;
+ void __iomem *mmio;
+ struct resource *res;
+ int irq;
+ bool is_ice_enabled;
+ bool is_ice_disable_fuse_blown;
+ ice_error_cb error_cb;
+ void *host_controller_data; /* UFS/EMMC/other? */
+ struct list_head clk_list_head;
+ u32 ice_hw_version;
+ bool is_ice_clk_available;
+ char ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
+ struct regulator *reg;
+ bool is_regulator_available;
+ struct qcom_ice_bus_vote bus_vote;
+ ktime_t ice_reset_start_time;
+ ktime_t ice_reset_complete_time;
+};
+
+static int qti_ice_setting_config(struct request *req,
+ struct platform_device *pdev,
+ struct ice_crypto_setting *crypto_data,
+ struct ice_data_setting *setting)
+{
+ struct ice_device *ice_dev = NULL;
+
+ ice_dev = platform_get_drvdata(pdev);
+
+ if (!ice_dev) {
+ pr_debug("%s no ICE device\n", __func__);
+
+		/* make the caller finish peacefully */
+ return 0;
+ }
+
+ if (ice_dev->is_ice_disable_fuse_blown) {
+ pr_err("%s ICE disabled fuse is blown\n", __func__);
+ return -EPERM;
+ }
+
+ if ((short)(crypto_data->key_index) >= 0) {
+
+ memcpy(&setting->crypto_data, crypto_data,
+ sizeof(setting->crypto_data));
+
+ if (rq_data_dir(req) == WRITE)
+ setting->encr_bypass = false;
+ else if (rq_data_dir(req) == READ)
+ setting->decr_bypass = false;
+ else {
+			/* neither READ nor WRITE: leave both directions bypassed */
+ setting->encr_bypass = true;
+ setting->decr_bypass = true;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_ice_enable_clocks(struct ice_device *, bool);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+ int err = 0;
+
+ if (vote != ice_dev->bus_vote.curr_vote) {
+ err = msm_bus_scale_client_update_request(
+ ice_dev->bus_vote.client_handle, vote);
+ if (err) {
+ dev_err(ice_dev->pdev,
+ "%s:failed:client_handle=0x%x, vote=%d, err=%d\n",
+ __func__, ice_dev->bus_vote.client_handle,
+ vote, err);
+ goto out;
+ }
+ ice_dev->bus_vote.curr_vote = vote;
+ }
+out:
+ return err;
+}
+
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+ const char *speed_mode)
+{
+ struct device *dev = ice_dev->pdev;
+ struct device_node *np = dev->of_node;
+ int err;
+ const char *key = "qcom,bus-vector-names";
+
+ if (!speed_mode) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ice_dev->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+ err = of_property_match_string(np, key, "MAX");
+ else
+ err = of_property_match_string(np, key, speed_mode);
+out:
+ if (err < 0)
+ dev_err(dev, "%s: Invalid %s mode %d\n",
+ __func__, speed_mode, err);
+ return err;
+}
+
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+ int err = 0;
+ struct msm_bus_scale_pdata *bus_pdata;
+ struct device *dev = ice_dev->pdev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+
+ bus_pdata = msm_bus_cl_get_pdata(pdev);
+ if (!bus_pdata) {
+ dev_err(dev, "%s: failed to get bus vectors\n", __func__);
+ err = -ENODATA;
+ goto out;
+ }
+
+ err = of_property_count_strings(np, "qcom,bus-vector-names");
+ if (err < 0 || err != bus_pdata->num_usecases) {
+ dev_err(dev, "%s: Error = %d with qcom,bus-vector-names\n",
+ __func__, err);
+ goto out;
+ }
+ err = 0;
+
+ ice_dev->bus_vote.client_handle =
+ msm_bus_scale_register_client(bus_pdata);
+ if (!ice_dev->bus_vote.client_handle) {
+ dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
+ __func__);
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* cache the vote index for minimum and maximum bandwidth */
+ ice_dev->bus_vote.min_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+ ice_dev->bus_vote.max_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+out:
+ return err;
+}
+
+#else
+
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+ return 0;
+}
+
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+ const char *speed_mode)
+{
+ return 0;
+}
+
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+ return 0;
+}
+#endif /* CONFIG_MSM_BUS_SCALING */
+
+static int qcom_ice_get_vreg(struct ice_device *ice_dev)
+{
+ int ret = 0;
+
+ if (!ice_dev->is_regulator_available)
+ return 0;
+
+ if (ice_dev->reg)
+ return 0;
+
+ ice_dev->reg = devm_regulator_get(ice_dev->pdev, "vdd-hba");
+ if (IS_ERR(ice_dev->reg)) {
+ ret = PTR_ERR(ice_dev->reg);
+ dev_err(ice_dev->pdev, "%s: %s get failed, err=%d\n",
+ __func__, "vdd-hba-supply", ret);
+ }
+ return ret;
+}
+
+static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
+{
+ u32 regval;
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
+ ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
+ ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
+ regval = qcom_ice_readl(ice_dev,
+ QCOM_ICE_REGS_ADVANCED_CONTROL);
+ regval |= 0x800;
+ qcom_ice_writel(ice_dev, regval,
+ QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /* Ensure register is updated */
+ mb();
+ }
+}
+
+static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
+{
+ u32 regval;
+
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /*
+ * Enable low power mode sequence
+ * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+ */
+ regval |= 0x7000;
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+static void qcom_ice_enable_test_bus_config(struct ice_device *ice_dev)
+{
+ /*
+ * Configure & enable ICE_TEST_BUS_REG to reflect ICE intr lines
+ * MAIN_TEST_BUS_SELECTOR = 0 (ICE_CONFIG)
+ * TEST_BUS_REG_EN = 1 (ENABLE)
+ */
+ u32 regval;
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ return;
+
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+ regval &= 0x0FFFFFFF;
+ /* TBD: replace 0x2 with define in iceregs.h */
+ regval |= 0x2;
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+static void qcom_ice_optimization_enable(struct ice_device *ice_dev)
+{
+ u32 regval;
+
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ regval |= 0xD807100;
+ else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+ regval |= 0x3F007100;
+
+ /* ICE Optimizations Enable Sequence */
+ udelay(5);
+ /* [0]-0, [1]-0, [2]-8, [3]-E, [4]-0, [5]-0, [6]-F, [7]-A */
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+
+ /* ICE HPG requires sleep before writing */
+ udelay(5);
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+ regval = 0;
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP);
+ regval |= 0xF;
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ENDIAN_SWAP);
+ /*
+	 * Ensure previous instructions were completed before issuing the
+ * next ICE commands
+ */
+ mb();
+ }
+}
+
+static int qcom_ice_wait_bist_status(struct ice_device *ice_dev)
+{
+ int count;
+ u32 reg;
+
+ /* Poll until all BIST bits are reset */
+ for (count = 0; count < QCOM_ICE_MAX_BIST_CHECK_COUNT; count++) {
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS);
+ if (!(reg & ICE_BIST_STATUS_MASK))
+ break;
+ udelay(50);
+ }
+
+	if (reg & ICE_BIST_STATUS_MASK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
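
The open-coded loop could equally use the <linux/iopoll.h> helper; a behavior-equivalent sketch, assuming qcom_ice_readl() is a plain readl off ice_dev->mmio and keeping the same 100 x 50us budget:

static int qcom_ice_wait_bist_status_iopoll(struct ice_device *ice_dev)
{
	u32 reg;

	/* returns 0 once all BIST bits clear, -ETIMEDOUT otherwise */
	return readl_poll_timeout(ice_dev->mmio + QCOM_ICE_REGS_BIST_STATUS,
				  reg, !(reg & ICE_BIST_STATUS_MASK),
				  50, 50 * QCOM_ICE_MAX_BIST_CHECK_COUNT);
}
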
+
+static int qcom_ice_enable(struct ice_device *ice_dev)
+{
+ unsigned int reg;
+ int ret = 0;
+
+ if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+ ((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+ (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1)))
+ ret = qcom_ice_wait_bist_status(ice_dev);
+ if (ret) {
+ dev_err(ice_dev->pdev, "BIST status error (%d)\n", ret);
+ return ret;
+ }
+
+	/* Starting with ICE v3, enabling is done at the storage controller (UFS/SDCC) */
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 3)
+ return 0;
+
+ /*
+	 * To enable ICE, perform the following:
+ * 1. Set IGNORE_CONTROLLER_RESET to USE in ICE_RESET register
+ * 2. Disable GLOBAL_BYPASS bit in ICE_CONTROL register
+ */
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET);
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ reg &= 0x0;
+ else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+ reg &= ~0x100;
+
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_RESET);
+
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL);
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ reg &= 0xFFFE;
+ else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+ reg &= ~0x7;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_CONTROL);
+
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+
+ if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+ ((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+ (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
+ if ((reg & 0x80000000) != 0x0) {
+			pr_err("%s: Bypass failed for ice = %p\n",
+ __func__, (void *)ice_dev);
+ WARN_ON(1);
+ }
+ }
+ return 0;
+}
+
+static int qcom_ice_verify_ice(struct ice_device *ice_dev)
+{
+ unsigned int rev;
+ unsigned int maj_rev, min_rev, step_rev;
+
+ rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
+ maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
+ min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
+ step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
+
+ if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
+ pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n",
+ __func__, (unsigned long)ice_dev->mmio,
+ maj_rev, min_rev, step_rev);
+ return -ENODEV;
+ }
+ ice_dev->ice_hw_version = rev;
+
+ dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
+ maj_rev, min_rev, step_rev,
+ ice_dev->mmio);
+
+ return 0;
+}
+
+static void qcom_ice_enable_intr(struct ice_device *ice_dev)
+{
+ unsigned int reg;
+
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+ reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+static void qcom_ice_disable_intr(struct ice_device *ice_dev)
+{
+ unsigned int reg;
+
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+ reg |= QCOM_ICE_NON_SEC_IRQ_MASK;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+ /*
+	 * Ensure previous instructions were completed before issuing the next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+static irqreturn_t qcom_ice_isr(int isr, void *data)
+{
+ irqreturn_t retval = IRQ_NONE;
+ u32 status;
+ struct ice_device *ice_dev = data;
+
+ status = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS);
+ if (status) {
+ ice_dev->error_cb(ice_dev->host_controller_data, status);
+
+ /* Interrupt has been handled. Clear the IRQ */
+ qcom_ice_writel(ice_dev, status, QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
+ /* Ensure instruction is completed */
+ mb();
+ retval = IRQ_HANDLED;
+ }
+ return retval;
+}
+
+static void qcom_ice_parse_ice_instance_type(struct platform_device *pdev,
+ struct ice_device *ice_dev)
+{
+ int ret = -1;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *type;
+
+ ret = of_property_read_string_index(np, "qcom,instance-type", 0, &type);
+ if (ret) {
+ pr_err("%s: Could not get ICE instance type\n", __func__);
+ goto out;
+ }
+ strlcpy(ice_dev->ice_instance_type, type, QCOM_ICE_TYPE_NAME_LEN);
+out:
+ return;
+}
+
+static int qcom_ice_parse_clock_info(struct platform_device *pdev,
+ struct ice_device *ice_dev)
+{
+ int ret = -1, cnt, i, len;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char *name;
+ struct ice_clk_info *clki;
+ u32 *clkfreq = NULL;
+
+ if (!np)
+ goto out;
+
+ cnt = of_property_count_strings(np, "clock-names");
+ if (cnt <= 0) {
+ dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+ __func__);
+ ret = cnt;
+ goto out;
+ }
+
+ if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
+ dev_info(dev, "qcom,op-freq-hz property not specified\n");
+ goto out;
+ }
+
+ len = len/sizeof(*clkfreq);
+ if (len != cnt)
+ goto out;
+
+ clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
+ if (!clkfreq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
+
+ INIT_LIST_HEAD(&ice_dev->clk_list_head);
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(np,
+ "clock-names", i, (const char **)&name);
+ if (ret)
+ goto out;
+
+ clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+ if (!clki) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ clki->max_freq = clkfreq[i];
+ clki->name = kstrdup(name, GFP_KERNEL);
+ list_add_tail(&clki->list, &ice_dev->clk_list_head);
+ }
+out:
+ if (clkfreq)
+ devm_kfree(dev, (void *)clkfreq);
+ return ret;
+}
+
+static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
+ struct ice_device *ice_dev)
+{
+ struct device *dev = &pdev->dev;
+ int rc = -1;
+ int irq;
+
+ ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ice_dev->res) {
+ pr_err("%s: No memory available for IORESOURCE\n", __func__);
+ return -ENOMEM;
+ }
+
+ ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
+ if (IS_ERR(ice_dev->mmio)) {
+ rc = PTR_ERR(ice_dev->mmio);
+ pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
+ goto out;
+ }
+
+ if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
+ pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
+ __func__);
+ ice_dev->is_regulator_available = false;
+ } else {
+ ice_dev->is_regulator_available = true;
+ }
+ ice_dev->is_ice_clk_available = of_property_read_bool(
+ (&pdev->dev)->of_node,
+ "qcom,enable-ice-clk");
+
+ if (ice_dev->is_ice_clk_available) {
+ rc = qcom_ice_parse_clock_info(pdev, ice_dev);
+ if (rc) {
+ pr_err("%s: qcom_ice_parse_clock_info failed (%d)\n",
+ __func__, rc);
+ goto err_dev;
+ }
+ }
+
+	/* ICE interrupts are only relevant for v2.x */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ rc = devm_request_irq(dev, irq, qcom_ice_isr, 0, dev_name(dev),
+ ice_dev);
+ if (rc) {
+ pr_err("%s: devm_request_irq irq=%d failed (%d)\n",
+ __func__, irq, rc);
+ goto err_dev;
+ }
+ ice_dev->irq = irq;
+ pr_info("ICE IRQ = %d\n", ice_dev->irq);
+ } else {
+ dev_dbg(dev, "IRQ resource not available\n");
+ }
+
+ qcom_ice_parse_ice_instance_type(pdev, ice_dev);
+
+ return 0;
+err_dev:
+ if (rc && ice_dev->mmio)
+ devm_iounmap(dev, ice_dev->mmio);
+out:
+ return rc;
+}
+
+/*
+ * An ICE HW instance can exist in UFS or eMMC based storage HW.
+ * Userspace does not know what kind of ICE it is dealing with;
+ * it can find out which storage device it is booting from, but
+ * not all storage types have supported ICE from the beginning.
+ * So an ICE device node is created for userspace to probe
+ * whether ICE exists for that kind of storage.
+ */
+static const struct file_operations qcom_ice_fops = {
+ .owner = THIS_MODULE,
+};
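
Given that rationale, a userspace probe is just an open() on the node; a hypothetical sketch (the node names follow QCOM_UFS_ICE_DEV/QCOM_SDCC_ICE_DEV above):

#include <fcntl.h>
#include <unistd.h>

/* Returns 1 if an ICE instance exists for UFS-backed storage. */
static int ufs_has_ice(void)
{
	int fd = open("/dev/iceufs", O_RDONLY);

	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}
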
+
+static int register_ice_device(struct ice_device *ice_dev)
+{
+ int rc = 0;
+ unsigned int baseminor = 0;
+ unsigned int count = 1;
+ struct device *class_dev;
+ int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
+
+ rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
+ is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+ if (rc < 0) {
+ pr_err("alloc_chrdev_region failed %d for %s\n", rc,
+ is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+ return rc;
+ }
+ ice_dev->driver_class = class_create(THIS_MODULE,
+ is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+ if (IS_ERR(ice_dev->driver_class)) {
+ rc = -ENOMEM;
+ pr_err("class_create failed %d for %s\n", rc,
+ is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+ goto exit_unreg_chrdev_region;
+ }
+ class_dev = device_create(ice_dev->driver_class, NULL,
+ ice_dev->device_no, NULL,
+ is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+
+	if (IS_ERR(class_dev)) {
+		rc = PTR_ERR(class_dev);
+		pr_err("device_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+ goto exit_destroy_class;
+ }
+
+ cdev_init(&ice_dev->cdev, &qcom_ice_fops);
+ ice_dev->cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
+ if (rc < 0) {
+ pr_err("cdev_add failed %d for %s\n", rc,
+ is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+ goto exit_destroy_device;
+ }
+ return 0;
+
+exit_destroy_device:
+ device_destroy(ice_dev->driver_class, ice_dev->device_no);
+
+exit_destroy_class:
+ class_destroy(ice_dev->driver_class);
+
+exit_unreg_chrdev_region:
+ unregister_chrdev_region(ice_dev->device_no, 1);
+ return rc;
+}
+
+static int qcom_ice_probe(struct platform_device *pdev)
+{
+ struct ice_device *ice_dev;
+ int rc = 0;
+
+ if (!pdev) {
+ pr_err("%s: Invalid platform_device passed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ice_dev = kzalloc(sizeof(struct ice_device), GFP_KERNEL);
+
+ if (!ice_dev) {
+ rc = -ENOMEM;
+ pr_err("%s: Error %d allocating memory for ICE device:\n",
+ __func__, rc);
+ goto out;
+ }
+
+ ice_dev->pdev = &pdev->dev;
+ if (!ice_dev->pdev) {
+ rc = -EINVAL;
+ pr_err("%s: Invalid device passed in platform_device\n",
+ __func__);
+ goto err_ice_dev;
+ }
+
+ if (pdev->dev.of_node)
+ rc = qcom_ice_get_device_tree_data(pdev, ice_dev);
+ else {
+ rc = -EINVAL;
+ pr_err("%s: ICE device node not found\n", __func__);
+ }
+
+ if (rc)
+ goto err_ice_dev;
+
+ pr_debug("%s: Registering ICE device\n", __func__);
+ rc = register_ice_device(ice_dev);
+ if (rc) {
+ pr_err("create character device failed.\n");
+ goto err_ice_dev;
+ }
+
+ /*
+	 * Enabling ICE here would be a waste of power; it is enabled
+	 * when the first request for a crypto operation arrives.
+ */
+ ice_dev->is_ice_enabled = false;
+
+ platform_set_drvdata(pdev, ice_dev);
+ list_add_tail(&ice_dev->list, &ice_devices);
+
+ goto out;
+
+err_ice_dev:
+ kfree(ice_dev);
+out:
+ return rc;
+}
+
+static int qcom_ice_remove(struct platform_device *pdev)
+{
+ struct ice_device *ice_dev;
+
+ ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
+
+ if (!ice_dev)
+ return 0;
+
+ qcom_ice_disable_intr(ice_dev);
+
+ device_init_wakeup(&pdev->dev, false);
+ if (ice_dev->mmio)
+ iounmap(ice_dev->mmio);
+
+ list_del_init(&ice_dev->list);
+ kfree(ice_dev);
+
+ return 1;
+}
+
+static int qcom_ice_suspend(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int qcom_ice_restore_config(void)
+{
+ struct scm_desc desc = {0};
+ int ret;
+
+ /*
+	 * TZ checks the KEYS_RAM_RESET_COMPLETED status bit before processing
+	 * the restore config command. This avoids two calls from HLOS to TZ:
+	 * one to check the KEYS_RAM_RESET_COMPLETED status bit and a second
+	 * to restore the config.
+ */
+
+ desc.arginfo = TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID;
+
+ ret = scm_call2(TZ_OS_KS_RESTORE_KEY_ID, &desc);
+
+ if (ret)
+ pr_err("%s: Error: 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
+{
+ struct scm_desc desc = {0};
+ int ret = -1;
+
+ /* For ice 3, key configuration needs to be restored in case of reset */
+
+ desc.arginfo = TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID;
+
+ if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
+ desc.args[0] = QCOM_ICE_SDCC;
+
+ if (!strcmp(ice_dev->ice_instance_type, "ufs"))
+ desc.args[0] = QCOM_ICE_UFS;
+
+ ret = scm_call2(TZ_OS_KS_RESTORE_KEY_CONFIG_ID, &desc);
+
+ if (ret)
+ pr_err("%s: Error: 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+static int qcom_ice_init_clocks(struct ice_device *ice)
+{
+ int ret = -EINVAL;
+ struct ice_clk_info *clki;
+ struct device *dev = ice->pdev;
+ struct list_head *head = &ice->clk_list_head;
+
+ if (!head || list_empty(head)) {
+ dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+ goto out;
+ }
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ clki->clk = devm_clk_get(dev, clki->name);
+ if (IS_ERR(clki->clk)) {
+ ret = PTR_ERR(clki->clk);
+ dev_err(dev, "%s: %s clk get failed, %d\n",
+ __func__, clki->name, ret);
+ goto out;
+ }
+
+ /* Not all clocks would have a rate to be set */
+ ret = 0;
+ if (clki->max_freq) {
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(dev,
+ "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ goto out;
+ }
+ clki->curr_freq = clki->max_freq;
+ dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+ }
+out:
+ return ret;
+}
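+
+/*
+ * Illustration only: qcom_ice_init_clocks() expects the probe path
+ * (qcom_ice_get_device_tree_data()) to have filled ice->clk_list_head from
+ * device tree data. A hypothetical DT fragment that would yield one clock
+ * entry might look like the sketch below; the exact property names are an
+ * assumption for illustration, not a definitive binding.
+ *
+ *	ufs_ice: ufsice@630000 {
+ *		compatible = "qcom,ice";
+ *		reg = <0x630000 0x8000>;
+ *		clock-names = "ufs_core_clk_ice";
+ *		clocks = <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+ *		qcom,op-freq-hz = <300000000>;
+ *		qcom,instance-type = "ufs";
+ *	};
+ */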
+
+static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
+{
+ int ret = 0;
+ struct ice_clk_info *clki;
+ struct device *dev = ice->pdev;
+ struct list_head *head = &ice->clk_list_head;
+
+ if (!head || list_empty(head)) {
+ dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!ice->is_ice_clk_available) {
+ dev_err(dev, "%s:ICE Clock not available\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(clki, head, list) {
+ if (!clki->name)
+ continue;
+
+ if (enable)
+ ret = clk_prepare_enable(clki->clk);
+ else
+ clk_disable_unprepare(clki->clk);
+
+ if (ret) {
+ dev_err(dev, "Unable to %s ICE core clk\n",
+ enable?"enable":"disable");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
+{
+ /* We need to enable source for ICE secure interrupts */
+ int ret = 0;
+ u32 regval;
+
+ regval = scm_io_read((unsigned long)ice_dev->res +
+ QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
+
+ regval &= ~QCOM_ICE_SEC_IRQ_MASK;
+ ret = scm_io_write((unsigned long)ice_dev->res +
+ QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
+
+ /*
+ * Ensure the previous instructions were completed before issuing the
+ * next ICE initialization/optimization instruction
+ */
+ mb();
+
+ if (ret)
+ pr_err("%s: failed(0x%x) to init secure ICE config\n",
+ __func__, ret);
+ return ret;
+}
+
+static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
+{
+ int ret = 0, scm_ret = 0;
+
+ /* scm command buffer structure */
+ struct qcom_scm_cmd_buf {
+ unsigned int device_id;
+ unsigned int spare;
+ } cbuf = {0};
+
+ /*
+ * Ideally, we should check the ICE version to decide whether to
+ * proceed. Since the version is not available when this function is
+ * called, we depend upon is_ice_clk_available to decide
+ */
+ if (ice_dev->is_ice_clk_available)
+ goto out;
+
+ /*
+ * Store dev_id in ice_device structure so that emmc/ufs cases can be
+ * handled properly
+ */
+ #define RESTORE_SEC_CFG_CMD 0x2
+ #define ICE_TZ_DEV_ID 20
+
+ cbuf.device_id = ICE_TZ_DEV_ID;
+ ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
+ if (ret || scm_ret) {
+ pr_err("%s: failed, ret %d scm_ret %d\n",
+ __func__, ret, scm_ret);
+ if (!ret)
+ ret = scm_ret;
+ }
+out:
+
+ return ret;
+}
+
+static int qcom_ice_finish_init(struct ice_device *ice_dev)
+{
+ unsigned int reg;
+ int err = 0;
+
+ if (!ice_dev) {
+ pr_err("%s: Null data received\n", __func__);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (ice_dev->is_ice_clk_available) {
+ err = qcom_ice_init_clocks(ice_dev);
+ if (err)
+ goto out;
+
+ err = qcom_ice_bus_register(ice_dev);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * It is possible that ICE device is not probed when host is probed
+ * This would cause host probe to be deferred. When probe for host is
+ * deferred, it can cause power collapse for host and that can wipe
+ * configurations of host & ice. It is prudent to restore the config
+ */
+ err = qcom_ice_update_sec_cfg(ice_dev);
+ if (err)
+ goto out;
+
+ err = qcom_ice_verify_ice(ice_dev);
+ if (err)
+ goto out;
+
+ /*
+ * If ICE_DISABLE_FUSE is blown, return immediately.
+ * Currently, FORCE HW keys are also disabled, since there is no
+ * use case for them in either FDE or PFE
+ */
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
+ reg &= (ICE_FUSE_SETTING_MASK |
+ ICE_FORCE_HW_KEY0_SETTING_MASK |
+ ICE_FORCE_HW_KEY1_SETTING_MASK);
+
+ if (reg) {
+ ice_dev->is_ice_disable_fuse_blown = true;
+ pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
+ __func__);
+ err = -EPERM;
+ goto out;
+ }
+
+ /* TZ side of ICE driver would handle secure init of ICE HW from v2 */
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
+ !qcom_ice_secure_ice_init(ice_dev)) {
+ pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
+ err = -EFAULT;
+ goto out;
+ }
+
+ qcom_ice_low_power_mode_enable(ice_dev);
+ qcom_ice_optimization_enable(ice_dev);
+ qcom_ice_config_proc_ignore(ice_dev);
+ qcom_ice_enable_test_bus_config(ice_dev);
+ qcom_ice_enable(ice_dev);
+ ice_dev->is_ice_enabled = true;
+ qcom_ice_enable_intr(ice_dev);
+
+out:
+ return err;
+}
+
+static int qcom_ice_init(struct platform_device *pdev,
+ void *host_controller_data,
+ ice_error_cb error_cb)
+{
+ /*
+ * A completion event for host controller would be triggered upon
+ * initialization completion
+ * When ICE is initialized, it would put ICE into Global Bypass mode
+ * When any request for data transfer is received, it would enable
+ * the ICE for that particular request
+ */
+ struct ice_device *ice_dev;
+
+ ice_dev = platform_get_drvdata(pdev);
+ if (!ice_dev) {
+ pr_err("%s: invalid device\n", __func__);
+ return -EINVAL;
+ }
+
+ ice_dev->error_cb = error_cb;
+ ice_dev->host_controller_data = host_controller_data;
+
+ return qcom_ice_finish_init(ice_dev);
+}
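+
+/*
+ * Usage sketch (not part of this driver): a storage host controller is
+ * expected to reach qcom_ice_init() through the variant ops table rather
+ * than calling it directly. The callback signature below is an assumption
+ * for illustration; the authoritative ice_error_cb typedef lives in the ICE
+ * header.
+ *
+ *	static void my_ice_error_cb(void *data, u32 error)
+ *	{
+ *		struct my_host *host = data;	// hypothetical host type
+ *
+ *		dev_err(host->dev, "ICE error 0x%x\n", error);
+ *	}
+ *
+ *	err = vops->init(ice_pdev, host, my_ice_error_cb);
+ */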
+
+static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
+{
+ int err = 0;
+
+ if (ice_dev->is_ice_disable_fuse_blown) {
+ err = -EPERM;
+ goto out;
+ }
+
+ if (ice_dev->is_ice_enabled) {
+ /*
+ * ICE resets into global bypass mode with optimization and
+ * low power mode disabled. Hence we need to redo those
+ * sequences.
+ */
+ qcom_ice_low_power_mode_enable(ice_dev);
+
+ qcom_ice_enable_test_bus_config(ice_dev);
+
+ qcom_ice_optimization_enable(ice_dev);
+ qcom_ice_enable(ice_dev);
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+ /*
+ * When ICE resets, it wipes all keys from its LUT.
+ * The ICE driver should call TZ to restore the keys
+ */
+ if (qcom_ice_restore_config()) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * ICE loses its key configuration when UFS is reset;
+ * restore it
+ */
+ } else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
+ err = qcom_ice_restore_key_config(ice_dev);
+ if (err)
+ goto out;
+
+ /*
+ * for PFE case, clear the cached ICE key table,
+ * this will force keys to be reconfigured
+ * per each next transaction
+ */
+ pfk_clear_on_reset();
+ }
+ }
+
+ ice_dev->ice_reset_complete_time = ktime_get();
+out:
+ return err;
+}
+
+static int qcom_ice_resume(struct platform_device *pdev)
+{
+ /*
+ * ICE is power collapsed when the storage controller is power
+ * collapsed. The ICE resume function is responsible for the ICE
+ * HW enabling sequence and key restoration. A completion event
+ * should be triggered upon resume completion; the storage driver
+ * will be fully operational only after receiving this event
+ */
+ struct ice_device *ice_dev;
+
+ ice_dev = platform_get_drvdata(pdev);
+
+ if (!ice_dev)
+ return -EINVAL;
+
+ if (ice_dev->is_ice_clk_available) {
+ /*
+ * Storage is calling this function after power collapse which
+ * would put ICE into GLOBAL_BYPASS mode. Make sure to enable
+ * ICE
+ */
+ qcom_ice_enable(ice_dev);
+ }
+
+ return 0;
+}
+
+static void qcom_ice_dump_test_bus(struct ice_device *ice_dev)
+{
+ u32 reg = 0x1;
+ u32 val;
+ u8 bus_selector;
+ u8 stream_selector;
+
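+ /*
+ * TEST_BUS_CONTROL encoding (as used below): bit 0 enables the test
+ * bus and bits 31:28 select the bus. Selector 0xD routes the stream
+ * datapath test bus, which additionally takes a stream selector in
+ * bits 19:16; it is skipped in this loop and dumped separately below.
+ */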
+ pr_err("ICE TEST BUS DUMP:\n");
+
+ for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) {
+ reg = 0x1; /* enable test bus */
+ reg |= bus_selector << 28;
+ if (bus_selector == 0xD)
+ continue;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+ /*
+ * make sure test bus selector is written before reading
+ * the test bus register
+ */
+ mb();
+ val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+ pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+ reg, val);
+ }
+
+ pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
+ for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
+ reg = 0xD0000001; /* enable stream test bus */
+ reg |= stream_selector << 16;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+ /*
+ * make sure test bus selector is written before reading
+ * the test bus register
+ */
+ mb();
+ val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+ pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+ reg, val);
+ }
+}
+
+static void qcom_ice_debug(struct platform_device *pdev)
+{
+ struct ice_device *ice_dev;
+
+ if (!pdev) {
+ pr_err("%s: Invalid params passed\n", __func__);
+ goto out;
+ }
+
+ ice_dev = platform_get_drvdata(pdev);
+
+ if (!ice_dev) {
+ pr_err("%s: No ICE device available\n", __func__);
+ goto out;
+ }
+
+ if (!ice_dev->is_ice_enabled) {
+ pr_err("%s: ICE device is not enabled\n", __func__);
+ goto out;
+ }
+
+ pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
+ ice_dev->ice_instance_type, ice_dev);
+
+ pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET));
+
+ pr_err("%s: ICE Version: 0x%08x | ICE FUSE: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING));
+
+ pr_err("%s: ICE Param1: 0x%08x | ICE Param2: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_1),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_2));
+
+ pr_err("%s: ICE Param3: 0x%08x | ICE Param4: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_3),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_4));
+
+ pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_5),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS));
+
+ pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_CLR));
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
+ pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev,
+ QCOM_ICE_INVALID_CCFG_ERR_STTS));
+ }
+
+ if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+ ((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+ (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+ pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS));
+ }
+
+ pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP));
+
+ pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2));
+
+ pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2));
+
+ pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS1),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS2));
+
+ pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS3),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS4));
+
+ pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS1),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS2));
+
+ pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS3),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS4));
+
+ pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB));
+
+ pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB));
+
+ pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB));
+
+ pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB));
+
+ pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB));
+
+ pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB));
+
+ pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB));
+
+ pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB));
+
+ pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB));
+
+ pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
+ ice_dev->ice_instance_type,
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB),
+ qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB));
+
+ qcom_ice_dump_test_bus(ice_dev);
+ pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
+ ice_dev->ice_instance_type,
+ (unsigned long long)ice_dev->ice_reset_start_time.tv64,
+ (unsigned long long)ice_dev->ice_reset_complete_time.tv64);
+
+ if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
+ ice_dev->ice_reset_start_time)) > 0)
+ pr_err("%s: Time taken for reset: %lu\n",
+ ice_dev->ice_instance_type,
+ (unsigned long)ktime_to_us(ktime_sub(
+ ice_dev->ice_reset_complete_time,
+ ice_dev->ice_reset_start_time)));
+out:
+ return;
+}
+
+static int qcom_ice_reset(struct platform_device *pdev)
+{
+ struct ice_device *ice_dev;
+
+ ice_dev = platform_get_drvdata(pdev);
+ if (!ice_dev) {
+ pr_err("%s: INVALID ice_dev\n", __func__);
+ return -EINVAL;
+ }
+
+ ice_dev->ice_reset_start_time = ktime_get();
+
+ return qcom_ice_finish_power_collapse(ice_dev);
+}
+
+static int qcom_ice_config_start(struct platform_device *pdev,
+ struct request *req,
+ struct ice_data_setting *setting, bool async)
+{
+ struct ice_crypto_setting *crypto_data;
+ struct ice_crypto_setting pfk_crypto_data = {0};
+ union map_info *info;
+ int ret = 0;
+ bool is_pfe = false;
+
+ if (!pdev || !req || !setting) {
+ pr_err("%s: Invalid params passed\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * It is not an error to have a request with no bio.
+ * Such requests must bypass ICE, so set bypass first and then
+ * return if no bio is attached to the request
+ */
+ setting->encr_bypass = true;
+ setting->decr_bypass = true;
+
+ if (!req->bio) {
+ /* It is not an error to have a request with no bio */
+ return 0;
+ }
+
+ ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
+ if (is_pfe) {
+ if (ret) {
+ if (ret != -EBUSY && ret != -EAGAIN)
+ pr_err("%s error %d while configuring ice key for PFE\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return qti_ice_setting_config(req, pdev,
+ &pfk_crypto_data, setting);
+ }
+
+ /*
+ * The info field in req->end_io_data could be used by multiple dm
+ * or non-dm entities. To ensure that we are running the operation
+ * on a dm based request, check the BIO_INLINECRYPT flag
+ */
+ if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+ info = dm_get_rq_mapinfo(req);
+ if (!info) {
+ pr_debug("%s info not available in request\n",
+ __func__);
+ return 0;
+ }
+
+ crypto_data = (struct ice_crypto_setting *)info->ptr;
+ if (!crypto_data) {
+ pr_err("%s crypto_data not available in request\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return qti_ice_setting_config(req, pdev,
+ crypto_data, setting);
+ }
+
+ /*
+ * It is not an error. If the target is not req-crypt based, all
+ * requests from the storage driver would come here to check if
+ * any ICE setting is required
+ */
+ return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config_start);
+
+static int qcom_ice_config_end(struct request *req)
+{
+ int ret = 0;
+ bool is_pfe = false;
+
+ if (!req) {
+ pr_err("%s: Invalid params passed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!req->bio) {
+ /* It is not an error to have a request with no bio */
+ return 0;
+ }
+
+ ret = pfk_load_key_end(req->bio, &is_pfe);
+ if (is_pfe) {
+ if (ret != 0)
+ pr_err("%s error %d while end configuring ice key for PFE\n",
+ __func__, ret);
+ return ret;
+ }
+
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config_end);
+
+
+static int qcom_ice_status(struct platform_device *pdev)
+{
+ struct ice_device *ice_dev;
+ unsigned int test_bus_reg_status;
+
+ if (!pdev) {
+ pr_err("%s: Invalid params passed\n", __func__);
+ return -EINVAL;
+ }
+
+ ice_dev = platform_get_drvdata(pdev);
+
+ if (!ice_dev)
+ return -ENODEV;
+
+ if (!ice_dev->is_ice_enabled)
+ return -ENODEV;
+
+ test_bus_reg_status = qcom_ice_readl(ice_dev,
+ QCOM_ICE_REGS_TEST_BUS_REG);
+
+ return !!(test_bus_reg_status & QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR);
+
+}
+
+struct qcom_ice_variant_ops qcom_ice_ops = {
+ .name = "qcom",
+ .init = qcom_ice_init,
+ .reset = qcom_ice_reset,
+ .resume = qcom_ice_resume,
+ .suspend = qcom_ice_suspend,
+ .config_start = qcom_ice_config_start,
+ .config_end = qcom_ice_config_end,
+ .status = qcom_ice_status,
+ .debug = qcom_ice_debug,
+};
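+
+/*
+ * Consumer sketch: storage drivers are expected to obtain this table via
+ * qcom_ice_get_variant_ops() and the matching platform device via
+ * qcom_ice_get_pdevice() (both below), then drive ICE through the ops.
+ * Hypothetical call sequence, error handling elided:
+ *
+ *	struct qcom_ice_variant_ops *vops = qcom_ice_get_variant_ops(node);
+ *	struct platform_device *ice_pdev = qcom_ice_get_pdevice(node);
+ *
+ *	if (vops && !IS_ERR_OR_NULL(ice_pdev))
+ *		vops->init(ice_pdev, host, err_cb);
+ */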
+
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
+{
+ struct platform_device *ice_pdev = NULL;
+ struct ice_device *ice_dev = NULL;
+
+ if (!node) {
+ pr_err("%s: invalid node %p", __func__, node);
+ goto out;
+ }
+
+ if (!of_device_is_available(node)) {
+ pr_err("%s: device unavailable\n", __func__);
+ goto out;
+ }
+
+ if (list_empty(&ice_devices)) {
+ pr_err("%s: invalid device list\n", __func__);
+ ice_pdev = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ list_for_each_entry(ice_dev, &ice_devices, list) {
+ if (ice_dev->pdev->of_node == node) {
+ pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ break;
+ }
+ }
+
+ ice_pdev = to_platform_device(ice_dev->pdev);
+ pr_info("%s: matching platform device %p\n", __func__, ice_pdev);
+out:
+ return ice_pdev;
+}
+
+static struct ice_device *get_ice_device_from_storage_type
+ (const char *storage_type)
+{
+ struct ice_device *ice_dev = NULL;
+
+ if (list_empty(&ice_devices)) {
+ pr_err("%s: invalid device list\n", __func__);
+ ice_dev = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ list_for_each_entry(ice_dev, &ice_devices, list) {
+ if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
+ pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ break;
+ }
+ }
+out:
+ return ice_dev;
+}
+
+static int enable_ice_setup(struct ice_device *ice_dev)
+{
+ int ret = -1, vote;
+
+ /* Setup Regulator */
+ if (ice_dev->is_regulator_available) {
+ if (qcom_ice_get_vreg(ice_dev)) {
+ pr_err("%s: Could not get regulator\n", __func__);
+ goto out;
+ }
+ ret = regulator_enable(ice_dev->reg);
+ if (ret) {
+ pr_err("%s:%p: Could not enable regulator\n",
+ __func__, ice_dev);
+ goto out;
+ }
+ }
+
+ /* Setup Clocks */
+ ret = qcom_ice_enable_clocks(ice_dev, true);
+ if (ret) {
+ pr_err("%s:%p:%s Could not enable clocks\n", __func__,
+ ice_dev, ice_dev->ice_instance_type);
+ goto out_reg;
+ }
+
+ /* Setup Bus Vote */
+ vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+ if (vote < 0) {
+ ret = vote;
+ goto out_clocks;
+ }
+
+ ret = qcom_ice_set_bus_vote(ice_dev, vote);
+ if (ret) {
+ pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+ goto out_clocks;
+ }
+
+ return ret;
+
+out_clocks:
+ qcom_ice_enable_clocks(ice_dev, false);
+out_reg:
+ if (ice_dev->is_regulator_available) {
+ if (qcom_ice_get_vreg(ice_dev)) {
+ pr_err("%s: Could not get regulator\n", __func__);
+ goto out;
+ }
+ if (regulator_disable(ice_dev->reg))
+ pr_err("%s:%pK: Could not disable regulator\n",
+ __func__, ice_dev);
+ }
+out:
+ return ret;
+}
+
+static int disable_ice_setup(struct ice_device *ice_dev)
+{
+ int ret = -1, vote;
+
+ /* Setup Bus Vote */
+ vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+ if (vote < 0) {
+ pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev);
+ goto out_disable_clocks;
+ }
+
+ ret = qcom_ice_set_bus_vote(ice_dev, vote);
+ if (ret)
+ pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+
+out_disable_clocks:
+
+ /* Setup Clocks */
+ if (qcom_ice_enable_clocks(ice_dev, false))
+ pr_err("%s:%p:%s Could not disable clocks\n", __func__,
+ ice_dev, ice_dev->ice_instance_type);
+
+ /* Setup Regulator */
+ if (ice_dev->is_regulator_available) {
+ if (qcom_ice_get_vreg(ice_dev)) {
+ pr_err("%s: Could not get regulator\n", __func__);
+ goto out;
+ }
+ ret = regulator_disable(ice_dev->reg);
+ if (ret) {
+ pr_err("%s:%p: Could not disable regulator\n",
+ __func__, ice_dev);
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+ struct ice_device *ice_dev = NULL;
+
+ ice_dev = get_ice_device_from_storage_type(storage_type);
+ if (ice_dev == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (!ice_dev)
+ return -ENODEV;
+
+ if (enable)
+ return enable_ice_setup(ice_dev);
+ else
+ return disable_ice_setup(ice_dev);
+}
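+
+/*
+ * Example (sketch): platform storage code can bracket its power transitions
+ * with this helper; "ufs" and "sdcc" are the ice_instance_type strings
+ * parsed from DT.
+ *
+ *	if (qcom_ice_setup_ice_hw("ufs", true))
+ *		pr_err("could not enable UFS ICE resources\n");
+ *	...
+ *	qcom_ice_setup_ice_hw("ufs", false);
+ */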
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
+{
+ return &qcom_ice_ops;
+}
+EXPORT_SYMBOL(qcom_ice_get_variant_ops);
+
+/* The following table is used to match the device with the driver from the dts file */
+static const struct of_device_id qcom_ice_match[] = {
+ { .compatible = "qcom,ice" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qcom_ice_match);
+
+static struct platform_driver qcom_ice_driver = {
+ .probe = qcom_ice_probe,
+ .remove = qcom_ice_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qcom_ice",
+ .of_match_table = qcom_ice_match,
+ },
+};
+module_platform_driver(qcom_ice_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Inline Crypto Engine driver");
diff --git a/drivers/crypto/msm/iceregs.h b/drivers/crypto/msm/iceregs.h
new file mode 100644
index 0000000..4b63e7a
--- /dev/null
+++ b/drivers/crypto/msm/iceregs.h
@@ -0,0 +1,159 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+
+/* Register bits for ICE version */
+#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03
+
+#define ICE_CORE_STEP_REV_MASK 0xFFFF
+#define ICE_CORE_STEP_REV 0 /* bit 15-0 */
+#define ICE_CORE_MAJOR_REV_MASK 0xFF000000
+#define ICE_CORE_MAJOR_REV 24 /* bit 31-24 */
+#define ICE_CORE_MINOR_REV_MASK 0xFF0000
+#define ICE_CORE_MINOR_REV 16 /* bit 23-16 */
+
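+/*
+ * Illustration: decoding QCOM_ICE_REGS_VERSION with the fields above. The
+ * driver wraps this in its ICE_REV() macro; the open-coded form is shown
+ * here only to document the layout.
+ *
+ *	major = (ver & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
+ *	minor = (ver & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
+ *	step  = (ver & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
+ */
+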
+#define ICE_BIST_STATUS_MASK (0xF0000000) /* bits 28-31 */
+
+#define ICE_FUSE_SETTING_MASK 0x1
+#define ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
+#define ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+
+/* QCOM ICE Registers from SWI */
+#define QCOM_ICE_REGS_CONTROL 0x0000
+#define QCOM_ICE_REGS_RESET 0x0004
+#define QCOM_ICE_REGS_VERSION 0x0008
+#define QCOM_ICE_REGS_FUSE_SETTING 0x0010
+#define QCOM_ICE_REGS_PARAMETERS_1 0x0014
+#define QCOM_ICE_REGS_PARAMETERS_2 0x0018
+#define QCOM_ICE_REGS_PARAMETERS_3 0x001C
+#define QCOM_ICE_REGS_PARAMETERS_4 0x0020
+#define QCOM_ICE_REGS_PARAMETERS_5 0x0024
+
+
+/* QCOM ICE v3.X only */
+#define QCOM_ICE_GENERAL_ERR_STTS 0x0040
+#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030
+#define QCOM_ICE_GENERAL_ERR_MASK 0x0044
+
+
+/* QCOM ICE v2.X only */
+#define QCOM_ICE_REGS_NON_SEC_IRQ_STTS 0x0040
+#define QCOM_ICE_REGS_NON_SEC_IRQ_MASK 0x0044
+
+
+#define QCOM_ICE_REGS_NON_SEC_IRQ_CLR 0x0048
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1 0x0050
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2 0x0054
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1 0x0058
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2 0x005C
+#define QCOM_ICE_REGS_STREAM1_BIST_ERROR_VEC 0x0060
+#define QCOM_ICE_REGS_STREAM2_BIST_ERROR_VEC 0x0064
+#define QCOM_ICE_REGS_STREAM1_BIST_FINISH_VEC 0x0068
+#define QCOM_ICE_REGS_STREAM2_BIST_FINISH_VEC 0x006C
+#define QCOM_ICE_REGS_BIST_STATUS 0x0070
+#define QCOM_ICE_REGS_BYPASS_STATUS 0x0074
+#define QCOM_ICE_REGS_ADVANCED_CONTROL 0x1000
+#define QCOM_ICE_REGS_ENDIAN_SWAP 0x1004
+#define QCOM_ICE_REGS_TEST_BUS_CONTROL 0x1010
+#define QCOM_ICE_REGS_TEST_BUS_REG 0x1014
+#define QCOM_ICE_REGS_STREAM1_COUNTERS1 0x1100
+#define QCOM_ICE_REGS_STREAM1_COUNTERS2 0x1104
+#define QCOM_ICE_REGS_STREAM1_COUNTERS3 0x1108
+#define QCOM_ICE_REGS_STREAM1_COUNTERS4 0x110C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB 0x1110
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB 0x1114
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB 0x1118
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB 0x111C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB 0x1120
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB 0x1124
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB 0x1128
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB 0x112C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB 0x1130
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB 0x1134
+#define QCOM_ICE_REGS_STREAM2_COUNTERS1 0x1200
+#define QCOM_ICE_REGS_STREAM2_COUNTERS2 0x1204
+#define QCOM_ICE_REGS_STREAM2_COUNTERS3 0x1208
+#define QCOM_ICE_REGS_STREAM2_COUNTERS4 0x120C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB 0x1210
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB 0x1214
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB 0x1218
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB 0x121C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB 0x1220
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB 0x1224
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB 0x1228
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB 0x122C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB 0x1230
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB 0x1234
+
+#define QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE (1L << 0)
+#define QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE (1L << 1)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_LBO (1L << 2)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_LBO (1L << 3)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUN (1L << 4)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUN (1L << 5)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUS (1L << 6)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUS (1L << 7)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DBO (1L << 8)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DBO (1L << 9)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL (1L << 10)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL (1L << 11)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX (1L << 12)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX (1L << 13)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS (1L << 14)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS (1L << 15)
+
+#define QCOM_ICE_NON_SEC_IRQ_MASK \
+ (QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE |\
+ QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_LBO |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_LBO |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_DUN |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DUN |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_DUS |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DUS |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_DBO |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DBO |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS)
+
+/* QCOM ICE registers from secure side */
+#define QCOM_ICE_TEST_BUS_REG_SECURE_INTR (1L << 28)
+#define QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR (1L << 2)
+
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_STTS 0x2050
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK 0x2054
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_CLR 0x2058
+
+#define QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED (1L << 0)
+#define QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED (1L << 1)
+#define QCOM_ICE_QCOMC_DBG_OPEN_EVENT (1L << 30)
+#define QCOM_ICE_KEYS_RAM_RESET_COMPLETED (1L << 31)
+
+#define QCOM_ICE_SEC_IRQ_MASK \
+ (QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED |\
+ QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED |\
+ QCOM_ICE_QCOMC_DBG_OPEN_EVENT | \
+ QCOM_ICE_KEYS_RAM_RESET_COMPLETED)
+
+
+#define qcom_ice_writel(ice, val, reg) \
+ writel_relaxed((val), (ice)->mmio + (reg))
+#define qcom_ice_readl(ice, reg) \
+ readl_relaxed((ice)->mmio + (reg))
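+
+/*
+ * Note: both accessors are intentionally *_relaxed and impose no ordering.
+ * Callers that need a write to complete before a subsequent read (e.g. the
+ * test bus dump in the ICE driver) must add their own barrier:
+ *
+ *	qcom_ice_writel(ice, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+ *	mb();
+ *	val = qcom_ice_readl(ice, QCOM_ICE_REGS_TEST_BUS_REG);
+ */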
+
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_ */
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
new file mode 100644
index 0000000..3a2a51d
--- /dev/null
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -0,0 +1,974 @@
+/* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* QTI Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cache.h>
+
+
+#include <linux/qcota.h>
+#include "qce.h"
+#include "qce_ota.h"
+
+enum qce_ota_oper_enum {
+ QCE_OTA_F8_OPER = 0,
+ QCE_OTA_MPKT_F8_OPER = 1,
+ QCE_OTA_F9_OPER = 2,
+ QCE_OTA_VAR_MPKT_F8_OPER = 3,
+ QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+ struct list_head rlist;
+ struct completion complete;
+ int err;
+ enum qce_ota_oper_enum op;
+ union {
+ struct qce_f9_req f9_req;
+ struct qce_f8_req f8_req;
+ struct qce_f8_multi_pkt_req f8_mp_req;
+ struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
+ } req;
+ unsigned int steps;
+ struct ota_qce_dev *pqce;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the OTA
+ * crypto engine from userspace.
+ */
+
+
+#define QCOTA_DEV "qcota"
+
+
+struct ota_dev_control {
+
+ /* misc device */
+ struct miscdevice miscdevice;
+ struct list_head ready_commands;
+ unsigned int magic;
+ struct list_head qce_dev;
+ spinlock_t lock;
+ struct mutex register_lock;
+ bool registered;
+ uint32_t total_units;
+};
+
+struct ota_qce_dev {
+ struct list_head qlist;
+ /* qce handle */
+ void *qce;
+
+ /* platform device */
+ struct platform_device *pdev;
+
+ struct ota_async_req *active_command;
+ struct tasklet_struct done_tasklet;
+ struct ota_dev_control *podev;
+ uint32_t unit;
+ u64 total_req;
+ u64 err_req;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
+
+static const struct file_operations qcota_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qcota_ioctl,
+ .open = qcota_open,
+ .release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev = {
+ .miscdevice = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qcota0",
+ .fops = &qcota_fops,
+ },
+ .magic = OTA_MAGIC,
+};
+
+#define DEBUG_MAX_FNAME 16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+ u64 f8_req;
+ u64 f8_mp_req;
+ u64 f8_v_mp_req;
+ u64 f9_req;
+ u64 f8_op_success;
+ u64 f8_op_fail;
+ u64 f8_mp_op_success;
+ u64 f8_mp_op_fail;
+ u64 f8_v_mp_op_success;
+ u64 f8_v_mp_op_fail;
+ u64 f9_op_success;
+ u64 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota;
+
+static struct ota_dev_control *qcota_control(void)
+{
+
+ return &qcota_dev;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+ struct ota_dev_control *podev;
+
+ podev = qcota_control();
+ if (podev == NULL) {
+ pr_err("%s: no such device %d\n", __func__,
+ MINOR(inode->i_rdev));
+ return -ENOENT;
+ }
+
+ file->private_data = podev;
+
+ return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+ struct ota_dev_control *podev;
+
+ podev = file->private_data;
+
+ if (podev != NULL && podev->magic != OTA_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ }
+
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static bool _next_v_mp_req(struct ota_async_req *areq)
+{
+ unsigned char *p;
+
+ if (areq->err)
+ return false;
+ if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
+ return false;
+
+ p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
+ p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
+ p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
+
+ areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
+ areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
+ areq->req.f8_v_mp_req.qce_f8_req.data_len =
+ areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
+
+ areq->req.f8_v_mp_req.qce_f8_req.count_c++;
+ return true;
+}
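+
+/*
+ * Buffer layout assumed by _next_v_mp_req(): the QCOTA_F8_V_MPKT_REQ ioctl
+ * handler packs every packet of a variable multi-packet F8 request into one
+ * kernel buffer, each packet starting on an L1 cache line boundary. For two
+ * packets of 100 and 60 bytes with 64-byte cache lines:
+ *
+ *	offset 0:   packet 0 (100 bytes)
+ *	offset 128: packet 1 (60 bytes), since ALIGN(100, 64) == 128
+ */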
+
+static void req_done(unsigned long data)
+{
+ struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
+ struct ota_dev_control *podev = pqce->podev;
+ struct ota_async_req *areq;
+ unsigned long flags;
+ struct ota_async_req *new_req = NULL;
+ int ret = 0;
+ bool schedule = true;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ areq = pqce->active_command;
+ if (unlikely(areq == NULL))
+ pr_err("ota_crypto: req_done, no active request\n");
+ else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
+ if (_next_v_mp_req(areq)) {
+ /* execute next subcommand */
+ spin_unlock_irqrestore(&podev->lock, flags);
+ ret = start_req(pqce, areq);
+ if (unlikely(ret)) {
+ areq->err = ret;
+ schedule = true;
+ spin_lock_irqsave(&podev->lock, flags);
+ } else {
+ areq = NULL;
+ schedule = false;
+ }
+ } else {
+ /* done with this variable mp req */
+ schedule = true;
+ }
+ }
+ while (schedule) {
+ if (!list_empty(&podev->ready_commands)) {
+ new_req = container_of(podev->ready_commands.next,
+ struct ota_async_req, rlist);
+ list_del(&new_req->rlist);
+ pqce->active_command = new_req;
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ new_req->err = 0;
+ /* start a new request */
+ ret = start_req(pqce, new_req);
+ if (unlikely(new_req && ret)) {
+ new_req->err = ret;
+ complete(&new_req->complete);
+ ret = 0;
+ new_req = NULL;
+ spin_lock_irqsave(&podev->lock, flags);
+ } else {
+ schedule = false;
+ }
+ } else {
+ pqce->active_command = NULL;
+ spin_unlock_irqrestore(&podev->lock, flags);
+ schedule = false;
+ }
+ }
+ if (areq)
+ complete(&areq->complete);
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+ int ret)
+{
+ struct ota_async_req *areq = (struct ota_async_req *) cookie;
+ struct ota_qce_dev *pqce;
+
+ pqce = areq->pqce;
+ areq->req.f9_req.mac_i = *((uint32_t *)icv);
+
+ if (ret) {
+ pqce->err_req++;
+ areq->err = -ENXIO;
+ } else
+ areq->err = 0;
+
+ tasklet_schedule(&pqce->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+ int ret)
+{
+ struct ota_async_req *areq = (struct ota_async_req *) cookie;
+ struct ota_qce_dev *pqce;
+
+ pqce = areq->pqce;
+
+ if (ret) {
+ pqce->err_req++;
+ areq->err = -ENXIO;
+ } else {
+ areq->err = 0;
+ }
+
+ tasklet_schedule(&pqce->done_tasklet);
+}
+
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
+{
+ struct qce_f9_req *pf9;
+ struct qce_f8_multi_pkt_req *p_mp_f8;
+ struct qce_f8_req *pf8;
+ int ret = 0;
+
+ /* command should be on the podev->active_command */
+ areq->pqce = pqce;
+
+ switch (areq->op) {
+ case QCE_OTA_F8_OPER:
+ pf8 = &areq->req.f8_req;
+ ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+ break;
+ case QCE_OTA_MPKT_F8_OPER:
+ p_mp_f8 = &areq->req.f8_mp_req;
+ ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
+ break;
+
+ case QCE_OTA_F9_OPER:
+ pf9 = &areq->req.f9_req;
+ ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
+ break;
+
+ case QCE_OTA_VAR_MPKT_F8_OPER:
+ pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
+ ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ areq->err = ret;
+ pqce->total_req++;
+ if (ret)
+ pqce->err_req++;
+ return ret;
+}
+
+static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
+{
+ /* must be called with podev->lock held */
+ struct ota_qce_dev *p;
+
+ if (unlikely(list_empty(&podev->qce_dev))) {
+ pr_err("%s: no valid qce to schedule\n", __func__);
+ return NULL;
+ }
+
+ list_for_each_entry(p, &podev->qce_dev, qlist) {
+ if (p->active_command == NULL)
+ return p;
+ }
+ return NULL;
+}
+
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct qcota_stat *pstat;
+ struct ota_qce_dev *pqce;
+
+ areq->err = 0;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ pqce = schedule_qce(podev);
+ if (pqce) {
+ pqce->active_command = areq;
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ ret = start_req(pqce, areq);
+ if (ret != 0) {
+ spin_lock_irqsave(&podev->lock, flags);
+ pqce->active_command = NULL;
+ spin_unlock_irqrestore(&podev->lock, flags);
+ }
+
+ } else {
+ list_add_tail(&areq->rlist, &podev->ready_commands);
+ spin_unlock_irqrestore(&podev->lock, flags);
+ }
+
+ if (ret == 0)
+ wait_for_completion(&areq->complete);
+
+ pstat = &_qcota_stat;
+ switch (areq->op) {
+ case QCE_OTA_F8_OPER:
+ if (areq->err)
+ pstat->f8_op_fail++;
+ else
+ pstat->f8_op_success++;
+ break;
+
+ case QCE_OTA_MPKT_F8_OPER:
+
+ if (areq->err)
+ pstat->f8_mp_op_fail++;
+ else
+ pstat->f8_mp_op_success++;
+ break;
+
+ case QCE_OTA_F9_OPER:
+ if (areq->err)
+ pstat->f9_op_fail++;
+ else
+ pstat->f9_op_success++;
+ break;
+ case QCE_OTA_VAR_MPKT_F8_OPER:
+ default:
+ if (areq->err)
+ pstat->f8_v_mp_op_fail++;
+ else
+ pstat->f8_v_mp_op_success++;
+ break;
+ }
+
+ return areq->err;
+}
+
+static long qcota_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct ota_dev_control *podev;
+ uint8_t *user_src;
+ uint8_t *user_dst;
+ uint8_t *k_buf = NULL;
+ struct ota_async_req areq;
+ uint32_t total, temp;
+ struct qcota_stat *pstat;
+ int i;
+ uint8_t *p = NULL;
+
+ podev = file->private_data;
+ if (podev == NULL || podev->magic != OTA_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ return -ENOENT;
+ }
+
+ /* Verify user arguments. */
+ if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+ return -ENOTTY;
+
+ init_completion(&areq.complete);
+
+ pstat = &_qcota_stat;
+
+ switch (cmd) {
+ case QCOTA_F9_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f9_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+ sizeof(struct qce_f9_req)))
+ return -EFAULT;
+
+ user_src = areq.req.f9_req.message;
+ if (!access_ok(VERIFY_READ, (void __user *)user_src,
+ areq.req.f9_req.msize))
+ return -EFAULT;
+
+ if (areq.req.f9_req.msize == 0)
+ return 0;
+ k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+
+ if (__copy_from_user(k_buf, (void __user *)user_src,
+ areq.req.f9_req.msize)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+
+ areq.req.f9_req.message = k_buf;
+ areq.op = QCE_OTA_F9_OPER;
+
+ pstat->f9_req++;
+ err = submit_req(&areq, podev);
+
+ areq.req.f9_req.message = user_src;
+ if (err == 0 && __copy_to_user((void __user *)arg,
+ &areq.req.f9_req, sizeof(struct qce_f9_req))) {
+ err = -EFAULT;
+ }
+ kfree(k_buf);
+ break;
+
+ case QCOTA_F8_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f8_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+ sizeof(struct qce_f8_req)))
+ return -EFAULT;
+ total = areq.req.f8_req.data_len;
+ user_src = areq.req.f8_req.data_in;
+ if (user_src != NULL) {
+ if (!access_ok(VERIFY_READ, (void __user *)
+ user_src, total))
+ return -EFAULT;
+
+ }
+
+ user_dst = areq.req.f8_req.data_out;
+ if (!access_ok(VERIFY_WRITE, (void __user *)
+ user_dst, total))
+ return -EFAULT;
+
+ if (!total)
+ return 0;
+ k_buf = kmalloc(total, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+
+ /* k_buf returned from kmalloc should be cache line aligned */
+ if (user_src && __copy_from_user(k_buf,
+ (void __user *)user_src, total)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+
+ if (user_src)
+ areq.req.f8_req.data_in = k_buf;
+ else
+ areq.req.f8_req.data_in = NULL;
+ areq.req.f8_req.data_out = k_buf;
+
+ areq.op = QCE_OTA_F8_OPER;
+
+ pstat->f8_req++;
+ err = submit_req(&areq, podev);
+
+ if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+ err = -EFAULT;
+ kfree(k_buf);
+
+ break;
+
+ case QCOTA_F8_MPKT_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f8_multi_pkt_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+ sizeof(struct qce_f8_multi_pkt_req)))
+ return -EFAULT;
+ temp = areq.req.f8_mp_req.qce_f8_req.data_len;
+ if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
+ areq.req.f8_mp_req.cipher_size)
+ return -EINVAL;
+ total = (uint32_t) areq.req.f8_mp_req.num_pkt *
+ areq.req.f8_mp_req.qce_f8_req.data_len;
+
+ user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+ if (!access_ok(VERIFY_READ, (void __user *)
+ user_src, total))
+ return -EFAULT;
+
+ user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+ if (!access_ok(VERIFY_WRITE, (void __user *)
+ user_dst, total))
+ return -EFAULT;
+
+ if (!total)
+ return 0;
+ k_buf = kmalloc(total, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+ /* k_buf returned from kmalloc should be cache line aligned */
+ if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+ kfree(k_buf);
+
+ return -EFAULT;
+ }
+
+ areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+ areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+ areq.op = QCE_OTA_MPKT_F8_OPER;
+
+ pstat->f8_mp_req++;
+ err = submit_req(&areq, podev);
+
+ if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+ err = -EFAULT;
+ kfree(k_buf);
+ break;
+
+ case QCOTA_F8_V_MPKT_REQ:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(struct qce_f8_varible_multi_pkt_req)))
+ return -EFAULT;
+ if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
+ sizeof(struct qce_f8_varible_multi_pkt_req)))
+ return -EFAULT;
+
+ if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
+ return -EINVAL;
+
+ for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)
+ areq.req.f8_v_mp_req.cipher_iov[i].addr,
+ areq.req.f8_v_mp_req.cipher_iov[i].size))
+ return -EFAULT;
+ total += areq.req.f8_v_mp_req.cipher_iov[i].size;
+ total = ALIGN(total, L1_CACHE_BYTES);
+ }
+
+ if (!total)
+ return 0;
+ k_buf = kmalloc(total, GFP_KERNEL);
+ if (k_buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+ user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
+ if (__copy_from_user(p, (void __user *)user_src,
+ areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+ p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+ p = (uint8_t *) ALIGN(((uintptr_t)p),
+ L1_CACHE_BYTES);
+ }
+
+ areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
+ areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
+ areq.req.f8_v_mp_req.qce_f8_req.data_len =
+ areq.req.f8_v_mp_req.cipher_iov[0].size;
+ areq.steps = 0;
+ areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
+
+ pstat->f8_v_mp_req++;
+ err = submit_req(&areq, podev);
+
+ if (err != 0) {
+ kfree(k_buf);
+ return err;
+ }
+
+ for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+ user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
+ if (__copy_to_user(user_dst, p,
+ areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+ kfree(k_buf);
+ return -EFAULT;
+ }
+ p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+ p = (uint8_t *) ALIGN(((uintptr_t)p),
+ L1_CACHE_BYTES);
+ }
+ kfree(k_buf);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ return err;
+}
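+
+/*
+ * Userspace usage sketch (hypothetical, error handling elided); the request
+ * structures and ioctl numbers come from <linux/qcota.h> and the node name
+ * matches the miscdevice registered above:
+ *
+ *	struct qce_f9_req req = { ... };
+ *	int fd = open("/dev/qcota0", O_RDWR);
+ *
+ *	if (ioctl(fd, QCOTA_F9_REQ, &req) == 0)
+ *		printf("MAC-I: 0x%x\n", req.mac_i);
+ *	close(fd);
+ */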
+
+static int qcota_probe(struct platform_device *pdev)
+{
+ void *handle = NULL;
+ int rc = 0;
+ struct ota_dev_control *podev;
+ struct ce_hw_support ce_support;
+ struct ota_qce_dev *pqce;
+ unsigned long flags;
+
+ podev = &qcota_dev;
+ pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
+ if (!pqce) {
+ pr_err("qcota_probe: Memory allocation FAIL\n");
+ return -ENOMEM;
+ }
+
+ pqce->podev = podev;
+ pqce->active_command = NULL;
+ tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
+
+ /* open qce */
+ handle = qce_open(pdev, &rc);
+ if (handle == NULL) {
+ pr_err("%s: device %s, can not open qce\n",
+ __func__, pdev->name);
+ goto err;
+ }
+ if (qce_hw_support(handle, &ce_support) < 0 ||
+ ce_support.ota == false) {
+ pr_err("%s: device %s, qce does not support ota capability\n",
+ __func__, pdev->name);
+ rc = -ENODEV;
+ goto err;
+ }
+ pqce->qce = handle;
+ pqce->pdev = pdev;
+ pqce->total_req = 0;
+ pqce->err_req = 0;
+ platform_set_drvdata(pdev, pqce);
+
+ mutex_lock(&podev->register_lock);
+ rc = 0;
+ if (podev->registered == false) {
+ rc = misc_register(&podev->miscdevice);
+ if (rc == 0) {
+ pqce->unit = podev->total_units;
+ podev->total_units++;
+ podev->registered = true;
+ }
+ } else {
+ pqce->unit = podev->total_units;
+ podev->total_units++;
+ }
+ mutex_unlock(&podev->register_lock);
+ if (rc) {
+ pr_err("ion: failed to register misc device.\n");
+ goto err;
+ }
+
+ spin_lock_irqsave(&podev->lock, flags);
+ list_add_tail(&pqce->qlist, &podev->qce_dev);
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ return 0;
+err:
+ if (handle)
+ qce_close(handle);
+
+ platform_set_drvdata(pdev, NULL);
+ tasklet_kill(&pqce->done_tasklet);
+ kfree(pqce);
+ return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+ struct ota_dev_control *podev;
+ struct ota_qce_dev *pqce;
+ unsigned long flags;
+
+ pqce = platform_get_drvdata(pdev);
+ if (!pqce)
+ return 0;
+ if (pqce->qce)
+ qce_close(pqce->qce);
+
+ podev = pqce->podev;
+ if (!podev)
+ goto ret;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ list_del(&pqce->qlist);
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ mutex_lock(&podev->register_lock);
+ if (--podev->total_units == 0) {
+ if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&podev->miscdevice);
+ podev->registered = false;
+ }
+ mutex_unlock(&podev->register_lock);
+ret:
+
+ tasklet_kill(&pqce->done_tasklet);
+ kfree(pqce);
+ return 0;
+}
+
+static const struct of_device_id qcota_match[] = {
+ { .compatible = "qcom,qcota",
+ },
+ {}
+};
+
+static struct platform_driver qcota_plat_driver = {
+ .probe = qcota_probe,
+ .remove = qcota_remove,
+ .driver = {
+ .name = "qcota",
+ .owner = THIS_MODULE,
+ .of_match_table = qcota_match,
+ },
+};
+
+static int _disp_stats(void)
+{
+ struct qcota_stat *pstat;
+ int len = 0;
+ struct ota_dev_control *podev = &qcota_dev;
+ unsigned long flags;
+ struct ota_qce_dev *p;
+
+ pstat = &_qcota_stat;
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ "\nQTI OTA crypto accelerator Statistics:\n");
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 request : %llu\n",
+ pstat->f8_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 operation success : %llu\n",
+ pstat->f8_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 operation fail : %llu\n",
+ pstat->f8_op_fail);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 MP request : %llu\n",
+ pstat->f8_mp_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 MP operation success : %llu\n",
+ pstat->f8_mp_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 MP operation fail : %llu\n",
+ pstat->f8_mp_op_fail);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 Variable MP request : %llu\n",
+ pstat->f8_v_mp_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 Variable MP operation success: %llu\n",
+ pstat->f8_v_mp_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F8 Variable MP operation fail : %llu\n",
+ pstat->f8_v_mp_op_fail);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F9 request : %llu\n",
+ pstat->f9_req);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F9 operation success : %llu\n",
+ pstat->f9_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " F9 operation fail : %llu\n",
+ pstat->f9_op_fail);
+
+ spin_lock_irqsave(&podev->lock, flags);
+
+ list_for_each_entry(p, &podev->qce_dev, qlist) {
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req : %llu\n",
+ p->unit,
+ p->total_req
+ );
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req Error : %llu\n",
+ p->unit,
+ p->err_req
+ );
+ }
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc = -EINVAL;
+ int len;
+
+ len = _disp_stats();
+ if (len <= count)
+ rc = simple_read_from_buffer((void __user *) buf, len,
+ ppos, (void *) _debug_read_buf, len);
+
+ return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ota_dev_control *podev = &qcota_dev;
+ unsigned long flags;
+ struct ota_qce_dev *p;
+
+ memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
+
+ spin_lock_irqsave(&podev->lock, flags);
+
+ list_for_each_entry(p, &podev->qce_dev, qlist) {
+ p->total_req = 0;
+ p->err_req = 0;
+ }
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+ .open = _debug_stats_open,
+ .read = _debug_stats_read,
+ .write = _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+ int rc;
+ char name[DEBUG_MAX_FNAME];
+ struct dentry *dent;
+
+ _debug_dent = debugfs_create_dir("qcota", NULL);
+ if (IS_ERR_OR_NULL(_debug_dent)) {
+ pr_err("qcota debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(_debug_dent));
+ return _debug_dent ? PTR_ERR(_debug_dent) : -ENOMEM;
+ }
+
+ snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
+ _debug_qcota = 0;
+ dent = debugfs_create_file(name, 0644, _debug_dent,
+ &_debug_qcota, &_debug_stats_ops);
+ if (IS_ERR_OR_NULL(dent)) {
+ pr_err("qcota debugfs_create_file fail, error %ld\n",
+ PTR_ERR(dent));
+ rc = dent ? PTR_ERR(dent) : -ENOMEM;
+ goto err;
+ }
+ return 0;
+err:
+ debugfs_remove_recursive(_debug_dent);
+ return rc;
+}
+
+static int __init qcota_init(void)
+{
+ int rc;
+ struct ota_dev_control *podev;
+
+ rc = _qcota_debug_init();
+ if (rc)
+ return rc;
+
+ podev = &qcota_dev;
+ INIT_LIST_HEAD(&podev->ready_commands);
+ INIT_LIST_HEAD(&podev->qce_dev);
+ spin_lock_init(&podev->lock);
+ mutex_init(&podev->register_lock);
+ podev->registered = false;
+ podev->total_units = 0;
+
+ return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+ debugfs_remove_recursive(_debug_dent);
+ platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Ota Crypto driver");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
new file mode 100644
index 0000000..7b4ca24
--- /dev/null
+++ b/drivers/crypto/msm/qce.h
@@ -0,0 +1,191 @@
+/*
+ * QTI Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size in bytes */
+#define SHA256_DIGESTSIZE 32
+#define SHA1_DIGESTSIZE 20
+
+#define AES_CE_BLOCK_SIZE 16
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE 64
+#define DES_KEY_SIZE 8
+#define TRIPLE_DES_KEY_SIZE 24
+#define AES128_KEY_SIZE 16
+#define AES192_KEY_SIZE 24
+#define AES256_KEY_SIZE 32
+#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH 16
+#define DES_IV_LENGTH 8
+#define MAX_IV_LENGTH AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA 0xFF00
+
+/* Maximum Nonce bytes */
+#define MAX_NONCE 16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+ unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+ CIPHER_ALG_DES = 0,
+ CIPHER_ALG_3DES = 1,
+ CIPHER_ALG_AES = 2,
+ CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+ QCE_HASH_SHA1 = 0,
+ QCE_HASH_SHA256 = 1,
+ QCE_HASH_SHA1_HMAC = 2,
+ QCE_HASH_SHA256_HMAC = 3,
+ QCE_HASH_AES_CMAC = 4,
+ QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+ QCE_ENCRYPT = 0,
+ QCE_DECRYPT = 1,
+ QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+ QCE_MODE_CBC = 0,
+ QCE_MODE_ECB = 1,
+ QCE_MODE_CTR = 2,
+ QCE_MODE_XTS = 3,
+ QCE_MODE_CCM = 4,
+ QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+ QCE_REQ_ABLK_CIPHER = 0,
+ QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+ QCE_REQ_AEAD = 2,
+ QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+ bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/
+ bool sha1_hmac; /* supports max HMAC key of 64 bytes*/
+ bool sha256_hmac; /* supports max HMAC key of 64 bytes*/
+ bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes*/
+ bool cmac;
+ bool aes_key_192;
+ bool aes_xts;
+ bool aes_ccm;
+ bool ota;
+ bool aligned_only;
+ bool bam;
+ bool is_shared;
+ bool hw_key;
+ bool use_sw_aes_cbc_ecb_ctr_algo;
+ bool use_sw_aead_algo;
+ bool use_sw_aes_xts_algo;
+ bool use_sw_ahash_algo;
+ bool use_sw_hmac_algo;
+ bool use_sw_aes_ccm_algo;
+ bool clk_mgmt_sus_res;
+ unsigned int ce_device;
+ unsigned int ce_hw_instance;
+ unsigned int max_request;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+ qce_comp_func_ptr_t qce_cb; /* call back */
+ enum qce_hash_alg_enum alg; /* sha algorithm */
+ unsigned char *digest; /* sha digest */
+ struct scatterlist *src; /* pointer to scatter list entry */
+ uint32_t auth_data[4]; /* byte count */
+ unsigned char *authkey; /* auth key */
+ unsigned int authklen; /* auth key length */
+ bool first_blk; /* first block indicator */
+ bool last_blk; /* last block indicator */
+ unsigned int size; /* data length in bytes */
+ void *areq;
+ unsigned int flags;
+};
+
+struct qce_req {
+ enum qce_req_op_enum op; /* operation type */
+ qce_comp_func_ptr_t qce_cb; /* call back */
+ void *areq;
+ enum qce_cipher_alg_enum alg; /* cipher algorithms*/
+ enum qce_cipher_dir_enum dir; /* encryption? decryption? */
+ enum qce_cipher_mode_enum mode; /* algorithm mode */
+ enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
+ unsigned char *authkey; /* authentication key */
+	unsigned int authklen; /* authentication key length */
+	unsigned int authsize; /* authentication (MAC/tag) size */
+ unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */
+ unsigned char *assoc; /* Ptr to formatted associated data */
+ unsigned int assoclen; /* Formatted associated data length */
+ struct scatterlist *asg; /* Formatted associated data sg */
+ unsigned char *enckey; /* cipher key */
+ unsigned int encklen; /* cipher key length */
+ unsigned char *iv; /* initialization vector */
+ unsigned int ivsize; /* initialization vector size*/
+ unsigned int cryptlen; /* data length */
+ unsigned int use_pmem; /* is source of data PMEM allocated? */
+ struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/
+ unsigned int flags;
+};
+
+struct qce_pm_table {
+ int (*suspend)(void *handle);
+ int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
+void qce_get_driver_stats(void *handle);
+void qce_clear_driver_stats(void *handle);
+
+#endif /* __CRYPTO_MSM_QCE_H */
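For orientation, here is a minimal sketch of how a client driver might drive this API end to end. It is illustrative only: the callback body, the platform_device pointer, and the my_* names are assumptions, and a real caller also supplies the areq/scatterlist plumbing of the kernel crypto API and keeps the handle open until the completion callback fires.

static void my_cipher_done(void *areq, unsigned char *icv,
			   unsigned char *iv, int ret)
{
	/* signal the waiter associated with areq; ret < 0 on failure */
}

static int my_qce_demo(struct platform_device *pdev,
		       unsigned char key[AES128_KEY_SIZE],
		       unsigned char iv[AES_IV_LENGTH],
		       unsigned int len, void *areq)
{
	struct ce_hw_support support;
	struct qce_req creq = {0};
	void *handle;
	int rc = 0;

	handle = qce_open(pdev, &rc);
	if (handle == NULL)
		return rc;

	if (qce_hw_support(handle, &support) || !support.bam) {
		rc = -ENODEV;
		goto out;
	}

	creq.op = QCE_REQ_ABLK_CIPHER;
	creq.qce_cb = my_cipher_done;
	creq.areq = areq;		/* carries the src/dst scatterlists */
	creq.alg = CIPHER_ALG_AES;
	creq.dir = QCE_ENCRYPT;
	creq.mode = QCE_MODE_CBC;
	creq.enckey = key;
	creq.encklen = AES128_KEY_SIZE;
	creq.iv = iv;
	creq.ivsize = AES_IV_LENGTH;
	creq.cryptlen = len;

	rc = qce_ablk_cipher_req(handle, &creq);
	if (rc)
		goto out;
	/*
	 * Completion arrives asynchronously in my_cipher_done(); a real
	 * caller closes the handle only after that. Tear-down is elided.
	 */
	return 0;
out:
	qce_close(handle);
	return rc;
}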
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 0000000..0cf4386
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,6141 @@
+/*
+ * QTI Crypto Engine driver.
+ *
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <linux/clk/qcom.h>
+#include <linux/qcrypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <soc/qcom/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+#include "qce_ota.h"
+
+#define CRYPTO_CONFIG_RESET 0xE01EF
+#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
+#define QCE_MAX_NUM_DSCR 0x200
+#define QCE_SECTOR_SIZE 0x200
+#define CE_CLK_100MHZ 100000000
+#define CE_CLK_DIV 1000000
+
+#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
+#define CRYPTO_CORE_MINOR_VER_NUM 0x03
+#define CRYPTO_CORE_STEP_VER_NUM 0x1
+
+#define CRYPTO_REQ_USER_PAT 0xdead0000
+
+static DEFINE_MUTEX(bam_register_lock);
+static DEFINE_MUTEX(qce_iomap_mutex);
+
+struct bam_registration_info {
+ struct list_head qlist;
+ unsigned long handle;
+ uint32_t cnt;
+ uint32_t bam_mem;
+ void __iomem *bam_iobase;
+ bool support_cmd_dscr;
+};
+static LIST_HEAD(qce50_bam_list);
+
+/* Threshold of queued requests above which the driver enters bunch mode */
+#define MAX_BUNCH_MODE_REQ 2
+/* Max number of request supported */
+#define MAX_QCE_BAM_REQ 8
+/* An interrupt is requested on every SET_INTR_AT_REQ-th request */
+#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2)
+/* To create extra request space to hold dummy request */
+#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
+/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */
+#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
+/* QCE driver modes */
+#define IN_INTERRUPT_MODE 0
+#define IN_BUNCH_MODE 1
+/* Dummy request data length */
+#define DUMMY_REQ_DATA_LEN 64
+/* Delay timer to expire when in bunch mode */
+#define DELAY_IN_JIFFIES 5
+/* Index to point the dummy request */
+#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ
+
+#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
+
+enum qce_owner {
+ QCE_OWNER_NONE = 0,
+ QCE_OWNER_CLIENT = 1,
+ QCE_OWNER_TIMEOUT = 2
+};
+
+struct dummy_request {
+ struct qce_sha_req sreq;
+ struct scatterlist sg;
+ struct ahash_request areq;
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can handle only one crypto operation at a time. It is up
+ * to the SW above to ensure single-threaded operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev; /* Handle to the platform device's struct device */
+ struct bam_registration_info *pbam;
+
+ unsigned char *coh_vmem; /* Allocated coherent virtual memory */
+ dma_addr_t coh_pmem; /* Allocated coherent physical memory */
+ int memsize; /* Memory allocated */
+	unsigned char *iovec_vmem; /* Allocated iovec virtual memory */
+ int iovec_memsize; /* Memory allocated */
+ uint32_t bam_mem; /* bam physical address, from DT */
+ uint32_t bam_mem_size; /* bam io size, from DT */
+ int is_shared; /* CE HW is shared */
+ bool support_cmd_dscr;
+ bool support_hw_key;
+ bool support_clk_mgmt_sus_res;
+ bool support_only_core_src_clk;
+
+ void __iomem *iobase; /* Virtual io base of CE HW */
+ unsigned int phy_iobase; /* Physical io base of CE HW */
+
+ struct clk *ce_core_src_clk; /* Handle to CE src clk*/
+ struct clk *ce_core_clk; /* Handle to CE clk */
+ struct clk *ce_clk; /* Handle to CE clk */
+ struct clk *ce_bus_clk; /* Handle to CE AXI clk*/
+ bool no_get_around;
+ bool no_ccm_mac_status_get_around;
+ unsigned int ce_opp_freq_hz;
+ bool use_sw_aes_cbc_ecb_ctr_algo;
+ bool use_sw_aead_algo;
+ bool use_sw_aes_xts_algo;
+ bool use_sw_ahash_algo;
+ bool use_sw_hmac_algo;
+ bool use_sw_aes_ccm_algo;
+ uint32_t engines_avail;
+ struct qce_ce_cfg_reg_setting reg;
+ struct ce_bam_info ce_bam_info;
+ struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
+ unsigned int ce_request_index;
+ enum qce_owner owner;
+ atomic_t no_of_queued_req;
+ struct timer_list timer;
+ struct dummy_request dummyreq;
+ unsigned int mode;
+ unsigned int intr_cadence;
+ unsigned int dev_no;
+ struct qce_driver_stats qce_stats;
+ atomic_t bunch_cmd_seq;
+ atomic_t last_intr_seq;
+ bool cadence_flag;
+ uint8_t *dummyreq_in_buf;
+};
+
+static void print_notify_debug(struct sps_event_notify *notify);
+static void _sps_producer_callback(struct sps_event_notify *notify);
+static int qce_dummy_req(struct qce_device *pce_dev);
+
+static int _qce50_disp_stats;
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha1[] = {
+ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+ 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+};
+
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned int n;
+
+ n = len / sizeof(uint32_t);
+ for (; n > 0; n--) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) |
+ (*(b+3) & 0xff);
+ b += sizeof(uint32_t);
+ iv++;
+ }
+
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00);
+ } else if (n == 2) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000);
+ } else if (n == 1) {
+ *iv = ((*b << 24) & 0xff000000);
+ }
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned int i, j;
+ unsigned char swap_iv[AES_IV_LENGTH];
+
+ memset(swap_iv, 0, AES_IV_LENGTH);
+ for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
+ swap_iv[i] = b[j];
+ _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
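To make the two helpers above concrete, here is their output worked through by hand for small inputs. The values follow directly from the code; the sketch is a comment, not new driver logic.

/*
 * _byte_stream_to_net_words packs bytes big-endian into 32-bit words:
 *
 *   b = {0x01, 0x02, 0x03, 0x04, 0xaa, 0xbb}, len = 6
 *   iv[0] = 0x01020304          (one full word)
 *   iv[1] = 0xaabb0000          (2 trailing bytes, high-aligned)
 *
 * _byte_stream_swap_to_net_words first reverses the bytes into the
 * tail of a zeroed AES_IV_LENGTH buffer, so an 8-byte value
 *
 *   b = {0x01, 0x02, ..., 0x08}, len = 8
 *
 * is laid out as {0, ..., 0, 0x08, 0x07, ..., 0x01} before the
 * big-endian word packing above is applied.
 */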
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+ int i;
+
+ for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+ nbytes -= sg->length;
+ return i;
+}
+
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; ++i) {
+ dma_map_sg(dev, sg, 1, direction);
+ sg = sg_next(sg);
+ }
+
+ return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; ++i) {
+ dma_unmap_sg(dev, sg, 1, direction);
+ sg = sg_next(sg);
+ }
+
+ return nents;
+}
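The two wrappers above deliberately map each scatterlist entry on its own (one dma_map_sg() call per entry) instead of handing the whole chain to the DMA core at once, so entries are never merged. A minimal, illustrative caller is sketched below; the buffers, lengths, and function name are assumptions.

static int my_map_two_buffers(struct qce_device *pce_dev,
			      void *buf_a, int len_a,
			      void *buf_b, int len_b)
{
	struct scatterlist sg[2];
	int nents;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], buf_a, len_a);
	sg_set_buf(&sg[1], buf_b, len_b);

	nents = count_sg(sg, len_a + len_b);	/* -> 2 */
	qce_dma_map_sg(pce_dev->pdev, sg, nents, DMA_TO_DEVICE);

	/* ... queue the mapped entries on the BAM/SPS pipe ... */

	qce_dma_unmap_sg(pce_dev->pdev, sg, nents, DMA_TO_DEVICE);
	return nents;
}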
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+ unsigned int rev;
+ unsigned int maj_rev, min_rev, step_rev;
+
+ rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	/*
+	 * Ensure the version register read above completes before
+	 * its value is decoded below.
+	 */
+ mb();
+ maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+ min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+ step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+ if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
+ pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
+ pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+ return -EIO;
+ }
+
+ /*
+ * The majority of crypto HW bugs have been fixed in 5.3.0 and
+ * above. That allows a single sps transfer of consumer
+ * pipe, and a single sps transfer of producer pipe
+ * for a crypto request. no_get_around flag indicates this.
+ *
+ * In 5.3.1, the CCM MAC_FAILED in result dump issue is
+ * fixed. no_ccm_mac_status_get_around flag indicates this.
+ */
+	pce_dev->no_get_around = (min_rev >= CRYPTO_CORE_MINOR_VER_NUM);
+ if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
+ pce_dev->no_ccm_mac_status_get_around = true;
+ else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
+ (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
+ pce_dev->no_ccm_mac_status_get_around = true;
+ else
+ pce_dev->no_ccm_mac_status_get_around = false;
+
+ pce_dev->ce_bam_info.minor_version = min_rev;
+
+ pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+ CRYPTO_ENGINES_AVAIL);
+ dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
+ maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+ pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_info(pce_dev->pdev, "CE device = 0x%x, IO base CE = 0x%p, consumer (IN) PIPE %d, producer (OUT) PIPE %d, IO base BAM = 0x%p, BAM IRQ %d, engines availability = 0x%x\n",
+ pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
+ pce_dev->ce_bam_info.dest_pipe_index,
+ pce_dev->ce_bam_info.src_pipe_index,
+ pce_dev->ce_bam_info.bam_iobase,
+ pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
+ return 0;
+}
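To make the revision gating above easier to audit, here are the derived flags for a few core revisions, worked out from the version constants at the top of this file.

/*
 * With CRYPTO_CORE_MINOR_VER_NUM = 0x03 and CRYPTO_CORE_STEP_VER_NUM = 0x1:
 *
 *   rev 5.2.x -> no_get_around = false, no_ccm_mac_status_get_around = false
 *   rev 5.3.0 -> no_get_around = true,  no_ccm_mac_status_get_around = false
 *   rev 5.3.1 -> no_get_around = true,  no_ccm_mac_status_get_around = true
 *   rev 5.4.0 -> no_get_around = true,  no_ccm_mac_status_get_around = true
 *
 * Any major revision other than 5 is rejected with -EIO before the
 * flags are computed.
 */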
+
+static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
+ struct qce_device *pce_dev,
+ int req_info, struct qce_sha_req *sreq)
+{
+ struct ce_sps_data *pce_sps_data;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ cmdlistptr = &pce_sps_data->cmdlistptr;
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ return &cmdlistptr->auth_sha1;
+ case QCE_HASH_SHA256:
+ return &cmdlistptr->auth_sha256;
+ case QCE_HASH_SHA1_HMAC:
+ return &cmdlistptr->auth_sha1_hmac;
+ case QCE_HASH_SHA256_HMAC:
+ return &cmdlistptr->auth_sha256_hmac;
+ case QCE_HASH_AES_CMAC:
+ if (sreq->authklen == AES128_KEY_SIZE)
+ return &cmdlistptr->auth_aes_128_cmac;
+ return &cmdlistptr->auth_aes_256_cmac;
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+ struct qce_sha_req *sreq,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+ uint32_t diglen;
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ bool sha1 = false;
+ struct sps_command_element *pce = NULL;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+ uint32_t auth_cfg;
+
+ if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC) ||
+ (sreq->alg == QCE_HASH_AES_CMAC)) {
+
+		/* null key is no longer checked; flags select the key source */
+ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+ == QCRYPTO_CTX_USE_HW_KEY)
+ use_hw_key = true;
+ else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
+ pce = cmdlistinfo->go_proc;
+ if (use_hw_key == true) {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+ pce_dev->phy_iobase);
+ } else {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+ pce_dev->phy_iobase);
+ pce = cmdlistinfo->auth_key;
+ if (use_pipe_key == false) {
+ _byte_stream_to_net_words(mackey32,
+ sreq->authkey,
+ sreq->authklen);
+ for (i = 0; i < authk_size_in_word; i++, pce++)
+ pce->data = mackey32[i];
+ }
+ }
+ }
+
+ if (sreq->alg == QCE_HASH_AES_CMAC)
+ goto go_proc;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+ return -EIO;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ case QCE_HASH_SHA1_HMAC:
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA256:
+ case QCE_HASH_SHA256_HMAC:
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+ if (sreq->first_blk) {
+ if (sha1) {
+ for (i = 0; i < 5; i++)
+ auth32[i] = _std_init_vector_sha1[i];
+ } else {
+ for (i = 0; i < 8; i++)
+ auth32[i] = _std_init_vector_sha256[i];
+ }
+ } else {
+ _byte_stream_to_net_words(auth32, sreq->digest, diglen);
+ }
+
+ pce = cmdlistinfo->auth_iv;
+ for (i = 0; i < 5; i++, pce++)
+ pce->data = auth32[i];
+
+ if ((sreq->alg == QCE_HASH_SHA256) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC)) {
+ for (i = 5; i < 8; i++, pce++)
+ pce->data = auth32[i];
+ }
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ pce = cmdlistinfo->auth_bytecount;
+ for (i = 0; i < 2; i++, pce++)
+ pce->data = sreq->auth_data[i];
+
+ /* Set/reset last bit in CFG register */
+ pce = cmdlistinfo->auth_seg_cfg;
+ auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+ 1 << CRYPTO_FIRST |
+ 1 << CRYPTO_USE_PIPE_KEY_AUTH |
+ 1 << CRYPTO_USE_HW_KEY_AUTH);
+ if (sreq->last_blk)
+ auth_cfg |= 1 << CRYPTO_LAST;
+ if (sreq->first_blk)
+ auth_cfg |= 1 << CRYPTO_FIRST;
+ if (use_hw_key)
+ auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+ if (use_pipe_key)
+ auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+ pce->data = auth_cfg;
+go_proc:
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = sreq->size;
+
+ pce = cmdlistinfo->encr_seg_cfg;
+ pce->data = 0;
+
+ /* write auth seg size start*/
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+
+	/*
+	 * Always ensure there is input data; a zero-length transfer (ZLT)
+	 * does not work for bam-ndp.
+	 */
+ if (sreq->size)
+ pce->data = sreq->size;
+ else
+ pce->data = pce_dev->ce_bam_info.ce_burst_size;
+
+ return 0;
+}
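The first_blk/last_blk/auth_data fields implement a block-streaming contract. Here it is traced for a hypothetical two-request SHA-256 hash of 160 bytes; the field usage is inferred from the checks above.

/*
 * Hashing 160 bytes of data as two requests, SHA-256:
 *
 *   request 1: first_blk = true,  last_blk = false, size = 128
 *              (non-final sizes must be a multiple of
 *               SHA256_BLOCK_SIZE = 64)
 *   request 2: first_blk = false, last_blk = true,  size = 32
 *              digest    = intermediate digest returned by request 1
 *              auth_data = {128, 0}   // running byte count so far
 *
 * On the first block the standard FIPS 180-2 init vector is loaded;
 * on later blocks the previous digest is loaded instead.
 */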
+
+static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
+ struct qce_device *pce_dev,
+ int req_info, struct qce_req *creq)
+{
+ struct ce_sps_data *pce_sps_data;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ cmdlistptr = &pce_sps_data->cmdlistptr;
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ switch (creq->mode) {
+ case QCE_MODE_CBC:
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->aead_hmac_sha1_cbc_des;
+ else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->aead_hmac_sha256_cbc_des;
+ else
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ switch (creq->mode) {
+ case QCE_MODE_CBC:
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->aead_hmac_sha1_cbc_3des;
+ else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->aead_hmac_sha256_cbc_3des;
+ else
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case CIPHER_ALG_AES:
+ switch (creq->mode) {
+ case QCE_MODE_CBC:
+ if (creq->encklen == AES128_KEY_SIZE) {
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha1_cbc_aes_128;
+ else if (creq->auth_alg ==
+ QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha256_cbc_aes_128;
+ else
+ return NULL;
+ } else if (creq->encklen == AES256_KEY_SIZE) {
+ if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha1_cbc_aes_256;
+ else if (creq->auth_alg ==
+ QCE_HASH_SHA256_HMAC)
+ return &cmdlistptr->
+ aead_hmac_sha256_cbc_aes_256;
+ else
+ return NULL;
+ } else
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
+ uint32_t totallen_in, uint32_t coffset,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+ struct sps_command_element *pce;
+ uint32_t a_cfg;
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+ uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t enciv_in_word;
+ uint32_t key_size;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = q_req->ivsize;
+
+ key_size = q_req->encklen;
+ enck_size_in_word = key_size/sizeof(uint32_t);
+
+ switch (q_req->alg) {
+ case CIPHER_ALG_DES:
+ enciv_in_word = 2;
+ break;
+ case CIPHER_ALG_3DES:
+ enciv_in_word = 2;
+ break;
+ case CIPHER_ALG_AES:
+ if ((key_size != AES128_KEY_SIZE) &&
+ (key_size != AES256_KEY_SIZE))
+ return -EINVAL;
+ enciv_in_word = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* only support cbc mode */
+ if (q_req->mode != QCE_MODE_CBC)
+ return -EINVAL;
+
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ for (i = 0; i < enciv_in_word; i++, pce++)
+ pce->data = enciv32[i];
+
+ /*
+ * write encr key
+ * do not use hw key or pipe key
+ */
+ _byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < enck_size_in_word; i++, pce++)
+ pce->data = enckey32[i];
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ encr_cfg = pce->data;
+ if (q_req->dir == QCE_ENCRYPT)
+ encr_cfg |= (1 << CRYPTO_ENCODE);
+ else
+ encr_cfg &= ~(1 << CRYPTO_ENCODE);
+ pce->data = encr_cfg;
+
+ /* we only support sha1-hmac and sha256-hmac at this point */
+ _byte_stream_to_net_words(mackey32, q_req->authkey,
+ q_req->authklen);
+ pce = cmdlistinfo->auth_key;
+ for (i = 0; i < authk_size_in_word; i++, pce++)
+ pce->data = mackey32[i];
+ pce = cmdlistinfo->auth_iv;
+
+ if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+ for (i = 0; i < 5; i++, pce++)
+ pce->data = _std_init_vector_sha1[i];
+ else
+ for (i = 0; i < 8; i++, pce++)
+ pce->data = _std_init_vector_sha256[i];
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ pce = cmdlistinfo->auth_bytecount;
+ for (i = 0; i < 2; i++, pce++)
+ pce->data = 0;
+
+ pce = cmdlistinfo->auth_seg_cfg;
+ a_cfg = pce->data;
+ a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
+ if (q_req->dir == QCE_ENCRYPT)
+ a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ else
+ a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ pce->data = a_cfg;
+
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = totallen_in;
+
+ /* write auth seg size start*/
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = totallen_in;
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ pce->data = q_req->cryptlen;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (coffset & 0xffff);
+
+ return 0;
+
+}
+
+static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
+ struct qce_device *pce_dev,
+ int req_info, struct qce_req *creq)
+{
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ cmdlistptr = &pce_sps_data->cmdlistptr;
+ if (creq->alg != CIPHER_ALG_AES) {
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode == QCE_MODE_ECB)
+ return &cmdlistptr->cipher_des_ecb;
+ return &cmdlistptr->cipher_des_cbc;
+ case CIPHER_ALG_3DES:
+ if (creq->mode == QCE_MODE_ECB)
+ return &cmdlistptr->cipher_3des_ecb;
+ return &cmdlistptr->cipher_3des_cbc;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (creq->encklen == AES128_KEY_SIZE)
+ return &cmdlistptr->cipher_aes_128_ecb;
+ return &cmdlistptr->cipher_aes_256_ecb;
+ case QCE_MODE_CBC:
+ case QCE_MODE_CTR:
+ if (creq->encklen == AES128_KEY_SIZE)
+ return &cmdlistptr->cipher_aes_128_cbc_ctr;
+ return &cmdlistptr->cipher_aes_256_cbc_ctr;
+ case QCE_MODE_XTS:
+ if (creq->encklen/2 == AES128_KEY_SIZE)
+ return &cmdlistptr->cipher_aes_128_xts;
+ return &cmdlistptr->cipher_aes_256_xts;
+ case QCE_MODE_CCM:
+ if (creq->encklen == AES128_KEY_SIZE)
+ return &cmdlistptr->aead_aes_128_ccm;
+ return &cmdlistptr->aead_aes_256_ccm;
+ default:
+ return NULL;
+ }
+ }
+ return NULL;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+ uint32_t totallen_in, uint32_t coffset,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+ 0, 0, 0, 0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t key_size;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = creq->ivsize;
+ int i;
+ struct sps_command_element *pce = NULL;
+
+ if (creq->mode == QCE_MODE_XTS)
+ key_size = creq->encklen/2;
+ else
+ key_size = creq->encklen;
+
+ if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ use_hw_key = true;
+ } else {
+ if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
+ }
+ pce = cmdlistinfo->go_proc;
+ if (use_hw_key == true)
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+ pce_dev->phy_iobase);
+ else
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+ pce_dev->phy_iobase);
+ if ((use_pipe_key == false) && (use_hw_key == false)) {
+ _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+ enck_size_in_word = key_size/sizeof(uint32_t);
+ }
+
+ if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+ uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+ uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+ uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+ uint32_t auth_cfg = 0;
+
+ /* write nonce */
+ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+ pce = cmdlistinfo->auth_nonce_info;
+ for (i = 0; i < noncelen32; i++, pce++)
+ pce->data = nonce32[i];
+
+ if (creq->authklen == AES128_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+ else {
+ if (creq->authklen == AES256_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+ }
+ if (creq->dir == QCE_ENCRYPT)
+ auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ else
+ auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+ if (use_hw_key == true) {
+ auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+ } else {
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ /* write auth key */
+ pce = cmdlistinfo->auth_key;
+ for (i = 0; i < authklen32; i++, pce++)
+ pce->data = enckey32[i];
+ }
+
+ pce = cmdlistinfo->auth_seg_cfg;
+ pce->data = auth_cfg;
+
+ pce = cmdlistinfo->auth_seg_size;
+ if (creq->dir == QCE_ENCRYPT)
+ pce->data = totallen_in;
+ else
+ pce->data = totallen_in - creq->authsize;
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+ } else {
+ if (creq->op != QCE_REQ_AEAD) {
+ pce = cmdlistinfo->auth_seg_cfg;
+ pce->data = 0;
+ }
+ }
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+ break;
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+ break;
+ case QCE_MODE_XTS:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+ break;
+ case QCE_MODE_CCM:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+ encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+ (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+ break;
+ case QCE_MODE_CTR:
+ default:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+ break;
+ }
+
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = enciv32[0];
+ pce++;
+ pce->data = enciv32[1];
+ }
+ if (use_hw_key == false) {
+ pce = cmdlistinfo->encr_key;
+ pce->data = enckey32[0];
+ pce++;
+ pce->data = enckey32[1];
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = enciv32[0];
+ pce++;
+ pce->data = enciv32[1];
+ }
+ if (use_hw_key == false) {
+ /* write encr key */
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < 6; i++, pce++)
+ pce->data = enckey32[i];
+ }
+ break;
+ case CIPHER_ALG_AES:
+ default:
+ if (creq->mode == QCE_MODE_XTS) {
+ uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+ = {0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t xtsklen =
+ creq->encklen/(2 * sizeof(uint32_t));
+
+ if ((use_hw_key == false) && (use_pipe_key == false)) {
+ _byte_stream_to_net_words(xtskey32,
+ (creq->enckey + creq->encklen/2),
+ creq->encklen/2);
+ /* write xts encr key */
+ pce = cmdlistinfo->encr_xts_key;
+ for (i = 0; i < xtsklen; i++, pce++)
+ pce->data = xtskey32[i];
+ }
+ /* write xts du size */
+ pce = cmdlistinfo->encr_xts_du_size;
+ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+ case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+ pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+ creq->cryptlen);
+ break;
+ case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+ pce->data =
+ min((unsigned int)QCE_SECTOR_SIZE * 2,
+ creq->cryptlen);
+ break;
+ default:
+ pce->data = creq->cryptlen;
+ break;
+ }
+ }
+ if (creq->mode != QCE_MODE_ECB) {
+ if (creq->mode == QCE_MODE_XTS)
+ _byte_stream_swap_to_net_words(enciv32,
+ creq->iv, ivsize);
+ else
+ _byte_stream_to_net_words(enciv32, creq->iv,
+ ivsize);
+ /* write encr cntr iv */
+ pce = cmdlistinfo->encr_cntr_iv;
+ for (i = 0; i < 4; i++, pce++)
+ pce->data = enciv32[i];
+
+ if (creq->mode == QCE_MODE_CCM) {
+ /* write cntr iv for ccm */
+ pce = cmdlistinfo->encr_ccm_cntr_iv;
+ for (i = 0; i < 4; i++, pce++)
+ pce->data = enciv32[i];
+ /* update cntr_iv[3] by one */
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce += 3;
+ pce->data += 1;
+ }
+ }
+
+ if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ } else {
+ if (use_hw_key == false) {
+ /* write encr key */
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < enck_size_in_word; i++, pce++)
+ pce->data = enckey32[i];
+ }
+ } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+ break;
+ } /* end of switch (creq->mode) */
+
+ if (use_pipe_key)
+ encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+ << CRYPTO_USE_PIPE_KEY_ENCR);
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+ if (creq->dir == QCE_ENCRYPT)
+ pce->data |= (1 << CRYPTO_ENCODE);
+ else
+ pce->data &= ~(1 << CRYPTO_ENCODE);
+ encr_cfg = pce->data;
+ } else {
+ encr_cfg |=
+ ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+ }
+ if (use_hw_key == true)
+ encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ else
+ encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ pce->data = encr_cfg;
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+ pce->data = (creq->cryptlen + creq->authsize);
+ else
+ pce->data = creq->cryptlen;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (coffset & 0xffff);
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = totallen_in;
+
+ return 0;
+}
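One detail above that is easy to misread is the CCM length bookkeeping. Worked through for a hypothetical request with cryptlen = 256 and authsize = 16:

/*
 * CCM decrypt, cryptlen = 256, authsize = 16:
 *   encr seg size = 256 + 16 = 272   (the engine consumes the MAC too)
 *   auth seg size = totallen_in - 16 (set earlier in this function)
 *
 * CCM encrypt, same lengths:
 *   encr seg size = 256
 *   auth seg size = totallen_in      (the MAC is produced, not consumed)
 */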
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t cfg;
+ struct sps_command_element *pce;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cfg = pce_dev->reg.auth_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cfg = pce_dev->reg.auth_cfg_snow3g;
+ break;
+	}
+
+ /* write key in CRYPTO_AUTH_IV0-3_REG */
+ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+ pce = cmdlistinfo->auth_iv;
+ for (i = 0; i < key_size_in_word; i++, pce++)
+ pce->data = ikey32[i];
+
+ /* write last bits in CRYPTO_AUTH_IV4_REG */
+ pce->data = req->last_bits;
+
+ /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+ pce = cmdlistinfo->auth_bytecount;
+ pce->data = req->fresh;
+
+ /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
+ pce++;
+ pce->data = req->count_i;
+
+ /* write auth seg cfg */
+ pce = cmdlistinfo->auth_seg_cfg;
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= BIT(CRYPTO_F9_DIRECTION);
+ pce->data = cfg;
+
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = req->msize;
+
+ /* write auth seg start*/
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = req->msize;
+
+ /* write go */
+ pce = cmdlistinfo->go_proc;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+ return 0;
+}
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+ bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+ uint16_t cipher_size,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t cfg;
+ struct sps_command_element *pce;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cfg = pce_dev->reg.encr_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cfg = pce_dev->reg.encr_cfg_snow3g;
+ break;
+	}
+ /* write key */
+ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < key_size_in_word; i++, pce++)
+ pce->data = ckey32[i];
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ if (key_stream_mode)
+ cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ cfg |= BIT(CRYPTO_F8_DIRECTION);
+ pce->data = cfg;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (cipher_offset & 0xffff);
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ pce->data = cipher_size;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = req->data_len;
+
+ /* write cntr0_iv0 for countC */
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = req->count_c;
+ /* write cntr1_iv1 for nPkts, and bearer */
+ pce++;
+ if (npkts == 1)
+ npkts = 0;
+ pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+ /* write go */
+ pce = cmdlistinfo->go_proc;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+ return 0;
+}
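The CNTR1_IV1 packing at the tail of this function, spelled out. The shift macros live in qcryptohw_50.h; the field layout below is read off their names rather than from documented register offsets.

/*
 * A single packet is encoded as a packet count of 0, so for
 * bearer = 5, npkts = 1:
 *
 *   npkts = 0;   // normalized above
 *   cntr1_iv1 = (5 << CRYPTO_CNTR1_IV1_REG_F8_BEARER) |
 *               (0 << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT);
 *
 * For a multi-packet key-stream request (e.g. npkts = 4) the packet
 * count field carries 4 and the HW advances per-packet counters.
 */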
+
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
+{
+ int i, j, ents;
+ struct ce_sps_data *pce_sps_data;
+ struct sps_iovec *iovec;
+ uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ iovec = pce_sps_data->in_transfer.iovec;
+ pr_info("==============================================\n");
+ pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+ pr_info("==============================================\n");
+ for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
+ pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
+ iovec->addr, iovec->size, iovec->flags);
+ if (iovec->flags & cmd_flags) {
+ struct sps_command_element *pced;
+
+ pced = (struct sps_command_element *)
+ (GET_VIRT_ADDR(iovec->addr));
+ ents = iovec->size/(sizeof(struct sps_command_element));
+ for (j = 0; j < ents; j++) {
+ pr_info(" [%d] [0x%x] 0x%x\n", j,
+ pced->addr, pced->data);
+ pced++;
+ }
+ }
+ iovec++;
+ }
+
+ pr_info("==============================================\n");
+ pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+ pr_info("==============================================\n");
+ iovec = pce_sps_data->out_transfer.iovec;
+ for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
+ pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
+ iovec->addr, iovec->size, iovec->flags);
+ iovec++;
+ }
+}
+
+#ifdef QCE_DEBUG
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+ _qce_dump_descr_fifos(pce_dev, req_info);
+}
+
+#define QCE_WRITE_REG(val, addr) \
+{ \
+ pr_info(" [0x%p] 0x%x\n", addr, (uint32_t)val); \
+ writel_relaxed(val, addr); \
+}
+
+#else
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+}
+
+#define QCE_WRITE_REG(val, addr) \
+ writel_relaxed(val, addr)
+
+#endif
+
+static int _ce_setup_hash_direct(struct qce_device *pce_dev,
+ struct qce_sha_req *sreq)
+{
+ uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+ uint32_t diglen;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+ bool sha1 = false;
+ uint32_t auth_cfg = 0;
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register)
+	 * completes before other config registers are written, so
+	 * that those writes use the endianness just configured.
+	 */
+ mb();
+
+ if (sreq->alg == QCE_HASH_AES_CMAC) {
+ /* write seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+ /* write seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+ /* write seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* Clear auth_ivn, auth_keyn registers */
+ for (i = 0; i < 16; i++) {
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+ }
+ /* write auth_bytecnt 0/1/2/3, start with 0 */
+ for (i = 0; i < 4; i++)
+ QCE_WRITE_REG(0, pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG +
+ i * sizeof(uint32_t));
+
+ if (sreq->authklen == AES128_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
+ else
+ auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
+ }
+
+ if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC) ||
+ (sreq->alg == QCE_HASH_AES_CMAC)) {
+
+ _byte_stream_to_net_words(mackey32, sreq->authkey,
+ sreq->authklen);
+
+		/* null key is no longer checked; flags select the key source */
+
+ if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+ QCRYPTO_CTX_USE_HW_KEY) {
+ use_hw_key = true;
+ } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY) {
+ use_pipe_key = true;
+ } else {
+ /* setup key */
+ for (i = 0; i < authk_size_in_word; i++)
+ QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG +
+ i*sizeof(uint32_t))));
+ }
+ }
+
+ if (sreq->alg == QCE_HASH_AES_CMAC)
+ goto go_proc;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+ return -EIO;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ auth_cfg = pce_dev->reg.auth_cfg_sha1;
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA256:
+ auth_cfg = pce_dev->reg.auth_cfg_sha256;
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+ if (sreq->first_blk) {
+ if (sha1) {
+ for (i = 0; i < 5; i++)
+ auth32[i] = _std_init_vector_sha1[i];
+ } else {
+ for (i = 0; i < 8; i++)
+ auth32[i] = _std_init_vector_sha256[i];
+ }
+ } else {
+ _byte_stream_to_net_words(auth32, sreq->digest, diglen);
+ }
+
+ /* Set auth_ivn, auth_keyn registers */
+ for (i = 0; i < 5; i++)
+ QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+ if ((sreq->alg == QCE_HASH_SHA256) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC)) {
+ for (i = 5; i < 8; i++)
+ QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+ }
+
+ /* write auth_bytecnt 0/1/2/3, start with 0 */
+ for (i = 0; i < 2; i++)
+ QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG +
+ i * sizeof(uint32_t));
+
+ /* Set/reset last bit in CFG register */
+ if (sreq->last_blk)
+ auth_cfg |= 1 << CRYPTO_LAST;
+ else
+ auth_cfg &= ~(1 << CRYPTO_LAST);
+ if (sreq->first_blk)
+ auth_cfg |= 1 << CRYPTO_FIRST;
+ else
+ auth_cfg &= ~(1 << CRYPTO_FIRST);
+ if (use_hw_key)
+ auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+ if (use_pipe_key)
+ auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+go_proc:
+ /* write seg_cfg */
+ QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+ /* write auth seg_size */
+ QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write auth_seg_start */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* reset encr seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write seg_size */
+ QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* issue go to crypto */
+ if (use_hw_key == false) {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ } else {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+ }
+	/*
+	 * Ensure the previous write (setting the GO register) completes
+	 * before a DMA transfer request is issued.
+	 */
+ mb();
+ return 0;
+}
+
+static int _ce_setup_aead_direct(struct qce_device *pce_dev,
+ struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
+{
+ int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+ uint32_t a_cfg;
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+ uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t enciv_in_word;
+ uint32_t key_size;
+ uint32_t ivsize = q_req->ivsize;
+ uint32_t encr_cfg;
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register)
+	 * completes before other config registers are written, so
+	 * that those writes use the endianness just configured.
+	 */
+ mb();
+
+ key_size = q_req->encklen;
+ enck_size_in_word = key_size/sizeof(uint32_t);
+
+ switch (q_req->alg) {
+
+ case CIPHER_ALG_DES:
+
+ switch (q_req->mode) {
+ case QCE_MODE_CBC:
+ encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enciv_in_word = 2;
+ break;
+
+ case CIPHER_ALG_3DES:
+
+ switch (q_req->mode) {
+ case QCE_MODE_CBC:
+ encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enciv_in_word = 2;
+
+ break;
+
+ case CIPHER_ALG_AES:
+
+ switch (q_req->mode) {
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+ else if (key_size == AES256_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ enciv_in_word = 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+
+ /* write CNTR0_IV0_REG */
+ if (q_req->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+ for (i = 0; i < enciv_in_word; i++)
+ QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+ }
+
+ /*
+ * write encr key
+ * do not use hw key or pipe key
+ */
+ _byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+ for (i = 0; i < enck_size_in_word; i++)
+ QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
+
+ /* write encr seg cfg */
+ if (q_req->dir == QCE_ENCRYPT)
+ encr_cfg |= (1 << CRYPTO_ENCODE);
+ QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* we only support sha1-hmac and sha256-hmac at this point */
+ _byte_stream_to_net_words(mackey32, q_req->authkey,
+ q_req->authklen);
+ for (i = 0; i < authk_size_in_word; i++)
+ QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
+
+ if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
+ for (i = 0; i < 5; i++)
+ QCE_WRITE_REG(_std_init_vector_sha1[i],
+ pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+ } else {
+ for (i = 0; i < 8; i++)
+ QCE_WRITE_REG(_std_init_vector_sha256[i],
+ pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+ }
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+ /* write encr seg size */
+ QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write encr start */
+ QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_START_REG);
+
+ if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+ a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
+ else
+ a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
+
+ if (q_req->dir == QCE_ENCRYPT)
+ a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ else
+ a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ /* write auth seg_cfg */
+ QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write auth seg_size */
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write auth_seg_start */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* write seg_size */
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+						CRYPTO_CONFIG_REG));
+ /* issue go to crypto */
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous write (setting the GO register) completes
+	 * before a DMA transfer request is issued.
+	 */
+ mb();
+ return 0;
+}
+
+static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
+ struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
+{
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+ 0, 0, 0, 0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t key_size;
+ bool use_hw_key = false;
+ bool use_pipe_key = false;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = creq->ivsize;
+ int i;
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register)
+	 * completes before other config registers are written, so
+	 * that those writes use the endianness just configured.
+	 */
+ mb();
+
+ if (creq->mode == QCE_MODE_XTS)
+ key_size = creq->encklen/2;
+ else
+ key_size = creq->encklen;
+
+ if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ use_hw_key = true;
+ } else {
+ if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+ QCRYPTO_CTX_USE_PIPE_KEY)
+ use_pipe_key = true;
+ }
+ if ((use_pipe_key == false) && (use_hw_key == false)) {
+ _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+ enck_size_in_word = key_size/sizeof(uint32_t);
+ }
+ if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+ uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+ uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+ uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+ uint32_t auth_cfg = 0;
+
+ /* Clear auth_ivn, auth_keyn registers */
+ for (i = 0; i < 16; i++) {
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+ QCE_WRITE_REG(0, (pce_dev->iobase +
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+ }
+ /* write auth_bytecnt 0/1/2/3, start with 0 */
+ for (i = 0; i < 4; i++)
+ QCE_WRITE_REG(0, pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG +
+ i * sizeof(uint32_t));
+ /* write nonce */
+ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+ for (i = 0; i < noncelen32; i++)
+ QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
+ CRYPTO_AUTH_INFO_NONCE0_REG +
+ (i*sizeof(uint32_t)));
+
+ if (creq->authklen == AES128_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+ else {
+ if (creq->authklen == AES256_KEY_SIZE)
+ auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+ }
+ if (creq->dir == QCE_ENCRYPT)
+ auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ else
+ auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+ if (use_hw_key == true) {
+ auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+ } else {
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ /* write auth key */
+ for (i = 0; i < authklen32; i++)
+ QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+ CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+ }
+ QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
+ CRYPTO_AUTH_SEG_CFG_REG);
+ if (creq->dir == QCE_ENCRYPT) {
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase +
+ CRYPTO_AUTH_SEG_SIZE_REG);
+ } else {
+ QCE_WRITE_REG((totallen_in - creq->authsize),
+ pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+ }
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+ } else {
+ if (creq->op != QCE_REQ_AEAD)
+ QCE_WRITE_REG(0, pce_dev->iobase +
+ CRYPTO_AUTH_SEG_CFG_REG);
+ }
+	/*
+	 * Ensure the previous writes (to all AUTH registers) complete
+	 * before accessing a register outside the same 1K range.
+	 */
+ mb();
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+ break;
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+ break;
+ case QCE_MODE_XTS:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+ break;
+ case QCE_MODE_CCM:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+ break;
+ case QCE_MODE_CTR:
+ default:
+ if (key_size == AES128_KEY_SIZE)
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+ else
+ encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+ break;
+ }
+
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ } else {
+ encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
+ }
+ if (use_hw_key == false) {
+ QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
+ CRYPTO_ENCR_KEY0_REG);
+ QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
+ CRYPTO_ENCR_KEY1_REG);
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+ CRYPTO_CNTR1_IV1_REG);
+ encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+ } else {
+ encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
+ }
+ if (use_hw_key == false) {
+ /* write encr key */
+ for (i = 0; i < 6; i++)
+				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+ }
+ break;
+ case CIPHER_ALG_AES:
+ default:
+ if (creq->mode == QCE_MODE_XTS) {
+ uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+ = {0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t xtsklen =
+ creq->encklen/(2 * sizeof(uint32_t));
+
+ if ((use_hw_key == false) && (use_pipe_key == false)) {
+ _byte_stream_to_net_words(xtskey32,
+ (creq->enckey + creq->encklen/2),
+ creq->encklen/2);
+ /* write xts encr key */
+ for (i = 0; i < xtsklen; i++)
+ QCE_WRITE_REG(xtskey32[i],
+ pce_dev->iobase +
+ CRYPTO_ENCR_XTS_KEY0_REG +
+ (i * sizeof(uint32_t)));
+ }
+ /* write xts du size */
+ switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+ case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+ QCE_WRITE_REG(
+ min((uint32_t)QCE_SECTOR_SIZE,
+ creq->cryptlen), pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+ QCE_WRITE_REG(
+ min((uint32_t)(QCE_SECTOR_SIZE * 2),
+ creq->cryptlen), pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ default:
+ QCE_WRITE_REG(creq->cryptlen,
+ pce_dev->iobase +
+ CRYPTO_ENCR_XTS_DU_SIZE_REG);
+ break;
+ }
+ }
+ if (creq->mode != QCE_MODE_ECB) {
+ if (creq->mode == QCE_MODE_XTS)
+ _byte_stream_swap_to_net_words(enciv32,
+ creq->iv, ivsize);
+ else
+ _byte_stream_to_net_words(enciv32, creq->iv,
+ ivsize);
+
+ /* write encr cntr iv */
+ for (i = 0; i <= 3; i++)
+ QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG +
+ (i * sizeof(uint32_t)));
+
+ if (creq->mode == QCE_MODE_CCM) {
+ /* write cntr iv for ccm */
+ for (i = 0; i <= 3; i++)
+ QCE_WRITE_REG(enciv32[i],
+ pce_dev->iobase +
+ CRYPTO_ENCR_CCM_INT_CNTR0_REG +
+ (i * sizeof(uint32_t)));
+ /* update cntr_iv[3] by one */
+ QCE_WRITE_REG((enciv32[3] + 1),
+ pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG +
+ (3 * sizeof(uint32_t)));
+ }
+ }
+
+ if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ } else {
+ if ((use_hw_key == false) && (use_pipe_key == false)) {
+ for (i = 0; i < enck_size_in_word; i++)
+ QCE_WRITE_REG(enckey32[i],
+ pce_dev->iobase +
+ CRYPTO_ENCR_KEY0_REG +
+ (i * sizeof(uint32_t)));
+ }
+ } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+ break;
+ } /* end of switch (creq->mode) */
+
+ if (use_pipe_key)
+ encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+ << CRYPTO_USE_PIPE_KEY_ENCR);
+
+ /* write encr seg cfg */
+ encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+ if (use_hw_key == true)
+ encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ else
+ encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ /* write encr seg cfg */
+ QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write encr seg size */
+ if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
+ QCE_WRITE_REG((creq->cryptlen + creq->authsize),
+ pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+ } else {
+ QCE_WRITE_REG(creq->cryptlen,
+ pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+ }
+
+ /* write encr seg start */
+ QCE_WRITE_REG((coffset & 0xffff),
+ pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+
+ /* write encr counter mask */
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
+ QCE_WRITE_REG(0xffffffff,
+ pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
+
+ /* write seg size */
+ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* issue go to crypto */
+ if (use_hw_key == false) {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ } else {
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+ }
+	/*
+	 * Ensure the previous write (setting the GO register) completes
+	 * before a DMA transfer request is issued.
+	 */
+ mb();
+ return 0;
+}
+
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+ struct qce_f9_req *req)
+{
+ uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+ uint32_t auth_cfg;
+ int i;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+ break;
+	}
+
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+ /* set big endian configuration */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register)
+	 * completes before other config registers are written, so
+	 * that those writes use the endianness just configured.
+	 */
+ mb();
+
+ /* write enc_seg_cfg */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write ecn_seg_size */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write key in CRYPTO_AUTH_IV0-3_REG */
+ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+ for (i = 0; i < key_size_in_word; i++)
+ QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+ /* write last bits in CRYPTO_AUTH_IV4_REG */
+ QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
+ CRYPTO_AUTH_IV4_REG));
+
+ /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+ QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT0_REG));
+
+ /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
+ QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
+ CRYPTO_AUTH_BYTECNT1_REG));
+
+ /* write auth seg cfg */
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+ QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+ /* write auth seg size */
+ QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth seg start */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+ /* write seg size */
+ QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* set little endian configuration before go */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write go */
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+	 * Ensure previous instructions (setting the GO register)
+	 * were completed before issuing a DMA transfer request
+ */
+ mb();
+ return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+ struct qce_f8_req *req, bool key_stream_mode,
+ uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+ uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+ break;
+	}
+ /* clear status */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+ /* set big endian configuration */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write auth seg configuration */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+ /* write auth seg size */
+ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+ /* write key */
+ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+ for (i = 0; i < key_size_in_word; i++)
+ QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
+ (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+ /* write encr seg cfg */
+ if (key_stream_mode)
+ encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+ if (req->direction == QCE_OTA_DIR_DOWNLINK)
+ encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+ QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_CFG_REG);
+
+ /* write encr seg start */
+ QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
+ CRYPTO_ENCR_SEG_START_REG);
+ /* write encr seg size */
+ QCE_WRITE_REG(cipher_size, pce_dev->iobase +
+ CRYPTO_ENCR_SEG_SIZE_REG);
+
+ /* write seg size */
+ QCE_WRITE_REG(req->data_len, pce_dev->iobase +
+ CRYPTO_SEG_SIZE_REG);
+
+ /* write cntr0_iv0 for countC */
+ QCE_WRITE_REG(req->count_c, pce_dev->iobase +
+ CRYPTO_CNTR0_IV0_REG);
+ /* write cntr1_iv1 for nPkts, and bearer */
+ if (npkts == 1)
+ npkts = 0;
+ QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+ npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+ pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* set little endian configuration before go */
+ QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+ CRYPTO_CONFIG_REG));
+ /* write go */
+ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)),
+ pce_dev->iobase + CRYPTO_GOPROC_REG);
+ /*
+	 * Ensure previous instructions (setting the GO register)
+	 * were completed before issuing a DMA transfer request
+ */
+ mb();
+ return 0;
+}
+
+
+static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
+{
+ int rc = 0;
+ struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
+ [req_info].ce_sps;
+
+	if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr)
+ return rc;
+
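+	/*
+	 * Push the prebuilt unlock_all_pipes command list as a single
+	 * command descriptor; the SPS_IOVEC_FLAG_UNLOCK flag is what
+	 * releases the pipe lock so other pipe groups can use the
+	 * crypto engine again.
+	 */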
+ rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
+ GET_PHYS_ADDR(pce_sps_data->
+ cmdlistptr.unlock_all_pipes.cmdlist),
+ 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
+ if (rc) {
+		pr_err("sps_transfer_one() fail rc=%d\n", rc);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+ bool is_complete);
+
+static int _aead_complete(struct qce_device *pce_dev, int req_info)
+{
+ struct aead_request *areq;
+ unsigned char mac[SHA256_DIGEST_SIZE];
+ uint32_t ccm_fail_status = 0;
+ uint32_t result_dump_status;
+ int32_t result_status = 0;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = (struct aead_request *) preq_info->areq;
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+
+ if (preq_info->asg)
+ qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
+ /* check MAC */
+ memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
+ SHA256_DIGEST_SIZE);
+
+ /* read status before unlock */
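+	/*
+	 * Depending on the HW workaround flags, the CCM MAC status comes
+	 * from the result dump, from the null-request result dump, or
+	 * directly from CRYPTO_STATUS_REG on parts without the
+	 * get-around support.
+	 */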
+	if (preq_info->dir == QCE_DECRYPT) {
+		if (pce_dev->no_get_around) {
+			if (pce_dev->no_ccm_mac_status_get_around)
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result->status);
+			else
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result_null->status);
+		} else {
+			ccm_fail_status = readl_relaxed(pce_dev->iobase +
+					CRYPTO_STATUS_REG);
+		}
+	}
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, mac, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("aead operation error. Status %x\n", result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("aead sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+
+ if (preq_info->mode == QCE_MODE_CCM) {
+ /*
+		 * Do not use the result dump; instead, use the status we
+		 * just read from the device to check for MAC_FAILED.
+ */
+ if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
+ (ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
+ result_status = -EBADMSG;
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, mac, NULL, result_status);
+ } else {
+ uint32_t ivsize = 0;
+ struct crypto_aead *aead;
+ unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+ aead = crypto_aead_reqtfm(areq);
+ ivsize = crypto_aead_ivsize(aead);
+ memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
+ sizeof(iv));
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, mac, iv, result_status);
+ }
+ return 0;
+}
+
+static int _sha_complete(struct qce_device *pce_dev, int req_info)
+{
+ struct ahash_request *areq;
+ unsigned char digest[SHA256_DIGEST_SIZE];
+ uint32_t bytecount32[2];
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = (struct ahash_request *) preq_info->areq;
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ DMA_TO_DEVICE);
+ memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
+ SHA256_DIGEST_SIZE);
+ _byte_stream_to_net_words(bytecount32,
+ (unsigned char *)pce_sps_data->result->auth_byte_count,
+ 2 * CRYPTO_REG_SIZE);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, digest, (char *)bytecount32,
+ -ENXIO);
+ return -ENXIO;
+ }
+
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("sha operation error. Status %x\n", result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status) {
+ pr_err("sha sps operation error. sps status %x\n",
+ pce_sps_data->consumer_status);
+ result_status = -ENXIO;
+ }
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, digest, (char *)bytecount32, result_status);
+ return 0;
+}
+
+static int _f9_complete(struct qce_device *pce_dev, int req_info)
+{
+ uint32_t mac_i;
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+ void *areq;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = preq_info->areq;
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ preq_info->ota_size, DMA_TO_DEVICE);
+ _byte_stream_to_net_words(&mac_i,
+ (char *)(&pce_sps_data->result->auth_iv[0]),
+ CRYPTO_REG_SIZE);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("f9 operation error. Status %x\n", result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("f9 sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, (char *)&mac_i, NULL, result_status);
+
+ return 0;
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
+{
+ struct ablkcipher_request *areq;
+ unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = (struct ablkcipher_request *) preq_info->areq;
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+ preq_info->dst_nents, DMA_FROM_DEVICE);
+ }
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ pce_sps_data->result->status = 0;
+
+ if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR))) {
+ pr_err("ablk_cipher operation error. Status %x\n",
+ result_dump_status);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("ablk_cipher sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+
+ if (preq_info->mode == QCE_MODE_ECB) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
+ result_status);
+ } else {
+ if (pce_dev->ce_bam_info.minor_version == 0) {
+ if (preq_info->mode == QCE_MODE_CBC) {
+ if (preq_info->dir == QCE_DECRYPT)
+ memcpy(iv, (char *)preq_info->dec_iv,
+ sizeof(iv));
+ else
+ memcpy(iv, (unsigned char *)
+ (sg_virt(areq->src) +
+ areq->src->length - 16),
+ sizeof(iv));
+ }
+ if ((preq_info->mode == QCE_MODE_CTR) ||
+ (preq_info->mode == QCE_MODE_XTS)) {
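+				/*
+				 * Minor version 0 HW does not report the
+				 * updated counter back, so recompute it in
+				 * SW: treat IV bytes 12..15 as a big-endian
+				 * 32-bit counter, add the number of 16-byte
+				 * blocks processed (1 for XTS), mod 2^32,
+				 * and store the result back big-endian.
+				 */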
+ uint32_t num_blk = 0;
+ uint32_t cntr_iv3 = 0;
+ unsigned long long cntr_iv64 = 0;
+ unsigned char *b = (unsigned char *)(&cntr_iv3);
+
+ memcpy(iv, areq->info, sizeof(iv));
+ if (preq_info->mode != QCE_MODE_XTS)
+ num_blk = areq->nbytes/16;
+ else
+ num_blk = 1;
+ cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
+ (((*(iv + 13)) << 16) & 0xff0000) |
+ (((*(iv + 14)) << 8) & 0xff00) |
+ (*(iv + 15) & 0xff);
+ cntr_iv64 =
+ (((unsigned long long)cntr_iv3 &
+ 0xFFFFFFFFULL) +
+ (unsigned long long)num_blk) %
+ (unsigned long long)(0x100000000ULL);
+
+ cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
+ *(iv + 15) = (char)(*b);
+ *(iv + 14) = (char)(*(b + 1));
+ *(iv + 13) = (char)(*(b + 2));
+ *(iv + 12) = (char)(*(b + 3));
+ }
+ } else {
+ memcpy(iv,
+ (char *)(pce_sps_data->result->encr_cntr_iv),
+ sizeof(iv));
+ }
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, iv, result_status);
+ }
+ return 0;
+}
+
+static int _f8_complete(struct qce_device *pce_dev, int req_info)
+{
+ int32_t result_status = 0;
+ uint32_t result_dump_status;
+ uint32_t result_dump_status2;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+ qce_comp_func_ptr_t qce_callback;
+ void *areq;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ qce_callback = preq_info->qce_cb;
+ areq = preq_info->areq;
+ if (preq_info->phy_ota_dst)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+ preq_info->ota_size, DMA_FROM_DEVICE);
+ if (preq_info->phy_ota_src)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ preq_info->ota_size, (preq_info->phy_ota_dst) ?
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+ if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, -ENXIO);
+ return -ENXIO;
+ }
+ result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+ result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
+
+ if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+ | (1 << CRYPTO_HSD_ERR)))) {
+		pr_err("f8 operation error. Dump status %x status2 %x req %d\n",
+			result_dump_status, result_dump_status2, req_info);
+ result_status = -ENXIO;
+ } else if (pce_sps_data->consumer_status |
+ pce_sps_data->producer_status) {
+ pr_err("f8 sps operation error. sps status %x %x\n",
+ pce_sps_data->consumer_status,
+ pce_sps_data->producer_status);
+ result_status = -ENXIO;
+ }
+ pce_sps_data->result->status = 0;
+ pce_sps_data->result->status2 = 0;
+ qce_free_req_info(pce_dev, req_info, true);
+ qce_callback(areq, NULL, NULL, result_status);
+ return 0;
+}
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
+{
+ struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
+ .ce_sps;
+ pce_sps_data->in_transfer.iovec_count = 0;
+ pce_sps_data->out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
+{
+ struct sps_iovec *iovec;
+
+ if (sps_bam_pipe->iovec_count == 0)
+ return;
+ iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
+ iovec->flags |= flag;
+}
+
+static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
+ struct sps_transfer *sps_bam_pipe)
+{
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ uint32_t data_cnt;
+
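+	/*
+	 * Split the buffer into BAM iovecs of at most SPS_MAX_PKT_SIZE
+	 * bytes each, failing if the transfer would need more than
+	 * QCE_MAX_NUM_DSCR descriptors.
+	 */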
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descriptors %d exceeds max (%d)\n",
+				sps_bam_pipe->iovec_count,
+				(uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE)
+ data_cnt = SPS_MAX_PKT_SIZE;
+ else
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+ iovec->flags = SPS_GET_UPPER_ADDR(paddr);
+ sps_bam_pipe->iovec_count++;
+ iovec++;
+ paddr += data_cnt;
+ len -= data_cnt;
+ }
+ return 0;
+}
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+ struct scatterlist *sg_src, uint32_t nbytes,
+ struct sps_transfer *sps_bam_pipe)
+{
+ uint32_t data_cnt, len;
+ dma_addr_t addr;
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+
+ while (nbytes > 0) {
+ len = min(nbytes, sg_dma_len(sg_src));
+ nbytes -= len;
+ addr = sg_dma_address(sg_src);
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)\n",
+					sps_bam_pipe->iovec_count,
+					(uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE) {
+ data_cnt = SPS_MAX_PKT_SIZE;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ } else {
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ }
+ iovec++;
+ sps_bam_pipe->iovec_count++;
+ addr += data_cnt;
+ len -= data_cnt;
+ }
+ sg_src = sg_next(sg_src);
+ }
+ return 0;
+}
+
+static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
+ struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
+ struct sps_transfer *sps_bam_pipe)
+{
+ uint32_t data_cnt, len;
+ dma_addr_t addr;
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ unsigned int res_within_sg;
+
+ if (!sg_src)
+ return -ENOENT;
+ res_within_sg = sg_dma_len(sg_src);
+
+ while (off > 0) {
+ if (!sg_src) {
+ pr_err("broken sg list off %d nbytes %d\n",
+ off, nbytes);
+ return -ENOENT;
+ }
+ len = sg_dma_len(sg_src);
+ if (off < len) {
+ res_within_sg = len - off;
+ break;
+ }
+ off -= len;
+ sg_src = sg_next(sg_src);
+ if (sg_src)
+ res_within_sg = sg_dma_len(sg_src);
+ }
+ while (nbytes > 0 && sg_src) {
+ len = min(nbytes, res_within_sg);
+ nbytes -= len;
+ addr = sg_dma_address(sg_src) + off;
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+ while (len > 0) {
+ if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)\n",
+					sps_bam_pipe->iovec_count,
+					(uint32_t)QCE_MAX_NUM_DSCR);
+ return -ENOMEM;
+ }
+ if (len > SPS_MAX_PKT_SIZE) {
+ data_cnt = SPS_MAX_PKT_SIZE;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ } else {
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = SPS_GET_LOWER_ADDR(addr);
+ iovec->flags = SPS_GET_UPPER_ADDR(addr);
+ }
+ iovec++;
+ sps_bam_pipe->iovec_count++;
+ addr += data_cnt;
+ len -= data_cnt;
+ }
+ if (nbytes) {
+ sg_src = sg_next(sg_src);
+ if (!sg_src) {
+				pr_err("sg list exhausted, %d bytes remaining\n",
+					nbytes);
+ return -ENOMEM;
+ }
+ res_within_sg = sg_dma_len(sg_src);
+ off = 0;
+ }
+ }
+ return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+ struct qce_cmdlist_info *cmdptr,
+ struct sps_transfer *sps_bam_pipe)
+{
+ dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+
+	if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)\n",
+			sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+		return -ENOMEM;
+	}
+	iovec->size = cmdptr->size;
+	iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+	iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
+	sps_bam_pipe->iovec_count++;
+	return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
+{
+ int rc = 0;
+ struct ce_sps_data *pce_sps_data;
+
+ pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+ pce_sps_data->out_transfer.user =
+ (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+ (unsigned int) req_info));
+ pce_sps_data->in_transfer.user =
+ (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+ (unsigned int) req_info));
+ _qce_dump_descr_fifos_dbg(pce_dev, req_info);
+
+ if (pce_sps_data->in_transfer.iovec_count) {
+ rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
+ &pce_sps_data->in_transfer);
+ if (rc) {
+ pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
+ (uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
+ rc);
+ goto ret;
+ }
+ }
+ rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+ &pce_sps_data->out_transfer);
+ if (rc)
+ pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+ (uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
+ret:
+ if (rc)
+ _qce_dump_descr_fifos(pce_dev, req_info);
+ return rc;
+}
+
+/**
+ * Allocate and connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it to a memory endpoint by calling the
+ * appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_producer - 1 means Producer endpoint
+ *                0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+ struct qce_sps_ep_conn_data *ep,
+ bool is_producer)
+{
+ int rc = 0;
+ struct sps_pipe *sps_pipe_info;
+ struct sps_connect *sps_connect_info = &ep->connect;
+ struct sps_register_event *sps_event = &ep->event;
+
+ /* Allocate endpoint context */
+ sps_pipe_info = sps_alloc_endpoint();
+ if (!sps_pipe_info) {
+ pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+ is_producer);
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* Now save the sps pipe handle */
+ ep->pipe = sps_pipe_info;
+
+ /* Get default connection configuration for an endpoint */
+ rc = sps_get_config(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+ goto get_config_err;
+ }
+
+ /* Modify the default connection configuration */
+ if (is_producer) {
+ /*
+		 * For CE producer transfer, the source should be the
+		 * CE peripheral whereas the destination should be
+		 * system memory.
+ */
+ sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
+ sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+ /* Producer pipe will handle this connection */
+ sps_connect_info->mode = SPS_MODE_SRC;
+ sps_connect_info->options =
+ SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+ } else {
+		/* For CE consumer transfer, the source should be
+		 * system memory whereas the destination should be
+		 * the CE peripheral
+ */
+ sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+ sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
+ sps_connect_info->mode = SPS_MODE_DEST;
+ sps_connect_info->options =
+ SPS_O_AUTO_ENABLE;
+ }
+
+ /* Producer pipe index */
+ sps_connect_info->src_pipe_index =
+ pce_dev->ce_bam_info.src_pipe_index;
+ /* Consumer pipe index */
+ sps_connect_info->dest_pipe_index =
+ pce_dev->ce_bam_info.dest_pipe_index;
+ /* Set pipe group */
+ sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
+ sps_connect_info->event_thresh = 0x10;
+ /*
+	 * Max. no of scatter/gather buffers that can
+	 * be passed by the block layer = 32 (NR_SG).
+	 * Each BAM descriptor needs 64 bits (8 bytes).
+	 * One BAM descriptor is required per buffer transfer.
+	 * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO.
+	 * But due to a HW limitation we need to allocate at least one extra
+	 * descriptor's worth of memory (256 bytes + 8 bytes). To keep the
+	 * size a power of 2, we allocate 512 bytes of memory.
+ */
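+	/*
+	 * With multiple outstanding requests, the FIFO below is in fact
+	 * sized for QCE_MAX_NUM_DSCR iovecs per request across
+	 * MAX_QCE_ALLOC_BAM_REQ requests, capped at
+	 * MAX_SPS_DESC_FIFO_SIZE.
+	 */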
+ sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
+ sizeof(struct sps_iovec);
+ if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
+ sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
+ sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ &sps_connect_info->desc.phys_base,
+ GFP_KERNEL);
+ if (sps_connect_info->desc.base == NULL) {
+ rc = -ENOMEM;
+ pr_err("Can not allocate coherent memory for sps data\n");
+ goto get_config_err;
+ }
+
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+ /* Establish connection between peripheral and memory endpoint */
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+ goto sps_connect_err;
+ }
+
+ sps_event->mode = SPS_TRIGGER_CALLBACK;
+ sps_event->xfer_done = NULL;
+ sps_event->user = (void *)pce_dev;
+ if (is_producer) {
+ sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
+ sps_event->callback = _sps_producer_callback;
+ rc = sps_register_event(ep->pipe, sps_event);
+ if (rc) {
+ pr_err("Producer callback registration failed rc=%d\n",
+ rc);
+ goto sps_connect_err;
+ }
+ } else {
+ sps_event->options = SPS_O_EOT;
+ sps_event->callback = NULL;
+ }
+
+	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = %pa\n",
+ is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+ (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
+ goto out;
+
+sps_connect_err:
+ dma_free_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ sps_connect_info->desc.base,
+ sps_connect_info->desc.phys_base);
+get_config_err:
+ sps_free_endpoint(sps_pipe_info);
+out:
+ return rc;
+}
+
+/**
+ * Disconnect and deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once, typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+ struct qce_sps_ep_conn_data *ep)
+{
+ struct sps_pipe *sps_pipe_info = ep->pipe;
+ struct sps_connect *sps_connect_info = &ep->connect;
+
+ sps_disconnect(sps_pipe_info);
+ dma_free_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ sps_connect_info->desc.base,
+ sps_connect_info->desc.phys_base);
+ sps_free_endpoint(sps_pipe_info);
+}
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+ struct bam_registration_info *pbam;
+
+ mutex_lock(&bam_register_lock);
+ pbam = pce_dev->pbam;
+ if (pbam == NULL)
+ goto ret;
+
+ pbam->cnt--;
+ if (pbam->cnt > 0)
+ goto ret;
+
+ if (pce_dev->ce_bam_info.bam_handle) {
+ sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
+
+ pr_debug("deregister bam handle 0x%lx\n",
+ pce_dev->ce_bam_info.bam_handle);
+ pce_dev->ce_bam_info.bam_handle = 0;
+ }
+ iounmap(pbam->bam_iobase);
+ pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+ list_del(&pbam->qlist);
+ kfree(pbam);
+
+ret:
+ pce_dev->pbam = NULL;
+ mutex_unlock(&bam_register_lock);
+}
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
+{
+ int rc = 0;
+ struct sps_bam_props bam = {0};
+ struct bam_registration_info *pbam = NULL;
+ struct bam_registration_info *p;
+ uint32_t bam_cfg = 0;
+
+ mutex_lock(&bam_register_lock);
+
+ list_for_each_entry(p, &qce50_bam_list, qlist) {
+ if (p->bam_mem == pce_dev->bam_mem) {
+ pbam = p; /* found */
+ break;
+ }
+ }
+
+ if (pbam) {
+ pr_debug("found bam 0x%x\n", pbam->bam_mem);
+ pbam->cnt++;
+ pce_dev->ce_bam_info.bam_handle = pbam->handle;
+ pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+ pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+ pce_dev->pbam = pbam;
+ pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+ goto ret;
+ }
+
+ pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
+ if (!pbam) {
+ rc = -ENOMEM;
+ goto ret;
+ }
+ pbam->cnt = 1;
+ pbam->bam_mem = pce_dev->bam_mem;
+ pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
+ pce_dev->bam_mem_size);
+ if (!pbam->bam_iobase) {
+ kfree(pbam);
+ rc = -ENOMEM;
+ pr_err("Can not map BAM io memory\n");
+ goto ret;
+ }
+ pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+ pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+ pbam->handle = 0;
+ pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+ bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
+ CRYPTO_BAM_CNFG_BITS_REG);
+ pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+ true : false;
+	if (!pbam->support_cmd_dscr) {
+		pr_info("qce50 doesn't support command descriptors. bam_cfg %x\n",
+ bam_cfg);
+ pce_dev->no_get_around = false;
+ }
+ pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+
+ bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
+ bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
+
+ /*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfers. It is ignored for BAM-to-System mode transfers.
+ */
+ bam.event_threshold = 0x10; /* Pipe event threshold */
+ /*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * SPS HW will only be used when the
+	 * data transfer size is > 64 bytes.
+ */
+ bam.summing_threshold = 64;
+	/* SPS driver will handle the crypto BAM IRQ */
+ bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
+ /*
+ * Set flag to indicate BAM global device control is managed
+ * remotely.
+ */
+	if (!pce_dev->support_cmd_dscr || pce_dev->is_shared)
+ bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+ else
+ bam.manage = SPS_BAM_MGR_LOCAL;
+
+ bam.ee = pce_dev->ce_bam_info.bam_ee;
+ bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
+ bam.options |= SPS_BAM_CACHED_WP;
+ pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
+ pr_debug("bam virtual base=0x%p\n", bam.virt_addr);
+
+ /* Register CE Peripheral BAM device to SPS driver */
+ rc = sps_register_bam_device(&bam, &pbam->handle);
+ if (rc) {
+ pr_err("sps_register_bam_device() failed! err=%d", rc);
+ rc = -EIO;
+ iounmap(pbam->bam_iobase);
+ kfree(pbam);
+ goto ret;
+ }
+
+ pce_dev->pbam = pbam;
+ list_add_tail(&pbam->qlist, &qce50_bam_list);
+ pce_dev->ce_bam_info.bam_handle = pbam->handle;
+
+ret:
+ mutex_unlock(&bam_register_lock);
+
+ return rc;
+}
+
+/**
+ * Initialize the SPS HW connected to the CE core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes two SPS endpoints
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+ int rc = 0;
+
+ rc = qce_sps_get_bam(pce_dev);
+ if (rc)
+ return rc;
+ pr_debug("BAM device registered. bam_handle=0x%lx\n",
+ pce_dev->ce_bam_info.bam_handle);
+
+ rc = qce_sps_init_ep_conn(pce_dev,
+ &pce_dev->ce_bam_info.producer, true);
+ if (rc)
+ goto sps_connect_producer_err;
+ rc = qce_sps_init_ep_conn(pce_dev,
+ &pce_dev->ce_bam_info.consumer, false);
+ if (rc)
+ goto sps_connect_consumer_err;
+
+ pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
+ (unsigned long long)pce_dev->ce_bam_info.bam_mem,
+ (unsigned int)pce_dev->ce_bam_info.bam_irq);
+ return rc;
+
+sps_connect_consumer_err:
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+sps_connect_producer_err:
+ qce_sps_release_bam(pce_dev);
+ return rc;
+}
+
+static inline int qce_alloc_req_info(struct qce_device *pce_dev)
+{
+ int i;
+ int request_index = pce_dev->ce_request_index;
+
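+	/*
+	 * Lock-free slot allocator: scan the request ring starting just
+	 * past the previous allocation and claim the first free slot by
+	 * atomically flipping its in_use flag with xchg().
+	 */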
+ for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+ request_index++;
+ if (request_index >= MAX_QCE_BAM_REQ)
+ request_index = 0;
+ if (xchg(&pce_dev->ce_request_info[request_index].
+ in_use, true) == false) {
+ pce_dev->ce_request_index = request_index;
+ return request_index;
+ }
+ }
+ pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
+ pce_dev->dev_no, atomic_read(
+ &pce_dev->no_of_queued_req));
+ return -EBUSY;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+ bool is_complete)
+{
+ pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
+ if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+ if (req_info < MAX_QCE_BAM_REQ && is_complete)
+ atomic_dec(&pce_dev->no_of_queued_req);
+ } else
+		pr_warn("request info %d already free\n", req_info);
+}
+
+static void print_notify_debug(struct sps_event_notify *notify)
+{
+ phys_addr_t addr =
+ DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
+ notify->data.transfer.iovec.addr);
+	pr_debug("sps ev_id=%d, addr=%pa, size=0x%x, flags=0x%x user=0x%p\n",
+ notify->event_id, &addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags,
+ notify->data.transfer.user);
+}
+
+static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
+{
+ struct ce_request_info *preq_info;
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+
+ switch (preq_info->xfer_type) {
+ case QCE_XFER_CIPHERING:
+ _ablk_cipher_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_HASHING:
+ _sha_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_AEAD:
+ _aead_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_F8:
+ _f8_complete(pce_dev, req_info);
+ break;
+ case QCE_XFER_F9:
+ _f9_complete(pce_dev, req_info);
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, true);
+ break;
+ }
+}
+
+static void qce_multireq_timeout(unsigned long data)
+{
+ struct qce_device *pce_dev = (struct qce_device *)data;
+ int ret = 0;
+ int last_seq;
+ unsigned long flags;
+
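+	/*
+	 * Periodic bunch-mode heartbeat: if new commands arrived since
+	 * the last check (bunch_cmd_seq moved past last_intr_seq), just
+	 * re-arm the timer; otherwise the engine is idle and we fall
+	 * back to interrupt mode below.
+	 */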
+ last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
+ if (last_seq == 0 ||
+ last_seq != atomic_read(&pce_dev->last_intr_seq)) {
+ atomic_set(&pce_dev->last_intr_seq, last_seq);
+ mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+ return;
+ }
+	/* The last bunch-mode command has timed out */
+
+ /*
+	 * From here until the dummy request finishes its sps transfer and
+	 * the owner is set back to none, we disable interrupts so this
+	 * section won't be preempted or interrupted. If a BAM interrupt
+	 * fired in between and the completion callback ran, the client
+	 * driver could issue a new request and deadlock.
+ */
+ local_irq_save(flags);
+ if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
+ != QCE_OWNER_NONE) {
+ local_irq_restore(flags);
+ mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+ return;
+ }
+
+ ret = qce_dummy_req(pce_dev);
+ if (ret)
+ pr_warn("pcedev %d: Failed to insert dummy req\n",
+ pce_dev->dev_no);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
+ pce_dev->mode = IN_INTERRUPT_MODE;
+ local_irq_restore(flags);
+
+ del_timer(&(pce_dev->timer));
+ pce_dev->qce_stats.no_of_timeouts++;
+ pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
+}
+
+void qce_get_driver_stats(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ if (!_qce50_disp_stats)
+ return;
+	pr_info("Engine %d timeout occurred %d\n", pce_dev->dev_no,
+ pce_dev->qce_stats.no_of_timeouts);
+ pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
+ pce_dev->qce_stats.no_of_dummy_reqs);
+ if (pce_dev->mode)
+ pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
+ else
+ pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
+ pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
+ atomic_read(&pce_dev->no_of_queued_req));
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ pce_dev->qce_stats.no_of_timeouts = 0;
+ pce_dev->qce_stats.no_of_dummy_reqs = 0;
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
+
+static void _sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)notify->user;
+ int rc = 0;
+ unsigned int req_info;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ print_notify_debug(notify);
+
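+	/*
+	 * The transfer's user cookie was stamped with CRYPTO_REQ_USER_PAT
+	 * in its upper bits by _qce_sps_transfer(); validate the pattern
+	 * before trusting the request index carried in the low bits.
+	 */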
+ req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
+ if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
+		pr_warn("request information %d has an invalid pattern\n",
+			req_info);
+ return;
+ }
+
+ req_info = req_info & 0x00ff;
+	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
+ pr_warn("request information %d out of range\n", req_info);
+ return;
+ }
+
+ preq_info = &pce_dev->ce_request_info[req_info];
+
+ pce_sps_data = &preq_info->ce_sps;
+ if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
+ preq_info->xfer_type == QCE_XFER_AEAD) &&
+ pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ pce_sps_data->out_transfer.iovec_count = 0;
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+ &pce_sps_data->out_transfer);
+ if (rc) {
+ pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+ (uintptr_t)pce_dev->ce_bam_info.producer.pipe,
+ rc);
+ }
+ return;
+ }
+
+ _qce_req_complete(pce_dev, req_info);
+}
+
+/**
+ * De-initialize the SPS HW connected to the CE core
+ *
+ * This function deinitializes the SPS endpoints and then
+ * deregisters the BAM resources from the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+ qce_sps_release_bam(pce_dev);
+}
+
+static void qce_add_cmd_element(struct qce_device *pdev,
+ struct sps_command_element **cmd_ptr, u32 addr,
+ u32 data, struct sps_command_element **populate)
+{
+ (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+ (*cmd_ptr)->command = 0;
+ (*cmd_ptr)->data = data;
+ (*cmd_ptr)->mask = 0xFFFFFFFF;
+ (*cmd_ptr)->reserved = 0;
+ if (populate != NULL)
+ *populate = *cmd_ptr;
+ (*cmd_ptr)++;
+}
+
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+ bool key_128)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t xts_key_reg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to AES cipher operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
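+	/*
+	 * key_reg, iv_reg and xts_key_reg below count 32-bit register
+	 * writes: an AES-128 key spans 4 words and an AES-256 key 8;
+	 * CBC/CTR/XTS take a 4-word IV while ECB takes none, and XTS
+	 * additionally programs a tweak key of the same width as the
+	 * cipher key.
+	 */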
+ switch (mode) {
+ case QCE_MODE_CBC:
+ case QCE_MODE_CTR:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+ if (mode == QCE_MODE_CBC)
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+ else
+ encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
+ iv_reg = 4;
+ key_reg = 4;
+ xts_key_reg = 0;
+ } else {
+ cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+ if (mode == QCE_MODE_CBC)
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+ else
+ encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
+ iv_reg = 4;
+ key_reg = 8;
+ xts_key_reg = 0;
+ }
+ break;
+ case QCE_MODE_ECB:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
+ iv_reg = 0;
+ key_reg = 4;
+ xts_key_reg = 0;
+ } else {
+ cmdlistptr->cipher_aes_256_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
+ iv_reg = 0;
+ key_reg = 8;
+ xts_key_reg = 0;
+ }
+ break;
+ case QCE_MODE_XTS:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_xts.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
+ iv_reg = 4;
+ key_reg = 4;
+ xts_key_reg = 4;
+ } else {
+ cmdlistptr->cipher_aes_256_xts.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+ encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
+ iv_reg = 4;
+ key_reg = 8;
+ xts_key_reg = 8;
+ }
+ break;
+ default:
+ pr_err("Unknown mode of operation %d received, exiting now\n",
+ mode);
+ return -EINVAL;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+ (uint32_t)0xffffffff, &pcl_info->encr_mask);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ if (xts_key_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+ 0, &pcl_info->encr_xts_key);
+ for (i = 1; i < xts_key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_XTS_KEY0_REG +
+ i * sizeof(uint32_t)), 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
+ &pcl_info->encr_xts_du_size);
+ }
+ if (iv_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ }
+ /* Add dummy to align size to burst-size multiple */
+ if (mode == QCE_MODE_XTS) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ } else {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_size);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+ bool mode_cbc)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to cipher operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case CIPHER_ALG_DES:
+ if (mode_cbc) {
+ cmdlistptr->cipher_des_cbc.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+ encr_cfg = pdev->reg.encr_cfg_des_cbc;
+ iv_reg = 2;
+ key_reg = 2;
+ } else {
+ cmdlistptr->cipher_des_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_des_ecb;
+ iv_reg = 0;
+ key_reg = 2;
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (mode_cbc) {
+ cmdlistptr->cipher_3des_cbc.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+ encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+ iv_reg = 2;
+ key_reg = 6;
+ } else {
+ cmdlistptr->cipher_3des_ecb.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+ encr_cfg = pdev->reg.encr_cfg_3des_ecb;
+ iv_reg = 0;
+ key_reg = 6;
+ }
+ break;
+ default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ if (iv_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+ NULL);
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
+ int cri_index, unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+ [cri_index].ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_null);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
+ pdev->ce_bam_info.ce_burst_size, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+ pdev->reg.encr_cfg_aes_ecb_128, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+ return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+ bool key_128)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t key_reg = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to authentication operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
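+	/*
+	 * iv_reg counts the hash state words (5 for SHA-1, 8 for
+	 * SHA-256) and key_reg the 32-bit key words (16 for an HMAC
+	 * block-sized key, 4 or 8 for AES-CMAC).
+	 */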
+ switch (alg) {
+ case QCE_HASH_SHA1:
+ cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha1);
+
+ auth_cfg = pdev->reg.auth_cfg_sha1;
+ iv_reg = 5;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ break;
+ case QCE_HASH_SHA256:
+ cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha256);
+
+ auth_cfg = pdev->reg.auth_cfg_sha256;
+ iv_reg = 8;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+ auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
+ key_reg = 16;
+ iv_reg = 5;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+ auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
+ key_reg = 16;
+ iv_reg = 8;
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_AES_CMAC:
+ if (key_128 == true) {
+ cmdlistptr->auth_aes_128_cmac.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+ auth_cfg = pdev->reg.auth_cfg_cmac_128;
+ key_reg = 4;
+ } else {
+ cmdlistptr->auth_aes_256_cmac.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+ auth_cfg = pdev->reg.auth_cfg_cmac_256;
+ key_reg = 8;
+ }
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+ NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ break;
+ default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ if (alg == QCE_HASH_AES_CMAC) {
+ /* reset auth iv, bytecount and key registers */
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, NULL);
+ } else {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ if (key_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+ int cri_index,
+ unsigned char **pvaddr,
+ uint32_t alg,
+ uint32_t mode,
+ uint32_t key_size,
+ bool sha1)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ uint32_t key_reg;
+ uint32_t iv_reg;
+ uint32_t i;
+ uint32_t enciv_in_word;
+ uint32_t encr_cfg;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ switch (alg) {
+
+ case CIPHER_ALG_DES:
+
+ switch (mode) {
+
+ case QCE_MODE_CBC:
+ if (sha1) {
+ cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_des);
+ } else {
+ cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_des);
+ }
+ encr_cfg = pdev->reg.encr_cfg_des_cbc;
+ break;
+ default:
+ return -EINVAL;
+		}
+
+ enciv_in_word = 2;
+
+ break;
+
+ case CIPHER_ALG_3DES:
+ switch (mode) {
+
+ case QCE_MODE_CBC:
+ if (sha1) {
+ cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_3des);
+ } else {
+ cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_3des);
+ }
+ encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+ break;
+ default:
+ return -EINVAL;
+		}
+
+ enciv_in_word = 2;
+
+ break;
+
+ case CIPHER_ALG_AES:
+ switch (mode) {
+
+ case QCE_MODE_CBC:
+ if (key_size == AES128_KEY_SIZE) {
+ if (sha1) {
+ cmdlistptr->
+ aead_hmac_sha1_cbc_aes_128.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_aes_128);
+ } else {
+ cmdlistptr->
+ aead_hmac_sha256_cbc_aes_128.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_aes_128);
+ }
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+ } else if (key_size == AES256_KEY_SIZE) {
+ if (sha1) {
+ cmdlistptr->
+ aead_hmac_sha1_cbc_aes_256.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha1_cbc_aes_256);
+ } else {
+ cmdlistptr->
+ aead_hmac_sha256_cbc_aes_256.
+ cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->
+ aead_hmac_sha256_cbc_aes_256);
+ }
+ encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+		}
+
+ enciv_in_word = 4;
+
+ break;
+
+ default:
+ return -EINVAL;
+	}
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ key_reg = key_size/sizeof(uint32_t);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ if (mode != QCE_MODE_ECB) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < enciv_in_word; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+	}
+
+ if (sha1)
+ iv_reg = 5;
+ else
+ iv_reg = 8;
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+ &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+
+	if (sha1)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+			pdev->reg.auth_cfg_aead_sha1_hmac,
+			&pcl_info->auth_seg_cfg);
+	else
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+			pdev->reg.auth_cfg_aead_sha256_hmac,
+			&pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+ return 0;
+}
+
+static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, bool key_128)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+ [cri_index].ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t key_reg = 0;
+
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to aead operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+	if (key_128) {
+ cmdlistptr->aead_aes_128_ccm.cmdlist =
+ (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+ auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
+ encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
+ key_reg = 4;
+	} else {
+		cmdlistptr->aead_aes_256_ccm.cmdlist =
+					(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
+		key_reg = 8;
+	}
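+	/*
+	 * Illustrative note: key_reg counts 32-bit key registers to
+	 * program, i.e. AES-128 -> 16 bytes / 4 = 4 words and
+	 * AES-256 -> 32 bytes / 4 = 8 words.
+	 */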
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+ encr_cfg, &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+ (uint32_t)0xffffffff, &pcl_info->encr_mask);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+ (uint32_t)0xffffffff, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+ /* reset auth iv, bytecount and key registers */
+ for (i = 0; i < 8; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+ 0, NULL);
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ /* set auth key */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+ &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ /* set NONCE info */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+ &pcl_info->auth_nonce_info);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_INFO_NONCE0_REG +
+ i * sizeof(uint32_t)), 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+ &pcl_info->encr_ccm_cntr_iv);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 4;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to f8 cipher algorithm defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+
+ switch (alg) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f8_kasumi);
+ encr_cfg = pdev->reg.encr_cfg_kasumi;
+ break;
+
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f8_snow3g);
+ encr_cfg = pdev->reg.encr_cfg_snow3g;
+ break;
+ }
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+ /* set config to big endian */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+ NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+ struct sps_command_element *ce_vaddr;
+ uintptr_t ce_vaddr_start;
+ struct qce_cmdlistptr_ops *cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t iv_reg = 0;
+
+ cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+ *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+ pdev->ce_bam_info.ce_burst_size);
+ ce_vaddr_start = (uintptr_t)(*pvaddr);
+ ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to authentication operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f9_kasumi);
+ auth_cfg = pdev->reg.auth_cfg_kasumi;
+ break;
+
+ case QCE_OTA_ALGO_SNOW3G:
+ default:
+ cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->f9_snow3g);
+ auth_cfg = pdev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+ /* clear status register */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+ 0, NULL);
+ /* set config to big endian */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+ iv_reg = 5;
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+ &pcl_info->encr_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++) {
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ pdev->reg.crypto_cfg_le, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+ (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+ int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+ cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate a chunk of the allocated memory to the command list
+	 * used to unlock all pipes.
+	 */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ CRYPTO_CONFIG_RESET, NULL);
+ pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
+ unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ ce_vaddr =
+ (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
+ pdev->ce_bam_info.ce_burst_size);
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+ true);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+ false);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+ false);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+ false);
+ _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+ false);
+
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ true);
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ false);
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ true);
+ _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ false);
+
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
+ false);
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
+ false);
+
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
+ false);
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
+ false);
+
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+ true);
+ _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+ false);
+
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ QCE_MODE_CBC, DES_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES128_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES256_KEY_SIZE, true);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+ QCE_MODE_CBC, DES_KEY_SIZE, false);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+ QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES128_KEY_SIZE, false);
+ _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+ QCE_MODE_CBC, AES256_KEY_SIZE, false);
+
+ _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
+
+ _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
+ _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
+ _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+ _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+ _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+ _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+ _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
+
+ return 0;
+}
+
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+ unsigned char *vaddr;
+ int i;
+ unsigned char *iovec_vaddr;
+ int iovec_memsize;
+
+ vaddr = pce_dev->coh_vmem;
+ vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+ pce_dev->ce_bam_info.ce_burst_size);
+ iovec_vaddr = pce_dev->iovec_vmem;
+ iovec_memsize = pce_dev->iovec_memsize;
+ for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
+ /* Allow for 256 descriptor (cmd and data) entries per pipe */
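+		/*
+		 * Illustrative sizing, assuming struct sps_iovec is 8 bytes
+		 * (address word plus size/flags word): 256 entries take
+		 * 256 * 8 = 2 KB per direction, which is what
+		 * TOTAL_IOVEC_SPACE_PER_PIPE would amount to here.
+		 */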
+ pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
+ (struct sps_iovec *)iovec_vaddr;
+ pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
+ virt_to_phys(pce_dev->ce_request_info[i].
+ ce_sps.in_transfer.iovec);
+ iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+ iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+ pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
+ (struct sps_iovec *)iovec_vaddr;
+ pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
+ virt_to_phys(pce_dev->ce_request_info[i].
+ ce_sps.out_transfer.iovec);
+ iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+ iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+ if (pce_dev->support_cmd_dscr)
+ qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
+ vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+ pce_dev->ce_bam_info.ce_burst_size);
+ pce_dev->ce_request_info[i].ce_sps.result_dump =
+ (uintptr_t)vaddr;
+ pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
+ GET_PHYS_ADDR((uintptr_t)vaddr);
+ pce_dev->ce_request_info[i].ce_sps.result =
+ (struct ce_result_dump_format *)vaddr;
+ vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+ pce_dev->ce_request_info[i].ce_sps.result_dump_null =
+ (uintptr_t)vaddr;
+ pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
+ GET_PHYS_ADDR((uintptr_t)vaddr);
+ pce_dev->ce_request_info[i].ce_sps.result_null =
+ (struct ce_result_dump_format *)vaddr;
+ vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+ pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
+ (uintptr_t)vaddr;
+ vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
+ }
+ if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
+ iovec_memsize < 0)
+		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
+ pce_dev->memsize, (uintptr_t)vaddr -
+ (uintptr_t)pce_dev->coh_vmem);
+ return 0;
+}
+
+static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
+{
+ uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
+ uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
+
+ pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
+ BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
+ BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
+ (pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+ pce_dev->reg.crypto_cfg_le =
+ (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
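+	/*
+	 * Worked example (illustrative): with a typical ce_burst_size of
+	 * 64 bytes, beats = (64 >> 3) - 1 = 7, presumably an N-1 encoding
+	 * of 8-beat bursts in the CRYPTO_REQ_SIZE field; crypto_cfg_le is
+	 * the identical word with only the little-endian bit added.
+	 */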
+
+ /* Initialize encr_cfg register for AES alg */
+ pce_dev->reg.encr_cfg_aes_cbc_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_cbc_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ctr_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ctr_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_xts_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_xts_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ecb_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ecb_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_aes_ccm_128 =
+ (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
+ (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+ pce_dev->reg.encr_cfg_aes_ccm_256 =
+ (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+ (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+ /* Initialize encr_cfg register for DES alg */
+ pce_dev->reg.encr_cfg_des_ecb =
+ (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_des_cbc =
+ (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_3des_ecb =
+ (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+ pce_dev->reg.encr_cfg_3des_cbc =
+ (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+ /* Initialize encr_cfg register for kasumi/snow3g alg */
+ pce_dev->reg.encr_cfg_kasumi =
+ (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+ pce_dev->reg.encr_cfg_snow3g =
+ (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
+ /* Initialize auth_cfg register for CMAC alg */
+ pce_dev->reg.auth_cfg_cmac_128 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+
+ pce_dev->reg.auth_cfg_cmac_256 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
+
+ /* Initialize auth_cfg register for HMAC alg */
+ pce_dev->reg.auth_cfg_hmac_sha1 =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ pce_dev->reg.auth_cfg_hmac_sha256 =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ /* Initialize auth_cfg register for SHA1/256 alg */
+ pce_dev->reg.auth_cfg_sha1 =
+ (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ pce_dev->reg.auth_cfg_sha256 =
+ (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+ /* Initialize auth_cfg register for AEAD alg */
+ pce_dev->reg.auth_cfg_aead_sha1_hmac =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+ pce_dev->reg.auth_cfg_aead_sha256_hmac =
+ (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+ pce_dev->reg.auth_cfg_aes_ccm_128 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
+ ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+ pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+ pce_dev->reg.auth_cfg_aes_ccm_256 =
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+ ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+ pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
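+	/*
+	 * Illustrative note: assuming MAX_NONCE is 16 bytes, the CCM nonce
+	 * occupies 16 / sizeof(uint32_t) = 4 words in the auth config.
+	 */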
+
+ /* Initialize auth_cfg register for kasumi/snow3g */
+ pce_dev->reg.auth_cfg_kasumi =
+ (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+ BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+ pce_dev->reg.auth_cfg_snow3g =
+ (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+ BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+ return 0;
+}
+
+static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
+ struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+ struct qce_cmdlist_info *cmdlistinfo;
+ struct ce_sps_data *pce_sps_data;
+
+ pce_sps_data = &preq_info->ce_sps;
+ if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+ !(pce_dev->no_ccm_mac_status_get_around)) {
+ cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
+ _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ pce_dev->ce_bam_info.ce_burst_size,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+ }
+}
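+/*
+ * Note on the workaround above, inferred from the flags it checks: on
+ * targets where no_get_around is set but no_ccm_mac_status_get_around is
+ * not, a CCM decryption whose MAC comparison fails may not produce a
+ * normal result; queueing a NULL cipher command plus one burst-sized
+ * dummy buffer into the consumer pipe ensures a status/result dump is
+ * still generated for the driver to consume.
+ */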
+
+static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
+ struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+ struct ce_sps_data *pce_sps_data;
+
+ pce_sps_data = &preq_info->ce_sps;
+
+ if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+ !(pce_dev->no_ccm_mac_status_get_around)) {
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ pce_dev->ce_bam_info.ce_burst_size,
+ &pce_sps_data->out_transfer);
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
+ CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+ }
+}
+
+/* QCE_DUMMY_REQ */
+static void qce_dummy_complete(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret)
+{
+ if (!cookie)
+ pr_err("invalid cookie\n");
+}
+
+static int qce_dummy_req(struct qce_device *pce_dev)
+{
+ int ret = 0;
+
+	if (xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
+		return -EBUSY;
+ ret = qce_process_sha_req(pce_dev, NULL);
+ pce_dev->qce_stats.no_of_dummy_reqs++;
+ return ret;
+}
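+/*
+ * The xchg() in qce_dummy_req() atomically claims the reserved
+ * DUMMY_REQ_INDEX slot: if in_use was already true, another dummy
+ * request is in flight and -EBUSY is returned rather than queueing a
+ * second one.
+ */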
+
+static int select_mode(struct qce_device *pce_dev,
+ struct ce_request_info *preq_info)
+{
+ struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
+ unsigned int no_of_queued_req;
+ unsigned int cadence;
+
+ if (!pce_dev->no_get_around) {
+ _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+ return 0;
+ }
+
+ /*
+ * claim ownership of device
+ */
+again:
+ if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
+ != QCE_OWNER_NONE) {
+ ndelay(40);
+ goto again;
+ }
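+	/*
+	 * The loop above is effectively a tiny spinlock built on cmpxchg():
+	 * retry every ~40 ns until owner flips from QCE_OWNER_NONE to
+	 * QCE_OWNER_CLIENT; callers release it with the reverse cmpxchg()
+	 * once the transfer has been queued.
+	 */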
+ no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
+ if (pce_dev->mode == IN_INTERRUPT_MODE) {
+ if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
+ pce_dev->mode = IN_BUNCH_MODE;
+ pr_debug("pcedev %d mode switch to BUNCH\n",
+ pce_dev->dev_no);
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_dev->intr_cadence = 0;
+ atomic_set(&pce_dev->bunch_cmd_seq, 1);
+ atomic_set(&pce_dev->last_intr_seq, 1);
+ mod_timer(&(pce_dev->timer),
+ (jiffies + DELAY_IN_JIFFIES));
+ } else {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ }
+ } else {
+ pce_dev->intr_cadence++;
+ cadence = (preq_info->req_len >> 7) + 1;
+ if (cadence > SET_INTR_AT_REQ)
+ cadence = SET_INTR_AT_REQ;
+ if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
+ == cadence) && pce_dev->cadence_flag))
+ atomic_inc(&pce_dev->bunch_cmd_seq);
+ else {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_dev->intr_cadence = 0;
+ atomic_set(&pce_dev->bunch_cmd_seq, 0);
+ atomic_set(&pce_dev->last_intr_seq, 0);
+			pce_dev->cadence_flag = !pce_dev->cadence_flag;
+ }
+ }
+
+ return 0;
+}
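+/*
+ * Worked example for the cadence logic in select_mode() (illustrative):
+ * a 4 KB request yields cadence = (4096 >> 7) + 1 = 33, clamped to
+ * SET_INTR_AT_REQ, so larger requests raise the threshold at which an
+ * interrupt is forced on the out transfer, while tiny requests interrupt
+ * almost every time.
+ */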
+
+static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
+{
+ int rc = 0;
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ struct aead_request *areq = (struct aead_request *) q_req->areq;
+ uint32_t authsize = q_req->authsize;
+ uint32_t totallen_in, out_len;
+ uint32_t hw_pad_out = 0;
+ int ce_burst_size;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
+ totallen_in = areq->cryptlen + q_req->assoclen;
+ if (q_req->dir == QCE_ENCRYPT) {
+ q_req->cryptlen = areq->cryptlen;
+ out_len = areq->cryptlen + authsize;
+ hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+ } else {
+ q_req->cryptlen = areq->cryptlen - authsize;
+ out_len = q_req->cryptlen;
+ hw_pad_out = authsize;
+ }
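+	/*
+	 * Illustrative numbers, assuming authsize = 16 and a 64-byte burst:
+	 * on encrypt the MAC is padded out to the burst boundary, so
+	 * hw_pad_out = ALIGN(16, 64) - 16 = 48; on decrypt the 16-byte MAC
+	 * itself is diverted to the ignore buffer, so hw_pad_out = 16.
+	 */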
+
+	/*
+	 * For crypto 5.0 hardware that has a burst size alignment
+	 * requirement for data descriptors, the agent above (qcrypto)
+	 * prepares the src scatter list with memory starting with the
+	 * associated data, followed by the data stream to be ciphered.
+	 * The destination scatter list points to the same data area as
+	 * the source.
+	 */
+ if (pce_dev->ce_bam_info.minor_version == 0)
+ preq_info->src_nents = count_sg(areq->src, totallen_in);
+ else
+ preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
+ areq->assoclen);
+
+ if (q_req->assoclen) {
+ preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
+
+ /* formatted associated data input */
+ qce_dma_map_sg(pce_dev->pdev, q_req->asg,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
+ preq_info->asg = q_req->asg;
+ } else {
+ preq_info->assoc_nents = 0;
+ preq_info->asg = NULL;
+ }
+ /* cipher input */
+ qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ /* cipher + mac output for encryption */
+ if (areq->src != areq->dst) {
+		/*
+		 * The destination scatter list points to the same data
+		 * area as the source. Note, the associated data will be
+		 * passed through at the beginning of the destination area.
+		 */
+		preq_info->dst_nents = count_sg(areq->dst,
+					out_len + areq->assoclen);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ } else {
+ preq_info->dst_nents = preq_info->src_nents;
+ }
+
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
+ q_req);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported cipher algorithm %d, mode %d\n",
+ q_req->alg, q_req->mode);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ /* set up crypto device */
+ rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+ q_req->assoclen, cmdlistinfo);
+ } else {
+ /* set up crypto device */
+ rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
+ q_req->assoclen);
+ }
+
+ if (rc < 0)
+ goto bad;
+
+ preq_info->mode = q_req->mode;
+
+ /* setup for callback, and issue command to bam */
+ preq_info->areq = q_req->areq;
+ preq_info->qce_cb = q_req->qce_cb;
+ preq_info->dir = q_req->dir;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_AEAD;
+ preq_info->req_len = totallen_in;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		rc = -EINVAL;
+		goto bad;
+	} else {
+ if (q_req->assoclen && (_qce_sps_add_sg_data(
+ pce_dev, q_req->asg, q_req->assoclen,
+ &pce_sps_data->in_transfer)))
+ goto bad;
+ if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
+ areq->assoclen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
+
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+		/* Pass through to ignore associated data */
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ q_req->assoclen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
+ areq->assoclen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ /* Pass through to ignore hw_pad (padding of the MAC data) */
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ hw_pad_out, &pce_sps_data->out_transfer))
+ goto bad;
+ if (pce_dev->no_get_around ||
+ totallen_in <= SPS_MAX_PKT_SIZE) {
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ } else {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ }
+
+ _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
+
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+ }
+ if (rc)
+ goto bad;
+ return 0;
+
+bad:
+ if (preq_info->assoc_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
+ preq_info->assoc_nents, DMA_TO_DEVICE);
+ }
+ if (preq_info->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ if (areq->src != areq->dst) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+
+static int _qce_suspend(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ struct sps_pipe *sps_pipe_info;
+
+ if (handle == NULL)
+ return -ENODEV;
+
+ qce_enable_clk(pce_dev);
+
+ sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+ sps_disconnect(sps_pipe_info);
+
+ sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+ sps_disconnect(sps_pipe_info);
+
+ qce_disable_clk(pce_dev);
+ return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ struct sps_pipe *sps_pipe_info;
+ struct sps_connect *sps_connect_info;
+ int rc;
+
+ if (handle == NULL)
+ return -ENODEV;
+
+ qce_enable_clk(pce_dev);
+
+ sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+ sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+ return rc;
+ }
+ sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+ sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc)
+ pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+ (uintptr_t)sps_pipe_info, rc);
+
+ rc = sps_register_event(sps_pipe_info,
+ &pce_dev->ce_bam_info.producer.event);
+ if (rc)
+ pr_err("Producer callback registration failed rc = %d\n", rc);
+
+ qce_disable_clk(pce_dev);
+ return rc;
+}
+
+struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ struct aead_request *areq;
+ uint32_t authsize;
+ struct crypto_aead *aead;
+ uint32_t ivsize;
+ uint32_t totallen;
+ int rc = 0;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ if (q_req->mode == QCE_MODE_CCM)
+ return _qce_aead_ccm_req(handle, q_req);
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ areq = (struct aead_request *) q_req->areq;
+ aead = crypto_aead_reqtfm(areq);
+ ivsize = crypto_aead_ivsize(aead);
+ q_req->ivsize = ivsize;
+ authsize = q_req->authsize;
+ if (q_req->dir == QCE_ENCRYPT)
+ q_req->cryptlen = areq->cryptlen;
+ else
+ q_req->cryptlen = areq->cryptlen - authsize;
+
+ if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
+ pr_err("Integer overflow on total aead req length.\n");
+ return -EINVAL;
+ }
+
+ totallen = q_req->cryptlen + areq->assoclen;
+
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
+ req_info, q_req);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
+ q_req->alg, q_req->mode, q_req->encklen,
+ q_req->authsize);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ /* set up crypto device */
+ rc = _ce_setup_aead(pce_dev, q_req, totallen,
+ areq->assoclen, cmdlistinfo);
+ if (rc < 0) {
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ }
+
+	/*
+	 * For crypto 5.0 hardware that has a burst size alignment
+	 * requirement for data descriptors, the agent above (qcrypto)
+	 * prepares the src scatter list with memory starting with the
+	 * associated data, followed by the iv, and then the data stream
+	 * to be ciphered.
+	 */
+ preq_info->src_nents = count_sg(areq->src, totallen);
+
+ /* cipher input */
+ qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ /* cipher output for encryption */
+ if (areq->src != areq->dst) {
+ preq_info->dst_nents = count_sg(areq->dst, totallen);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+
+ /* setup for callback, and issue command to bam */
+ preq_info->areq = q_req->areq;
+ preq_info->qce_cb = q_req->qce_cb;
+ preq_info->dir = q_req->dir;
+ preq_info->asg = NULL;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_AEAD;
+ preq_info->req_len = totallen;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr) {
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ } else {
+ rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
+ areq->assoclen);
+ if (rc)
+ goto bad;
+ }
+
+ preq_info->mode = q_req->mode;
+
+ if (pce_dev->ce_bam_info.minor_version == 0) {
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (totallen > SPS_MAX_PKT_SIZE) {
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ } else {
+ if (_qce_sps_add_data(GET_PHYS_ADDR(
+ pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->out_transfer,
+ SPS_IOVEC_FLAG_INT);
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ }
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ } else {
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+ &pce_sps_data->out_transfer))
+ goto bad;
+
+ if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ } else {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ }
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+ }
+ if (rc)
+ goto bad;
+ return 0;
+
+bad:
+ if (preq_info->src_nents)
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ if (areq->src != areq->dst)
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+ DMA_FROM_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+
+ return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+ int rc = 0;
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)
+ c_req->areq;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ preq_info->src_nents = 0;
+ preq_info->dst_nents = 0;
+
+ /* cipher input */
+ preq_info->src_nents = count_sg(areq->src, areq->nbytes);
+
+ qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ /* cipher output */
+ if (areq->src != areq->dst) {
+ preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
+ qce_dma_map_sg(pce_dev->pdev, areq->dst,
+ preq_info->dst_nents, DMA_FROM_DEVICE);
+ } else {
+ preq_info->dst_nents = preq_info->src_nents;
+ }
+ preq_info->dir = c_req->dir;
+ if ((pce_dev->ce_bam_info.minor_version == 0) &&
+ (preq_info->dir == QCE_DECRYPT) &&
+ (c_req->mode == QCE_MODE_CBC)) {
+ memcpy(preq_info->dec_iv, (unsigned char *)
+ sg_virt(areq->src) + areq->src->length - 16,
+ NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
+ }
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
+ req_info, c_req);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported cipher algorithm %d, mode %d\n",
+ c_req->alg, c_req->mode);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
+ cmdlistinfo);
+ } else {
+ rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
+ }
+ if (rc < 0)
+ goto bad;
+
+ preq_info->mode = c_req->mode;
+
+ /* setup for client callback, and issue command to BAM */
+ preq_info->areq = areq;
+ preq_info->qce_cb = c_req->qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_CIPHERING;
+ preq_info->req_len = areq->nbytes;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_sps_data->in_transfer))
+ goto bad;
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+ if (_qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+ } else {
+ pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+ }
+
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+ if (rc)
+ goto bad;
+
+ return 0;
+bad:
+ if (areq->src != areq->dst) {
+ if (preq_info->dst_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+ preq_info->dst_nents, DMA_FROM_DEVICE);
+ }
+ }
+ if (preq_info->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+ preq_info->src_nents,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ }
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+
+ struct ahash_request *areq;
+ struct qce_cmdlist_info *cmdlistinfo = NULL;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+ bool is_dummy = false;
+
+ if (!sreq) {
+ sreq = &(pce_dev->dummyreq.sreq);
+ req_info = DUMMY_REQ_INDEX;
+ is_dummy = true;
+ } else {
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ }
+
+ areq = (struct ahash_request *)sreq->areq;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ preq_info->src_nents = count_sg(sreq->src, sreq->size);
+ qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
+ DMA_TO_DEVICE);
+
+ if (pce_dev->support_cmd_dscr) {
+ cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
+ if (cmdlistinfo == NULL) {
+ pr_err("Unsupported hash algorithm %d\n", sreq->alg);
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+ rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+ } else {
+ rc = _ce_setup_hash_direct(pce_dev, sreq);
+ }
+ if (rc < 0)
+ goto bad;
+
+ preq_info->areq = areq;
+ preq_info->qce_cb = sreq->qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_HASHING;
+ preq_info->req_len = sreq->size;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+ &pce_sps_data->in_transfer))
+ goto bad;
+
+ /* always ensure there is input data. ZLT does not work for bam-ndp */
+ if (!areq->nbytes)
+ _qce_sps_add_data(
+ GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+ pce_dev->ce_bam_info.ce_burst_size,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+ if (pce_dev->no_get_around)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer))
+ goto bad;
+
+ if (is_dummy) {
+ _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ } else {
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+ }
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ if (preq_info->src_nents) {
+ qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+ preq_info->src_nents, DMA_TO_DEVICE);
+ }
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ bool key_stream_mode;
+ dma_addr_t dst;
+ int rc;
+ struct qce_cmdlist_info *cmdlistinfo;
+ int req_info = -1;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+	}
+
+ key_stream_mode = (req->data_in == NULL);
+
+	/* key stream mode (no input data) is not supported */
+	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+ }
+
+ /* F8 cipher input */
+ preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out,
+ req->data_len, DMA_FROM_DEVICE);
+ preq_info->phy_ota_dst = dst;
+ } else {
+ /* in place ciphering */
+ dst = preq_info->phy_ota_src;
+ preq_info->phy_ota_dst = 0;
+ }
+ preq_info->ota_size = req->data_len;
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+ req->data_len, cmdlistinfo);
+ else
+ rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+ req->data_len);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ preq_info->areq = cookie;
+ preq_info->qce_cb = qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_F8;
+ preq_info->req_len = req->data_len;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
+ &pce_sps_data->in_transfer);
+
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)dst, req->data_len,
+ &pce_sps_data->out_transfer);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ if (preq_info->phy_ota_dst != 0)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+ req->data_len, DMA_FROM_DEVICE);
+ if (preq_info->phy_ota_src != 0)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ req->data_len,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+ void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ uint16_t num_pkt = mreq->num_pkt;
+ uint16_t cipher_start = mreq->cipher_start;
+ uint16_t cipher_size = mreq->cipher_size;
+ struct qce_f8_req *req = &mreq->qce_f8_req;
+ uint32_t total;
+ dma_addr_t dst = 0;
+ int rc = 0;
+ struct qce_cmdlist_info *cmdlistinfo;
+ int req_info = -1;
+ struct ce_request_info *preq_info;
+ struct ce_sps_data *pce_sps_data;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+	}
+
+ total = num_pkt * req->data_len;
+
+ /* F8 cipher input */
+ preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+ req->data_in, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+ /* F8 cipher output */
+ if (req->data_in != req->data_out) {
+ dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+ DMA_FROM_DEVICE);
+ preq_info->phy_ota_dst = dst;
+ } else {
+ /* in place ciphering */
+ dst = preq_info->phy_ota_src;
+ preq_info->phy_ota_dst = 0;
+ }
+
+ preq_info->ota_size = total;
+
+ /* set up crypto device */
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+ cipher_size, cmdlistinfo);
+ else
+ rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+ cipher_start, cipher_size);
+ if (rc)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ preq_info->areq = cookie;
+ preq_info->qce_cb = qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_F8;
+ preq_info->req_len = total;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data((uint32_t)dst, total,
+ &pce_sps_data->out_transfer);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+
+ if (rc == 0)
+ return 0;
+bad:
+ if (preq_info->phy_ota_dst)
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
+ (req->data_in == req->data_out) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+ qce_comp_func_ptr_t qce_cb)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc;
+ struct qce_cmdlist_info *cmdlistinfo;
+ int req_info = -1;
+ struct ce_sps_data *pce_sps_data;
+ struct ce_request_info *preq_info;
+
+ req_info = qce_alloc_req_info(pce_dev);
+ if (req_info < 0)
+ return -EBUSY;
+ preq_info = &pce_dev->ce_request_info[req_info];
+ pce_sps_data = &preq_info->ce_sps;
+ switch (req->algorithm) {
+ case QCE_OTA_ALGO_KASUMI:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
+ break;
+ case QCE_OTA_ALGO_SNOW3G:
+ cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
+ break;
+ default:
+ qce_free_req_info(pce_dev, req_info, false);
+ return -EINVAL;
+	}
+
+ preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+ req->msize, DMA_TO_DEVICE);
+
+ preq_info->ota_size = req->msize;
+
+ if (pce_dev->support_cmd_dscr)
+ rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+ else
+ rc = _ce_f9_setup_direct(pce_dev, req);
+ if (rc < 0)
+ goto bad;
+
+ /* setup for callback, and issue command to sps */
+ preq_info->areq = cookie;
+ preq_info->qce_cb = qce_cb;
+
+ /* setup xfer type for producer callback handling */
+ preq_info->xfer_type = QCE_XFER_F9;
+ preq_info->req_len = req->msize;
+
+ _qce_sps_iovec_count_init(pce_dev, req_info);
+ if (pce_dev->support_cmd_dscr)
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+ &pce_sps_data->in_transfer);
+ _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
+ &pce_sps_data->in_transfer);
+ _qce_set_flag(&pce_sps_data->in_transfer,
+ SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+ _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+ &pce_sps_data->cmdlistptr.unlock_all_pipes,
+ &pce_sps_data->in_transfer);
+
+ _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ CRYPTO_RESULT_DUMP_SIZE,
+ &pce_sps_data->out_transfer);
+
+ select_mode(pce_dev, preq_info);
+ rc = _qce_sps_transfer(pce_dev, req_info);
+ cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+ if (rc)
+ goto bad;
+ return 0;
+bad:
+ dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+ req->msize, DMA_TO_DEVICE);
+ qce_free_req_info(pce_dev, req_info, false);
+ return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+ struct qce_device *pce_dev)
+{
+ struct resource *resource;
+ int rc = 0;
+
+ pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,ce-hw-shared");
+ pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,ce-hw-key");
+
+ pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-cbc-ecb-ctr-algo");
+ pce_dev->use_sw_aead_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aead-algo");
+ pce_dev->use_sw_aes_xts_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-xts-algo");
+ pce_dev->use_sw_ahash_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-ahash-algo");
+ pce_dev->use_sw_hmac_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-hmac-algo");
+ pce_dev->use_sw_aes_ccm_algo =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,use-sw-aes-ccm-algo");
+ pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
+ (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
+ pce_dev->support_only_core_src_clk = of_property_read_bool(
+ (&pdev->dev)->of_node, "qcom,support-core-clk-only");
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,bam-pipe-pair",
+ &pce_dev->ce_bam_info.pipe_pair_index)) {
+ pr_err("Fail to get bam pipe pair information.\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-device",
+ &pce_dev->ce_bam_info.ce_device)) {
+ pr_err("Fail to get CE device information.\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-hw-instance",
+ &pce_dev->ce_bam_info.ce_hw_instance)) {
+ pr_err("Fail to get CE hw instance information.\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,bam-ee",
+ &pce_dev->ce_bam_info.bam_ee)) {
+ pr_info("BAM Apps EE is not defined, setting to default 1\n");
+ pce_dev->ce_bam_info.bam_ee = 1;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-opp-freq",
+ &pce_dev->ce_opp_freq_hz)) {
+ pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
+ pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
+ }
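+	/*
+	 * The pipe pair index n from DT expands to BAM pipes 2n (dest)
+	 * and 2n + 1 (src); e.g. pipe pair 2 uses pipes 4 and 5.
+	 */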
+ pce_dev->ce_bam_info.dest_pipe_index =
+ 2 * pce_dev->ce_bam_info.pipe_pair_index;
+ pce_dev->ce_bam_info.src_pipe_index =
+ pce_dev->ce_bam_info.dest_pipe_index + 1;
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "crypto-base");
+ if (resource) {
+ pce_dev->phy_iobase = resource->start;
+ pce_dev->iobase = ioremap_nocache(resource->start,
+ resource_size(resource));
+ if (!pce_dev->iobase) {
+ pr_err("Can not map CRYPTO io memory\n");
+ return -ENOMEM;
+ }
+ } else {
+ pr_err("CRYPTO HW mem unavailable.\n");
+ return -ENODEV;
+ }
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "crypto-bam-base");
+ if (resource) {
+ pce_dev->bam_mem = resource->start;
+ pce_dev->bam_mem_size = resource_size(resource);
+ } else {
+ pr_err("CRYPTO BAM mem unavailable.\n");
+ rc = -ENODEV;
+ goto err_getting_bam_info;
+ }
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (resource) {
+ pce_dev->ce_bam_info.bam_irq = resource->start;
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENODEV;
+		goto err_dev;
+	}
+ return rc;
+err_dev:
+ if (pce_dev->ce_bam_info.bam_iobase)
+ iounmap(pce_dev->ce_bam_info.bam_iobase);
+
+err_getting_bam_info:
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+
+ return rc;
+}
+
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+ int rc = 0;
+
+ pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+ if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+ rc = clk_set_rate(pce_dev->ce_core_src_clk,
+ pce_dev->ce_opp_freq_hz);
+ if (rc) {
+ pr_err("Unable to set the core src clk @%uMhz.\n",
+ pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
+ goto exit_put_core_src_clk;
+ }
+ } else {
+ if (pce_dev->support_only_core_src_clk) {
+ rc = PTR_ERR(pce_dev->ce_core_src_clk);
+ pce_dev->ce_core_src_clk = NULL;
+ pr_err("Unable to get CE core src clk\n");
+ return rc;
+ }
+ pr_warn("Unable to get CE core src clk, set to NULL\n");
+ pce_dev->ce_core_src_clk = NULL;
+ }
+
+ if (pce_dev->support_only_core_src_clk) {
+ pce_dev->ce_core_clk = NULL;
+ pce_dev->ce_clk = NULL;
+ pce_dev->ce_bus_clk = NULL;
+ } else {
+ pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+ if (IS_ERR(pce_dev->ce_core_clk)) {
+ rc = PTR_ERR(pce_dev->ce_core_clk);
+ pr_err("Unable to get CE core clk\n");
+ goto exit_put_core_src_clk;
+ }
+ pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+ if (IS_ERR(pce_dev->ce_clk)) {
+ rc = PTR_ERR(pce_dev->ce_clk);
+ pr_err("Unable to get CE interface clk\n");
+ goto exit_put_core_clk;
+ }
+
+ pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
+ if (IS_ERR(pce_dev->ce_bus_clk)) {
+ rc = PTR_ERR(pce_dev->ce_bus_clk);
+ pr_err("Unable to get CE BUS interface clk\n");
+ goto exit_put_iface_clk;
+ }
+ }
+ return rc;
+
+exit_put_iface_clk:
+ if (pce_dev->ce_clk)
+ clk_put(pce_dev->ce_clk);
+exit_put_core_clk:
+ if (pce_dev->ce_core_clk)
+ clk_put(pce_dev->ce_core_clk);
+exit_put_core_src_clk:
+ if (pce_dev->ce_core_src_clk)
+ clk_put(pce_dev->ce_core_src_clk);
+ pr_err("Unable to init CE clks, rc = %d\n", rc);
+ return rc;
+}
+
+static void __qce_deinit_clk(struct qce_device *pce_dev)
+{
+ if (pce_dev->ce_bus_clk)
+ clk_put(pce_dev->ce_bus_clk);
+ if (pce_dev->ce_clk)
+ clk_put(pce_dev->ce_clk);
+ if (pce_dev->ce_core_clk)
+ clk_put(pce_dev->ce_core_clk);
+ if (pce_dev->ce_core_src_clk)
+ clk_put(pce_dev->ce_core_src_clk);
+}
+
+int qce_enable_clk(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+ int rc = 0;
+
+ if (pce_dev->ce_core_src_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE core src clk\n");
+ return rc;
+ }
+ }
+
+ if (pce_dev->support_only_core_src_clk)
+ return rc;
+
+ if (pce_dev->ce_core_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_core_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE core clk\n");
+ goto exit_disable_core_src_clk;
+ }
+ }
+
+ if (pce_dev->ce_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE iface clk\n");
+ goto exit_disable_core_clk;
+ }
+ }
+
+ if (pce_dev->ce_bus_clk) {
+ rc = clk_prepare_enable(pce_dev->ce_bus_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE BUS clk\n");
+ goto exit_disable_ce_clk;
+ }
+ }
+ return rc;
+
+exit_disable_ce_clk:
+ if (pce_dev->ce_clk)
+ clk_disable_unprepare(pce_dev->ce_clk);
+exit_disable_core_clk:
+ if (pce_dev->ce_core_clk)
+ clk_disable_unprepare(pce_dev->ce_core_clk);
+exit_disable_core_src_clk:
+ if (pce_dev->ce_core_src_clk)
+ clk_disable_unprepare(pce_dev->ce_core_src_clk);
+ return rc;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+ int rc = 0;
+
+ if (pce_dev->ce_bus_clk)
+ clk_disable_unprepare(pce_dev->ce_bus_clk);
+ if (pce_dev->ce_clk)
+ clk_disable_unprepare(pce_dev->ce_clk);
+ if (pce_dev->ce_core_clk)
+ clk_disable_unprepare(pce_dev->ce_core_clk);
+ if (pce_dev->ce_core_src_clk)
+ clk_disable_unprepare(pce_dev->ce_core_src_clk);
+
+ return rc;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/* dummy req setup */
+static int setup_dummy_req(struct qce_device *pce_dev)
+{
+ char *input =
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
+ int len = DUMMY_REQ_DATA_LEN;
+
+ memcpy(pce_dev->dummyreq_in_buf, input, len);
+ sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
+ sg_mark_end(&pce_dev->dummyreq.sg);
+
+ pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
+ pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
+ pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
+ pce_dev->dummyreq.sreq.auth_data[0] = 0;
+ pce_dev->dummyreq.sreq.auth_data[1] = 0;
+ pce_dev->dummyreq.sreq.auth_data[2] = 0;
+ pce_dev->dummyreq.sreq.auth_data[3] = 0;
+ pce_dev->dummyreq.sreq.first_blk = 1;
+ pce_dev->dummyreq.sreq.last_blk = 1;
+ pce_dev->dummyreq.sreq.size = len;
+ pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
+ pce_dev->dummyreq.sreq.flags = 0;
+ pce_dev->dummyreq.sreq.authkey = NULL;
+
+ pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
+ pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
+
+ return 0;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+ struct qce_device *pce_dev;
+ int i;
+ static int pcedev_no = 1;
+
+ pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+ if (!pce_dev) {
+ *rc = -ENOMEM;
+ pr_err("Can not allocate memory: %d\n", *rc);
+ return NULL;
+ }
+ pce_dev->pdev = &pdev->dev;
+
+ mutex_lock(&qce_iomap_mutex);
+ if (pdev->dev.of_node) {
+ *rc = __qce_get_device_tree_data(pdev, pce_dev);
+ if (*rc)
+ goto err_pce_dev;
+ } else {
+ *rc = -EINVAL;
+ pr_err("Device Node not found.\n");
+ goto err_pce_dev;
+ }
+
+ for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
+ pce_dev->ce_request_info[i].in_use = false;
+ pce_dev->ce_request_index = 0;
+
+ pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
+ pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+ pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+
+ if (pce_dev->coh_vmem == NULL) {
+ *rc = -ENOMEM;
+ pr_err("Can not allocate coherent memory for sps data\n");
+ goto err_iobase;
+ }
+
+ pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
+ MAX_QCE_ALLOC_BAM_REQ * 2;
+	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
+	if (pce_dev->iovec_vmem == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
+	if (pce_dev->dummyreq_in_buf == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+ *rc = __qce_init_clk(pce_dev);
+ if (*rc)
+ goto err_mem;
+ *rc = qce_enable_clk(pce_dev);
+ if (*rc)
+ goto err_enable_clk;
+
+ if (_probe_ce_engine(pce_dev)) {
+ *rc = -ENXIO;
+ goto err;
+ }
+ *rc = 0;
+
+ qce_init_ce_cfg_val(pce_dev);
+ *rc = qce_sps_init(pce_dev);
+ if (*rc)
+ goto err;
+ qce_setup_ce_sps_data(pce_dev);
+ qce_disable_clk(pce_dev);
+ setup_dummy_req(pce_dev);
+ atomic_set(&pce_dev->no_of_queued_req, 0);
+ pce_dev->mode = IN_INTERRUPT_MODE;
+ init_timer(&(pce_dev->timer));
+ pce_dev->timer.function = qce_multireq_timeout;
+ pce_dev->timer.data = (unsigned long)pce_dev;
+ pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
+ pce_dev->intr_cadence = 0;
+ pce_dev->dev_no = pcedev_no;
+ pcedev_no++;
+ pce_dev->owner = QCE_OWNER_NONE;
+ mutex_unlock(&qce_iomap_mutex);
+ return pce_dev;
+err:
+ qce_disable_clk(pce_dev);
+
+err_enable_clk:
+ __qce_deinit_clk(pce_dev);
+
+err_mem:
+ kfree(pce_dev->dummyreq_in_buf);
+ kfree(pce_dev->iovec_vmem);
+ if (pce_dev->coh_vmem)
+ dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+ pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+err_pce_dev:
+ mutex_unlock(&qce_iomap_mutex);
+ kfree(pce_dev);
+ return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+ struct qce_device *pce_dev = (struct qce_device *) handle;
+
+ if (handle == NULL)
+ return -ENODEV;
+
+ mutex_lock(&qce_iomap_mutex);
+ qce_enable_clk(pce_dev);
+ qce_sps_exit(pce_dev);
+
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+ if (pce_dev->coh_vmem)
+ dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+ pce_dev->coh_vmem, pce_dev->coh_pmem);
+ kfree(pce_dev->dummyreq_in_buf);
+ kfree(pce_dev->iovec_vmem);
+
+ qce_disable_clk(pce_dev);
+ __qce_deinit_clk(pce_dev);
+ mutex_unlock(&qce_iomap_mutex);
+ kfree(handle);
+
+ return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+ 1 << CRYPTO_ENCR_KASUMI_SEL |\
+ 1 << CRYPTO_AUTH_SNOW3G_SEL |\
+ 1 << CRYPTO_AUTH_KASUMI_SEL)
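+
+/*
+ * OTA (f8/f9) is only advertised when every SNOW3G and KASUMI engine
+ * bit is present, hence the full-mask comparison in qce_hw_support()
+ * below rather than a non-zero test:
+ *
+ *	(engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK
+ */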
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+
+ if (ce_support == NULL)
+ return -EINVAL;
+
+ ce_support->sha1_hmac_20 = false;
+ ce_support->sha1_hmac = false;
+ ce_support->sha256_hmac = false;
+ ce_support->sha_hmac = true;
+ ce_support->cmac = true;
+ ce_support->aes_key_192 = false;
+ ce_support->aes_xts = true;
+ if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+ ce_support->ota = true;
+ else
+ ce_support->ota = false;
+ ce_support->bam = true;
+ ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
+ ce_support->hw_key = pce_dev->support_hw_key;
+ ce_support->aes_ccm = true;
+ ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+ if (pce_dev->ce_bam_info.minor_version)
+ ce_support->aligned_only = false;
+ else
+ ce_support->aligned_only = true;
+
+ ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+ pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+ ce_support->use_sw_aead_algo =
+ pce_dev->use_sw_aead_algo;
+ ce_support->use_sw_aes_xts_algo =
+ pce_dev->use_sw_aes_xts_algo;
+ ce_support->use_sw_ahash_algo =
+ pce_dev->use_sw_ahash_algo;
+ ce_support->use_sw_hmac_algo =
+ pce_dev->use_sw_hmac_algo;
+ ce_support->use_sw_aes_ccm_algo =
+ pce_dev->use_sw_aes_ccm_algo;
+ ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
+ ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
+ if (pce_dev->no_get_around)
+ ce_support->max_request = MAX_QCE_BAM_REQ;
+ else
+ ce_support->max_request = 1;
+ return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+void qce_dump_req(void *handle)
+{
+ int i;
+ struct qce_device *pce_dev = (struct qce_device *)handle;
+
+ for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+ pr_info("qce_dump_req %d %d\n", i,
+ pce_dev->ce_request_info[i].in_use);
+ if (pce_dev->ce_request_info[i].in_use == true)
+ _qce_dump_descr_fifos(pce_dev, i);
+ }
+}
+EXPORT_SYMBOL(qce_dump_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 0000000..0e60bd2
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <linux/msm-sps.h>
+
+/* Maximum data transfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE 0x40
+#define QCEBAM_BURST_SIZE MAX_CE_BAM_BURST_SIZE
+
+#define GET_VIRT_ADDR(x) \
+	((uintptr_t)pce_dev->coh_vmem + \
+	((uintptr_t)(x) - (uintptr_t)pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x) \
+	(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem + \
+	((uintptr_t)(x) - (uintptr_t)pce_dev->coh_vmem)))
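+
+/*
+ * These macros translate between the CPU virtual and DMA physical views
+ * of the same dma_alloc_coherent() pool by applying the constant offset
+ * between coh_vmem and coh_pmem. Illustrative sketch (hypothetical
+ * local names, not driver code):
+ *
+ *	phys_addr_t p = GET_PHYS_ADDR(pce_sps_data->result_dump);
+ *	uint32_t *v = (uint32_t *)GET_VIRT_ADDR(p);
+ *
+ * Both assume the argument lies inside the coherent region.
+ */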
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED 26
+#define CRYPTO_RESULT_DUMP_SIZE \
+ ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+ QCEBAM_BURST_SIZE)
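+
+/*
+ * Worked example: 26 registers x 4 bytes = 104 bytes, which ALIGN()
+ * rounds up to the 0x40 (64-byte) BAM burst size, so
+ * CRYPTO_RESULT_DUMP_SIZE evaluates to 128.
+ */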
+
+/* QCE maximum number of descriptors in a descriptor list */
+#define QCE_MAX_NUM_DESC 128
+#define SPS_MAX_PKT_SIZE (32 * 1024 - 64)
+
+/* default BAM IPC log level */
+#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+ QCE_PIPE_STATE_IDLE = 0,
+ QCE_PIPE_STATE_IN_PROG = 1,
+ QCE_PIPE_STATE_COMP = 2,
+ QCE_PIPE_STATE_LAST
+};
+
+enum qce_xfer_type_enum {
+ QCE_XFER_HASHING,
+ QCE_XFER_CIPHERING,
+ QCE_XFER_AEAD,
+ QCE_XFER_F8,
+ QCE_XFER_F9,
+ QCE_XFER_TYPE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+ struct sps_pipe *pipe;
+ struct sps_connect connect;
+ struct sps_register_event event;
+};
+
+/* CE result dump format */
+struct ce_result_dump_format {
+ uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+ uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+ uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+ uint32_t status;
+ uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+	unsigned long cmdlist;
+ struct sps_command_element *crypto_cfg;
+ struct sps_command_element *encr_seg_cfg;
+ struct sps_command_element *encr_seg_size;
+ struct sps_command_element *encr_seg_start;
+ struct sps_command_element *encr_key;
+ struct sps_command_element *encr_xts_key;
+ struct sps_command_element *encr_cntr_iv;
+ struct sps_command_element *encr_ccm_cntr_iv;
+ struct sps_command_element *encr_mask;
+ struct sps_command_element *encr_xts_du_size;
+
+ struct sps_command_element *auth_seg_cfg;
+ struct sps_command_element *auth_seg_size;
+ struct sps_command_element *auth_seg_start;
+ struct sps_command_element *auth_key;
+ struct sps_command_element *auth_iv;
+ struct sps_command_element *auth_nonce_info;
+ struct sps_command_element *auth_bytecount;
+ struct sps_command_element *seg_size;
+ struct sps_command_element *go_proc;
+ ptrdiff_t size;
+};
+
+struct qce_cmdlistptr_ops {
+ struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+ struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+ struct qce_cmdlist_info cipher_aes_128_ecb;
+ struct qce_cmdlist_info cipher_aes_256_ecb;
+ struct qce_cmdlist_info cipher_aes_128_xts;
+ struct qce_cmdlist_info cipher_aes_256_xts;
+ struct qce_cmdlist_info cipher_des_cbc;
+ struct qce_cmdlist_info cipher_des_ecb;
+ struct qce_cmdlist_info cipher_3des_cbc;
+ struct qce_cmdlist_info cipher_3des_ecb;
+ struct qce_cmdlist_info auth_sha1;
+ struct qce_cmdlist_info auth_sha256;
+ struct qce_cmdlist_info auth_sha1_hmac;
+ struct qce_cmdlist_info auth_sha256_hmac;
+ struct qce_cmdlist_info auth_aes_128_cmac;
+ struct qce_cmdlist_info auth_aes_256_cmac;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
+ struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
+ struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
+ struct qce_cmdlist_info aead_aes_128_ccm;
+ struct qce_cmdlist_info aead_aes_256_ccm;
+ struct qce_cmdlist_info cipher_null;
+ struct qce_cmdlist_info f8_kasumi;
+ struct qce_cmdlist_info f8_snow3g;
+ struct qce_cmdlist_info f9_kasumi;
+ struct qce_cmdlist_info f9_snow3g;
+ struct qce_cmdlist_info unlock_all_pipes;
+};
+
+struct qce_ce_cfg_reg_setting {
+ uint32_t crypto_cfg_be;
+ uint32_t crypto_cfg_le;
+
+ uint32_t encr_cfg_aes_cbc_128;
+ uint32_t encr_cfg_aes_cbc_256;
+
+ uint32_t encr_cfg_aes_ecb_128;
+ uint32_t encr_cfg_aes_ecb_256;
+
+ uint32_t encr_cfg_aes_xts_128;
+ uint32_t encr_cfg_aes_xts_256;
+
+ uint32_t encr_cfg_aes_ctr_128;
+ uint32_t encr_cfg_aes_ctr_256;
+
+ uint32_t encr_cfg_aes_ccm_128;
+ uint32_t encr_cfg_aes_ccm_256;
+
+ uint32_t encr_cfg_des_cbc;
+ uint32_t encr_cfg_des_ecb;
+
+ uint32_t encr_cfg_3des_cbc;
+ uint32_t encr_cfg_3des_ecb;
+ uint32_t encr_cfg_kasumi;
+ uint32_t encr_cfg_snow3g;
+
+ uint32_t auth_cfg_cmac_128;
+ uint32_t auth_cfg_cmac_256;
+
+ uint32_t auth_cfg_sha1;
+ uint32_t auth_cfg_sha256;
+
+ uint32_t auth_cfg_hmac_sha1;
+ uint32_t auth_cfg_hmac_sha256;
+
+ uint32_t auth_cfg_aes_ccm_128;
+ uint32_t auth_cfg_aes_ccm_256;
+ uint32_t auth_cfg_aead_sha1_hmac;
+ uint32_t auth_cfg_aead_sha256_hmac;
+ uint32_t auth_cfg_kasumi;
+ uint32_t auth_cfg_snow3g;
+};
+
+struct ce_bam_info {
+ uint32_t bam_irq;
+ uint32_t bam_mem;
+ void __iomem *bam_iobase;
+ uint32_t ce_device;
+ uint32_t ce_hw_instance;
+ uint32_t bam_ee;
+ unsigned int pipe_pair_index;
+ unsigned int src_pipe_index;
+ unsigned int dest_pipe_index;
+ unsigned long bam_handle;
+ int ce_burst_size;
+ uint32_t minor_version;
+ struct qce_sps_ep_conn_data producer;
+ struct qce_sps_ep_conn_data consumer;
+};
+
+/* SPS data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+ enum qce_pipe_st_enum producer_state; /* Producer pipe state */
+ int consumer_status; /* consumer pipe status */
+ int producer_status; /* producer pipe status */
+ struct sps_transfer in_transfer;
+ struct sps_transfer out_transfer;
+ struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump; /* result dump virtual address */
+ uint32_t result_dump_null;
+ uint32_t result_dump_phy; /* result dump physical address (32 bits) */
+ uint32_t result_dump_null_phy;
+
+ uint32_t ignore_buffer; /* ignore buffer virtual address */
+	struct ce_result_dump_format *result; /* pointer to result dump */
+ struct ce_result_dump_format *result_null;
+};
+
+struct ce_request_info {
+ bool in_use;
+ bool in_prog;
+ enum qce_xfer_type_enum xfer_type;
+ struct ce_sps_data ce_sps;
+ qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
+ void *user;
+ void *areq;
+ int assoc_nents;
+ struct scatterlist *asg; /* Formatted associated data sg */
+ int src_nents;
+ int dst_nents;
+ dma_addr_t phy_iv_in;
+ unsigned char dec_iv[16];
+ int dir;
+ enum qce_cipher_mode_enum mode;
+ dma_addr_t phy_ota_src;
+ dma_addr_t phy_ota_dst;
+ unsigned int ota_size;
+ unsigned int req_len;
+};
+
+struct qce_driver_stats {
+ int no_of_timeouts;
+ int no_of_dummy_reqs;
+ int current_mode;
+ int outstanding_reqs;
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff --git a/drivers/crypto/msm/qce_ota.h b/drivers/crypto/msm/qce_ota.h
new file mode 100644
index 0000000..2f985fa
--- /dev/null
+++ b/drivers/crypto/msm/qce_ota.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* QTI Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+ void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
new file mode 100644
index 0000000..0860e59
--- /dev/null
+++ b/drivers/crypto/msm/qcedev.c
@@ -0,0 +1,2054 @@
+/*
+ * QTI CE device driver.
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/qcedev.h>
+
+#include <crypto/hash.h>
+#include "qcedevi.h"
+#include "qce.h"
+
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
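+/* standard initialization vector for SHA-1, source: FIPS 180-2 */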
+static uint8_t _std_init_vector_sha1_uint8[] = {
+ 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+ 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+ 0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+ 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+ 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+ 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+ 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
+
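+/*
+ * Reference-counted bus bandwidth voting: the first requester enables
+ * the CE clocks and raises the bus vote, and the vote and clocks are
+ * dropped only when the last requester releases. A minimal usage
+ * sketch, mirroring qcedev_open()/qcedev_release() below:
+ *
+ *	qcedev_ce_high_bw_req(podev, true);	(count 0 -> 1: vote high)
+ *	... issue crypto requests ...
+ *	qcedev_ce_high_bw_req(podev, false);	(count 1 -> 0: vote low)
+ */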
+static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
+ bool high_bw_req)
+{
+ int ret = 0;
+
+ mutex_lock(&qcedev_sent_bw_req);
+ if (high_bw_req) {
+ if (podev->high_bw_req_count == 0) {
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable enable clk\n", __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ ret = qce_disable_clk(podev->qce);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
+ podev->high_bw_req_count++;
+ } else {
+ if (podev->high_bw_req_count == 1) {
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+ pr_err("%s Unable disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ mutex_unlock(&qcedev_sent_bw_req);
+ return;
+ }
+ }
+ podev->high_bw_req_count--;
+ }
+ mutex_unlock(&qcedev_sent_bw_req);
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev);
+static inline long qcedev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+#include "compat_qcedev.c"
+#else
+#define compat_qcedev_ioctl NULL
+#endif
+
+static const struct file_operations qcedev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qcedev_ioctl,
+ .compat_ioctl = compat_qcedev_ioctl,
+ .open = qcedev_open,
+ .release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+ {
+ .miscdevice = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qce",
+ .fops = &qcedev_fops,
+ },
+ .magic = QCEDEV_MAGIC,
+ },
+};
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME 16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+ u32 qcedev_dec_success;
+ u32 qcedev_dec_fail;
+ u32 qcedev_enc_success;
+ u32 qcedev_enc_fail;
+ u32 qcedev_sha_success;
+ u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev;
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
+{
+ int i;
+
+ for (i = 0; i < MAX_QCE_DEVICE; i++) {
+ if (qce_dev[i].miscdevice.minor == n)
+ return &qce_dev[i];
+ }
+ return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+ struct qcedev_handle *handle;
+ struct qcedev_control *podev;
+
+ podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+ if (podev == NULL) {
+ pr_err("%s: no such device %d\n", __func__,
+ MINOR(inode->i_rdev));
+ return -ENOENT;
+ }
+
+ handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ handle->cntl = podev;
+ file->private_data = handle;
+ if (podev->platform_support.bus_scale_table != NULL)
+ qcedev_ce_high_bw_req(podev, true);
+ return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+ struct qcedev_control *podev;
+ struct qcedev_handle *handle;
+
+ handle = file->private_data;
+ podev = handle->cntl;
+ if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ }
+ kzfree(handle);
+ file->private_data = NULL;
+ if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
+ qcedev_ce_high_bw_req(podev, false);
+ return 0;
+}
+
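+/*
+ * Completion tasklet: completes the finished request, then pops the
+ * next command off ready_commands and starts it. If the start fails,
+ * that request is completed immediately (start_*_req has set its err)
+ * and the loop retries until a request starts or the queue drains.
+ */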
+static void req_done(unsigned long data)
+{
+ struct qcedev_control *podev = (struct qcedev_control *)data;
+ struct qcedev_async_req *areq;
+ unsigned long flags = 0;
+ struct qcedev_async_req *new_req = NULL;
+ int ret = 0;
+
+ spin_lock_irqsave(&podev->lock, flags);
+ areq = podev->active_command;
+ podev->active_command = NULL;
+
+again:
+ if (!list_empty(&podev->ready_commands)) {
+ new_req = container_of(podev->ready_commands.next,
+ struct qcedev_async_req, list);
+ list_del(&new_req->list);
+ podev->active_command = new_req;
+ new_req->err = 0;
+ if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+ ret = start_cipher_req(podev);
+ else
+ ret = start_sha_req(podev);
+ }
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ if (areq)
+ complete(&areq->complete);
+
+ if (new_req && ret) {
+ complete(&new_req->complete);
+ spin_lock_irqsave(&podev->lock, flags);
+ podev->active_command = NULL;
+ areq = NULL;
+ ret = 0;
+ new_req = NULL;
+ goto again;
+ }
+}
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret)
+{
+ struct qcedev_sha_req *areq;
+ struct qcedev_control *pdev;
+ struct qcedev_handle *handle;
+
+ uint32_t *auth32 = (uint32_t *)authdata;
+
+ areq = (struct qcedev_sha_req *) cookie;
+ handle = (struct qcedev_handle *) areq->cookie;
+ pdev = handle->cntl;
+
+ if (digest)
+ memcpy(&handle->sha_ctxt.digest[0], digest, 32);
+
+ if (authdata) {
+ handle->sha_ctxt.auth_data[0] = auth32[0];
+ handle->sha_ctxt.auth_data[1] = auth32[1];
+ handle->sha_ctxt.auth_data[2] = auth32[2];
+ handle->sha_ctxt.auth_data[3] = auth32[3];
+ }
+
+ tasklet_schedule(&pdev->done_tasklet);
+}
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+ unsigned char *iv, int ret)
+{
+ struct qcedev_cipher_req *areq;
+ struct qcedev_handle *handle;
+ struct qcedev_control *podev;
+ struct qcedev_async_req *qcedev_areq;
+
+ areq = (struct qcedev_cipher_req *) cookie;
+ handle = (struct qcedev_handle *) areq->cookie;
+ podev = handle->cntl;
+ qcedev_areq = podev->active_command;
+
+ if (iv)
+ memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+ qcedev_areq->cipher_op_req.ivlen);
+ tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_cipher_req(struct qcedev_control *podev)
+{
+ struct qcedev_async_req *qcedev_areq;
+ struct qce_req creq;
+ int ret = 0;
+
+	/* start the command from podev->active_command */
+ qcedev_areq = podev->active_command;
+ qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+ if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
+ pr_err("%s: Use of PMEM is not supported\n", __func__);
+ goto unsupported;
+ }
+ creq.pmem = NULL;
+ switch (qcedev_areq->cipher_op_req.alg) {
+ case QCEDEV_ALG_DES:
+ creq.alg = CIPHER_ALG_DES;
+ break;
+ case QCEDEV_ALG_3DES:
+ creq.alg = CIPHER_ALG_3DES;
+ break;
+ case QCEDEV_ALG_AES:
+ creq.alg = CIPHER_ALG_AES;
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ switch (qcedev_areq->cipher_op_req.mode) {
+ case QCEDEV_AES_MODE_CBC:
+ case QCEDEV_DES_MODE_CBC:
+ creq.mode = QCE_MODE_CBC;
+ break;
+ case QCEDEV_AES_MODE_ECB:
+ case QCEDEV_DES_MODE_ECB:
+ creq.mode = QCE_MODE_ECB;
+ break;
+ case QCEDEV_AES_MODE_CTR:
+ creq.mode = QCE_MODE_CTR;
+ break;
+ case QCEDEV_AES_MODE_XTS:
+ creq.mode = QCE_MODE_XTS;
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ if ((creq.alg == CIPHER_ALG_AES) &&
+ (creq.mode == QCE_MODE_CTR)) {
+ creq.dir = QCE_ENCRYPT;
+ } else {
+ if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
+ creq.dir = QCE_ENCRYPT;
+ else
+ creq.dir = QCE_DECRYPT;
+ }
+
+ creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+ creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+ creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
+ creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+ creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+ if (qcedev_areq->cipher_op_req.encklen == 0) {
+ if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+ || (qcedev_areq->cipher_op_req.op ==
+ QCEDEV_OPER_DEC_NO_KEY))
+ creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+ else {
+ int i;
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+ break;
+ }
+
+ if ((podev->platform_support.hw_key_support == 1) &&
+ (i == QCEDEV_MAX_KEY_SIZE))
+ creq.op = QCE_REQ_ABLK_CIPHER;
+ else {
+ ret = -EINVAL;
+ goto unsupported;
+ }
+ }
+ } else {
+ creq.op = QCE_REQ_ABLK_CIPHER;
+ }
+
+ creq.qce_cb = qcedev_cipher_req_cb;
+ creq.areq = (void *)&qcedev_areq->cipher_req;
+ creq.flags = 0;
+ ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+ if (ret)
+ qcedev_areq->err = -ENXIO;
+ else
+ qcedev_areq->err = 0;
+ return ret;
+}
+
+static int start_sha_req(struct qcedev_control *podev)
+{
+ struct qcedev_async_req *qcedev_areq;
+ struct qce_sha_req sreq;
+ int ret = 0;
+ struct qcedev_handle *handle;
+
+	/* start the command from podev->active_command */
+ qcedev_areq = podev->active_command;
+ handle = qcedev_areq->handle;
+
+ switch (qcedev_areq->sha_op_req.alg) {
+ case QCEDEV_ALG_SHA1:
+ sreq.alg = QCE_HASH_SHA1;
+ break;
+ case QCEDEV_ALG_SHA256:
+ sreq.alg = QCE_HASH_SHA256;
+ break;
+ case QCEDEV_ALG_SHA1_HMAC:
+ if (podev->ce_support.sha_hmac) {
+ sreq.alg = QCE_HASH_SHA1_HMAC;
+ sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+
+ } else {
+ sreq.alg = QCE_HASH_SHA1;
+ sreq.authkey = NULL;
+ }
+ break;
+ case QCEDEV_ALG_SHA256_HMAC:
+ if (podev->ce_support.sha_hmac) {
+ sreq.alg = QCE_HASH_SHA256_HMAC;
+ sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+ } else {
+ sreq.alg = QCE_HASH_SHA256;
+ sreq.authkey = NULL;
+ }
+ break;
+ case QCEDEV_ALG_AES_CMAC:
+ sreq.alg = QCE_HASH_AES_CMAC;
+ sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = qcedev_areq->sha_op_req.authklen;
+ break;
+ default:
+ pr_err("Algorithm %d not supported, exiting\n",
+ qcedev_areq->sha_op_req.alg);
+ return -EINVAL;
+	}
+
+ qcedev_areq->sha_req.cookie = handle;
+
+ sreq.qce_cb = qcedev_sha_req_cb;
+ if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+ sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
+ sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
+ sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
+ sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
+ sreq.digest = &handle->sha_ctxt.digest[0];
+ sreq.first_blk = handle->sha_ctxt.first_blk;
+ sreq.last_blk = handle->sha_ctxt.last_blk;
+ }
+ sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+ sreq.src = qcedev_areq->sha_req.sreq.src;
+ sreq.areq = (void *)&qcedev_areq->sha_req;
+ sreq.flags = 0;
+
+ ret = qce_process_sha_req(podev->qce, &sreq);
+
+ if (ret)
+ qcedev_areq->err = -ENXIO;
+ else
+ qcedev_areq->err = 0;
+ return ret;
+}
+
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ struct qcedev_control *podev;
+ unsigned long flags = 0;
+ int ret = 0;
+ struct qcedev_stat *pstat;
+
+ qcedev_areq->err = 0;
+ podev = handle->cntl;
+
+ spin_lock_irqsave(&podev->lock, flags);
+
+ if (podev->active_command == NULL) {
+ podev->active_command = qcedev_areq;
+ if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+ ret = start_cipher_req(podev);
+ else
+ ret = start_sha_req(podev);
+ } else {
+ list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+ }
+
+ if (ret != 0)
+ podev->active_command = NULL;
+
+ spin_unlock_irqrestore(&podev->lock, flags);
+
+ if (ret == 0)
+ wait_for_completion(&qcedev_areq->complete);
+
+ if (ret)
+ qcedev_areq->err = -EIO;
+
+ pstat = &_qcedev_stat;
+ if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+ switch (qcedev_areq->cipher_op_req.op) {
+ case QCEDEV_OPER_DEC:
+ if (qcedev_areq->err)
+ pstat->qcedev_dec_fail++;
+ else
+ pstat->qcedev_dec_success++;
+ break;
+ case QCEDEV_OPER_ENC:
+ if (qcedev_areq->err)
+ pstat->qcedev_enc_fail++;
+ else
+ pstat->qcedev_enc_success++;
+ break;
+ default:
+ break;
+		}
+ } else {
+ if (qcedev_areq->err)
+ pstat->qcedev_sha_fail++;
+ else
+ pstat->qcedev_sha_success++;
+ }
+
+ return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
+
+ memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+ sha_ctxt->first_blk = 1;
+
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+ memcpy(&sha_ctxt->digest[0],
+ &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+ sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+ } else {
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+ memcpy(&sha_ctxt->digest[0],
+ &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+ }
+ }
+ sha_ctxt->init_done = true;
+ return 0;
+}
+
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+ int i = 0;
+ uint32_t total;
+
+ uint8_t *user_src = NULL;
+ uint8_t *k_src = NULL;
+ uint8_t *k_buf_src = NULL;
+ uint8_t *k_align_src = NULL;
+
+ uint32_t sha_pad_len = 0;
+ uint32_t trailing_buf_len = 0;
+ uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
+ uint32_t sha_block_size;
+
+ total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+ sha_block_size = SHA1_BLOCK_SIZE;
+ else
+ sha_block_size = SHA256_BLOCK_SIZE;
+
+ if (total <= sha_block_size) {
+ uint32_t len = qcedev_areq->sha_op_req.data_len;
+
+ i = 0;
+
+ k_src = &handle->sha_ctxt.trailing_buf[t_buf];
+
+ /* Copy data from user src(s) */
+ while (len > 0) {
+ user_src =
+ (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+ if (user_src && copy_from_user(k_src,
+ (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[i].len))
+ return -EFAULT;
+
+ len -= qcedev_areq->sha_op_req.data[i].len;
+ k_src += qcedev_areq->sha_op_req.data[i].len;
+ i++;
+ }
+ handle->sha_ctxt.trailing_buf_len = total;
+
+ return 0;
+ }
+
+ k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+ GFP_KERNEL);
+ if (k_buf_src == NULL)
+ return -ENOMEM;
+
+ k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+ CACHE_LINE_SIZE);
+ k_src = k_align_src;
+
+ /* check for trailing buffer from previous updates and append it */
+ if (t_buf > 0) {
+ memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+ t_buf);
+ k_src += t_buf;
+ }
+
+ /* Copy data from user src(s) */
+ user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+ if (user_src && copy_from_user(k_src,
+ (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[0].len)) {
+ kzfree(k_buf_src);
+ return -EFAULT;
+ }
+ k_src += qcedev_areq->sha_op_req.data[0].len;
+ for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+ user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+ if (user_src && copy_from_user(k_src,
+ (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[i].len)) {
+ kzfree(k_buf_src);
+ return -EFAULT;
+ }
+ k_src += qcedev_areq->sha_op_req.data[i].len;
+ }
+
+ /* get new trailing buffer */
+ sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+ trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;
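+	/*
+	 * Example: total = 100 with a 64-byte block hashes the leading
+	 * 64 bytes now and keeps 36 (100 % 64) as the new trailing
+	 * buffer; when total is an exact multiple of the block size, a
+	 * full block is retained so sha_final() always has a last block.
+	 */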
+
+ qcedev_areq->sha_req.sreq.src = sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+ total-trailing_buf_len);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+ /* update sha_ctxt trailing buf content to new trailing buf */
+ if (trailing_buf_len > 0) {
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+ memcpy(&handle->sha_ctxt.trailing_buf[0],
+ (k_src - trailing_buf_len),
+ trailing_buf_len);
+ }
+ handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
+
+ err = submit_req(qcedev_areq, handle);
+
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.first_blk = 0;
+
+ kzfree(k_buf_src);
+ return err;
+}
+
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ int num_entries = 0;
+ uint32_t total = 0;
+
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+ struct qcedev_sha_op_req *saved_req;
+ struct qcedev_sha_op_req req;
+ struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+ /* save the original req structure */
+ saved_req =
+ kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+ if (saved_req == NULL) {
+ pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
+ __func__, (uintptr_t)saved_req);
+ return -ENOMEM;
+ }
+ memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+ memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+ i = 0;
+ /* Address 32 KB at a time */
+ while ((i < req.entries) && (err == 0)) {
+ if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+ sreq->data[0].len = QCE_MAX_OPER_DATA;
+ if (i > 0) {
+ sreq->data[0].vaddr =
+ sreq->data[i].vaddr;
+ }
+
+ sreq->data_len = QCE_MAX_OPER_DATA;
+ sreq->entries = 1;
+
+ err = qcedev_sha_update_max_xfer(qcedev_areq,
+ handle, sg_src);
+
+ sreq->data[i].len = req.data[i].len -
+ QCE_MAX_OPER_DATA;
+ sreq->data[i].vaddr = req.data[i].vaddr +
+ QCE_MAX_OPER_DATA;
+ req.data[i].vaddr = sreq->data[i].vaddr;
+ req.data[i].len = sreq->data[i].len;
+ } else {
+ total = 0;
+ for (j = i; j < req.entries; j++) {
+ num_entries++;
+ if ((total + sreq->data[j].len) >=
+ QCE_MAX_OPER_DATA) {
+ sreq->data[j].len =
+ (QCE_MAX_OPER_DATA - total);
+ total = QCE_MAX_OPER_DATA;
+ break;
+ }
+ total += sreq->data[j].len;
+ }
+
+ sreq->data_len = total;
+ if (i > 0)
+ for (k = 0; k < num_entries; k++) {
+ sreq->data[k].len =
+ sreq->data[i+k].len;
+ sreq->data[k].vaddr =
+ sreq->data[i+k].vaddr;
+ }
+ sreq->entries = num_entries;
+
+ i = j;
+ err = qcedev_sha_update_max_xfer(qcedev_areq,
+ handle, sg_src);
+ num_entries = 0;
+
+ sreq->data[i].vaddr = req.data[i].vaddr +
+ sreq->data[i].len;
+ sreq->data[i].len = req.data[i].len -
+ sreq->data[i].len;
+ req.data[i].vaddr = sreq->data[i].vaddr;
+ req.data[i].len = sreq->data[i].len;
+
+ if (sreq->data[i].len == 0)
+ i++;
+ }
+ } /* end of while ((i < req.entries) && (err == 0)) */
+
+ /* Restore the original req structure */
+ for (i = 0; i < saved_req->entries; i++) {
+ sreq->data[i].len = saved_req->data[i].len;
+ sreq->data[i].vaddr = saved_req->data[i].vaddr;
+ }
+ sreq->entries = saved_req->entries;
+ sreq->data_len = saved_req->data_len;
+ kzfree(saved_req);
+ } else
+ err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
+
+ return err;
+}
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ struct scatterlist sg_src;
+ uint32_t total;
+ uint8_t *k_buf_src = NULL;
+ uint8_t *k_align_src = NULL;
+
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+
+ handle->sha_ctxt.last_blk = 1;
+
+ total = handle->sha_ctxt.trailing_buf_len;
+
+ if (total) {
+ k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+ GFP_KERNEL);
+ if (k_buf_src == NULL)
+ return -ENOMEM;
+
+ k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+ CACHE_LINE_SIZE);
+ memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
+ }
+ qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = total;
+
+ err = submit_req(qcedev_areq, handle);
+
+ handle->sha_ctxt.first_blk = 0;
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.auth_data[0] = 0;
+ handle->sha_ctxt.auth_data[1] = 0;
+ handle->sha_ctxt.trailing_buf_len = 0;
+ handle->sha_ctxt.init_done = false;
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+
+ kzfree(k_buf_src);
+ return err;
+}
+
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+ int i = 0;
+ uint32_t total;
+
+ uint8_t *user_src = NULL;
+ uint8_t *k_src = NULL;
+ uint8_t *k_buf_src = NULL;
+
+ total = qcedev_areq->sha_op_req.data_len;
+
+ if (copy_from_user(&handle->sha_ctxt.authkey[0],
+ (void __user *)qcedev_areq->sha_op_req.authkey,
+ qcedev_areq->sha_op_req.authklen))
+ return -EFAULT;
+
+ k_buf_src = kmalloc(total, GFP_KERNEL);
+ if (k_buf_src == NULL)
+ return -ENOMEM;
+
+ k_src = k_buf_src;
+
+ /* Copy data from user src(s) */
+ user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+ for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+ user_src =
+ (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+ if (user_src && copy_from_user(k_src, (void __user *)user_src,
+ qcedev_areq->sha_op_req.data[i].len)) {
+ kzfree(k_buf_src);
+ return -EFAULT;
+ }
+ k_src += qcedev_areq->sha_op_req.data[i].len;
+ }
+
+ qcedev_areq->sha_req.sreq.src = sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = total;
+ handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+ err = submit_req(qcedev_areq, handle);
+
+ kzfree(k_buf_src);
+ return err;
+}
+
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err = 0;
+
+ if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+ qcedev_sha_init(areq, handle);
+ if (copy_from_user(&handle->sha_ctxt.authkey[0],
+ (void __user *)areq->sha_op_req.authkey,
+ areq->sha_op_req.authklen))
+ return -EFAULT;
+ } else {
+ struct qcedev_async_req authkey_areq;
+ uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+
+ init_completion(&authkey_areq.complete);
+
+ authkey_areq.sha_op_req.entries = 1;
+ authkey_areq.sha_op_req.data[0].vaddr =
+ areq->sha_op_req.authkey;
+ authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+ authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+ authkey_areq.sha_op_req.diglen = 0;
+ authkey_areq.handle = handle;
+
+ memset(&authkey_areq.sha_op_req.digest[0], 0,
+ QCEDEV_MAX_SHA_DIGEST);
+ if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+ authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+ if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+ authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+ authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+ qcedev_sha_init(&authkey_areq, handle);
+ err = qcedev_sha_update(&authkey_areq, handle, sg_src);
+ if (!err)
+ err = qcedev_sha_final(&authkey_areq, handle);
+ else
+ return err;
+ memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ qcedev_sha_init(areq, handle);
+
+ memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
+ handle->sha_ctxt.diglen);
+ }
+ return err;
+}
+
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ struct scatterlist sg_src;
+ uint8_t *k_src = NULL;
+ uint32_t sha_block_size = 0;
+ uint32_t sha_digest_size = 0;
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+ sha_digest_size = SHA1_DIGEST_SIZE;
+ sha_block_size = SHA1_BLOCK_SIZE;
+ } else {
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+ sha_digest_size = SHA256_DIGEST_SIZE;
+ sha_block_size = SHA256_BLOCK_SIZE;
+ }
+ }
+ k_src = kmalloc(sha_block_size, GFP_KERNEL);
+ if (k_src == NULL)
+ return -ENOMEM;
+
+ /* check for trailing buffer from previous updates and append it */
+ memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+ handle->sha_ctxt.trailing_buf_len);
+
+ qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+ sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+ sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+ qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+ memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
+ sha_digest_size);
+ handle->sha_ctxt.trailing_buf_len = sha_digest_size;
+
+ handle->sha_ctxt.first_blk = 1;
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.auth_data[0] = 0;
+ handle->sha_ctxt.auth_data[1] = 0;
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+ memcpy(&handle->sha_ctxt.digest[0],
+ &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+ handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
+ }
+
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+ memcpy(&handle->sha_ctxt.digest[0],
+ &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+ handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
+ }
+ err = submit_req(qcedev_areq, handle);
+
+ handle->sha_ctxt.last_blk = 0;
+ handle->sha_ctxt.first_blk = 0;
+
+ kzfree(k_src);
+ return err;
+}
+
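+/*
+ * SW fallback for targets without HW HMAC: derive the inner/outer key
+ * pads per RFC 2104 / FIPS 198,
+ *
+ *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
+ *
+ * with ipad = 0x36 and opad = 0x5c repeated over one hash block. The
+ * XORed key block is staged in trailing_buf so the next hash pass
+ * consumes it as the first block.
+ */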
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle, bool ikey)
+{
+ int i;
+ uint32_t constant;
+ uint32_t sha_block_size;
+
+ if (ikey)
+ constant = 0x36;
+ else
+ constant = 0x5c;
+
+ if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+ sha_block_size = SHA1_BLOCK_SIZE;
+ else
+ sha_block_size = SHA256_BLOCK_SIZE;
+
+ memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+ for (i = 0; i < sha_block_size; i++)
+ handle->sha_ctxt.trailing_buf[i] =
+ (handle->sha_ctxt.authkey[i] ^ constant);
+
+ handle->sha_ctxt.trailing_buf_len = sha_block_size;
+ return 0;
+}
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ int err;
+ struct qcedev_control *podev = handle->cntl;
+
+ err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
+ if (err)
+ return err;
+ if (!podev->ce_support.sha_hmac)
+ qcedev_hmac_update_iokey(areq, handle, true);
+ return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ int err;
+ struct qcedev_control *podev = handle->cntl;
+
+ err = qcedev_sha_final(areq, handle);
+ if (podev->ce_support.sha_hmac)
+ return err;
+
+ qcedev_hmac_update_iokey(areq, handle, false);
+ err = qcedev_hmac_get_ohash(areq, handle);
+ if (err)
+ return err;
+ err = qcedev_sha_final(areq, handle);
+
+ return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+ return qcedev_sha_init(areq, handle);
+ else
+ return qcedev_hmac_init(areq, handle, sg_src);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+ struct qcedev_handle *handle,
+ struct scatterlist *sg_src)
+{
+ return qcedev_sha_update(qcedev_areq, handle, sg_src);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+ (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+ return qcedev_sha_final(areq, handle);
+ else
+ return qcedev_hmac_final(areq, handle);
+}
+
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+ int *di, struct qcedev_handle *handle,
+ uint8_t *k_align_src)
+{
+ int err = 0;
+ int i = 0;
+ int dst_i = *di;
+ struct scatterlist sg_src;
+ uint32_t byteoffset = 0;
+ uint8_t *user_src = NULL;
+ uint8_t *k_align_dst = k_align_src;
+ struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+ if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+ byteoffset = areq->cipher_op_req.byteoffset;
+
+ user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+ if (user_src && copy_from_user((k_align_src + byteoffset),
+ (void __user *)user_src,
+ areq->cipher_op_req.vbuf.src[0].len))
+ return -EFAULT;
+
+ k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
+
+ for (i = 1; i < areq->cipher_op_req.entries; i++) {
+ user_src =
+ (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+ if (user_src && copy_from_user(k_align_src,
+ (void __user *)user_src,
+ areq->cipher_op_req.vbuf.src[i].len)) {
+ return -EFAULT;
+ }
+ k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+ }
+
+ /* restore src beginning */
+ k_align_src = k_align_dst;
+ areq->cipher_op_req.data_len += byteoffset;
+
+ areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+ areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+ /* In place encryption/decryption */
+ sg_set_buf(areq->cipher_req.creq.src,
+ k_align_dst,
+ areq->cipher_op_req.data_len);
+ sg_mark_end(areq->cipher_req.creq.src);
+
+ areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+ areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+ areq->cipher_op_req.entries = 1;
+
+ err = submit_req(areq, handle);
+
+	/* copy data to destination buffer */
+ creq->data_len -= byteoffset;
+
+ while (creq->data_len > 0) {
+ if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+ if (err == 0 && copy_to_user(
+ (void __user *)creq->vbuf.dst[dst_i].vaddr,
+ (k_align_dst + byteoffset),
+ creq->vbuf.dst[dst_i].len))
+ return -EFAULT;
+
+ k_align_dst += creq->vbuf.dst[dst_i].len +
+ byteoffset;
+ creq->data_len -= creq->vbuf.dst[dst_i].len;
+ dst_i++;
+ } else {
+ if (err == 0 && copy_to_user(
+ (void __user *)creq->vbuf.dst[dst_i].vaddr,
+ (k_align_dst + byteoffset),
+ creq->data_len))
+ return -EFAULT;
+
+ k_align_dst += creq->data_len;
+ creq->vbuf.dst[dst_i].len -= creq->data_len;
+ creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+ creq->data_len = 0;
+ }
+ }
+ *di = dst_i;
+
+ return err;
+}
+
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+ struct qcedev_handle *handle)
+{
+ int err = 0;
+ int di = 0;
+ int i = 0;
+ int j = 0;
+ int k = 0;
+ uint32_t byteoffset = 0;
+ int num_entries = 0;
+ uint32_t total = 0;
+ uint32_t len;
+ uint8_t *k_buf_src = NULL;
+ uint8_t *k_align_src = NULL;
+ uint32_t max_data_xfer;
+ struct qcedev_cipher_op_req *saved_req;
+ struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+ total = 0;
+
+ if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+ byteoffset = areq->cipher_op_req.byteoffset;
+ k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+ GFP_KERNEL);
+ if (k_buf_src == NULL)
+ return -ENOMEM;
+ k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+ CACHE_LINE_SIZE);
+ max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+ saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+ if (saved_req == NULL) {
+ kzfree(k_buf_src);
+ return -ENOMEM;
+
+ }
+ memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+ if (areq->cipher_op_req.data_len > max_data_xfer) {
+ struct qcedev_cipher_op_req req;
+
+ /* save the original req structure */
+ memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+ i = 0;
+ /* Address 32 KB at a time */
+ while ((i < req.entries) && (err == 0)) {
+ if (creq->vbuf.src[i].len > max_data_xfer) {
+ creq->vbuf.src[0].len = max_data_xfer;
+ if (i > 0) {
+ creq->vbuf.src[0].vaddr =
+ creq->vbuf.src[i].vaddr;
+ }
+
+ creq->data_len = max_data_xfer;
+ creq->entries = 1;
+
+ err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+ &di, handle, k_align_src);
+ if (err < 0) {
+ kzfree(k_buf_src);
+ kzfree(saved_req);
+ return err;
+ }
+
+ creq->vbuf.src[i].len = req.vbuf.src[i].len -
+ max_data_xfer;
+ creq->vbuf.src[i].vaddr =
+ req.vbuf.src[i].vaddr +
+ max_data_xfer;
+ req.vbuf.src[i].vaddr =
+ creq->vbuf.src[i].vaddr;
+ req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+ } else {
+ total = areq->cipher_op_req.byteoffset;
+ for (j = i; j < req.entries; j++) {
+ num_entries++;
+ if ((total + creq->vbuf.src[j].len)
+ >= max_data_xfer) {
+ creq->vbuf.src[j].len =
+ max_data_xfer - total;
+ total = max_data_xfer;
+ break;
+ }
+ total += creq->vbuf.src[j].len;
+ }
+
+ creq->data_len = total;
+ if (i > 0)
+ for (k = 0; k < num_entries; k++) {
+ creq->vbuf.src[k].len =
+ creq->vbuf.src[i+k].len;
+ creq->vbuf.src[k].vaddr =
+ creq->vbuf.src[i+k].vaddr;
+ }
+ creq->entries = num_entries;
+
+ i = j;
+ err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+ &di, handle, k_align_src);
+ if (err < 0) {
+ kzfree(k_buf_src);
+ kzfree(saved_req);
+ return err;
+ }
+
+ num_entries = 0;
+ areq->cipher_op_req.byteoffset = 0;
+
+ creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+ + creq->vbuf.src[i].len;
+ creq->vbuf.src[i].len = req.vbuf.src[i].len -
+ creq->vbuf.src[i].len;
+
+ req.vbuf.src[i].vaddr =
+ creq->vbuf.src[i].vaddr;
+ req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+ if (creq->vbuf.src[i].len == 0)
+ i++;
+ }
+
+ areq->cipher_op_req.byteoffset = 0;
+ max_data_xfer = QCE_MAX_OPER_DATA;
+ byteoffset = 0;
+
+ } /* end of while ((i < req.entries) && (err == 0)) */
+ } else
+ err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
+ k_align_src);
+
+ /* Restore the original req structure */
+ for (i = 0; i < saved_req->entries; i++) {
+ creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+ creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+ }
+ for (len = 0, i = 0; len < saved_req->data_len; i++) {
+ creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+ creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+ len += saved_req->vbuf.dst[i].len;
+ }
+ creq->entries = saved_req->entries;
+ creq->data_len = saved_req->data_len;
+ creq->byteoffset = saved_req->byteoffset;
+
+ kzfree(saved_req);
+ kzfree(k_buf_src);
+ return err;
+}
+
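+/*
+ * Key sanity rules enforced below (a summary of the checks, not new
+ * policy): encklen == 0 selects the HW-backed key path, which requires
+ * an all-zero key buffer and either a *_NO_KEY op or platform HW key
+ * support; AES-192 is allowed only when the CE reports aes_key_192;
+ * XTS needs a double-length (2x128 or 2x256 bit) key, while other AES
+ * modes take 128- or 256-bit keys.
+ */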
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+ struct qcedev_control *podev)
+{
+ /* if intending to use HW key make sure key fields are set
+ * correctly and HW key is indeed supported in target
+ */
+ if (req->encklen == 0) {
+ int i;
+
+ for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+ if (req->enckey[i]) {
+ pr_err("%s: Invalid key: non-zero key input\n",
+ __func__);
+ goto error;
+ }
+ }
+ if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+ (req->op != QCEDEV_OPER_DEC_NO_KEY))
+ if (!podev->platform_support.hw_key_support) {
+ pr_err("%s: Invalid op %d\n", __func__,
+ (uint32_t)req->op);
+ goto error;
+ }
+ } else {
+ if (req->encklen == QCEDEV_AES_KEY_192) {
+ if (!podev->ce_support.aes_key_192) {
+ pr_err("%s: AES-192 not supported\n", __func__);
+ goto error;
+ }
+ } else {
+ /* if not using HW key make sure key
+ * length is valid
+ */
+ if (req->mode == QCEDEV_AES_MODE_XTS) {
+ if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
+ (req->encklen != QCEDEV_AES_KEY_256*2)) {
+ pr_err("%s: unsupported key size: %d\n",
+ __func__, req->encklen);
+ goto error;
+ }
+ } else {
+ if ((req->encklen != QCEDEV_AES_KEY_128) &&
+ (req->encklen != QCEDEV_AES_KEY_256)) {
+ pr_err("%s: unsupported key size %d\n",
+ __func__, req->encklen);
+ goto error;
+ }
+ }
+ }
+ }
+ return 0;
+error:
+ return -EINVAL;
+}
+
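+/*
+ * Validates one cipher request from userspace before any processing:
+ * entry counts, algorithm/mode ranges, key material (via
+ * qcedev_check_cipher_key()), byteoffset (CTR only, less than one AES
+ * block), IV length rules (zero for ECB, non-zero otherwise), and that
+ * the src and dst scatter lists each sum exactly to data_len without
+ * 32-bit overflow.
+ */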
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+ struct qcedev_control *podev)
+{
+ uint32_t total = 0;
+ uint32_t i;
+
+ if (req->use_pmem) {
+ pr_err("%s: Use of PMEM is not supported\n", __func__);
+ goto error;
+ }
+ if ((req->entries == 0) || (req->data_len == 0) ||
+ (req->entries > QCEDEV_MAX_BUFFERS)) {
+ pr_err("%s: Invalid cipher length/entries\n", __func__);
+ goto error;
+ }
+ if ((req->alg >= QCEDEV_ALG_LAST) ||
+ (req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
+ pr_err("%s: Invalid algorithm %d\n", __func__,
+ (uint32_t)req->alg);
+ goto error;
+ }
+ if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+ (!podev->ce_support.aes_xts)) {
+ pr_err("%s: XTS algorithm is not supported\n", __func__);
+ goto error;
+ }
+ if (req->alg == QCEDEV_ALG_AES) {
+ if (qcedev_check_cipher_key(req, podev))
+ goto error;
+	}
+ /* if using a byteoffset, make sure it is CTR mode using vbuf */
+ if (req->byteoffset) {
+ if (req->mode != QCEDEV_AES_MODE_CTR) {
+ pr_err("%s: Operation on byte offset not supported\n",
+ __func__);
+ goto error;
+ }
+ if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+ pr_err("%s: Invalid byte offset\n", __func__);
+ goto error;
+ }
+ total = req->byteoffset;
+ for (i = 0; i < req->entries; i++) {
+ if (total > U32_MAX - req->vbuf.src[i].len) {
+ pr_err("%s:Integer overflow on total src len\n",
+ __func__);
+ goto error;
+ }
+ total += req->vbuf.src[i].len;
+ }
+ }
+
+ if (req->data_len < req->byteoffset) {
+ pr_err("%s: req data length %u is less than byteoffset %u\n",
+ __func__, req->data_len, req->byteoffset);
+ goto error;
+ }
+
+ /* Ensure IV size */
+ if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+ pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+ goto error;
+ }
+
+ /* Ensure Key size */
+ if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+ pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+ goto error;
+ }
+
+	/* Ensure zero ivlen for ECB mode */
+ if (req->ivlen > 0) {
+ if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+ (req->mode == QCEDEV_DES_MODE_ECB)) {
+ pr_err("%s: Expecting a zero length IV\n", __func__);
+ goto error;
+ }
+ } else {
+ if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+ (req->mode != QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a non-zero length IV\n", __func__);
+ goto error;
+ }
+ }
+ /* Check for sum of all dst length is equal to data_len */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+ pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.dst[i].len);
+ goto error;
+ }
+ if (req->vbuf.dst[i].len >= U32_MAX - total) {
+ pr_err("%s: Integer overflow on total req dst vbuf length\n",
+ __func__);
+ goto error;
+ }
+ total += req->vbuf.dst[i].len;
+ }
+ if (total != req->data_len) {
+ pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+ __func__, i, total, req->data_len);
+ goto error;
+ }
+ /* Check for sum of all src length is equal to data_len */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+ pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.src[i].len);
+ goto error;
+ }
+ if (req->vbuf.src[i].len > U32_MAX - total) {
+ pr_err("%s: Integer overflow on total req src vbuf length\n",
+ __func__);
+ goto error;
+ }
+ total += req->vbuf.src[i].len;
+ }
+ if (total != req->data_len) {
+ pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+ __func__, total, req->data_len);
+ goto error;
+ }
+ return 0;
+error:
+ return -EINVAL;
+}
+
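+/*
+ * Mirror of the cipher checks for hash/HMAC requests: CMAC is gated on
+ * CE support and restricted to 128/256-bit keys, HMAC requests must
+ * carry a key, and the data scatter list must sum to data_len without
+ * overflowing u32.
+ */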
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+ struct qcedev_control *podev)
+{
+ uint32_t total = 0;
+ uint32_t i;
+
+ if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+ (!podev->ce_support.cmac)) {
+ pr_err("%s: CMAC not supported\n", __func__);
+ goto sha_error;
+ }
+ if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+ pr_err("%s: Invalid num entries (%d)\n",
+ __func__, req->entries);
+ goto sha_error;
+ }
+
+ if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+ pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+ goto sha_error;
+ }
+	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+ if (req->authkey == NULL) {
+ pr_err("%s: Invalid authkey pointer\n", __func__);
+ goto sha_error;
+ }
+ if (req->authklen <= 0) {
+ pr_err("%s: Invalid authkey length (%d)\n",
+ __func__, req->authklen);
+ goto sha_error;
+ }
+ }
+
+ if (req->alg == QCEDEV_ALG_AES_CMAC) {
+ if ((req->authklen != QCEDEV_AES_KEY_128) &&
+ (req->authklen != QCEDEV_AES_KEY_256)) {
+ pr_err("%s: unsupported key length\n", __func__);
+ goto sha_error;
+ }
+ }
+
+ /* Check for sum of all src length is equal to data_len */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (req->data[i].len > U32_MAX - total) {
+ pr_err("%s: Integer overflow on total req buf length\n",
+ __func__);
+ goto sha_error;
+ }
+ total += req->data[i].len;
+ }
+
+ if (total != req->data_len) {
+ pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+ __func__, total, req->data_len);
+ goto sha_error;
+ }
+ return 0;
+sha_error:
+ return -EINVAL;
+}
+
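+/*
+ * ioctl entry point. A minimal userspace sketch (illustrative only; it
+ * assumes the qcedev uapi header and a /dev/qcedev node, and elides
+ * buffer and key setup):
+ *
+ *	int fd = open("/dev/qcedev", O_RDWR);
+ *	struct qcedev_cipher_op_req req = { 0 };
+ *	// fill req.vbuf.src/dst, req.entries, req.data_len, key, IV ...
+ *	if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0)
+ *		perror("QCEDEV_IOCTL_ENC_REQ");
+ */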
+static inline long qcedev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct qcedev_handle *handle;
+ struct qcedev_control *podev;
+ struct qcedev_async_req qcedev_areq;
+ struct qcedev_stat *pstat;
+
+ handle = file->private_data;
+ podev = handle->cntl;
+ qcedev_areq.handle = handle;
+ if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+ pr_err("%s: invalid handle %p\n",
+ __func__, podev);
+ return -ENOENT;
+ }
+
+ /* Verify user arguments. */
+ if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+ return -ENOTTY;
+
+ init_completion(&qcedev_areq.complete);
+ pstat = &_qcedev_stat;
+
+ switch (cmd) {
+ case QCEDEV_IOCTL_ENC_REQ:
+ case QCEDEV_IOCTL_DEC_REQ:
+ if (copy_from_user(&qcedev_areq.cipher_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_cipher_op_req)))
+ return -EFAULT;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+ if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+ podev))
+ return -EINVAL;
+
+ err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+ if (err)
+ return err;
+ if (copy_to_user((void __user *)arg,
+ &qcedev_areq.cipher_op_req,
+ sizeof(struct qcedev_cipher_op_req)))
+ return -EFAULT;
+ break;
+
+ case QCEDEV_IOCTL_SHA_INIT_REQ:
+ {
+ struct scatterlist sg_src;
+
+ if (copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+ err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ }
+ handle->sha_ctxt.init_done = true;
+ break;
+ case QCEDEV_IOCTL_GET_CMAC_REQ:
+ if (!podev->ce_support.cmac)
+ return -ENOTTY;
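+		/* fall through: CMAC shares the SHA update path below */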
+ case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+ {
+ struct scatterlist sg_src;
+
+ if (copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+ if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+ err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ } else {
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+ err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ }
+
+ if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+ pr_err("Invalid sha_ctxt.diglen %d\n",
+ handle->sha_ctxt.diglen);
+ return -EINVAL;
+ }
+ memcpy(&qcedev_areq.sha_op_req.digest[0],
+ &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ }
+ break;
+
+ case QCEDEV_IOCTL_SHA_FINAL_REQ:
+ if (handle->sha_ctxt.init_done == false) {
+ pr_err("%s Init was not called\n", __func__);
+ return -EINVAL;
+ }
+ if (copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+ err = qcedev_hash_final(&qcedev_areq, handle);
+ if (err)
+ return err;
+ qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+ memcpy(&qcedev_areq.sha_op_req.digest[0],
+ &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ handle->sha_ctxt.init_done = false;
+ break;
+
+ case QCEDEV_IOCTL_GET_SHA_REQ:
+ {
+ struct scatterlist sg_src;
+
+ if (copy_from_user(&qcedev_areq.sha_op_req,
+ (void __user *)arg,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ return -EINVAL;
+ qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		if (err)
+			return err;
+ err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+ if (err)
+ return err;
+ err = qcedev_hash_final(&qcedev_areq, handle);
+ if (err)
+ return err;
+ qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+ memcpy(&qcedev_areq.sha_op_req.digest[0],
+ &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ sizeof(struct qcedev_sha_op_req)))
+ return -EFAULT;
+ }
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return err;
+}
+
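+/*
+ * Platform probe: binds the single qcedev control instance to the qce
+ * back end, registers the misc device, and pulls bus-scaling and CE
+ * capability data either from the BAM/DT path or from legacy
+ * platform_data. On any failure the qce handle is closed and drvdata
+ * cleared.
+ */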
+static int qcedev_probe(struct platform_device *pdev)
+{
+ void *handle = NULL;
+ int rc = 0;
+ struct qcedev_control *podev;
+ struct msm_ce_hw_support *platform_support;
+
+ podev = &qce_dev[0];
+
+ podev->high_bw_req_count = 0;
+ INIT_LIST_HEAD(&podev->ready_commands);
+ podev->active_command = NULL;
+
+ spin_lock_init(&podev->lock);
+
+ tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+ /* open qce */
+ handle = qce_open(pdev, &rc);
+ if (handle == NULL) {
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+ }
+
+ podev->qce = handle;
+ podev->pdev = pdev;
+ platform_set_drvdata(pdev, podev);
+
+ rc = misc_register(&podev->miscdevice);
+ qce_hw_support(podev->qce, &podev->ce_support);
+ if (podev->ce_support.bam) {
+ podev->platform_support.ce_shared = 0;
+ podev->platform_support.shared_ce_resource = 0;
+ podev->platform_support.hw_key_support =
+ podev->ce_support.hw_key;
+ podev->platform_support.bus_scale_table = NULL;
+ podev->platform_support.sha_hmac = 1;
+
+ podev->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!podev->platform_support.bus_scale_table)
+ pr_err("bus_scale_table is NULL\n");
+ } else {
+ platform_support =
+ (struct msm_ce_hw_support *)pdev->dev.platform_data;
+ podev->platform_support.ce_shared = platform_support->ce_shared;
+ podev->platform_support.shared_ce_resource =
+ platform_support->shared_ce_resource;
+ podev->platform_support.hw_key_support =
+ platform_support->hw_key_support;
+ podev->platform_support.bus_scale_table =
+ platform_support->bus_scale_table;
+ podev->platform_support.sha_hmac = platform_support->sha_hmac;
+ }
+ if (podev->platform_support.bus_scale_table != NULL) {
+ podev->bus_scale_handle =
+ msm_bus_scale_register_client(
+ (struct msm_bus_scale_pdata *)
+ podev->platform_support.bus_scale_table);
+ if (!podev->bus_scale_handle) {
+ pr_err("%s not able to get bus scale\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (rc >= 0)
+ return 0;
+
+ if (podev->platform_support.bus_scale_table != NULL)
+ msm_bus_scale_unregister_client(podev->bus_scale_handle);
+err:
+ if (handle)
+ qce_close(handle);
+ platform_set_drvdata(pdev, NULL);
+ podev->qce = NULL;
+ podev->pdev = NULL;
+ return rc;
+}
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+ struct qcedev_control *podev;
+
+ podev = platform_get_drvdata(pdev);
+ if (!podev)
+ return 0;
+ if (podev->qce)
+ qce_close(podev->qce);
+
+ if (podev->platform_support.bus_scale_table != NULL)
+ msm_bus_scale_unregister_client(podev->bus_scale_handle);
+
+ if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+ misc_deregister(&podev->miscdevice);
+ tasklet_kill(&podev->done_tasklet);
+ return 0;
+}
+
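+/*
+ * PM callbacks: suspend drops the bus vote and then gates the CE clock
+ * (re-voting high bandwidth if clock gating fails); resume is the
+ * mirror image, enabling the clock before restoring the high-bandwidth
+ * vote. Both are no-ops unless a high-bandwidth request is pending.
+ */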
+static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct qcedev_control *podev;
+ int ret;
+
+ podev = platform_get_drvdata(pdev);
+
+ if (!podev || !podev->platform_support.bus_scale_table)
+ return 0;
+
+ mutex_lock(&qcedev_sent_bw_req);
+ if (podev->high_bw_req_count) {
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ goto suspend_exit;
+ }
+ ret = qce_disable_clk(podev->qce);
+ if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ goto suspend_exit;
+ }
+ }
+
+suspend_exit:
+ mutex_unlock(&qcedev_sent_bw_req);
+ return 0;
+}
+
+static int qcedev_resume(struct platform_device *pdev)
+{
+ struct qcedev_control *podev;
+ int ret;
+
+ podev = platform_get_drvdata(pdev);
+
+ if (!podev || !podev->platform_support.bus_scale_table)
+ return 0;
+
+ mutex_lock(&qcedev_sent_bw_req);
+ if (podev->high_bw_req_count) {
+ ret = qce_enable_clk(podev->qce);
+ if (ret) {
+			pr_err("%s Unable to enable clk\n", __func__);
+ goto resume_exit;
+ }
+ ret = msm_bus_scale_client_update_request(
+ podev->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ ret = qce_disable_clk(podev->qce);
+ if (ret)
+				pr_err("%s Unable to enable clk\n",
+ __func__);
+ goto resume_exit;
+ }
+ }
+
+resume_exit:
+ mutex_unlock(&qcedev_sent_bw_req);
+ return 0;
+}
+
+static const struct of_device_id qcedev_match[] = {
+ { .compatible = "qcom,qcedev",
+ },
+ {}
+};
+
+static struct platform_driver qcedev_plat_driver = {
+ .probe = qcedev_probe,
+ .remove = qcedev_remove,
+ .suspend = qcedev_suspend,
+ .resume = qcedev_resume,
+ .driver = {
+ .name = "qce",
+ .owner = THIS_MODULE,
+ .of_match_table = qcedev_match,
+ },
+};
+
+static int _disp_stats(int id)
+{
+ struct qcedev_stat *pstat;
+ int len = 0;
+
+ pstat = &_qcedev_stat;
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ "\nQTI QCE dev driver %d Statistics:\n",
+ id + 1);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " Encryption operation success : %d\n",
+ pstat->qcedev_enc_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " Encryption operation fail : %d\n",
+ pstat->qcedev_enc_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " Decryption operation success : %d\n",
+ pstat->qcedev_dec_success);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Decryption operation fail : %d\n",
+ pstat->qcedev_dec_fail);
+
+ return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t rc = -EINVAL;
+ int qcedev = *((int *) file->private_data);
+ int len;
+
+ len = _disp_stats(qcedev);
+
+ if (len <= count)
+ rc = simple_read_from_buffer((void __user *) buf, len,
+ ppos, (void *) _debug_read_buf, len);
+ return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+ return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+ .open = _debug_stats_open,
+ .read = _debug_stats_read,
+ .write = _debug_stats_write,
+};
+
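+/*
+ * The debugfs node exposes the counters above; with debugfs mounted at
+ * its usual location one would typically read and reset them via e.g.:
+ *
+ *	cat /sys/kernel/debug/qcedev/stats-1
+ *	echo 1 > /sys/kernel/debug/qcedev/stats-1	(any write clears)
+ */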
+static int _qcedev_debug_init(void)
+{
+ int rc;
+ char name[DEBUG_MAX_FNAME];
+ struct dentry *dent;
+
+ _debug_dent = debugfs_create_dir("qcedev", NULL);
+ if (IS_ERR(_debug_dent)) {
+ pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(_debug_dent));
+ return PTR_ERR(_debug_dent);
+ }
+
+ snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+ _debug_qcedev = 0;
+ dent = debugfs_create_file(name, 0644, _debug_dent,
+ &_debug_qcedev, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcedev debugfs_create_file failed\n");
+		rc = -ENOMEM;
+ goto err;
+ }
+ return 0;
+err:
+ debugfs_remove_recursive(_debug_dent);
+ return rc;
+}
+
+static int qcedev_init(void)
+{
+ int rc;
+
+ rc = _qcedev_debug_init();
+ if (rc)
+ return rc;
+ return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+ debugfs_remove_recursive(_debug_dent);
+ platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DEV Crypto driver");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcedevi.h b/drivers/crypto/msm/qcedevi.h
new file mode 100644
index 0000000..c26ed71
--- /dev/null
+++ b/drivers/crypto/msm/qcedevi.h
@@ -0,0 +1,125 @@
+/* QTI crypto Driver
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/fips_status.h>
+#include "qce.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+enum qcedev_crypto_oper_type {
+ QCEDEV_CRYPTO_OPER_CIPHER = 0,
+ QCEDEV_CRYPTO_OPER_SHA = 1,
+ QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+ struct ablkcipher_request creq;
+ void *cookie;
+};
+
+struct qcedev_sha_req {
+ struct ahash_request sreq;
+ void *cookie;
+};
+
+struct qcedev_sha_ctxt {
+ uint32_t auth_data[4];
+ uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+ uint32_t diglen;
+ uint8_t trailing_buf[64];
+ uint32_t trailing_buf_len;
+ uint8_t first_blk;
+ uint8_t last_blk;
+ uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+ bool init_done;
+};
+
+struct qcedev_async_req {
+ struct list_head list;
+ struct completion complete;
+ enum qcedev_crypto_oper_type op_type;
+ union {
+ struct qcedev_cipher_op_req cipher_op_req;
+ struct qcedev_sha_op_req sha_op_req;
+ };
+
+ union {
+ struct qcedev_cipher_req cipher_req;
+ struct qcedev_sha_req sha_req;
+ };
+ struct qcedev_handle *handle;
+ int err;
+};
+
+/**********************************************************************
+ * Register ourselves as a misc device to be able to access the dev driver
+ * from userspace.
+ */
+
+#define QCEDEV_DEV "qcedev"
+
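+/*
+ * Single driver-wide control block (qce_dev[0]): owns the qce handle,
+ * the platform/CE capability data, the pending-command list and
+ * completion tasklet, and the misc device registration.
+ */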
+struct qcedev_control {
+
+ /* CE features supported by platform */
+ struct msm_ce_hw_support platform_support;
+
+ uint32_t ce_lock_count;
+ uint32_t high_bw_req_count;
+
+ /* CE features/algorithms supported by HW engine*/
+ struct ce_hw_support ce_support;
+
+ uint32_t bus_scale_handle;
+
+ /* misc device */
+ struct miscdevice miscdevice;
+
+ /* qce handle */
+ void *qce;
+
+ /* platform device */
+ struct platform_device *pdev;
+
+ unsigned int magic;
+
+ struct list_head ready_commands;
+ struct qcedev_async_req *active_command;
+ spinlock_t lock;
+ struct tasklet_struct done_tasklet;
+};
+
+struct qcedev_handle {
+ /* qcedev control handle */
+ struct qcedev_control *cntl;
+ /* qce internal sha context*/
+ struct qcedev_sha_ctxt sha_ctxt;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+ unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret);
+
+#endif /* __CRYPTO_MSM_QCEDEVI_H */
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 0000000..f184ee1
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,5515 @@
+/*
+ * QTI Crypto driver
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/hardirq.h>
+#include <linux/qcrypto.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+
+#include <linux/fips_status.h>
+
+#include "qce.h"
+
+#define DEBUG_MAX_FNAME 16
+#define DEBUG_MAX_RW_BUF 4096
+#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
+
+/*
+ * For crypto 5.0 which has burst size alignment requirement.
+ */
+#define MAX_ALIGN_SIZE 0x40
+
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
+/* Status of response workq */
+enum resp_workq_sts {
+ NOT_SCHEDULED = 0,
+ IS_SCHEDULED = 1,
+ SCHEDULE_AGAIN = 2
+};
+
+/* Status of req processing by CEs */
+enum req_processing_sts {
+ STOPPED = 0,
+ IN_PROGRESS = 1
+};
+
+enum qcrypto_bus_state {
+ BUS_NO_BANDWIDTH = 0,
+ BUS_HAS_BANDWIDTH,
+ BUS_BANDWIDTH_RELEASING,
+ BUS_BANDWIDTH_ALLOCATING,
+ BUS_SUSPENDED,
+ BUS_SUSPENDING,
+};
+
+struct crypto_stat {
+ u64 aead_sha1_aes_enc;
+ u64 aead_sha1_aes_dec;
+ u64 aead_sha1_des_enc;
+ u64 aead_sha1_des_dec;
+ u64 aead_sha1_3des_enc;
+ u64 aead_sha1_3des_dec;
+ u64 aead_sha256_aes_enc;
+ u64 aead_sha256_aes_dec;
+ u64 aead_sha256_des_enc;
+ u64 aead_sha256_des_dec;
+ u64 aead_sha256_3des_enc;
+ u64 aead_sha256_3des_dec;
+ u64 aead_ccm_aes_enc;
+ u64 aead_ccm_aes_dec;
+ u64 aead_rfc4309_ccm_aes_enc;
+ u64 aead_rfc4309_ccm_aes_dec;
+ u64 aead_op_success;
+ u64 aead_op_fail;
+ u64 aead_bad_msg;
+ u64 ablk_cipher_aes_enc;
+ u64 ablk_cipher_aes_dec;
+ u64 ablk_cipher_des_enc;
+ u64 ablk_cipher_des_dec;
+ u64 ablk_cipher_3des_enc;
+ u64 ablk_cipher_3des_dec;
+ u64 ablk_cipher_op_success;
+ u64 ablk_cipher_op_fail;
+ u64 sha1_digest;
+ u64 sha256_digest;
+ u64 sha1_hmac_digest;
+ u64 sha256_hmac_digest;
+ u64 ahash_op_success;
+ u64 ahash_op_fail;
+};
+static struct crypto_stat _qcrypto_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
+struct crypto_priv;
+struct qcrypto_req_control {
+ unsigned int index;
+ bool in_use;
+ struct crypto_engine *pce;
+ struct crypto_async_request *req;
+ struct qcrypto_resp_ctx *arsp;
+ int res; /* execution result */
+};
+
+struct crypto_engine {
+ struct list_head elist;
+ void *qce; /* qce handle */
+ struct platform_device *pdev; /* platform device */
+ struct crypto_priv *pcp;
+ uint32_t bus_scale_handle;
+ struct crypto_queue req_queue; /*
+ * request queue for those requests
+ * that have this engine assigned
+ * waiting to be executed
+ */
+ u64 total_req;
+ u64 err_req;
+ u32 unit;
+ u32 ce_device;
+ u32 ce_hw_instance;
+ unsigned int signature;
+
+ enum qcrypto_bus_state bw_state;
+ bool high_bw_req;
+ struct timer_list bw_reaper_timer;
+ struct work_struct bw_reaper_ws;
+ struct work_struct bw_allocate_ws;
+
+ /* engine execution sequence number */
+ u32 active_seq;
+ /* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+ u32 last_active_seq;
+
+ bool check_flag;
+	/* Added to support multi-requests */
+ unsigned int max_req;
+ struct qcrypto_req_control *preq_pool;
+ atomic_t req_count;
+	bool issue_req;		/* a request is being issued to qce */
+	bool first_engine;	/* whether this engine is the first engine */
+ unsigned int irq_cpu; /* the cpu running the irq of this engine */
+ unsigned int max_req_used; /* debug stats */
+};
+
+#define MAX_SMP_CPU 8
+
+struct crypto_priv {
+ /* CE features supported by target device*/
+ struct msm_ce_hw_support platform_support;
+
+ /* CE features/algorithms supported by HW engine*/
+ struct ce_hw_support ce_support;
+
+ /* the lock protects crypto queue and req */
+ spinlock_t lock;
+
+ /* list of registered algorithms */
+ struct list_head alg_list;
+
+ /* current active request */
+ struct crypto_async_request *req;
+
+ struct work_struct unlock_ce_ws;
+ struct list_head engine_list; /* list of qcrypto engines */
+ int32_t total_units; /* total units of engines */
+ struct mutex engine_lock;
+
+ struct crypto_engine *next_engine; /* next assign engine */
+ struct crypto_queue req_queue; /*
+ * request queue for those requests
+ * that waiting for an available
+ * engine.
+ */
+ struct llist_head ordered_resp_list; /* Queue to maintain
+ * responses in sequence.
+ */
+ atomic_t resp_cnt;
+ struct workqueue_struct *resp_wq;
+ struct work_struct resp_work; /*
+ * Workq to send responses
+ * in sequence.
+ */
+ enum resp_workq_sts sched_resp_workq_status;
+ enum req_processing_sts ce_req_proc_sts;
+ int cpu_getting_irqs_frm_first_ce;
+ struct crypto_engine *first_engine;
+ struct crypto_engine *scheduled_eng; /* last engine scheduled */
+
+ /* debug stats */
+ unsigned int no_avail;
+ unsigned int resp_stop;
+ unsigned int resp_start;
+ unsigned int max_qlen;
+ unsigned int queue_work_eng3;
+ unsigned int queue_work_not_eng3;
+ unsigned int queue_work_not_eng3_nz;
+ unsigned int max_resp_qlen;
+ unsigned int max_reorder_cnt;
+ unsigned int cpu_req[MAX_SMP_CPU+1];
+};
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+ struct crypto_priv *cp);
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
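+/*
+ * Claim a free slot from the per-engine request pool. xchg() on in_use
+ * makes the claim atomic without taking a lock; req_count tracks
+ * outstanding requests and max_req_used records the high-water mark
+ * for the debug stats.
+ */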
+static struct qcrypto_req_control *qcrypto_alloc_req_control(
+ struct crypto_engine *pce)
+{
+ int i;
+ struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+ unsigned int req_count;
+
+ for (i = 0; i < pce->max_req; i++) {
+ if (xchg(&pqcrypto_req_control->in_use, true) == false) {
+ req_count = atomic_inc_return(&pce->req_count);
+ if (req_count > pce->max_req_used)
+ pce->max_req_used = req_count;
+ return pqcrypto_req_control;
+ }
+ pqcrypto_req_control++;
+ }
+ return NULL;
+}
+
+static void qcrypto_free_req_control(struct crypto_engine *pce,
+ struct qcrypto_req_control *preq)
+{
+ /* do this before free req */
+ preq->req = NULL;
+ preq->arsp = NULL;
+ /* free req */
+ if (xchg(&preq->in_use, false) == false)
+		pr_warn("request info %p already freed\n", preq);
+ else
+ atomic_dec(&pce->req_count);
+}
+
+static struct qcrypto_req_control *find_req_control_for_areq(
+ struct crypto_engine *pce,
+ struct crypto_async_request *areq)
+{
+ int i;
+ struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+ for (i = 0; i < pce->max_req; i++) {
+ if (pqcrypto_req_control->req == areq)
+ return pqcrypto_req_control;
+ pqcrypto_req_control++;
+ }
+ return NULL;
+}
+
+static void qcrypto_init_req_control(struct crypto_engine *pce,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ int i;
+
+ pce->preq_pool = pqcrypto_req_control;
+ atomic_set(&pce->req_count, 0);
+ for (i = 0; i < pce->max_req; i++) {
+ pqcrypto_req_control->index = i;
+ pqcrypto_req_control->in_use = false;
+ pqcrypto_req_control->pce = pce;
+ pqcrypto_req_control++;
+ }
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
+ unsigned int device)
+{
+ struct crypto_engine *entry = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ if (entry->ce_device == device)
+ break;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (((entry != NULL) && (entry->ce_device != device)) ||
+ (entry == NULL)) {
+ pr_err("Device node for CE device %d NOT FOUND!!\n",
+ device);
+ return NULL;
+ }
+
+ return entry;
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device_hw
+ (struct crypto_priv *cp,
+ u32 device,
+ u32 hw_instance)
+{
+ struct crypto_engine *entry = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ if ((entry->ce_device == device) &&
+ (entry->ce_hw_instance == hw_instance))
+ break;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (((entry != NULL) &&
+ ((entry->ce_device != device)
+ || (entry->ce_hw_instance != hw_instance)))
+ || (entry == NULL)) {
+ pr_err("Device node for CE device %d NOT FOUND!!\n",
+ device);
+ return NULL;
+ }
+ return entry;
+}
+
+int qcrypto_get_num_engines(void)
+{
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *entry = NULL;
+ int count = 0;
+
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ count++;
+ }
+ return count;
+}
+EXPORT_SYMBOL(qcrypto_get_num_engines);
+
+void qcrypto_get_engine_list(size_t num_engines,
+ struct crypto_engine_entry *arr)
+{
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *entry = NULL;
+ size_t arr_index = 0;
+
+ list_for_each_entry(entry, &cp->engine_list, elist) {
+ arr[arr_index].ce_device = entry->ce_device;
+ arr[arr_index].hw_instance = entry->ce_hw_instance;
+ arr_index++;
+ if (arr_index >= num_engines)
+ break;
+ }
+}
+EXPORT_SYMBOL(qcrypto_get_engine_list);
+
+enum qcrypto_alg_type {
+ QCRYPTO_ALG_CIPHER = 0,
+ QCRYPTO_ALG_SHA = 1,
+ QCRYPTO_ALG_AEAD = 2,
+ QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+ struct list_head entry;
+ struct crypto_alg cipher_alg;
+ struct ahash_alg sha_alg;
+ struct aead_alg aead_alg;
+ enum qcrypto_alg_type alg_type;
+ struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE 64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH 16
+
+#define QCRYPTO_CCM4309_NONCE_LEN 3
+
+struct qcrypto_cipher_ctx {
+ struct list_head rsp_queue; /* response queue */
+ struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
+ struct crypto_priv *cp;
+ unsigned int flags;
+
+ enum qce_hash_alg_enum auth_alg; /* for aead */
+ u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+ u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+ u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+ unsigned int enc_key_len;
+
+ unsigned int authsize;
+ unsigned int auth_key_len;
+
+ u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
+
+ struct crypto_skcipher *cipher_aes192_fb;
+
+ struct crypto_ahash *ahash_aead_aes192_fb;
+};
+
+struct qcrypto_resp_ctx {
+ struct list_head list;
+ struct llist_node llist;
+ struct crypto_async_request *async_req; /* async req */
+ int res; /* execution result */
+};
+
+struct qcrypto_cipher_req_ctx {
+ struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+ struct crypto_engine *pengine; /* engine assigned to this request */
+ u8 *iv;
+ u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
+ unsigned int ivsize;
+ int aead;
+ struct scatterlist asg; /* Formatted associated data sg */
+ unsigned char *adata; /* Pointer to formatted assoc data */
+ enum qce_cipher_alg_enum alg;
+ enum qce_cipher_dir_enum dir;
+ enum qce_cipher_mode_enum mode;
+
+ struct scatterlist *orig_src; /* Original src sg ptr */
+ struct scatterlist *orig_dst; /* Original dst sg ptr */
+ struct scatterlist dsg; /* Dest Data sg */
+ struct scatterlist ssg; /* Source Data sg */
+ unsigned char *data; /* Incoming data pointer*/
+
+ struct aead_request *aead_req;
+ struct ahash_request *fb_hash_req;
+ uint8_t fb_ahash_digest[SHA256_DIGEST_SIZE];
+ struct scatterlist fb_ablkcipher_src_sg[2];
+ struct scatterlist fb_ablkcipher_dst_sg[2];
+ char *fb_aes_iv;
+ unsigned int fb_ahash_length;
+ struct skcipher_request *fb_aes_req;
+ struct scatterlist *fb_aes_src;
+ struct scatterlist *fb_aes_dst;
+ unsigned int fb_aes_cryptlen;
+};
+
+#define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE
+
+#define MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
+#define COMPLETION_CB_BACKLOG_LENGTH_STOP 400
+#define COMPLETION_CB_BACKLOG_LENGTH_START \
+ (COMPLETION_CB_BACKLOG_LENGTH_STOP / 2)
+
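+/* standard initialization vector for SHA-1, source: FIPS 180-2 */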
+static uint8_t _std_init_vector_sha1_uint8[] = {
+ 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+ 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+ 0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+ 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+ 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+ 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+ 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+ struct list_head rsp_queue; /* response queue */
+ struct crypto_engine *pengine; /* fixed engine assigned to this tfm */
+ struct crypto_priv *cp;
+ unsigned int flags;
+ enum qce_hash_alg_enum alg;
+ uint32_t diglen;
+ uint32_t authkey_in_len;
+ uint8_t authkey[SHA_MAX_BLOCK_SIZE];
+ struct ahash_request *ahash_req;
+ struct completion ahash_req_complete;
+};
+
+struct qcrypto_sha_req_ctx {
+ struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+ struct crypto_engine *pengine; /* engine assigned to this request */
+
+ struct scatterlist *src;
+ uint32_t nbytes;
+
+ struct scatterlist *orig_src; /* Original src sg ptr */
+ struct scatterlist dsg; /* Data sg */
+ unsigned char *data; /* Incoming data pointer*/
+ unsigned char *data2; /* Updated data pointer*/
+
+ uint32_t byte_count[4];
+ u64 count;
+ uint8_t first_blk;
+ uint8_t last_blk;
+ uint8_t trailing_buf[SHA_MAX_BLOCK_SIZE];
+ uint32_t trailing_buf_len;
+
+ /* dma buffer, Internal use */
+ uint8_t staging_dmabuf
+ [SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+ uint8_t digest[SHA_MAX_DIGEST_SIZE];
+ struct scatterlist sg[2];
+};
+
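+/*
+ * Big-endian pack/unpack helpers for hash IVs and byte counts. For
+ * example, the byte stream { 0x01, 0x02, 0x03, 0x04 } becomes the
+ * 32-bit word 0x01020304, and a trailing 1-3 bytes are packed into
+ * the high-order bits of a final partial word.
+ */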
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned int n;
+
+ n = len / sizeof(uint32_t);
+ for (; n > 0; n--) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) |
+ (*(b+3) & 0xff);
+ b += sizeof(uint32_t);
+ iv++;
+ }
+
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00);
+ } else if (n == 2) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000);
+ } else if (n == 1) {
+ *iv = ((*b << 24) & 0xff000000);
+ }
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned int n = len / sizeof(uint32_t);
+
+ for (; n > 0; n--) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 16) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 8) & 0xff);
+ *b++ = (unsigned char) (*iv & 0xff);
+ iv++;
+ }
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b++ = (unsigned char) ((*iv >> 16) & 0xff);
+ *b = (unsigned char) ((*iv >> 8) & 0xff);
+ } else if (n == 2) {
+ *b++ = (unsigned char) ((*iv >> 24) & 0xff);
+ *b = (unsigned char) ((*iv >> 16) & 0xff);
+ } else if (n == 1) {
+ *b = (unsigned char) ((*iv >> 24) & 0xff);
+ }
+}
+
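+/*
+ * Vote the CE bus bandwidth up or down. Ordering matters: on the way
+ * up the clock is enabled before the high-bandwidth vote; on the way
+ * down the vote is dropped before the clock is gated, and each failure
+ * path tries to roll back to the previous state.
+ */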
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+ bool high_bw_req)
+{
+ int ret = 0;
+
+ if (high_bw_req) {
+ ret = qce_enable_clk(pengine->qce);
+ if (ret) {
+			pr_err("%s Unable to enable clk\n", __func__);
+ goto clk_err;
+ }
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 1);
+ if (ret) {
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ qce_disable_clk(pengine->qce);
+ goto clk_err;
+ }
+ } else {
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 0);
+ if (ret) {
+ pr_err("%s Unable to set to low bandwidth\n",
+ __func__);
+ goto clk_err;
+ }
+ ret = qce_disable_clk(pengine->qce);
+ if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+ ret = msm_bus_scale_client_update_request(
+ pengine->bus_scale_handle, 1);
+ if (ret)
+ pr_err("%s Unable to set to high bandwidth\n",
+ __func__);
+ goto clk_err;
+ }
+ }
+clk_err:
+ return;
+}
+
+static void qcrypto_bw_reaper_timer_callback(unsigned long data)
+{
+ struct crypto_engine *pengine = (struct crypto_engine *)data;
+
+ schedule_work(&pengine->bw_reaper_ws);
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+ pengine->bw_reaper_timer.data =
+ (unsigned long)(pengine);
+ pengine->bw_reaper_timer.expires = jiffies +
+ msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+ mod_timer(&(pengine->bw_reaper_timer),
+ pengine->bw_reaper_timer.expires);
+}
+
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
+{
+ schedule_work(&pengine->bw_allocate_ws);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+ struct crypto_engine *pengine);
+
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+ struct crypto_engine *pengine = container_of(work,
+ struct crypto_engine, bw_allocate_ws);
+ unsigned long flags;
+ struct crypto_priv *cp = pengine->pcp;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qcrypto_ce_set_bus(pengine, true);
+ qcrypto_bw_set_timeout(pengine);
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_HAS_BANDWIDTH;
+ pengine->high_bw_req = false;
+ pengine->active_seq++;
+ pengine->check_flag = true;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ _start_qcrypto_process(cp, pengine);
+}
+
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+ struct crypto_engine *pengine = container_of(work,
+ struct crypto_engine, bw_reaper_ws);
+ struct crypto_priv *cp = pengine->pcp;
+ unsigned long flags;
+ u32 active_seq;
+ bool restart = false;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ active_seq = pengine->active_seq;
+ if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+ (active_seq == pengine->last_active_seq)) {
+
+ /* check if engine is stuck */
+ if (atomic_read(&pengine->req_count) > 0) {
+ if (pengine->check_flag)
+ dev_warn(&pengine->pdev->dev,
+ "The engine appears to be stuck seq %d.\n",
+ active_seq);
+ pengine->check_flag = false;
+ goto ret;
+ }
+ if (cp->platform_support.bus_scale_table == NULL)
+ goto ret;
+ pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qcrypto_ce_set_bus(pengine, false);
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+		if (pengine->high_bw_req) {
+ /* we got request while we are disabling clock */
+ pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qcrypto_ce_set_bus(pengine, true);
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_HAS_BANDWIDTH;
+ pengine->high_bw_req = false;
+ restart = true;
+ } else
+ pengine->bw_state = BUS_NO_BANDWIDTH;
+ }
+ret:
+ pengine->last_active_seq = active_seq;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (restart)
+ _start_qcrypto_process(cp, pengine);
+ if (pengine->bw_state != BUS_NO_BANDWIDTH)
+ qcrypto_bw_set_timeout(pengine);
+}
+
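+/*
+ * Return how many scatterlist entries are needed to cover nbytes;
+ * e.g. for entries of length 4K/4K/4K and nbytes == 6K the result
+ * is 2.
+ */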
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+ int i;
+
+ for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg))
+ nbytes -= sg->length;
+
+ return i;
+}
+
+static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
+ unsigned int nents, void *buf, size_t buflen)
+{
+ int i;
+ size_t offset, len;
+
+ for (i = 0, offset = 0; i < nents; ++i) {
+ len = sg_copy_from_buffer(sgl, 1, buf, buflen);
+ buf += len;
+ buflen -= len;
+ offset += len;
+ sgl = sg_next(sgl);
+ }
+
+ return offset;
+}
+
+static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
+ unsigned int nents, void *buf, size_t buflen)
+{
+ int i;
+ size_t offset, len;
+
+ for (i = 0, offset = 0; i < nents; ++i) {
+ len = sg_copy_to_buffer(sgl, 1, buf, buflen);
+ buf += len;
+ buflen -= len;
+ offset += len;
+ sgl = sg_next(sgl);
+ }
+
+ return offset;
+}
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+ struct ahash_alg *template)
+{
+ struct qcrypto_alg *q_alg;
+
+ q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+ if (!q_alg)
+ return ERR_PTR(-ENOMEM);
+
+ q_alg->alg_type = QCRYPTO_ALG_SHA;
+ q_alg->sha_alg = *template;
+ q_alg->cp = cp;
+
+ return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+ struct crypto_alg *template)
+{
+ struct qcrypto_alg *q_alg;
+
+ q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+ if (!q_alg)
+ return ERR_PTR(-ENOMEM);
+
+ q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+ q_alg->cipher_alg = *template;
+ q_alg->cp = cp;
+
+ return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
+ struct aead_alg *template)
+{
+ struct qcrypto_alg *q_alg;
+
+ q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+ if (!q_alg)
+ return ERR_PTR(-ENOMEM);
+
+ q_alg->alg_type = QCRYPTO_ALG_AEAD;
+ q_alg->aead_alg = *template;
+ q_alg->cp = cp;
+
+ return q_alg;
+}
+
+static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx,
+ struct qcrypto_alg *q_alg)
+{
+ if (!ctx || !q_alg) {
+ pr_err("ctx or q_alg is NULL\n");
+ return -EINVAL;
+ }
+ ctx->flags = 0;
+ /* update context with ptr to cp */
+ ctx->cp = q_alg->cp;
+ /* random first IV */
+ get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+ if (_qcrypto_init_assign) {
+ ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+ if (ctx->pengine == NULL)
+ return -ENODEV;
+ } else
+ ctx->pengine = NULL;
+ INIT_LIST_HEAD(&ctx->rsp_queue);
+ ctx->auth_alg = QCE_HASH_LAST;
+ return 0;
+}
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct qcrypto_alg *q_alg;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+ return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+ struct ahash_alg, halg);
+ struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+ sha_alg);
+
+ crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+ /* update context with ptr to cp */
+ sha_ctx->cp = q_alg->cp;
+ sha_ctx->flags = 0;
+ sha_ctx->ahash_req = NULL;
+ if (_qcrypto_init_assign) {
+ sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+ if (sha_ctx->pengine == NULL)
+ return -ENODEV;
+ } else
+ sha_ctx->pengine = NULL;
+ INIT_LIST_HEAD(&sha_ctx->rsp_queue);
+ return 0;
+}
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+ if (!list_empty(&sha_ctx->rsp_queue))
+		pr_err("_qcrypto_ahash_cra_exit: requests still outstanding\n");
+ if (sha_ctx->ahash_req != NULL) {
+ ahash_request_free(sha_ctx->ahash_req);
+ sha_ctx->ahash_req = NULL;
+ }
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+ struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+ int ret = 0;
+
+ ret = _qcrypto_ahash_cra_init(tfm);
+ if (ret)
+ return ret;
+ sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+ if (sha_ctx->ahash_req == NULL) {
+ _qcrypto_ahash_cra_exit(tfm);
+ return -ENOMEM;
+ }
+
+ init_completion(&sha_ctx->ahash_req_complete);
+ ahash_request_set_callback(sha_ctx->ahash_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _crypto_sha_hmac_ahash_req_complete,
+ &sha_ctx->ahash_req_complete);
+ crypto_ahash_clear_flags(ahash, ~0);
+
+ return 0;
+}
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+ return _qcrypto_cipher_cra_init(tfm);
+}
+
+static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+ struct crypto_priv *cp = &qcrypto_dev;
+
+ if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+ ctx->cipher_aes192_fb = NULL;
+ return _qcrypto_cra_ablkcipher_init(tfm);
+ }
+ ctx->cipher_aes192_fb = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->cipher_aes192_fb)) {
+ pr_err("Error allocating fallback algo %s\n", name);
+ ret = PTR_ERR(ctx->cipher_aes192_fb);
+ ctx->cipher_aes192_fb = NULL;
+ return ret;
+ }
+ return _qcrypto_cra_ablkcipher_init(tfm);
+}
+
+static int _qcrypto_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_alg *aeadalg = crypto_aead_alg(tfm);
+ struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg,
+ aead_alg);
+ return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_aead_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_aead_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_ccm_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_aead_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_AES_CMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_aead_cra_init(tfm);
+ ctx->auth_alg = QCE_HASH_AES_CMAC;
+ return rc;
+}
+
+static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_priv *cp = &qcrypto_dev;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_aead_cra_init(tfm);
+ if (rc)
+ return rc;
+ ctx->cipher_aes192_fb = NULL;
+ ctx->ahash_aead_aes192_fb = NULL;
+ if (!cp->ce_support.aes_key_192) {
+ ctx->cipher_aes192_fb = crypto_alloc_skcipher(
+ "cbc(aes)", 0, 0);
+ if (IS_ERR(ctx->cipher_aes192_fb)) {
+ ctx->cipher_aes192_fb = NULL;
+ } else {
+ ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+ "hmac(sha1)", 0, 0);
+ if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+ ctx->ahash_aead_aes192_fb = NULL;
+ crypto_free_skcipher(ctx->cipher_aes192_fb);
+ ctx->cipher_aes192_fb = NULL;
+ }
+ }
+ }
+ ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+ return 0;
+}
+
+static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
+{
+ int rc;
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_priv *cp = &qcrypto_dev;
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+ rc = _qcrypto_aead_cra_init(tfm);
+ if (rc)
+ return rc;
+ ctx->cipher_aes192_fb = NULL;
+ ctx->ahash_aead_aes192_fb = NULL;
+ if (!cp->ce_support.aes_key_192) {
+ ctx->cipher_aes192_fb = crypto_alloc_skcipher(
+ "cbc(aes)", 0, 0);
+ if (IS_ERR(ctx->cipher_aes192_fb)) {
+ ctx->cipher_aes192_fb = NULL;
+ } else {
+ ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+ "hmac(sha256)", 0, 0);
+ if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+ ctx->ahash_aead_aes192_fb = NULL;
+ crypto_free_skcipher(ctx->cipher_aes192_fb);
+ ctx->cipher_aes192_fb = NULL;
+ }
+ }
+ }
+ ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+ return 0;
+}
+
+static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto_cra_ablkcipher_exit: requests still outstanding\n");
+}
+
+static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ _qcrypto_cra_ablkcipher_exit(tfm);
+ if (ctx->cipher_aes192_fb)
+ crypto_free_skcipher(ctx->cipher_aes192_fb);
+ ctx->cipher_aes192_fb = NULL;
+}
+
+static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto_cra_aead_exit: requests still outstanding\n");
+}
+
+static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto_cra_aead_aes_exit: requests still outstanding\n");
+ if (ctx->cipher_aes192_fb)
+ crypto_free_skcipher(ctx->cipher_aes192_fb);
+ if (ctx->ahash_aead_aes192_fb)
+ crypto_free_ahash(ctx->ahash_aead_aes192_fb);
+ ctx->cipher_aes192_fb = NULL;
+ ctx->ahash_aead_aes192_fb = NULL;
+}
+
+static int _disp_stats(int id)
+{
+ struct crypto_stat *pstat;
+ int len = 0;
+ unsigned long flags;
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *pe;
+ int i;
+
+ pstat = &_qcrypto_stat;
+ len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+ "\nQTI crypto accelerator %d Statistics\n",
+ id + 1);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER AES encryption : %llu\n",
+ pstat->ablk_cipher_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER AES decryption : %llu\n",
+ pstat->ablk_cipher_aes_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER DES encryption : %llu\n",
+ pstat->ablk_cipher_des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER DES decryption : %llu\n",
+ pstat->ablk_cipher_des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER 3DES encryption : %llu\n",
+ pstat->ablk_cipher_3des_enc);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER 3DES decryption : %llu\n",
+ pstat->ablk_cipher_3des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER operation success : %llu\n",
+ pstat->ablk_cipher_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " ABLK CIPHER operation fail : %llu\n",
+ pstat->ablk_cipher_op_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ "\n");
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-AES encryption : %llu\n",
+ pstat->aead_sha1_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-AES decryption : %llu\n",
+ pstat->aead_sha1_aes_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-DES encryption : %llu\n",
+ pstat->aead_sha1_des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-DES decryption : %llu\n",
+ pstat->aead_sha1_des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-3DES encryption : %llu\n",
+ pstat->aead_sha1_3des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA1-3DES decryption : %llu\n",
+ pstat->aead_sha1_3des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-AES encryption : %llu\n",
+ pstat->aead_sha256_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-AES decryption : %llu\n",
+ pstat->aead_sha256_aes_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-DES encryption : %llu\n",
+ pstat->aead_sha256_des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-DES decryption : %llu\n",
+ pstat->aead_sha256_des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-3DES encryption : %llu\n",
+ pstat->aead_sha256_3des_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD SHA256-3DES decryption : %llu\n",
+ pstat->aead_sha256_3des_dec);
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD CCM-AES encryption : %llu\n",
+ pstat->aead_ccm_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD CCM-AES decryption : %llu\n",
+ pstat->aead_ccm_aes_dec);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD RFC4309-CCM-AES encryption : %llu\n",
+ pstat->aead_rfc4309_ccm_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD RFC4309-CCM-AES decryption : %llu\n",
+ pstat->aead_rfc4309_ccm_aes_dec);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD operation success : %llu\n",
+ pstat->aead_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD operation fail : %llu\n",
+ pstat->aead_op_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD bad message : %llu\n",
+ pstat->aead_bad_msg);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ "\n");
+
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA1 digest : %llu\n",
+ pstat->sha1_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA256 digest : %llu\n",
+ pstat->sha256_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA1 HMAC digest : %llu\n",
+ pstat->sha1_hmac_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH SHA256 HMAC digest : %llu\n",
+ pstat->sha256_hmac_digest);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH operation success : %llu\n",
+ pstat->ahash_op_success);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AHASH operation fail : %llu\n",
+ pstat->ahash_op_fail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n",
+ cp->resp_start, cp->resp_stop,
+ cp->max_resp_qlen, cp->max_reorder_cnt);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " max queue legnth, no avail : %u %u\n",
+ cp->max_qlen, cp->no_avail);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " work queue : %u %u %u\n",
+ cp->queue_work_eng3,
+ cp->queue_work_not_eng3,
+ cp->queue_work_not_eng3_nz);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ "\n");
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req max %d : %llu\n",
+ pe->unit,
+ pe->max_req_used,
+ pe->total_req
+ );
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ " Engine %4d Req Error : %llu\n",
+ pe->unit,
+ pe->err_req
+ );
+ qce_get_driver_stats(pe->qce);
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ for (i = 0; i < MAX_SMP_CPU + 1; i++)
+ if (cp->cpu_req[i])
+ len += scnprintf(
+ _debug_read_buf + len,
+ DEBUG_MAX_RW_BUF - len - 1,
+ "CPU %d Issue Req : %d\n",
+ i, cp->cpu_req[i]);
+ return len;
+}
+
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
+{
+ struct crypto_priv *cp;
+ struct qcrypto_alg *q_alg;
+ struct qcrypto_alg *n;
+ unsigned long flags;
+ struct crypto_engine *pe;
+
+ cp = pengine->pcp;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ list_del(&pengine->elist);
+ if (pengine->first_engine) {
+ cp->first_engine = NULL;
+ /* list_first_entry() never returns NULL; guard against empty list */
+ if (!list_empty(&cp->engine_list)) {
+ pe = list_first_entry(&cp->engine_list,
+ struct crypto_engine, elist);
+ pe->first_engine = true;
+ cp->first_engine = pe;
+ }
+ }
+ if (cp->next_engine == pengine)
+ cp->next_engine = NULL;
+ if (cp->scheduled_eng == pengine)
+ cp->scheduled_eng = NULL;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ cp->total_units--;
+
+ cancel_work_sync(&pengine->bw_reaper_ws);
+ cancel_work_sync(&pengine->bw_allocate_ws);
+ del_timer_sync(&pengine->bw_reaper_timer);
+
+ if (pengine->bus_scale_handle != 0)
+ msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+ pengine->bus_scale_handle = 0;
+
+ kzfree(pengine->preq_pool);
+
+ if (cp->total_units)
+ return;
+
+ list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+ if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+ crypto_unregister_alg(&q_alg->cipher_alg);
+ if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+ crypto_unregister_ahash(&q_alg->sha_alg);
+ if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
+ crypto_unregister_aead(&q_alg->aead_alg);
+ list_del(&q_alg->entry);
+ kzfree(q_alg);
+ }
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+
+ pengine = platform_get_drvdata(pdev);
+
+ if (!pengine)
+ return 0;
+ cp = pengine->pcp;
+ mutex_lock(&cp->engine_lock);
+ _qcrypto_remove_engine(pengine);
+ mutex_unlock(&cp->engine_lock);
+ if (pengine->qce)
+ qce_close(pengine->qce);
+ kzfree(pengine);
+ return 0;
+}
+
+static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
+ struct crypto_priv *cp, unsigned int len)
+{
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ case AES_KEYSIZE_192:
+ if (cp->ce_support.aes_key_192)
+ break;
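+ /* fall through - unsupported 192-bit keys are rejected below */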
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
+ const u8 *key)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->cipher_aes192_fb->base.crt_flags |=
+ (cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb, key,
+ AES_KEYSIZE_192);
+ if (ret) {
+ /* propagate the result flags from the fallback tfm */
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (crypto_skcipher_get_flags(
+ ctx->cipher_aes192_fb) & CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+ return 0;
+
+ if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
+ && ctx->cipher_aes192_fb)
+ return _qcrypto_setkey_aes_192_fallback(cipher, key);
+
+ if (_qcrypto_check_aes_keylen(cipher, cp, len))
+ return -EINVAL;
+
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+ if (key != NULL) {
+ memcpy(ctx->enc_key, key, len);
+ } else {
+ pr_err("%s Inavlid key pointer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+ return 0;
+ if (_qcrypto_check_aes_keylen(cipher, cp, len/2))
+ return -EINVAL;
+
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+ if (key != NULL) {
+ memcpy(ctx->enc_key, key, len);
+ } else {
+ pr_err("%s Inavlid key pointer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (!key) {
+ pr_err("%s Inavlid key pointer\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ pr_err("%s HW KEY usage not supported for DES algorithm\n",
+ __func__);
+ return 0;
+ }
+
+ if (len != DES_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* des_ekey() returns 0 for weak keys; check only once len is valid */
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
+ memcpy(ctx->enc_key, key, len);
+
+ return 0;
+}
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+ pr_err("%s HW KEY usage not supported for 3DES algorithm\n",
+ __func__);
+ return 0;
+ }
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->enc_key_len = len;
+ if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+ if (key != NULL) {
+ memcpy(ctx->enc_key, key, len);
+ } else {
+ pr_err("%s Inavlid key pointer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void seq_response(struct work_struct *work)
+{
+ struct crypto_priv *cp = container_of(work, struct crypto_priv,
+ resp_work);
+ struct llist_node *list;
+ struct llist_node *rev = NULL;
+ struct crypto_engine *pengine;
+ unsigned long flags;
+ int total_unit;
+
+again:
+ list = llist_del_all(&cp->ordered_resp_list);
+
+ if (!list)
+ goto end;
+
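+ /* llist_del_all() returns entries newest-first; reverse them to FIFO */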
+ while (list) {
+ struct llist_node *t = list;
+
+ list = llist_next(list);
+ t->next = rev;
+ rev = t;
+ }
+
+ while (rev) {
+ struct qcrypto_resp_ctx *arsp;
+ struct crypto_async_request *areq;
+
+ arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
+ rev = llist_next(rev);
+
+ areq = arsp->async_req;
+ local_bh_disable();
+ areq->complete(areq, arsp->res);
+ local_bh_enable();
+ atomic_dec(&cp->resp_cnt);
+ }
+
+ if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START &&
+ (cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS)
+ == STOPPED)) {
+ cp->resp_start++;
+ for (total_unit = cp->total_units; total_unit-- > 0;) {
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine = _avail_eng(cp);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (pengine)
+ _start_qcrypto_process(cp, pengine);
+ else
+ break;
+ }
+ }
+end:
+ if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
+ IS_SCHEDULED) == SCHEDULE_AGAIN)
+ goto again;
+ else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+ NOT_SCHEDULED) == SCHEDULE_AGAIN)
+ goto end;
+}
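+
+/*
+ * sched_resp_workq_status is a three-state flag shared with
+ * _qcrypto_tfm_complete(): NOT_SCHEDULED -> IS_SCHEDULED when the
+ * response work is queued, and IS_SCHEDULED -> SCHEDULE_AGAIN when
+ * more completions arrive while seq_response() is still running.
+ * The cmpxchg() sequence above makes seq_response() loop once more
+ * in that case instead of requiring another queue_work_on().
+ */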
+
+#define SCHEDULE_RSP_QLEN_THRESHOLD 64
+
+static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type,
+ void *tfm_ctx,
+ struct qcrypto_resp_ctx *cur_arsp,
+ int res)
+{
+ struct crypto_priv *cp = pengine->pcp;
+ unsigned long flags;
+ struct qcrypto_resp_ctx *arsp;
+ struct list_head *plist;
+ unsigned int resp_qlen;
+ unsigned int cnt = 0;
+
+ switch (type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ default:
+ plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+ break;
+ }
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ cur_arsp->res = res;
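+ /*
+ * Move completed entries from the head of this tfm's response
+ * queue to the global ordered list, stopping at the first entry
+ * still in flight, so completions for a given tfm are delivered
+ * in submission order.
+ */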
+ while (!list_empty(plist)) {
+ arsp = list_first_entry(plist,
+ struct qcrypto_resp_ctx, list);
+ if (arsp->res == -EINPROGRESS)
+ break;
+ list_del(&arsp->list);
+ llist_add(&arsp->llist, &cp->ordered_resp_list);
+ atomic_inc(&cp->resp_cnt);
+ cnt++;
+ }
+ resp_qlen = atomic_read(&cp->resp_cnt);
+ if (resp_qlen > cp->max_resp_qlen)
+ cp->max_resp_qlen = resp_qlen;
+ if (cnt > cp->max_reorder_cnt)
+ cp->max_reorder_cnt = cnt;
+ if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) &&
+ cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS,
+ STOPPED) == IN_PROGRESS) {
+ cp->resp_stop++;
+ }
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+retry:
+ if (!llist_empty(&cp->ordered_resp_list)) {
+ unsigned int cpu;
+
+ if (pengine->first_engine) {
+ cpu = WORK_CPU_UNBOUND;
+ cp->queue_work_eng3++;
+ } else {
+ cp->queue_work_not_eng3++;
+ cpu = cp->cpu_getting_irqs_frm_first_ce;
+ /*
+ * If the source is not the first engine and the first
+ * engine still has outstanding requests, skip scheduling
+ * the work queue in anticipation of more completions
+ * arriving. Once the response queue length exceeds the
+ * threshold, schedule the work queue immediately to
+ * avoid further delay.
+ */
+ if (cp->first_engine && atomic_read(
+ &cp->first_engine->req_count)) {
+ if (resp_qlen < SCHEDULE_RSP_QLEN_THRESHOLD)
+ return;
+ cp->queue_work_not_eng3_nz++;
+ }
+ }
+ if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
+ IS_SCHEDULED) == NOT_SCHEDULED)
+ queue_work_on(cpu, cp->resp_wq, &cp->resp_work);
+ else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+ SCHEDULE_AGAIN) == NOT_SCHEDULED)
+ goto retry;
+ }
+}
+
+static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_engine *pengine;
+ struct crypto_async_request *areq;
+ struct crypto_priv *cp;
+ struct qcrypto_resp_ctx *arsp;
+ u32 type = 0;
+ void *tfm_ctx = NULL;
+ unsigned int cpu;
+ int res;
+
+ pengine = pqcrypto_req_control->pce;
+ cp = pengine->pcp;
+ areq = pqcrypto_req_control->req;
+ arsp = pqcrypto_req_control->arsp;
+ res = pqcrypto_req_control->res;
+ qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+ if (areq) {
+ type = crypto_tfm_alg_type(areq->tfm);
+ tfm_ctx = crypto_tfm_ctx(areq->tfm);
+ }
+ cpu = smp_processor_id();
+ pengine->irq_cpu = cpu;
+ if (pengine->first_engine) {
+ if (cpu != cp->cpu_getting_irqs_frm_first_ce)
+ cp->cpu_getting_irqs_frm_first_ce = cpu;
+ }
+ if (areq)
+ _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res);
+ if (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)
+ _start_qcrypto_process(cp, pengine);
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+ unsigned char *authdata, int ret)
+{
+ struct ahash_request *areq = (struct ahash_request *) cookie;
+ struct crypto_async_request *async_req;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct crypto_stat *pstat;
+ uint32_t diglen = crypto_ahash_digestsize(ahash);
+ uint32_t *auth32 = (uint32_t *)authdata;
+ struct crypto_engine *pengine;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ async_req = &areq->base;
+ pstat = &_qcrypto_stat;
+
+ pengine = rctx->pengine;
+ pqcrypto_req_control = find_req_control_for_areq(pengine,
+ async_req);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("async request not found\n");
+ return;
+ }
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+ areq, ret);
+#endif
+ if (digest) {
+ memcpy(rctx->digest, digest, diglen);
+ if (rctx->last_blk)
+ memcpy(areq->result, digest, diglen);
+ }
+ if (authdata) {
+ rctx->byte_count[0] = auth32[0];
+ rctx->byte_count[1] = auth32[1];
+ rctx->byte_count[2] = auth32[2];
+ rctx->byte_count[3] = auth32[3];
+ }
+ areq->src = rctx->src;
+ areq->nbytes = rctx->nbytes;
+
+ rctx->last_blk = 0;
+ rctx->first_blk = 0;
+
+ if (ret) {
+ pqcrypto_req_control->res = -ENXIO;
+ pstat->ahash_op_fail++;
+ } else {
+ pqcrypto_req_control->res = 0;
+ pstat->ahash_op_success++;
+ }
+ if (cp->ce_support.aligned_only) {
+ areq->src = rctx->orig_src;
+ kfree(rctx->data);
+ }
+ req_done(pqcrypto_req_control);
+}
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+ unsigned char *iv, int ret)
+{
+ struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+ struct crypto_async_request *async_req;
+ struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_engine *pengine;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ async_req = &areq->base;
+ pstat = &_qcrypto_stat;
+ rctx = ablkcipher_request_ctx(areq);
+ pengine = rctx->pengine;
+ pqcrypto_req_control = find_req_control_for_areq(pengine,
+ async_req);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("async request not found\n");
+ return;
+ }
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+ areq, ret);
+#endif
+ if (iv)
+ memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+ if (ret) {
+ pqcrypto_req_control->res = -ENXIO;
+ pstat->ablk_cipher_op_fail++;
+ } else {
+ pqcrypto_req_control->res = 0;
+ pstat->ablk_cipher_op_success++;
+ }
+
+ if (cp->ce_support.aligned_only) {
+ uint32_t num_sg = 0;
+ uint32_t bytes = 0;
+
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+
+ num_sg = qcrypto_count_sg(areq->dst, areq->nbytes);
+ bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+ rctx->data, areq->nbytes);
+ if (bytes != areq->nbytes)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+ areq->nbytes);
+ kzfree(rctx->data);
+ }
+ req_done(pqcrypto_req_control);
+}
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+ unsigned char *iv, int ret)
+{
+ struct aead_request *areq = (struct aead_request *) cookie;
+ struct crypto_async_request *async_req;
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct crypto_stat *pstat;
+ struct crypto_engine *pengine;
+ struct qcrypto_req_control *pqcrypto_req_control;
+
+ async_req = &areq->base;
+ pstat = &_qcrypto_stat;
+ rctx = aead_request_ctx(areq);
+ pengine = rctx->pengine;
+ pqcrypto_req_control = find_req_control_for_areq(pengine,
+ async_req);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("async request not found\n");
+ return;
+ }
+
+ if (rctx->mode == QCE_MODE_CCM) {
+ kzfree(rctx->adata);
+ } else {
+ uint32_t ivsize = crypto_aead_ivsize(aead);
+
+ if (ret == 0) {
+ if (rctx->dir == QCE_ENCRYPT) {
+ /* copy the icv to dst */
+ scatterwalk_map_and_copy(icv, areq->dst,
+ areq->cryptlen + areq->assoclen,
+ ctx->authsize, 1);
+
+ } else {
+ unsigned char tmp[SHA256_DIGESTSIZE] = {0};
+
+ /* compare icv from src */
+ scatterwalk_map_and_copy(tmp,
+ areq->src, areq->assoclen +
+ areq->cryptlen - ctx->authsize,
+ ctx->authsize, 0);
+ ret = memcmp(icv, tmp, ctx->authsize);
+ if (ret != 0)
+ ret = -EBADMSG;
+
+ }
+ } else {
+ ret = -ENXIO;
+ }
+
+ if (iv)
+ memcpy(ctx->iv, iv, ivsize);
+ }
+
+ if (ret == (-EBADMSG))
+ pstat->aead_bad_msg++;
+ else if (ret)
+ pstat->aead_op_fail++;
+ else
+ pstat->aead_op_success++;
+
+ pqcrypto_req_control->res = ret;
+ req_done(pqcrypto_req_control);
+}
+
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
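+
+/*
+ * Example: with msglen = 0x00012345 and csize = 3, the three octets
+ * written at block are 01 23 45; with csize = 8, the first four stay
+ * zero and the last four carry the 32-bit big-endian length. The
+ * message length thus occupies the final csize octets of the CCM B0
+ * block, as RFC 3610 requires.
+ */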
+
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+{
+ struct aead_request *areq = (struct aead_request *) qreq->areq;
+ unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
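+ /* iv[0] carries L' = L - 1, so i is the size L of the length field */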
+
+ memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+ /*
+ * Format control info per RFC 3610 and
+ * NIST Special Publication 800-38C
+ */
+ qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+ if (areq->assoclen)
+ qreq->nonce[0] |= 64;
+
+ if (i > MAX_NONCE)
+ return -EINVAL;
+
+ return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
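+
+/*
+ * Example: for a 16-byte tag (authsize = 16) with associated data
+ * present and iv[0] = 3 (L = 4), the flags octet works out to
+ * 64 + 8 * ((16 - 2) / 2) + 3 = 0x7b, matching the Adata/M'/L'
+ * layout that RFC 3610 defines for the B0 block.
+ */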
+
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+ struct scatterlist *sg, unsigned char *adata)
+{
+ uint32_t len;
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ /*
+ * Add control info for associated data
+ * RFC 3610 and NIST Special Publication 800-38C
+ */
+ if (alen < 65280) {
+ *(__be16 *)adata = cpu_to_be16(alen);
+ len = 2;
+ } else if (alen <= 0xffffffff) {
+ *(__be16 *)adata = cpu_to_be16(0xfffe);
+ *(__be32 *)&adata[2] = cpu_to_be32(alen);
+ len = 6;
+ } else {
+ *(__be16 *)adata = cpu_to_be16(0xffff);
+ *(__be32 *)&adata[6] = cpu_to_be32(alen);
+ len = 10;
+ }
+ adata += len;
+ qreq->assoclen = ALIGN((alen + len), 16);
+
+ num_sg = qcrypto_count_sg(sg, alen);
+ bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
+ if (bytes != alen)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes, alen);
+
+ return 0;
+}
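+
+/*
+ * Example: alen = 24 is encoded as the two octets 00 18 followed by
+ * the 24 data bytes, and qreq->assoclen becomes ALIGN(24 + 2, 16) = 32.
+ * From alen = 65280 (0xff00) upwards, the 0xff 0xfe marker plus a
+ * 32-bit big-endian length is used instead.
+ */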
+
+static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_async_request *async_req;
+ struct qce_req qreq;
+ int ret;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *cipher_ctx;
+ struct ablkcipher_request *req;
+ struct crypto_ablkcipher *tfm;
+
+ async_req = pqcrypto_req_control->req;
+ req = container_of(async_req, struct ablkcipher_request, base);
+ cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->pengine = pengine;
+ tfm = crypto_ablkcipher_reqtfm(req);
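+ /*
+ * Engines that require aligned, contiguous buffers get a bounce
+ * buffer: the source scatterlist is flattened into rctx->data and
+ * both src and dst point at that single-entry sg; the completion
+ * handler copies the result back and restores orig_src/orig_dst.
+ */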
+ if (pengine->pcp->ce_support.aligned_only) {
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ rctx->orig_src = req->src;
+ rctx->orig_dst = req->dst;
+ rctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+ if (rctx->data == NULL)
+ return -ENOMEM;
+ num_sg = qcrypto_count_sg(req->src, req->nbytes);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
+ req->nbytes);
+ if (bytes != req->nbytes)
+ pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+ req->nbytes);
+ sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+ sg_mark_end(&rctx->dsg);
+ rctx->iv = req->info;
+
+ req->src = &rctx->dsg;
+ req->dst = &rctx->dsg;
+ }
+ qreq.op = QCE_REQ_ABLK_CIPHER;
+ qreq.qce_cb = _qce_ablk_cipher_complete;
+ qreq.areq = req;
+ qreq.alg = rctx->alg;
+ qreq.dir = rctx->dir;
+ qreq.mode = rctx->mode;
+ qreq.enckey = cipher_ctx->enc_key;
+ qreq.encklen = cipher_ctx->enc_key_len;
+ qreq.iv = req->info;
+ qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+ qreq.cryptlen = req->nbytes;
+ qreq.use_pmem = 0;
+ qreq.flags = cipher_ctx->flags;
+
+ if ((cipher_ctx->enc_key_len == 0) &&
+ (pengine->pcp->platform_support.hw_key_support == 0))
+ ret = -EINVAL;
+ else
+ ret = qce_ablk_cipher_req(pengine->qce, &qreq);
+
+ return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_async_request *async_req;
+ struct ahash_request *req;
+ struct qce_sha_req sreq;
+ struct qcrypto_sha_req_ctx *rctx;
+ struct qcrypto_sha_ctx *sha_ctx;
+ int ret = 0;
+
+ async_req = pqcrypto_req_control->req;
+ req = container_of(async_req,
+ struct ahash_request, base);
+ rctx = ahash_request_ctx(req);
+ sha_ctx = crypto_tfm_ctx(async_req->tfm);
+ rctx->pengine = pengine;
+
+ sreq.qce_cb = _qce_ahash_complete;
+ sreq.digest = &rctx->digest[0];
+ sreq.src = req->src;
+ sreq.auth_data[0] = rctx->byte_count[0];
+ sreq.auth_data[1] = rctx->byte_count[1];
+ sreq.auth_data[2] = rctx->byte_count[2];
+ sreq.auth_data[3] = rctx->byte_count[3];
+ sreq.first_blk = rctx->first_blk;
+ sreq.last_blk = rctx->last_blk;
+ sreq.size = req->nbytes;
+ sreq.areq = req;
+ sreq.flags = sha_ctx->flags;
+
+ switch (sha_ctx->alg) {
+ case QCE_HASH_SHA1:
+ sreq.alg = QCE_HASH_SHA1;
+ sreq.authkey = NULL;
+ break;
+ case QCE_HASH_SHA256:
+ sreq.alg = QCE_HASH_SHA256;
+ sreq.authkey = NULL;
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ sreq.alg = QCE_HASH_SHA1_HMAC;
+ sreq.authkey = &sha_ctx->authkey[0];
+ sreq.authklen = SHA_HMAC_KEY_SIZE;
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ sreq.alg = QCE_HASH_SHA256_HMAC;
+ sreq.authkey = &sha_ctx->authkey[0];
+ sreq.authklen = SHA_HMAC_KEY_SIZE;
+ break;
+ default:
+ pr_err("Algorithm %d not supported\n", sha_ctx->alg);
+ return -EINVAL;
+ }
+ ret = qce_process_sha_req(pengine->qce, &sreq);
+
+ return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_engine *pengine,
+ struct qcrypto_req_control *pqcrypto_req_control)
+{
+ struct crypto_async_request *async_req;
+ struct qce_req qreq;
+ int ret = 0;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *cipher_ctx;
+ struct aead_request *req;
+ struct crypto_aead *aead;
+
+ async_req = pqcrypto_req_control->req;
+ req = container_of(async_req, struct aead_request, base);
+ aead = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ rctx->pengine = pengine;
+ cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+ qreq.op = QCE_REQ_AEAD;
+ qreq.qce_cb = _qce_aead_complete;
+
+ qreq.areq = req;
+ qreq.alg = rctx->alg;
+ qreq.dir = rctx->dir;
+ qreq.mode = rctx->mode;
+ qreq.iv = rctx->iv;
+
+ qreq.enckey = cipher_ctx->enc_key;
+ qreq.encklen = cipher_ctx->enc_key_len;
+ qreq.authkey = cipher_ctx->auth_key;
+ qreq.authklen = cipher_ctx->auth_key_len;
+ qreq.authsize = crypto_aead_authsize(aead);
+ qreq.auth_alg = cipher_ctx->auth_alg;
+ if (qreq.mode == QCE_MODE_CCM)
+ qreq.ivsize = AES_BLOCK_SIZE;
+ else
+ qreq.ivsize = crypto_aead_ivsize(aead);
+ qreq.flags = cipher_ctx->flags;
+
+ if (qreq.mode == QCE_MODE_CCM) {
+ if (qreq.dir == QCE_ENCRYPT)
+ qreq.cryptlen = req->cryptlen;
+ else
+ qreq.cryptlen = req->cryptlen -
+ qreq.authsize;
+ /* Get NONCE */
+ ret = qccrypto_set_aead_ccm_nonce(&qreq);
+ if (ret)
+ return ret;
+
+ if (req->assoclen) {
+ rctx->adata = kzalloc((req->assoclen + 0x64),
+ GFP_ATOMIC);
+ if (!rctx->adata)
+ return -ENOMEM;
+ /* Format Associated data */
+ ret = qcrypto_aead_ccm_format_adata(&qreq,
+ req->assoclen,
+ req->src,
+ rctx->adata);
+ } else {
+ qreq.assoclen = 0;
+ rctx->adata = NULL;
+ }
+ if (ret) {
+ kzfree(rctx->adata);
+ return ret;
+ }
+
+ /*
+ * update req with new formatted associated
+ * data info
+ */
+ qreq.asg = &rctx->asg;
+ if (rctx->adata)
+ sg_set_buf(qreq.asg, rctx->adata,
+ qreq.assoclen);
+ sg_mark_end(qreq.asg);
+ }
+ ret = qce_aead_req(pengine->qce, &qreq);
+
+ return ret;
+}
+
+static struct crypto_engine *_qcrypto_static_assign_engine(
+ struct crypto_priv *cp)
+{
+ struct crypto_engine *pengine;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (cp->next_engine)
+ pengine = cp->next_engine;
+ else
+ pengine = list_first_entry(&cp->engine_list,
+ struct crypto_engine, elist);
+
+ if (list_is_last(&pengine->elist, &cp->engine_list))
+ cp->next_engine = list_first_entry(
+ &cp->engine_list, struct crypto_engine, elist);
+ else
+ cp->next_engine = list_next_entry(pengine, elist);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return pengine;
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+ struct crypto_engine *pengine)
+{
+ struct crypto_async_request *async_req = NULL;
+ struct crypto_async_request *backlog_eng = NULL;
+ struct crypto_async_request *backlog_cp = NULL;
+ unsigned long flags;
+ u32 type;
+ int ret = 0;
+ struct crypto_stat *pstat;
+ void *tfm_ctx;
+ struct qcrypto_cipher_req_ctx *cipher_rctx;
+ struct qcrypto_sha_req_ctx *ahash_rctx;
+ struct ablkcipher_request *ablkcipher_req;
+ struct ahash_request *ahash_req;
+ struct aead_request *aead_req;
+ struct qcrypto_resp_ctx *arsp;
+ struct qcrypto_req_control *pqcrypto_req_control;
+ unsigned int cpu = MAX_SMP_CPU;
+
+ if (READ_ONCE(cp->ce_req_proc_sts) == STOPPED)
+ return 0;
+
+ /* cpu already defaults to MAX_SMP_CPU for process context */
+ if (in_interrupt()) {
+ cpu = smp_processor_id();
+ if (cpu >= MAX_SMP_CPU)
+ cpu = MAX_SMP_CPU - 1;
+ }
+
+ pstat = &_qcrypto_stat;
+
+again:
+ spin_lock_irqsave(&cp->lock, flags);
+ if (pengine->issue_req ||
+ atomic_read(&pengine->req_count) >= (pengine->max_req)) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+
+ backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+ /* make sure it is in high bandwidth state */
+ if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+
+ /* try to get request from request queue of the engine first */
+ async_req = crypto_dequeue_request(&pengine->req_queue);
+ if (!async_req) {
+ /*
+ * if no request from the engine,
+ * try to get from request queue of driver
+ */
+ backlog_cp = crypto_get_backlog(&cp->req_queue);
+ async_req = crypto_dequeue_request(&cp->req_queue);
+ if (!async_req) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+ }
+ pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
+ if (pqcrypto_req_control == NULL) {
+ pr_err("Allocation of request failed\n");
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+ }
+
+ /* add associated rsp entry to tfm response queue */
+ type = crypto_tfm_alg_type(async_req->tfm);
+ tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+ switch (type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ ahash_req = container_of(async_req,
+ struct ahash_request, base);
+ ahash_rctx = ahash_request_ctx(ahash_req);
+ arsp = &ahash_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_sha_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ ablkcipher_req = container_of(async_req,
+ struct ablkcipher_request, base);
+ cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
+ arsp = &cipher_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_cipher_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ default:
+ aead_req = container_of(async_req,
+ struct aead_request, base);
+ cipher_rctx = aead_request_ctx(aead_req);
+ arsp = &cipher_rctx->rsp_entry;
+ list_add_tail(
+ &arsp->list,
+ &((struct qcrypto_cipher_ctx *)tfm_ctx)
+ ->rsp_queue);
+ break;
+ }
+
+ arsp->res = -EINPROGRESS;
+ arsp->async_req = async_req;
+ pqcrypto_req_control->pce = pengine;
+ pqcrypto_req_control->req = async_req;
+ pqcrypto_req_control->arsp = arsp;
+ pengine->active_seq++;
+ pengine->check_flag = true;
+
+ pengine->issue_req = true;
+ cp->cpu_req[cpu]++;
+ smp_mb(); /* make it visible */
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (backlog_eng)
+ backlog_eng->complete(backlog_eng, -EINPROGRESS);
+ if (backlog_cp)
+ backlog_cp->complete(backlog_cp, -EINPROGRESS);
+ switch (type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ ret = _qcrypto_process_ablkcipher(pengine,
+ pqcrypto_req_control);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pengine->issue_req = false;
+ smp_mb(); /* make it visible */
+
+ pengine->total_req++;
+ if (ret) {
+ pengine->err_req++;
+ qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+ if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+ pstat->ablk_cipher_op_fail++;
+ else if (type == CRYPTO_ALG_TYPE_AHASH)
+ pstat->ahash_op_fail++;
+ else
+ pstat->aead_op_fail++;
+
+ _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret);
+ goto again;
+ }
+ return ret;
+}
+
+static inline struct crypto_engine *_next_eng(struct crypto_priv *cp,
+ struct crypto_engine *p)
+{
+
+ if (p == NULL || list_is_last(&p->elist, &cp->engine_list))
+ p = list_first_entry(&cp->engine_list, struct crypto_engine,
+ elist);
+ else
+ p = list_entry(p->elist.next, struct crypto_engine, elist);
+ return p;
+}
+
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+ /* caller must hold cp->lock */
+ struct crypto_engine *q = NULL;
+ struct crypto_engine *p = cp->scheduled_eng;
+ struct crypto_engine *q1;
+ int eng_cnt = cp->total_units;
+
+ if (unlikely(list_empty(&cp->engine_list))) {
+ pr_err("%s: no valid ce to schedule\n", __func__);
+ return NULL;
+ }
+
+ p = _next_eng(cp, p);
+ q1 = p;
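+ /* visit each engine at most once, starting after the last one scheduled */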
+ while (eng_cnt-- > 0) {
+ if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) {
+ q = p;
+ break;
+ }
+ p = _next_eng(cp, p);
+ if (q1 == p)
+ break;
+ }
+ cp->scheduled_eng = q;
+ return q;
+}
+
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+ struct crypto_engine *pengine,
+ struct crypto_async_request *req)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ if (pengine) {
+ ret = crypto_enqueue_request(&pengine->req_queue, req);
+ } else {
+ ret = crypto_enqueue_request(&cp->req_queue, req);
+ pengine = _avail_eng(cp);
+ if (cp->req_queue.qlen > cp->max_qlen)
+ cp->max_qlen = cp->req_queue.qlen;
+ }
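+ /*
+ * Only an engine that already holds bus bandwidth may be kicked
+ * below; in every other state the request stays queued and is
+ * picked up once bandwidth allocation completes or the engine
+ * resumes.
+ */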
+ if (pengine) {
+ switch (pengine->bw_state) {
+ case BUS_NO_BANDWIDTH:
+ if (!pengine->high_bw_req) {
+ qcrypto_ce_bw_allocate_req(pengine);
+ pengine->high_bw_req = true;
+ }
+ pengine = NULL;
+ break;
+ case BUS_HAS_BANDWIDTH:
+ break;
+ case BUS_BANDWIDTH_RELEASING:
+ pengine->high_bw_req = true;
+ pengine = NULL;
+ break;
+ case BUS_BANDWIDTH_ALLOCATING:
+ pengine = NULL;
+ break;
+ case BUS_SUSPENDED:
+ case BUS_SUSPENDING:
+ default:
+ pengine = NULL;
+ break;
+ }
+ } else {
+ cp->no_avail++;
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (pengine && (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS))
+ _start_qcrypto_process(cp, pengine);
+ return ret;
+}
+
+static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int err;
+
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+ skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int err;
+
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+ skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+}
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->cipher_aes192_fb)
+ return _qcrypto_enc_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->cipher_aes192_fb)
+ return _qcrypto_enc_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->cipher_aes192_fb)
+ return _qcrypto_enc_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CTR;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_XTS;
+
+ pstat->ablk_cipher_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+ return -EINVAL;
+ if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+ (ctx->auth_key_len != AES_KEYSIZE_256))
+ return -EINVAL;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ rctx->iv = req->iv;
+
+ pstat->aead_ccm_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CCM;
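+ /*
+ * Assemble the 16-byte CCM IV per RFC 4309: one L' octet (3, i.e.
+ * a 4-octet length field), the 3-byte salt stored at setkey time,
+ * the 8-byte explicit IV from the request, and zero padding for
+ * the counter portion.
+ */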
+ memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+ rctx->rfc4309_iv[0] = 3; /* L' = L - 1, with L = 4 length octets */
+ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+ memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->rfc4309_iv;
+ pstat->aead_rfc4309_ccm_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->cipher_aes192_fb)
+ return _qcrypto_dec_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->cipher_aes192_fb)
+ return _qcrypto_dec_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+#endif
+
+ if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+ (!cp->ce_support.aes_key_192) &&
+ ctx->cipher_aes192_fb)
+ return _qcrypto_dec_aes_192_fallback(req);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->mode = QCE_MODE_CTR;
+
+ /*
+ * Note: CTR mode has no distinct decrypt operation; the same
+ * keystream is XORed in either direction, so QCE_ENCRYPT is reused.
+ */
+ rctx->dir = QCE_ENCRYPT;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_ECB;
+
+ pstat->ablk_cipher_3des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+
+ pstat->ablk_cipher_3des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+ CRYPTO_ALG_TYPE_ABLKCIPHER);
+ rctx = ablkcipher_request_ctx(req);
+ rctx->aead = 0;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->mode = QCE_MODE_XTS;
+ rctx->dir = QCE_DECRYPT;
+
+ pstat->ablk_cipher_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+ return -EINVAL;
+ if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+ (ctx->auth_key_len != AES_KEYSIZE_256))
+ return -EINVAL;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ rctx->iv = req->iv;
+
+ pstat->aead_ccm_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+ rctx->rfc4309_iv[0] = 3; /* L' = L - 1, with L = 4 length octets */
+ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+ memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->rfc4309_iv;
+ pstat->aead_rfc4309_ccm_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ return 0;
+}
+
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+ return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+ return 0;
+}
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+ struct rtattr *rta = (struct rtattr *)key;
+ struct crypto_authenc_key_param *param;
+ int ret;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < ctx->enc_key_len)
+ goto badkey;
+
+ ctx->auth_key_len = keylen - ctx->enc_key_len;
+ if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+ ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+ goto badkey;
+ memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+ memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+ memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+ if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
+ ctx->ahash_aead_aes192_fb) {
+ crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
+ ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
+ ctx->auth_key, ctx->auth_key_len);
+ if (ret)
+ goto badkey;
+ crypto_skcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
+ ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb,
+ ctx->enc_key, ctx->enc_key_len);
+ if (ret)
+ goto badkey;
+ }
+
+ return 0;
+badkey:
+ ctx->enc_key_len = 0;
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
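+
+/*
+ * The key blob parsed above follows the kernel's authenc() layout: an
+ * rtattr header whose payload is a crypto_authenc_key_param carrying
+ * the big-endian enckeylen, then (after RTA_ALIGN) the authentication
+ * key immediately followed by the encryption key. With enckeylen = 16,
+ * the last 16 bytes are the AES key and the preceding bytes the HMAC key.
+ */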
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ case AES_KEYSIZE_192:
+ if (cp->ce_support.aes_key_192)
+ break;
+ default:
+ ctx->enc_key_len = 0;
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->enc_key_len = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ ctx->auth_key_len = keylen;
+ memcpy(ctx->auth_key, key, keylen);
+
+ return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int key_len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
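+ /* the last 3 bytes of the key blob are the RFC 4309 salt, not key */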
+ if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+ return -EINVAL;
+ key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+ memcpy(ctx->ccm4309_nonce, key + key_len, QCRYPTO_CCM4309_NONCE_LEN);
+ ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+ return ret;
+}
+
+static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
+ int res)
+{
+ struct aead_request *req;
+ struct crypto_async_request *areq;
+
+ req = rctx->aead_req;
+ areq = &req->base;
+ if (rctx->fb_aes_req)
+ skcipher_request_free(rctx->fb_aes_req);
+ if (rctx->fb_hash_req)
+ ahash_request_free(rctx->fb_hash_req);
+ rctx->fb_aes_req = NULL;
+ rctx->fb_hash_req = NULL;
+ kfree(rctx->fb_aes_iv);
+ areq->complete(areq, res);
+}
+
+static void _aead_aes_fb_stage2_ahash_complete(
+ struct crypto_async_request *base, int err)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct aead_request *req;
+ struct qcrypto_cipher_ctx *ctx;
+
+ rctx = base->data;
+ req = rctx->aead_req;
+ ctx = crypto_tfm_ctx(req->base.tfm);
+ /* copy icv */
+ if (err == 0)
+ scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+ rctx->fb_aes_dst,
+ req->cryptlen,
+ ctx->authsize, 1);
+ _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
+{
+ struct ahash_request *ahash_req;
+
+ ahash_req = rctx->fb_hash_req;
+ ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _aead_aes_fb_stage2_ahash_complete, rctx);
+
+ return crypto_ahash_digest(ahash_req);
+}
+
+static void _aead_aes_fb_stage2_decrypt_complete(
+ struct crypto_async_request *base, int err)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+
+ rctx = base->data;
+ _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_decrypt(
+ struct qcrypto_cipher_req_ctx *rctx)
+{
+ struct skcipher_request *aes_req;
+
+ aes_req = rctx->fb_aes_req;
+ skcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _aead_aes_fb_stage2_decrypt_complete, rctx);
+ return crypto_skcipher_decrypt(aes_req);
+}
+
+static void _aead_aes_fb_stage1_ahash_complete(
+ struct crypto_async_request *base, int err)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct aead_request *req;
+ struct qcrypto_cipher_ctx *ctx;
+
+ rctx = base->data;
+ req = rctx->aead_req;
+ ctx = crypto_tfm_ctx(req->base.tfm);
+
+ /* compare icv */
+ if (err == 0) {
+ unsigned char tmp[ctx->authsize];
+
+ scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
+ req->cryptlen - ctx->authsize, ctx->authsize, 0);
+ if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
+ err = -EBADMSG;
+ }
+ if (err)
+ _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+ else {
+ err = _start_aead_aes_fb_stage2_decrypt(rctx);
+ if (err != -EINPROGRESS && err != -EBUSY)
+ _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+ }
+}
+
+static void _aead_aes_fb_stage1_encrypt_complete(
+ struct crypto_async_request *base, int err)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct aead_request *req;
+ struct qcrypto_cipher_ctx *ctx;
+
+ rctx = base->data;
+ req = rctx->aead_req;
+ ctx = crypto_tfm_ctx(req->base.tfm);
+
+ memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+
+ if (err) {
+ _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+ return;
+ }
+
+ err = _start_aead_aes_fb_stage2_hmac(rctx);
+
+ /* copy icv */
+ if (err == 0) {
+ scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+ rctx->fb_aes_dst,
+ req->cryptlen,
+ ctx->authsize, 1);
+ }
+ if (err != -EINPROGRESS && err != -EBUSY)
+ _qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
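+/*
+ * AES-192 AEAD fallback: when the engine lacks 192-bit AES support, the
+ * request is served in software in two stages. Encryption runs cbc(aes)
+ * first and then HMACs assoc data plus ciphertext (encrypt-then-MAC);
+ * decryption verifies the HMAC first and only then decrypts. Each stage
+ * may finish synchronously or asynchronously, so the stage-2 kickoff
+ * appears both inline below and in the stage-1 completion callbacks.
+ */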
+static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
+ bool is_encrypt)
+{
+ int rc = -EINVAL;
+ struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
+ struct skcipher_request *aes_req = NULL;
+ struct ahash_request *ahash_req = NULL;
+ int nbytes;
+ struct scatterlist *src, *dst;
+
+ rctx->fb_aes_iv = NULL;
+ aes_req = skcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL);
+ if (!aes_req)
+ return -ENOMEM;
+ ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
+ if (!ahash_req)
+ goto ret;
+ rctx->fb_aes_req = aes_req;
+ rctx->fb_hash_req = ahash_req;
+ rctx->aead_req = req;
+ /*
+ * assoc and iv sit at the beginning of both the src and dst
+ * scatterlists; fast-forward past them.
+ */
+ src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src,
+ req->assoclen);
+ dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst,
+ req->assoclen);
+
+ nbytes = req->cryptlen;
+ if (!is_encrypt)
+ nbytes -= ctx->authsize;
+ rctx->fb_ahash_length = nbytes + req->assoclen;
+ rctx->fb_aes_src = src;
+ rctx->fb_aes_dst = dst;
+ rctx->fb_aes_cryptlen = nbytes;
+ rctx->ivsize = crypto_aead_ivsize(aead_tfm);
+ rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC);
+ if (!rctx->fb_aes_iv)
+ goto ret;
+ memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize);
+ skcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
+ rctx->fb_aes_dst,
+ rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
+ if (is_encrypt)
+ ahash_request_set_crypt(ahash_req, req->dst,
+ rctx->fb_ahash_digest,
+ rctx->fb_ahash_length);
+ else
+ ahash_request_set_crypt(ahash_req, req->src,
+ rctx->fb_ahash_digest,
+ rctx->fb_ahash_length);
+
+ if (is_encrypt) {
+ skcipher_request_set_callback(aes_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _aead_aes_fb_stage1_encrypt_complete, rctx);
+
+ rc = crypto_skcipher_encrypt(aes_req);
+ if (rc == 0) {
+ memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+ rc = _start_aead_aes_fb_stage2_hmac(rctx);
+ if (rc == 0) {
+ /* copy icv */
+ scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+ dst,
+ req->cryptlen,
+ ctx->authsize, 1);
+ }
+ }
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+ goto ret;
+
+ } else {
+ ahash_request_set_callback(ahash_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _aead_aes_fb_stage1_ahash_complete, rctx);
+
+ rc = crypto_ahash_digest(ahash_req);
+ if (rc == 0) {
+ unsigned char tmp[ctx->authsize];
+
+ /* compare icv */
+ scatterwalk_map_and_copy(tmp,
+ src, req->cryptlen - ctx->authsize,
+ ctx->authsize, 0);
+ if (memcmp(rctx->fb_ahash_digest, tmp,
+ ctx->authsize) != 0)
+ rc = -EBADMSG;
+ else
+ rc = _start_aead_aes_fb_stage2_decrypt(rctx);
+ }
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+ goto ret;
+ }
+ret:
+ if (aes_req)
+ skcipher_request_free(aes_req);
+ if (ahash_req)
+ ahash_request_free(ahash_req);
+ kfree(rctx->fb_aes_iv);
+ return rc;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev,
+ "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+#endif
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+ rctx->aead_req = req;
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_aes_enc++;
+ else
+ pstat->aead_sha256_aes_enc++;
+ if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
+ ctx->ahash_aead_aes192_fb)
+ return _qcrypto_aead_aes_192_fallback(req, true);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+ dev_info(&ctx->pengine->pdev->dev,
+ "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+#endif
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+ rctx->aead_req = req;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_aes_dec++;
+ else
+ pstat->aead_sha256_aes_dec++;
+
+ if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb &&
+ ctx->ahash_aead_aes192_fb)
+ return _qcrypto_aead_aes_192_fallback(req, false);
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_des_enc++;
+ else
+ pstat->aead_sha256_des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_des_dec++;
+ else
+ pstat->aead_sha256_des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_3des_enc++;
+ else
+ pstat->aead_sha256_3des_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_3DES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CBC;
+ rctx->iv = req->iv;
+
+ if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+ pstat->aead_sha1_3des_dec++;
+ else
+ pstat->aead_sha256_3des_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _sha_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ rctx->first_blk = 1;
+ rctx->last_blk = 0;
+ rctx->byte_count[0] = 0;
+ rctx->byte_count[1] = 0;
+ rctx->byte_count[2] = 0;
+ rctx->byte_count[3] = 0;
+ rctx->trailing_buf_len = 0;
+ rctx->count = 0;
+
+ return 0;
+}
+
+static int _sha1_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+
+ _sha_init(req);
+ sha_ctx->alg = QCE_HASH_SHA1;
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ pstat->sha1_digest++;
+ return 0;
+}
+
+static int _sha256_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+
+ _sha_init(req);
+ sha_ctx->alg = QCE_HASH_SHA256;
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ pstat->sha256_digest++;
+ return 0;
+}
+
+static int _sha1_export(struct ahash_request *req, void *out)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+ out_ctx->count = rctx->count;
+ _byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+ memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int _sha1_hmac_export(struct ahash_request *req, void *out)
+{
+ return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
+static int __sha1_import_common(struct ahash_request *req, const void *in,
+ bool hmac)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha1_state *in_ctx = (struct sha1_state *)in;
+ u64 hw_count = in_ctx->count;
+
+ rctx->count = in_ctx->count;
+ memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+ if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+		/*
+		 * For HMAC the hardware pads the block when first_blk is
+		 * set, so the byte count is incremented by 64 after the
+		 * first operation.
+		 */
+ if (hmac)
+ hw_count += HMAC_PADDING;
+ }
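+	/*
+	 * byte_count covers only the complete 64-byte blocks already
+	 * consumed; the partial trailing block is replayed from
+	 * trailing_buf on the next operation.
+	 */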
+ rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
+ rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
+ _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+ rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+ (SHA1_BLOCK_SIZE-1));
+ return 0;
+}
+
+static int _sha1_import(struct ahash_request *req, const void *in)
+{
+ return __sha1_import_common(req, in, false);
+}
+
+static int _sha1_hmac_import(struct ahash_request *req, const void *in)
+{
+ return __sha1_import_common(req, in, true);
+}
+
+static int _sha256_export(struct ahash_request *req, void *out)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+ out_ctx->count = rctx->count;
+ _byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+ memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int _sha256_hmac_export(struct ahash_request *req, void *out)
+{
+ return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request *req, const void *in,
+ bool hmac)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sha256_state *in_ctx = (struct sha256_state *)in;
+ u64 hw_count = in_ctx->count;
+
+ rctx->count = in_ctx->count;
+ memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+ if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+ rctx->first_blk = 1;
+ } else {
+ rctx->first_blk = 0;
+		/*
+		 * For HMAC the hardware pads the block when first_blk is
+		 * set, so the byte count is incremented by 64 after the
+		 * first operation.
+		 */
+ if (hmac)
+ hw_count += HMAC_PADDING;
+ }
+
+ rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0);
+ rctx->byte_count[1] = (uint32_t)(hw_count >> 32);
+ _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+ rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+ (SHA256_BLOCK_SIZE-1));
+
+ return 0;
+}
+
+static int _sha256_import(struct ahash_request *req, const void *in)
+{
+ return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request *req, const void *in)
+{
+ return __sha256_import_common(req, in, true);
+}
+
+static int _copy_source(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *srctx = NULL;
+ uint32_t bytes = 0;
+ uint32_t num_sg = 0;
+
+ srctx = ahash_request_ctx(req);
+ srctx->orig_src = req->src;
+ srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+ if (srctx->data == NULL) {
+		pr_err("Mem alloc failed for srctx->data, size 0x%x\n",
+				(req->nbytes + 64));
+ return -ENOMEM;
+ }
+
+ num_sg = qcrypto_count_sg(req->src, req->nbytes);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
+ req->nbytes);
+ if (bytes != req->nbytes)
+		pr_warn("bytes copied=0x%x, bytes to copy=0x%x\n",
+				bytes, req->nbytes);
+ sg_set_buf(&srctx->dsg, srctx->data,
+ req->nbytes);
+ sg_mark_end(&srctx->dsg);
+ req->src = &srctx->dsg;
+
+ return 0;
+}
+
+static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ uint32_t total, len, num_sg;
+ struct scatterlist *sg_last;
+ uint8_t *k_src = NULL;
+ uint32_t sha_pad_len = 0;
+ uint32_t trailing_buf_len = 0;
+ uint32_t nbytes;
+ uint32_t offset = 0;
+ uint32_t bytes = 0;
+ uint8_t *staging;
+ int ret = 0;
+
+ /* check for trailing buffer from previous updates and append it */
+ total = req->nbytes + rctx->trailing_buf_len;
+ len = req->nbytes;
+
+ if (total <= sha_block_size) {
+ k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
+ num_sg = qcrypto_count_sg(req->src, len);
+ bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
+
+ rctx->trailing_buf_len = total;
+ return 0;
+ }
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ k_src = &rctx->trailing_buf[0];
+ /* get new trailing buffer */
+ sha_pad_len = ALIGN(total, sha_block_size) - total;
+ trailing_buf_len = sha_block_size - sha_pad_len;
+ offset = req->nbytes - trailing_buf_len;
+
+ if (offset != req->nbytes)
+ scatterwalk_map_and_copy(k_src, req->src, offset,
+ trailing_buf_len, 0);
+
+ nbytes = total - trailing_buf_len;
+ num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+ len = rctx->trailing_buf_len;
+ sg_last = req->src;
+
+ while (len < nbytes) {
+ if ((len + sg_last->length) > nbytes)
+ break;
+ len += sg_last->length;
+ sg_last = sg_next(sg_last);
+ }
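+	/*
+	 * Prepend the bytes carried over from the previous update: merge
+	 * everything into one contiguous buffer for aligned-only engines,
+	 * otherwise chain a two-entry scatterlist in front of req->src.
+	 */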
+ if (rctx->trailing_buf_len) {
+ if (cp->ce_support.aligned_only) {
+ rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+ if (rctx->data2 == NULL) {
+				pr_err("Mem alloc failed for rctx->data2, size 0x%x\n",
+						(req->nbytes + 64));
+ return -ENOMEM;
+ }
+ memcpy(rctx->data2, staging,
+ rctx->trailing_buf_len);
+ memcpy((rctx->data2 + rctx->trailing_buf_len),
+ rctx->data, req->src->length);
+ kzfree(rctx->data);
+ rctx->data = rctx->data2;
+ sg_set_buf(&rctx->sg[0], rctx->data,
+ (rctx->trailing_buf_len +
+ req->src->length));
+ req->src = rctx->sg;
+ sg_mark_end(&rctx->sg[0]);
+ } else {
+ sg_mark_end(sg_last);
+ memset(rctx->sg, 0, sizeof(rctx->sg));
+ sg_set_buf(&rctx->sg[0], staging,
+ rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[1]);
+ sg_chain(rctx->sg, 2, req->src);
+ req->src = rctx->sg;
+ }
+ } else
+ sg_mark_end(sg_last);
+
+ req->nbytes = nbytes;
+ rctx->trailing_buf_len = trailing_buf_len;
+
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+ return ret;
+}
+
+static int _sha1_update(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+ rctx->count += req->nbytes;
+ return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+
+ rctx->count += req->nbytes;
+ return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ int ret = 0;
+ uint8_t *staging;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+
+ rctx->last_blk = 1;
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[0]);
+
+ req->src = &rctx->sg[0];
+ req->nbytes = rctx->trailing_buf_len;
+
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+ return ret;
+}
+
+static int _sha1_final(struct ahash_request *req)
+{
+ return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request *req)
+{
+ return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+
+ if (cp->ce_support.aligned_only) {
+ if (_copy_source(req))
+ return -ENOMEM;
+ }
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+ rctx->first_blk = 1;
+ rctx->last_blk = 1;
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+ return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+ _sha1_init(req);
+ return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+ _sha256_init(req);
+ return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+ struct crypto_async_request *req, int err)
+{
+ struct completion *ahash_req_complete = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+ complete(ahash_req_complete);
+}
+
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ uint8_t *in_buf;
+ int ret = 0;
+ struct scatterlist sg;
+ struct ahash_request *ahash_req;
+ struct completion ahash_req_complete;
+
+ ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (ahash_req == NULL)
+ return -ENOMEM;
+ init_completion(&ahash_req_complete);
+ ahash_request_set_callback(ahash_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ _crypto_sha_hmac_ahash_req_complete,
+ &ahash_req_complete);
+ crypto_ahash_clear_flags(tfm, ~0);
+
+ in_buf = kzalloc(len + 64, GFP_KERNEL);
+ if (in_buf == NULL) {
+ ahash_request_free(ahash_req);
+ return -ENOMEM;
+ }
+ memcpy(in_buf, key, len);
+ sg_set_buf(&sg, in_buf, len);
+ sg_mark_end(&sg);
+
+ ahash_request_set_crypt(ahash_req, &sg,
+ &sha_ctx->authkey[0], len);
+
+ if (sha_ctx->alg == QCE_HASH_SHA1)
+ ret = _sha1_digest(ahash_req);
+ else
+ ret = _sha256_digest(ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+				&ahash_req_complete);
+		/* Reinit the local completion that was actually waited on. */
+		reinit_completion(&ahash_req_complete);
+	}
+
+ kzfree(in_buf);
+ ahash_request_free(ahash_req);
+
+ return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
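+	/*
+	 * Per RFC 2104, keys longer than the block size are first hashed
+	 * down to a digest; shorter keys are zero padded.
+	 */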
+ memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+ if (len <= SHA1_BLOCK_SIZE) {
+ memcpy(&sha_ctx->authkey[0], key, len);
+ sha_ctx->authkey_in_len = len;
+ } else {
+ sha_ctx->alg = QCE_HASH_SHA1;
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ _sha_hmac_setkey(tfm, key, len);
+ sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
+ }
+ return 0;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+ memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+ if (len <= SHA256_BLOCK_SIZE) {
+ memcpy(&sha_ctx->authkey[0], key, len);
+ sha_ctx->authkey_in_len = len;
+ } else {
+ sha_ctx->alg = QCE_HASH_SHA256;
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ _sha_hmac_setkey(tfm, key, len);
+ sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
+ }
+
+ return 0;
+}
+
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+ uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ int i;
+
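+	/* Seed the inner hash with (key XOR ipad 0x36), per RFC 2104. */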
+ for (i = 0; i < sha_block_size; i++)
+ rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+ rctx->trailing_buf_len = sha_block_size;
+
+ return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct crypto_stat *pstat;
+ int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha1_hmac_digest++;
+
+ _sha_init(req);
+ memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+
+ if (cp->ce_support.sha_hmac)
+ sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+ else {
+ sha_ctx->alg = QCE_HASH_SHA1;
+ ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ struct crypto_stat *pstat;
+ int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha256_hmac_digest++;
+
+ _sha_init(req);
+
+ memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+
+ if (cp->ce_support.sha_hmac)
+ sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+ else {
+ sha_ctx->alg = QCE_HASH_SHA256;
+ ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+ }
+
+ return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+ return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+ return _sha256_update(req);
+}
+
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+ uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int i;
+ uint8_t *staging;
+ uint8_t *p;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ p = staging;
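+	/* Outer hash input: (key XOR opad 0x5c) || inner digest (RFC 2104). */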
+ for (i = 0; i < sha_block_size; i++)
+ *p++ = sha_ctx->authkey[i] ^ 0x5c;
+ memcpy(p, &rctx->digest[0], sha_digest_size);
+ sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+ sha_digest_size);
+ sg_mark_end(&rctx->sg[0]);
+
+ /* save the original req structure fields*/
+ rctx->src = req->src;
+ rctx->nbytes = req->nbytes;
+
+ req->src = &rctx->sg[0];
+ req->nbytes = sha_block_size + sha_digest_size;
+
+ _sha_init(req);
+ if (sha_ctx->alg == QCE_HASH_SHA1) {
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ } else {
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ }
+
+ rctx->last_blk = 1;
+ return _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+ uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ahash_request *areq = sha_ctx->ahash_req;
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+ uint8_t *staging;
+
+ staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+ L1_CACHE_BYTES);
+ memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+ sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+ sg_mark_end(&rctx->sg[0]);
+
+ ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+ rctx->trailing_buf_len);
+ rctx->last_blk = 1;
+ ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+				&sha_ctx->ahash_req_complete);
+		reinit_completion(&sha_ctx->ahash_req_complete);
+	}
+
+ return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+
+ if (cp->ce_support.sha_hmac)
+ return _sha_final(req, SHA1_BLOCK_SIZE);
+ ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+ if (ret)
+ return ret;
+ return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+ int ret = 0;
+
+ if (cp->ce_support.sha_hmac)
+ return _sha_final(req, SHA256_BLOCK_SIZE);
+
+ ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+ if (ret)
+ return ret;
+
+ return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+}
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha1_hmac_digest++;
+
+ _sha_init(req);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+ SHA1_DIGEST_SIZE);
+ sha_ctx->diglen = SHA1_DIGEST_SIZE;
+ sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+ return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_stat *pstat;
+ struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+ pstat = &_qcrypto_stat;
+ pstat->sha256_hmac_digest++;
+
+ _sha_init(req);
+ memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+ SHA256_DIGEST_SIZE);
+ sha_ctx->diglen = SHA256_DIGEST_SIZE;
+ sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+ return _sha_digest(req);
+}
+
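+/*
+ * When the platform prefers a software implementation for an algorithm,
+ * the HW alg is still registered but under a "qcom-" prefixed cra_name,
+ * so it does not shadow the generic implementation on lookup.
+ */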
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+ char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+
+ if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
+ return -EINVAL;
+ strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+ strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+ return 0;
+}
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device(cp, dev);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device);
+
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+ u32 hw_inst)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
+
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device(cp, dev);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_device);
+
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
+{
+ struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_engine *pengine = NULL;
+
+ pengine = _qrypto_find_pengine_device(cp, dev);
+ if (pengine == NULL)
+ return -ENODEV;
+ ctx->pengine = pengine;
+
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_device);
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+ (cp->platform_support.hw_key_support == false)) {
+ pr_err("%s HW key usage not supported\n", __func__);
+ return -EINVAL;
+ }
+ if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+ QCRYPTO_CTX_KEY_MASK) {
+ pr_err("%s Cannot set all key flags\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_flag);
+
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+ (cp->platform_support.hw_key_support == false)) {
+ pr_err("%s HW key usage not supported\n", __func__);
+ return -EINVAL;
+ }
+ if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+ QCRYPTO_CTX_KEY_MASK) {
+ pr_err("%s Cannot set all key flags\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_flag);
+
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
+{
+ struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+
+ if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+ (cp->platform_support.hw_key_support == false)) {
+ pr_err("%s HW key usage not supported\n", __func__);
+ return -EINVAL;
+ }
+ if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+ QCRYPTO_CTX_KEY_MASK) {
+ pr_err("%s Cannot set all key flags\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_flag);
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+ unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->flags &= ~flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
+
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->flags &= ~flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_clear_flag);
+
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
+{
+ struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->flags &= ~flags;
+ return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
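+/*
+ * Illustrative sketch, not part of the driver: a client pinning its
+ * requests to device 0 and opting in to the HW key, assuming a device
+ * with id 0 was probed:
+ *
+ *	if (!qcrypto_cipher_set_device(req, 0))
+ *		qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_HW_KEY);
+ */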
+
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+ {
+ .init = _sha1_init,
+ .update = _sha1_update,
+ .final = _sha1_final,
+ .export = _sha1_export,
+ .import = _sha1_import,
+ .digest = _sha1_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "qcrypto-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+ {
+ .init = _sha256_init,
+ .update = _sha256_update,
+ .final = _sha256_final,
+ .export = _sha256_export,
+ .import = _sha256_import,
+ .digest = _sha256_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "qcrypto-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+ {
+ .init = _sha1_hmac_init,
+ .update = _sha1_hmac_update,
+ .final = _sha1_hmac_final,
+ .export = _sha1_hmac_export,
+ .import = _sha1_hmac_import,
+ .digest = _sha1_hmac_digest,
+ .setkey = _sha1_hmac_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "qcrypto-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_hmac_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+ {
+ .init = _sha256_hmac_init,
+ .update = _sha256_hmac_update,
+ .final = _sha256_hmac_final,
+ .export = _sha256_hmac_export,
+ .import = _sha256_hmac_import,
+ .digest = _sha256_hmac_digest,
+ .setkey = _sha256_hmac_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "qcrypto-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct qcrypto_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_ahash_hmac_cra_init,
+ .cra_exit = _qcrypto_ahash_cra_exit,
+ },
+ },
+ },
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "qcrypto-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aes_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes,
+ .encrypt = _qcrypto_enc_aes_ecb,
+ .decrypt = _qcrypto_dec_aes_ecb,
+ },
+ },
+ },
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "qcrypto-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aes_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes,
+ .encrypt = _qcrypto_enc_aes_cbc,
+ .decrypt = _qcrypto_dec_aes_cbc,
+ },
+ },
+ },
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "qcrypto-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aes_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_aes_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes,
+ .encrypt = _qcrypto_enc_aes_ctr,
+ .decrypt = _qcrypto_dec_aes_ctr,
+ },
+ },
+ },
+ {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "qcrypto-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = _qcrypto_setkey_des,
+ .encrypt = _qcrypto_enc_des_ecb,
+ .decrypt = _qcrypto_dec_des_ecb,
+ },
+ },
+ },
+ {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "qcrypto-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = _qcrypto_setkey_des,
+ .encrypt = _qcrypto_enc_des_cbc,
+ .decrypt = _qcrypto_dec_des_cbc,
+ },
+ },
+ },
+ {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "qcrypto-ecb-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = _qcrypto_setkey_3des,
+ .encrypt = _qcrypto_enc_3des_ecb,
+ .decrypt = _qcrypto_dec_3des_ecb,
+ },
+ },
+ },
+ {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "qcrypto-cbc-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = _qcrypto_setkey_3des,
+ .encrypt = _qcrypto_enc_3des_cbc,
+ .decrypt = _qcrypto_dec_3des_cbc,
+ },
+ },
+ },
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "qcrypto-xts-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_ablkcipher_init,
+ .cra_exit = _qcrypto_cra_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = _qcrypto_setkey_aes_xts,
+ .encrypt = _qcrypto_enc_aes_xts,
+ .decrypt = _qcrypto_dec_aes_xts,
+ },
+ },
+};
+
+static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
+ {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_cbc,
+ .decrypt = _qcrypto_aead_decrypt_aes_cbc,
+ .init = _qcrypto_cra_aead_aes_sha1_init,
+ .exit = _qcrypto_cra_aead_aes_exit,
+ },
+ {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_des_cbc,
+ .init = _qcrypto_cra_aead_sha1_init,
+ .exit = _qcrypto_cra_aead_exit,
+ },
+ {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_3des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_3des_cbc,
+ .init = _qcrypto_cra_aead_sha1_init,
+ .exit = _qcrypto_cra_aead_exit,
+ },
+};
+
+static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
+ {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_cbc,
+ .decrypt = _qcrypto_aead_decrypt_aes_cbc,
+ .init = _qcrypto_cra_aead_aes_sha256_init,
+ .exit = _qcrypto_cra_aead_aes_exit,
+ },
+
+ {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_des_cbc,
+ .init = _qcrypto_cra_aead_sha256_init,
+ .exit = _qcrypto_cra_aead_exit,
+ },
+ {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = _qcrypto_aead_setkey,
+ .setauthsize = _qcrypto_aead_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_3des_cbc,
+ .decrypt = _qcrypto_aead_decrypt_3des_cbc,
+ .init = _qcrypto_cra_aead_sha256_init,
+ .exit = _qcrypto_cra_aead_exit,
+ },
+};
+
+static struct aead_alg _qcrypto_aead_ccm_algo = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "qcrypto-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = _qcrypto_aead_ccm_setkey,
+ .setauthsize = _qcrypto_aead_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_encrypt_aes_ccm,
+ .decrypt = _qcrypto_aead_decrypt_aes_ccm,
+ .init = _qcrypto_cra_aead_ccm_init,
+ .exit = _qcrypto_cra_aead_exit,
+};
+
+static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+ .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+ .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+ .init = _qcrypto_cra_aead_rfc4309_ccm_init,
+ .exit = _qcrypto_cra_aead_exit,
+};
+
+static int _qcrypto_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ void *handle;
+ struct crypto_priv *cp = &qcrypto_dev;
+ int i;
+ struct msm_ce_hw_support *platform_support;
+ struct crypto_engine *pengine;
+ unsigned long flags;
+ struct qcrypto_req_control *pqcrypto_req_control = NULL;
+
+ pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+ if (!pengine)
+ return -ENOMEM;
+
+ /* open qce */
+ handle = qce_open(pdev, &rc);
+ if (handle == NULL) {
+ kzfree(pengine);
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, pengine);
+ pengine->qce = handle;
+ pengine->pcp = cp;
+ pengine->pdev = pdev;
+ pengine->signature = 0xdeadbeef;
+
+ init_timer(&(pengine->bw_reaper_timer));
+ INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+ pengine->bw_reaper_timer.function =
+ qcrypto_bw_reaper_timer_callback;
+ INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
+ pengine->high_bw_req = false;
+ pengine->active_seq = 0;
+ pengine->last_active_seq = 0;
+ pengine->check_flag = false;
+ pengine->max_req_used = 0;
+ pengine->issue_req = false;
+
+ crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+
+ mutex_lock(&cp->engine_lock);
+ cp->total_units++;
+ pengine->unit = cp->total_units;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->first_engine = list_empty(&cp->engine_list);
+ if (pengine->first_engine)
+ cp->first_engine = pengine;
+ list_add_tail(&pengine->elist, &cp->engine_list);
+ cp->next_engine = pengine;
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ qce_hw_support(pengine->qce, &cp->ce_support);
+ pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
+ pengine->max_req = cp->ce_support.max_request;
+	pqcrypto_req_control = kcalloc(pengine->max_req,
+			sizeof(struct qcrypto_req_control), GFP_KERNEL);
+ if (pqcrypto_req_control == NULL) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ qcrypto_init_req_control(pengine, pqcrypto_req_control);
+ if (cp->ce_support.bam) {
+ cp->platform_support.ce_shared = cp->ce_support.is_shared;
+ cp->platform_support.shared_ce_resource = 0;
+ cp->platform_support.hw_key_support = cp->ce_support.hw_key;
+ cp->platform_support.sha_hmac = 1;
+
+ cp->platform_support.bus_scale_table =
+ (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (!cp->platform_support.bus_scale_table)
+ pr_warn("bus_scale_table is NULL\n");
+
+ pengine->ce_device = cp->ce_support.ce_device;
+
+ } else {
+ platform_support =
+ (struct msm_ce_hw_support *)pdev->dev.platform_data;
+ cp->platform_support.ce_shared = platform_support->ce_shared;
+ cp->platform_support.shared_ce_resource =
+ platform_support->shared_ce_resource;
+ cp->platform_support.hw_key_support =
+ platform_support->hw_key_support;
+ cp->platform_support.bus_scale_table =
+ platform_support->bus_scale_table;
+ cp->platform_support.sha_hmac = platform_support->sha_hmac;
+ }
+
+ pengine->bus_scale_handle = 0;
+
+ if (cp->platform_support.bus_scale_table != NULL) {
+ pengine->bus_scale_handle =
+ msm_bus_scale_register_client(
+ (struct msm_bus_scale_pdata *)
+ cp->platform_support.bus_scale_table);
+ if (!pengine->bus_scale_handle) {
+ pr_err("%s not able to get bus scale\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err;
+ }
+ pengine->bw_state = BUS_NO_BANDWIDTH;
+ } else {
+ pengine->bw_state = BUS_HAS_BANDWIDTH;
+ }
+
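+	/*
+	 * Algorithms are registered once, by the first engine probed;
+	 * subsequent engines only join the engine list above.
+	 */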
+ if (cp->total_units != 1) {
+ mutex_unlock(&cp->engine_lock);
+ return 0;
+ }
+
+ /* register crypto cipher algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_ablk_cipher_algos[i]);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+
+ /* register crypto cipher algorithms the device supports */
+ if (cp->ce_support.aes_xts) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_ablk_cipher_xts_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aes_xts_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
+ }
+
+ /*
+ * Register crypto hash (sha1 and sha256) algorithms the
+ * device supports
+ */
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+ struct qcrypto_alg *q_alg = NULL;
+
+ q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_ahash_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->sha_alg.halg.base.cra_name,
+ strlen(q_alg->sha_alg.halg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->sha_alg.halg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_ahash(&q_alg->sha_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ }
+ }
+
+ /* register crypto aead (hmac-sha1) algorithms the device supports */
+ if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
+ || cp->ce_support.sha_hmac) {
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+ i++) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_aead_alg_alloc(cp,
+ &_qcrypto_aead_sha1_hmac_algos[i]);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aead_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->aead_alg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_aead(&q_alg->aead_alg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s alg registration failed\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ }
+ }
+ }
+
+ /* register crypto aead (hmac-sha256) algorithms the device supports */
+ if (cp->ce_support.sha_hmac) {
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
+ i++) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_aead_alg_alloc(cp,
+ &_qcrypto_aead_sha256_hmac_algos[i]);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aead_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->aead_alg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_aead(&q_alg->aead_alg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s alg registration failed\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ }
+ }
+ }
+
+ if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+ /* register crypto hmac algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+ struct qcrypto_alg *q_alg = NULL;
+
+ q_alg = _qcrypto_sha_alg_alloc(cp,
+ &_qcrypto_sha_hmac_algos[i]);
+
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_hmac_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->sha_alg.halg.base.cra_name,
+ strlen(
+ q_alg->sha_alg.halg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->sha_alg.halg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_ahash(&q_alg->sha_alg);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s alg registration failed\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->sha_alg.halg.base.cra_driver_name);
+ }
+ }
+ }
+ /*
+ * Register crypto cipher (aes-ccm) algorithms the
+ * device supports
+ */
+ if (cp->ce_support.aes_ccm) {
+ struct qcrypto_alg *q_alg;
+
+ q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+ if (cp->ce_support.use_sw_aes_ccm_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->aead_alg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_aead(&q_alg->aead_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ kzfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ }
+
+ q_alg = _qcrypto_aead_alg_alloc(cp,
+ &_qcrypto_aead_rfc4309_ccm_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+
+ if (cp->ce_support.use_sw_aes_ccm_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->aead_alg.base.cra_name,
+ strlen(q_alg->aead_alg.base.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->aead_alg.base.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_aead(&q_alg->aead_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->aead_alg.base.cra_driver_name);
+ }
+ }
+ mutex_unlock(&cp->engine_lock);
+
+ return 0;
+err:
+ _qcrypto_remove_engine(pengine);
+ mutex_unlock(&cp->engine_lock);
+ if (pengine->qce)
+ qce_close(pengine->qce);
+ kzfree(pengine);
+ return rc;
+}
+
+static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
+{
+ struct crypto_priv *cp = pengine->pcp;
+
+ if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
+ || cp->req_queue.qlen)
+ return 1;
+ return 0;
+}
+
+static void _qcrypto_do_suspending(struct crypto_engine *pengine)
+{
+ struct crypto_priv *cp = pengine->pcp;
+
+ if (cp->platform_support.bus_scale_table == NULL)
+ return;
+ del_timer_sync(&pengine->bw_reaper_timer);
+ qcrypto_ce_set_bus(pengine, false);
+}
+
+static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret = 0;
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+ unsigned long flags;
+
+ pengine = platform_get_drvdata(pdev);
+ if (!pengine)
+ return -EINVAL;
+
+ /*
+ * Check if this platform supports clock management in suspend/resume
+ * If not, just simply return 0.
+ */
+ cp = pengine->pcp;
+ if (!cp->ce_support.clk_mgmt_sus_res)
+ return 0;
+ spin_lock_irqsave(&cp->lock, flags);
+ switch (pengine->bw_state) {
+ case BUS_NO_BANDWIDTH:
+ if (pengine->high_bw_req == false)
+ pengine->bw_state = BUS_SUSPENDED;
+ else
+ ret = -EBUSY;
+ break;
+ case BUS_HAS_BANDWIDTH:
+ if (_qcrypto_engine_in_use(pengine)) {
+ ret = -EBUSY;
+ } else {
+ pengine->bw_state = BUS_SUSPENDING;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ _qcrypto_do_suspending(pengine);
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_SUSPENDED;
+ }
+ break;
+ case BUS_BANDWIDTH_RELEASING:
+ case BUS_BANDWIDTH_ALLOCATING:
+ case BUS_SUSPENDED:
+ case BUS_SUSPENDING:
+ default:
+ ret = -EBUSY;
+ break;
+ }
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (ret)
+ return ret;
+ if (qce_pm_table.suspend)
+ qce_pm_table.suspend(pengine->qce);
+ return 0;
+}
+
+static int _qcrypto_resume(struct platform_device *pdev)
+{
+ struct crypto_engine *pengine;
+ struct crypto_priv *cp;
+ unsigned long flags;
+ int ret = 0;
+
+ pengine = platform_get_drvdata(pdev);
+
+ if (!pengine)
+ return -EINVAL;
+ cp = pengine->pcp;
+ if (!cp->ce_support.clk_mgmt_sus_res)
+ return 0;
+ spin_lock_irqsave(&cp->lock, flags);
+ if (pengine->bw_state == BUS_SUSPENDED) {
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (qce_pm_table.resume)
+ qce_pm_table.resume(pengine->qce);
+
+ spin_lock_irqsave(&cp->lock, flags);
+ pengine->bw_state = BUS_NO_BANDWIDTH;
+ pengine->active_seq++;
+ pengine->check_flag = false;
+ if (cp->req_queue.qlen || pengine->req_queue.qlen) {
+ if (pengine->high_bw_req == false) {
+ qcrypto_ce_bw_allocate_req(pengine);
+ pengine->high_bw_req = true;
+ }
+ }
+ } else
+ ret = -EBUSY;
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return ret;
+}
+
+static const struct of_device_id qcrypto_match[] = {
+ { .compatible = "qcom,qcrypto",
+ },
+ {}
+};
+
+static struct platform_driver __qcrypto = {
+ .probe = _qcrypto_probe,
+ .remove = _qcrypto_remove,
+ .suspend = _qcrypto_suspend,
+ .resume = _qcrypto_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qcrypto",
+ .of_match_table = qcrypto_match,
+ },
+};
+
+static int _debug_qcrypto;
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int rc = -EINVAL;
+ int qcrypto = *((int *) file->private_data);
+ int len;
+
+ len = _disp_stats(qcrypto);
+
+ if (len <= count)
+ rc = simple_read_from_buffer((void __user *) buf, len,
+ ppos, (void *) _debug_read_buf, len);
+ return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long flags;
+ struct crypto_priv *cp = &qcrypto_dev;
+ struct crypto_engine *pe;
+ int i;
+
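+	/* Any write to the stats file resets all driver counters. */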
+ memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+ spin_lock_irqsave(&cp->lock, flags);
+ list_for_each_entry(pe, &cp->engine_list, elist) {
+ pe->total_req = 0;
+ pe->err_req = 0;
+ qce_clear_driver_stats(pe->qce);
+ pe->max_req_used = 0;
+ }
+ cp->max_qlen = 0;
+ cp->resp_start = 0;
+ cp->resp_stop = 0;
+ cp->no_avail = 0;
+ cp->max_resp_qlen = 0;
+ cp->queue_work_eng3 = 0;
+ cp->queue_work_not_eng3 = 0;
+ cp->queue_work_not_eng3_nz = 0;
+ cp->max_reorder_cnt = 0;
+ for (i = 0; i < MAX_SMP_CPU + 1; i++)
+ cp->cpu_req[i] = 0;
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+ .open = _debug_stats_open,
+ .read = _debug_stats_read,
+ .write = _debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+ int rc;
+ char name[DEBUG_MAX_FNAME];
+ struct dentry *dent;
+
+ _debug_dent = debugfs_create_dir("qcrypto", NULL);
+ if (IS_ERR(_debug_dent)) {
+ pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(_debug_dent));
+ return PTR_ERR(_debug_dent);
+ }
+
+ snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+ _debug_qcrypto = 0;
+ dent = debugfs_create_file(name, 0644, _debug_dent,
+ &_debug_qcrypto, &_debug_stats_ops);
+ if (dent == NULL) {
+ /*
+ * debugfs_create_file() returns NULL on failure here, so
+ * PTR_ERR() would yield 0; use a real error code instead.
+ */
+ pr_err("qcrypto debugfs_create_file failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ return 0;
+err:
+ debugfs_remove_recursive(_debug_dent);
+ return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+ int rc;
+ struct crypto_priv *pcp = &qcrypto_dev;
+
+ rc = _qcrypto_debug_init();
+ if (rc)
+ return rc;
+ INIT_LIST_HEAD(&pcp->alg_list);
+ INIT_LIST_HEAD(&pcp->engine_list);
+ init_llist_head(&pcp->ordered_resp_list);
+ spin_lock_init(&pcp->lock);
+ mutex_init(&pcp->engine_lock);
+ pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+ if (!pcp->resp_wq) {
+ pr_err("Error allocating workqueue\n");
+ debugfs_remove_recursive(_debug_dent);
+ return -ENOMEM;
+ }
+ INIT_WORK(&pcp->resp_work, seq_response);
+ pcp->total_units = 0;
+ pcp->platform_support.bus_scale_table = NULL;
+ pcp->next_engine = NULL;
+ pcp->scheduled_eng = NULL;
+ pcp->ce_req_proc_sts = IN_PROGRESS;
+ crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+ return platform_driver_register(&__qcrypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+ pr_debug("%s Unregister QCRYPTO\n", __func__);
+ debugfs_remove_recursive(_debug_dent);
+ platform_driver_unregister(&__qcrypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Crypto driver");
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 0000000..574f579
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,528 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define CRYPTO_BAM_CNFG_BITS_REG 0x0007C
+#define CRYPTO_BAM_CD_ENABLE 27
+#define CRYPTO_BAM_CD_ENABLE_MASK (1 << CRYPTO_BAM_CD_ENABLE)
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG 0x1A000
+
+#define CRYPTO_DATA_IN0_REG 0x1A010
+#define CRYPTO_DATA_IN1_REG 0x1A014
+#define CRYPTO_DATA_IN2_REG 0x1A018
+#define CRYPTO_DATA_IN3_REG 0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG 0x1A020
+#define CRYPTO_DATA_OUT1_REG 0x1A024
+#define CRYPTO_DATA_OUT2_REG 0x1A028
+#define CRYPTO_DATA_OUT3_REG 0x1A02C
+
+#define CRYPTO_STATUS_REG 0x1A100
+#define CRYPTO_STATUS2_REG 0x1A104
+#define CRYPTO_ENGINES_AVAIL 0x1A108
+#define CRYPTO_FIFO_SIZES_REG 0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG 0x1A110
+#define CRYPTO_GOPROC_REG 0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG 0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG 0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204
+#define CRYPTO_ENCR_SEG_START_REG 0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG 0x1D000
+#define CRYPTO_ENCR_KEY1_REG 0x1D004
+#define CRYPTO_ENCR_KEY2_REG 0x1D008
+#define CRYPTO_ENCR_KEY3_REG 0x1D00C
+#define CRYPTO_ENCR_KEY4_REG 0x1D010
+#define CRYPTO_ENCR_KEY5_REG 0x1D014
+#define CRYPTO_ENCR_KEY6_REG 0x1D018
+#define CRYPTO_ENCR_KEY7_REG 0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG 0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG 0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG 0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG 0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG 0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG 0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG 0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG 0x1D03C
+
+#define CRYPTO_ENCR_PIPE0_KEY0_REG 0x1E000
+#define CRYPTO_ENCR_PIPE0_KEY1_REG 0x1E004
+#define CRYPTO_ENCR_PIPE0_KEY2_REG 0x1E008
+#define CRYPTO_ENCR_PIPE0_KEY3_REG 0x1E00C
+#define CRYPTO_ENCR_PIPE0_KEY4_REG 0x1E010
+#define CRYPTO_ENCR_PIPE0_KEY5_REG 0x1E014
+#define CRYPTO_ENCR_PIPE0_KEY6_REG 0x1E018
+#define CRYPTO_ENCR_PIPE0_KEY7_REG 0x1E01C
+
+#define CRYPTO_ENCR_PIPE1_KEY0_REG 0x1E020
+#define CRYPTO_ENCR_PIPE1_KEY1_REG 0x1E024
+#define CRYPTO_ENCR_PIPE1_KEY2_REG 0x1E028
+#define CRYPTO_ENCR_PIPE1_KEY3_REG 0x1E02C
+#define CRYPTO_ENCR_PIPE1_KEY4_REG 0x1E030
+#define CRYPTO_ENCR_PIPE1_KEY5_REG 0x1E034
+#define CRYPTO_ENCR_PIPE1_KEY6_REG 0x1E038
+#define CRYPTO_ENCR_PIPE1_KEY7_REG 0x1E03C
+
+#define CRYPTO_ENCR_PIPE2_KEY0_REG 0x1E040
+#define CRYPTO_ENCR_PIPE2_KEY1_REG 0x1E044
+#define CRYPTO_ENCR_PIPE2_KEY2_REG 0x1E048
+#define CRYPTO_ENCR_PIPE2_KEY3_REG 0x1E04C
+#define CRYPTO_ENCR_PIPE2_KEY4_REG 0x1E050
+#define CRYPTO_ENCR_PIPE2_KEY5_REG 0x1E054
+#define CRYPTO_ENCR_PIPE2_KEY6_REG 0x1E058
+#define CRYPTO_ENCR_PIPE2_KEY7_REG 0x1E05C
+
+#define CRYPTO_ENCR_PIPE3_KEY0_REG 0x1E060
+#define CRYPTO_ENCR_PIPE3_KEY1_REG 0x1E064
+#define CRYPTO_ENCR_PIPE3_KEY2_REG 0x1E068
+#define CRYPTO_ENCR_PIPE3_KEY3_REG 0x1E06C
+#define CRYPTO_ENCR_PIPE3_KEY4_REG 0x1E070
+#define CRYPTO_ENCR_PIPE3_KEY5_REG 0x1E074
+#define CRYPTO_ENCR_PIPE3_KEY6_REG 0x1E078
+#define CRYPTO_ENCR_PIPE3_KEY7_REG 0x1E07C
+
+
+#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG 0x1E200
+#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG 0x1E204
+#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG 0x1E208
+#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG 0x1E20C
+#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG 0x1E210
+#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG 0x1E214
+#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG 0x1E218
+#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG 0x1E21C
+
+#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG 0x1E220
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG 0x1E224
+#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG 0x1E228
+#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG 0x1E22C
+#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG 0x1E230
+#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG 0x1E234
+#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG 0x1E238
+#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG 0x1E23C
+
+#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG 0x1E240
+#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG 0x1E244
+#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG 0x1E248
+#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG 0x1E24C
+#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG 0x1E250
+#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG 0x1E254
+#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG 0x1E258
+#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG 0x1E25C
+
+#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG 0x1E260
+#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG 0x1E264
+#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG 0x1E268
+#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG 0x1E26C
+#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG 0x1E270
+#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG 0x1E274
+#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG 0x1E278
+#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG 0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG 0x1A20C
+#define CRYPTO_CNTR1_IV1_REG 0x1A210
+#define CRYPTO_CNTR2_IV2_REG 0x1A214
+#define CRYPTO_CNTR3_IV3_REG 0x1A218
+
+#define CRYPTO_CNTR_MASK_REG0 0x1A23C
+#define CRYPTO_CNTR_MASK_REG1 0x1A238
+#define CRYPTO_CNTR_MASK_REG2 0x1A234
+#define CRYPTO_CNTR_MASK_REG 0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG 0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG 0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG 0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG 0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG 0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG 0x1A304
+#define CRYPTO_AUTH_SEG_START_REG 0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG 0x1D040
+#define CRYPTO_AUTH_KEY1_REG 0x1D044
+#define CRYPTO_AUTH_KEY2_REG 0x1D048
+#define CRYPTO_AUTH_KEY3_REG 0x1D04C
+#define CRYPTO_AUTH_KEY4_REG 0x1D050
+#define CRYPTO_AUTH_KEY5_REG 0x1D054
+#define CRYPTO_AUTH_KEY6_REG 0x1D058
+#define CRYPTO_AUTH_KEY7_REG 0x1D05C
+#define CRYPTO_AUTH_KEY8_REG 0x1D060
+#define CRYPTO_AUTH_KEY9_REG 0x1D064
+#define CRYPTO_AUTH_KEY10_REG 0x1D068
+#define CRYPTO_AUTH_KEY11_REG 0x1D06C
+#define CRYPTO_AUTH_KEY12_REG 0x1D070
+#define CRYPTO_AUTH_KEY13_REG 0x1D074
+#define CRYPTO_AUTH_KEY14_REG 0x1D078
+#define CRYPTO_AUTH_KEY15_REG 0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG 0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG 0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG 0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG 0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG 0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG 0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG 0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG 0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG 0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG 0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG 0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG 0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG 0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG 0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG 0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG 0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG 0x1E880
+#define CRYPTO_AUTH_PIPE1_KEY1_REG 0x1E884
+#define CRYPTO_AUTH_PIPE1_KEY2_REG 0x1E888
+#define CRYPTO_AUTH_PIPE1_KEY3_REG 0x1E88C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG 0x1E890
+#define CRYPTO_AUTH_PIPE1_KEY5_REG 0x1E894
+#define CRYPTO_AUTH_PIPE1_KEY6_REG 0x1E898
+#define CRYPTO_AUTH_PIPE1_KEY7_REG 0x1E89C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG 0x1E8A0
+#define CRYPTO_AUTH_PIPE1_KEY9_REG 0x1E8A4
+#define CRYPTO_AUTH_PIPE1_KEY10_REG 0x1E8A8
+#define CRYPTO_AUTH_PIPE1_KEY11_REG 0x1E8AC
+#define CRYPTO_AUTH_PIPE1_KEY12_REG 0x1E8B0
+#define CRYPTO_AUTH_PIPE1_KEY13_REG 0x1E8B4
+#define CRYPTO_AUTH_PIPE1_KEY14_REG 0x1E8B8
+#define CRYPTO_AUTH_PIPE1_KEY15_REG 0x1E8BC
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG 0x1E900
+#define CRYPTO_AUTH_PIPE2_KEY1_REG 0x1E904
+#define CRYPTO_AUTH_PIPE2_KEY2_REG 0x1E908
+#define CRYPTO_AUTH_PIPE2_KEY3_REG 0x1E90C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG 0x1E910
+#define CRYPTO_AUTH_PIPE2_KEY5_REG 0x1E914
+#define CRYPTO_AUTH_PIPE2_KEY6_REG 0x1E918
+#define CRYPTO_AUTH_PIPE2_KEY7_REG 0x1E91C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG 0x1E920
+#define CRYPTO_AUTH_PIPE2_KEY9_REG 0x1E924
+#define CRYPTO_AUTH_PIPE2_KEY10_REG 0x1E928
+#define CRYPTO_AUTH_PIPE2_KEY11_REG 0x1E92C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG 0x1E930
+#define CRYPTO_AUTH_PIPE2_KEY13_REG 0x1E934
+#define CRYPTO_AUTH_PIPE2_KEY14_REG 0x1E938
+#define CRYPTO_AUTH_PIPE2_KEY15_REG 0x1E93C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG 0x1E980
+#define CRYPTO_AUTH_PIPE3_KEY1_REG 0x1E984
+#define CRYPTO_AUTH_PIPE3_KEY2_REG 0x1E988
+#define CRYPTO_AUTH_PIPE3_KEY3_REG 0x1E98C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG 0x1E990
+#define CRYPTO_AUTH_PIPE3_KEY5_REG 0x1E994
+#define CRYPTO_AUTH_PIPE3_KEY6_REG 0x1E998
+#define CRYPTO_AUTH_PIPE3_KEY7_REG 0x1E99C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG 0x1E9A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG 0x1E9A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG 0x1E9A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG 0x1E9AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG 0x1E9B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG 0x1E9B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG 0x1E9B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG 0x1E9BC
+
+
+#define CRYPTO_AUTH_IV0_REG 0x1A310
+#define CRYPTO_AUTH_IV1_REG 0x1A314
+#define CRYPTO_AUTH_IV2_REG 0x1A318
+#define CRYPTO_AUTH_IV3_REG 0x1A31C
+#define CRYPTO_AUTH_IV4_REG 0x1A320
+#define CRYPTO_AUTH_IV5_REG 0x1A324
+#define CRYPTO_AUTH_IV6_REG 0x1A328
+#define CRYPTO_AUTH_IV7_REG 0x1A32C
+#define CRYPTO_AUTH_IV8_REG 0x1A330
+#define CRYPTO_AUTH_IV9_REG 0x1A334
+#define CRYPTO_AUTH_IV10_REG 0x1A338
+#define CRYPTO_AUTH_IV11_REG 0x1A33C
+#define CRYPTO_AUTH_IV12_REG 0x1A340
+#define CRYPTO_AUTH_IV13_REG 0x1A344
+#define CRYPTO_AUTH_IV14_REG 0x1A348
+#define CRYPTO_AUTH_IV15_REG 0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG 0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG 0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG 0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG 0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG 0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG 0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG 0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG 0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG 0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG 0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG 0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG 0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG 0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG 0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG 0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC
+
+#define CRYPTO_CONFIG_REG 0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG 0x1AF00
+#define CRYPTO_DEBUG_REG 0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK 0xFFFF
+#define CRYPTO_CORE_STEP_REV 0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK 0xFF000000
+#define CRYPTO_CORE_MAJOR_REV 24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK 0xFF0000
+#define CRYPTO_CORE_MINOR_REV 16 /* bit 23-16 */
+
+/* status reg */
+#define CRYPTO_MAC_FAILED 31
+#define CRYPTO_DOUT_SIZE_AVAIL 26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL 21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_HSD_ERR 20
+#define CRYPTO_ACCESS_VIOL 19
+#define CRYPTO_PIPE_ACTIVE_ERR 18
+#define CRYPTO_CFG_CHNG_ERR 17
+#define CRYPTO_DOUT_ERR 16
+#define CRYPTO_DIN_ERR 15
+#define CRYPTO_AXI_ERR 14
+#define CRYPTO_CRYPTO_STATE 10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK (0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY 9
+#define CRYPTO_AUTH_BUSY 8
+#define CRYPTO_DOUT_INTR 7
+#define CRYPTO_DIN_INTR 6
+#define CRYPTO_OP_DONE_INTR 5
+#define CRYPTO_ERR_INTR 4
+#define CRYPTO_DOUT_RDY 3
+#define CRYPTO_DIN_RDY 2
+#define CRYPTO_OPERATION_DONE 1
+#define CRYPTO_SW_ERR 0
+
+/* status2 reg */
+#define CRYPTO_AXI_EXTRA 1
+#define CRYPTO_LOCKED 2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE 17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK (0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT 0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT 1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT 2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT 3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT 4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT 5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT 6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT 7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT 8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT 9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT 10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT 11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT 12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT 13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT 14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT 15
+
+#define CRYPTO_MAX_QUEUED_REQ 14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS 0
+#define CRYPTO_ENUM_2_QUEUED_REQS 1
+#define CRYPTO_ENUM_3_QUEUED_REQS 2
+
+#define CRYPTO_IRQ_ENABLES 10 /* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE 9
+#define CRYPTO_LITTLE_ENDIAN_MASK (1 << CRYPTO_LITTLE_ENDIAN_MODE)
+#define CRYPTO_PIPE_SET_SELECT 5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK (0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N 4
+
+#define CRYPTO_MASK_DOUT_INTR 3
+#define CRYPTO_MASK_DIN_INTR 2
+#define CRYPTO_MASK_OP_DONE_INTR 1
+#define CRYPTO_MASK_ERR_INTR 0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC 24
+#define CRYPTO_COMP_EXP_MAC_DISABLED 0
+#define CRYPTO_COMP_EXP_MAC_ENABLED 1
+
+#define CRYPTO_F9_DIRECTION 23
+#define CRYPTO_F9_DIRECTION_UPLINK 0
+#define CRYPTO_F9_DIRECTION_DOWNLINK 1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS 20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+ (0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH 19
+#define CRYPTO_USE_HW_KEY_AUTH 18
+#define CRYPTO_FIRST 17
+#define CRYPTO_LAST 16
+
+#define CRYPTO_AUTH_POS 14 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE 0
+#define CRYPTO_AUTH_POS_AFTER 1
+
+#define CRYPTO_AUTH_SIZE 9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK (0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1 0
+#define CRYPTO_AUTH_SIZE_SHA256 1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES 0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES 1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES 2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES 4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES 6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES 8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES 10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES 12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES 14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 15
+
+
+#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH 0
+#define CRYPTO_AUTH_MODE_HMAC 1
+#define CRYPTO_AUTH_MODE_CCM 0
+#define CRYPTO_AUTH_MODE_CMAC 1
+
+#define CRYPTO_AUTH_KEY_SIZE 3 /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128 0
+#define CRYPTO_AUTH_KEY_SZ_AES256 2
+
+#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK 7
+#define CRYPTO_AUTH_ALG_NONE 0
+#define CRYPTO_AUTH_ALG_SHA 1
+#define CRYPTO_AUTH_ALG_AES 2
+#define CRYPTO_AUTH_ALG_KASUMI 3
+#define CRYPTO_AUTH_ALG_SNOW3G 4
+#define CRYPTO_AUTH_ALG_ZUC 5
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE 17 /* bit 17 */
+#define CRYPTO_F8_KEYSTREAM_DISABLED 0
+#define CRYPTO_F8_KEYSTREAM_ENABLED 1
+
+#define CRYPTO_F8_DIRECTION 16 /* bit 16 */
+#define CRYPTO_F8_DIRECTION_UPLINK 0
+#define CRYPTO_F8_DIRECTION_DOWNLINK 1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR 15 /* bit 15 */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED 1
+#define CRYPTO_USE_KEY_REGISTERS 0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR 14
+#define CRYPTO_USE_KEY_REG 0
+#define CRYPTO_USE_HW_KEY 1
+
+#define CRYPTO_LAST_CCM 13
+#define CRYPTO_LAST_CCM_XFR 1
+#define CRYPTO_INTERM_CCM_XFR 0
+
+
+#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST 0
+
+#define CRYPTO_ENCODE 10
+
+#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB 0
+#define CRYPTO_ENCR_MODE_CBC 1
+#define CRYPTO_ENCR_MODE_CTR 2
+#define CRYPTO_ENCR_MODE_XTS 3
+#define CRYPTO_ENCR_MODE_CCM 4
+
+#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES 0
+#define CRYPTO_ENCR_KEY_SZ_3DES 1
+#define CRYPTO_ENCR_KEY_SZ_AES128 0
+#define CRYPTO_ENCR_KEY_SZ_AES256 2
+
+#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE 0
+#define CRYPTO_ENCR_ALG_DES 1
+#define CRYPTO_ENCR_ALG_AES 2
+#define CRYPTO_ENCR_ALG_KASUMI 4
+#define CRYPTO_ENCR_ALG_SNOW_3G 5
+#define CRYPTO_ENCR_ALG_ZUC 6
+
+/* goproc reg */
+#define CRYPTO_GO 0
+#define CRYPTO_CLR_CNTXT 1
+#define CRYPTO_RESULTS_DUMP 2
+
+/* F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+ (0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 0 /* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+ (0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4 REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+ (0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL 0
+#define CRYPTO_DES_SEL 1
+#define CRYPTO_ENCR_SNOW3G_SEL 2
+#define CRYPTO_ENCR_KASUMI_SEL 3
+#define CRYPTO_SHA_SEL 4
+#define CRYPTO_SHA512_SEL 5
+#define CRYPTO_AUTH_AES_SEL 6
+#define CRYPTO_AUTH_SNOW3G_SEL 7
+#define CRYPTO_AUTH_KASUMI_SEL 8
+#define CRYPTO_BAM_PIPE_SETS 9 /* bit 12 - 9 */
+#define CRYPTO_AXI_WR_BEATS 13 /* bit 18 - 13 */
+#define CRYPTO_AXI_RD_BEATS 19 /* bit 24 - 19 */
+#define CRYPTO_ENCR_ZUC_SEL 26
+#define CRYPTO_AUTH_ZUC_SEL 27
+#define CRYPTO_ZUC_ENABLE 28
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
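For reference, a minimal sketch of decoding CRYPTO_VERSION_REG with the
revision masks and shifts defined above; this is illustrative only and not
part of the patch, and the ioremap'd "base" pointer and function name are
assumptions:

	static void qce_print_version(void __iomem *base)
	{
		u32 rev = readl_relaxed(base + CRYPTO_VERSION_REG);
		u32 major = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
		u32 minor = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
		u32 step = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;

		pr_info("crypto core rev %u.%u step %u\n", major, minor, step);
	}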
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 348dc31..7f584ee 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -1,5 +1,5 @@
-obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
-qcrypto-objs := core.o \
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypt.o
+qcrypt-objs := core.o \
common.o \
dma.o \
sha.o \
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b..9e95bf9 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@
tristate "DAX: direct access to differentiated memory"
default m if NVDIMM_DAX
depends on TRANSPARENT_HUGEPAGE
+ select SRCU
help
Support raw access to differentiated (persistence, bandwidth,
latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 152552d..1932248 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -24,6 +24,7 @@
#include "dax.h"
static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -59,7 +60,7 @@
* @region - parent region
* @dev - device backing the character device
* @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
* @res - array of physical address ranges
@@ -437,7 +438,7 @@
static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags)
{
- int rc;
+ int rc, id;
struct file *filp = vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
@@ -445,9 +446,9 @@
current->comm, (flags & FAULT_FLAG_WRITE)
? "write" : "read", vma->vm_start, vma->vm_end);
- rcu_read_lock();
+ id = srcu_read_lock(&dax_srcu);
rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
- rcu_read_unlock();
+ srcu_read_unlock(&dax_srcu, id);
return rc;
}
@@ -563,11 +564,11 @@
* Note, rcu is not protecting the liveness of dax_dev, rcu is
* ensuring that any fault handlers that might have seen
* dax_dev->alive == true, have completed. Any fault handlers
- * that start after synchronize_rcu() has started will abort
+ * that start after synchronize_srcu() has started will abort
* upon seeing dax_dev->alive == false.
*/
dax_dev->alive = false;
- synchronize_rcu();
+ synchronize_srcu(&dax_srcu);
unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
cdev_del(cdev);
device_unregister(dev);
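The dax.c change above is the standard RCU-to-SRCU conversion: the fault
path may sleep, which is not legal inside a plain rcu_read_lock() section,
while SRCU read-side critical sections may block. A minimal generic sketch
of the same pairing, with hypothetical names:

	DEFINE_STATIC_SRCU(my_srcu);
	static bool alive = true;

	static int reader(void)
	{
		int id, ret = -ENODEV;

		id = srcu_read_lock(&my_srcu);
		if (alive)
			ret = do_fault_work(); /* may sleep -- hence SRCU */
		srcu_read_unlock(&my_srcu, id);
		return ret;
	}

	static void teardown(void)
	{
		alive = false;
		/* waits for every reader that may have seen alive == true */
		synchronize_srcu(&my_srcu);
	}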
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 6476c5e..b8effac 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -156,6 +156,15 @@
governor is unlikely to be useful for other
devices.
+config DEVFREQ_GOV_QCOM_GPUBW_MON
+ tristate "GPU BW voting governor"
+ depends on DEVFREQ_GOV_QCOM_ADRENO_TZ
+ help
+ This governor works together with the Adreno Trustzone governor
+ and selects bus frequency votes using an "on-demand" algorithm.
+ This governor is unlikely to be useful for non-Adreno
+ devices.
+
config ARM_EXYNOS_BUS_DEVFREQ
tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index f488f12..f248e02 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ) += governor_cpufreq.o
obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_gpubw_mon.o
obj-$(CONFIG_QCOM_BIMC_BWMON) += bimc-bwmon.o
obj-$(CONFIG_ARM_MEMLAT_MON) += arm-memlat-mon.o
obj-$(CONFIG_QCOMCCI_HWMON) += msmcci-hwmon.o
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index a4a1cfb..d70104d 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,11 @@
#define MON2_ZONE_CNT(m) ((m)->base + 0x2D8)
#define MON2_ZONE_MAX(m, zone) ((m)->base + 0x2E0 + 0x4 * zone)
+enum bwmon_type {
+ BWMON_1,
+ BWMON_2,
+};
+
struct bwmon_spec {
bool wrap_on_thres;
bool overflow;
@@ -65,7 +70,7 @@
void __iomem *base;
void __iomem *global_base;
unsigned int mport;
- unsigned int irq;
+ int irq;
const struct bwmon_spec *spec;
struct device *dev;
struct bw_hwmon hw;
@@ -76,7 +81,6 @@
};
#define to_bwmon(ptr) container_of(ptr, struct bwmon, hw)
-#define has_hw_sampling(m) (m->spec->hw_sampling)
#define ENABLE_MASK BIT(0)
#define THROTTLE_MASK 0x1F
@@ -86,20 +90,29 @@
#define INT_STATUS_MASK_HWS 0xF0
static DEFINE_SPINLOCK(glb_lock);
-static void mon_enable(struct bwmon *m)
+
+static __always_inline void mon_enable(struct bwmon *m, enum bwmon_type type)
{
- if (has_hw_sampling(m))
- writel_relaxed((ENABLE_MASK | m->throttle_adj), MON2_EN(m));
- else
- writel_relaxed((ENABLE_MASK | m->throttle_adj), MON_EN(m));
+ switch (type) {
+ case BWMON_1:
+ writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
+ break;
+ case BWMON_2:
+ writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
+ break;
+ }
}
-static void mon_disable(struct bwmon *m)
+static __always_inline void mon_disable(struct bwmon *m, enum bwmon_type type)
{
- if (has_hw_sampling(m))
- writel_relaxed(m->throttle_adj, MON2_EN(m));
- else
+ switch (type) {
+ case BWMON_1:
writel_relaxed(m->throttle_adj, MON_EN(m));
+ break;
+ case BWMON_2:
+ writel_relaxed(m->throttle_adj, MON2_EN(m));
+ break;
+ }
/*
* mon_disable() and mon_irq_clear(),
* If latter goes first and count happen to trigger irq, we would
@@ -110,24 +123,25 @@
#define MON_CLEAR_BIT 0x1
#define MON_CLEAR_ALL_BIT 0x2
-static void mon_clear(struct bwmon *m, bool clear_all)
+static __always_inline
+void mon_clear(struct bwmon *m, bool clear_all, enum bwmon_type type)
{
- if (!has_hw_sampling(m)) {
+ switch (type) {
+ case BWMON_1:
writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
- goto out;
+ break;
+ case BWMON_2:
+ if (clear_all)
+ writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
+ else
+ writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
+ break;
}
-
- if (clear_all)
- writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
- else
- writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
-
/*
* The counter clear and IRQ clear bits are not in the same 4KB
* region. So, we need to make sure the counter clear is completed
* before we try to clear the IRQ or do any other counter operations.
*/
-out:
mb();
}
@@ -148,72 +162,141 @@
}
}
-static void mon_irq_enable(struct bwmon *m)
+static void mon_glb_irq_enable(struct bwmon *m)
{
u32 val;
- spin_lock(&glb_lock);
val = readl_relaxed(GLB_INT_EN(m));
val |= 1 << m->mport;
writel_relaxed(val, GLB_INT_EN(m));
-
- val = readl_relaxed(MON_INT_EN(m));
- val |= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_ENABLE_V1;
- writel_relaxed(val, MON_INT_EN(m));
- spin_unlock(&glb_lock);
- /*
- * make Sure irq enable complete for local and global
- * to avoid race with other monitor calls
- */
- mb();
}
-static void mon_irq_disable(struct bwmon *m)
+static __always_inline
+void mon_irq_enable(struct bwmon *m, enum bwmon_type type)
{
u32 val;
spin_lock(&glb_lock);
- val = readl_relaxed(GLB_INT_EN(m));
- val &= ~(1 << m->mport);
- writel_relaxed(val, GLB_INT_EN(m));
-
- val = readl_relaxed(MON_INT_EN(m));
- val &= has_hw_sampling(m) ? ~INT_STATUS_MASK_HWS : ~INT_ENABLE_V1;
- writel_relaxed(val, MON_INT_EN(m));
+ switch (type) {
+ case BWMON_1:
+ mon_glb_irq_enable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val |= INT_ENABLE_V1;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ case BWMON_2:
+ mon_glb_irq_enable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val |= INT_STATUS_MASK_HWS;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ }
spin_unlock(&glb_lock);
/*
- * make Sure irq disable complete for local and global
+ * make sure the irq enable has completed for local and global
* to avoid race with other monitor calls
*/
mb();
}
-static unsigned int mon_irq_status(struct bwmon *m)
+static void mon_glb_irq_disable(struct bwmon *m)
+{
+ u32 val;
+
+ val = readl_relaxed(GLB_INT_EN(m));
+ val &= ~(1 << m->mport);
+ writel_relaxed(val, GLB_INT_EN(m));
+}
+
+static __always_inline
+void mon_irq_disable(struct bwmon *m, enum bwmon_type type)
+{
+ u32 val;
+
+ spin_lock(&glb_lock);
+
+ switch (type) {
+ case BWMON_1:
+ mon_glb_irq_disable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val &= ~INT_ENABLE_V1;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ case BWMON_2:
+ mon_glb_irq_disable(m);
+ val = readl_relaxed(MON_INT_EN(m));
+ val &= ~INT_STATUS_MASK_HWS;
+ writel_relaxed(val, MON_INT_EN(m));
+ break;
+ }
+ spin_unlock(&glb_lock);
+ /*
+ * make sure the irq disable has completed for local and global
+ * to avoid race with other monitor calls
+ */
+ mb();
+}
+
+static __always_inline
+unsigned int mon_irq_status(struct bwmon *m, enum bwmon_type type)
{
u32 mval;
- mval = readl_relaxed(MON_INT_STATUS(m));
-
- dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
- readl_relaxed(GLB_INT_STATUS(m)));
-
- mval &= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+ switch (type) {
+ case BWMON_1:
+ mval = readl_relaxed(MON_INT_STATUS(m));
+ dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+ readl_relaxed(GLB_INT_STATUS(m)));
+ mval &= INT_STATUS_MASK;
+ break;
+ case BWMON_2:
+ mval = readl_relaxed(MON_INT_STATUS(m));
+ dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+ readl_relaxed(GLB_INT_STATUS(m)));
+ mval &= INT_STATUS_MASK_HWS;
+ break;
+ }
return mval;
}
-static void mon_irq_clear(struct bwmon *m)
+
+static void mon_glb_irq_clear(struct bwmon *m)
{
- u32 intclr;
-
- intclr = has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
-
- writel_relaxed(intclr, MON_INT_CLR(m));
+ /*
+ * Synchronize the local interrupt clear in mon_irq_clear()
+ * with the global interrupt clear here. Otherwise, the CPU
+ * may reorder the two writes and clear the global interrupt
+ * before the local interrupt, causing the global interrupt
+ * to be retriggered by the local interrupt still being high.
+ */
mb();
writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+ /*
+ * Similarly, because the global registers are in a different
+ * region than the local registers, we need to ensure any register
+ * writes to enable the monitor after this call are ordered with the
+ * clearing here so that local writes don't happen before the
+ * interrupt is cleared.
+ */
mb();
}
+static __always_inline
+void mon_irq_clear(struct bwmon *m, enum bwmon_type type)
+{
+ switch (type) {
+ case BWMON_1:
+ writel_relaxed(INT_STATUS_MASK, MON_INT_CLR(m));
+ mon_glb_irq_clear(m);
+ break;
+ case BWMON_2:
+ writel_relaxed(INT_STATUS_MASK_HWS, MON_INT_CLR(m));
+ mon_glb_irq_clear(m);
+ break;
+ }
+}
+
static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
{
struct bwmon *m = to_bwmon(hw);
@@ -331,12 +414,12 @@
#define THRES_HIT(status) (status & BIT(0))
#define OVERFLOW(status) (status & BIT(1))
-static unsigned long mon_get_count(struct bwmon *m)
+static unsigned long mon_get_count1(struct bwmon *m)
{
unsigned long count, status;
count = readl_relaxed(MON_CNT(m));
- status = mon_irq_status(m);
+ status = mon_irq_status(m, BWMON_1);
dev_dbg(m->dev, "Counter: %08lx\n", count);
@@ -385,6 +468,23 @@
return count;
}
+static __always_inline
+unsigned long mon_get_count(struct bwmon *m, enum bwmon_type type)
+{
+ unsigned long count;
+
+ switch (type) {
+ case BWMON_1:
+ count = mon_get_count1(m);
+ break;
+ case BWMON_2:
+ count = mon_get_zone_stats(m);
+ break;
+ }
+
+ return count;
+}
+
/* ********** CPUBW specific code ********** */
/* Returns MBps of read/writes for the sampling window. */
@@ -398,30 +498,41 @@
return mbps;
}
-static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+static __always_inline
+unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum bwmon_type type)
{
struct bwmon *m = to_bwmon(hw);
unsigned long count;
- mon_disable(m);
- count = has_hw_sampling(m) ? mon_get_zone_stats(m) : mon_get_count(m);
- mon_clear(m, false);
- mon_irq_clear(m);
- mon_enable(m);
+ mon_disable(m, type);
+ count = mon_get_count(m, type);
+ mon_clear(m, false, type);
+ mon_irq_clear(m, type);
+ mon_enable(m, type);
return count;
}
+static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+{
+ return __get_bytes_and_clear(hw, BWMON_1);
+}
+
+static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
+{
+ return __get_bytes_and_clear(hw, BWMON_2);
+}
+
static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
{
unsigned long count;
u32 limit;
struct bwmon *m = to_bwmon(hw);
- mon_disable(m);
- count = mon_get_count(m);
- mon_clear(m, false);
- mon_irq_clear(m);
+ mon_disable(m, BWMON_1);
+ count = mon_get_count1(m);
+ mon_clear(m, false, BWMON_1);
+ mon_irq_clear(m, BWMON_1);
if (likely(!m->spec->wrap_on_thres))
limit = bytes;
@@ -429,7 +540,7 @@
limit = max(bytes, 500000UL);
mon_set_limit(m, limit);
- mon_enable(m);
+ mon_enable(m, BWMON_1);
return count;
}
@@ -438,21 +549,22 @@
{
struct bwmon *m = to_bwmon(hw);
- mon_disable(m);
- mon_clear(m, false);
- mon_irq_clear(m);
+ mon_disable(m, BWMON_2);
+ mon_clear(m, false, BWMON_2);
+ mon_irq_clear(m, BWMON_2);
mon_set_zones(m, sample_ms);
- mon_enable(m);
+ mon_enable(m, BWMON_2);
return 0;
}
-static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+static irqreturn_t
+__bwmon_intr_handler(int irq, void *dev, enum bwmon_type type)
{
struct bwmon *m = dev;
- m->intr_status = mon_irq_status(m);
+ m->intr_status = mon_irq_status(m, type);
if (!m->intr_status)
return IRQ_NONE;
@@ -462,6 +574,16 @@
return IRQ_HANDLED;
}
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+ return __bwmon_intr_handler(irq, dev, BWMON_1);
+}
+
+static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
+{
+ return __bwmon_intr_handler(irq, dev, BWMON_2);
+}
+
static irqreturn_t bwmon_intr_thread(int irq, void *dev)
{
struct bwmon *m = dev;
@@ -470,98 +592,180 @@
return IRQ_HANDLED;
}
-static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+static __always_inline int
+__start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps, enum bwmon_type type)
{
struct bwmon *m = to_bwmon(hw);
- u32 limit;
- u32 zone_actions = calc_zone_actions();
+ u32 limit, zone_actions;
int ret;
+ irq_handler_t handler;
- ret = request_threaded_irq(m->irq, bwmon_intr_handler,
- bwmon_intr_thread,
+ switch (type) {
+ case BWMON_1:
+ handler = bwmon_intr_handler;
+ limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+ break;
+ case BWMON_2:
+ zone_actions = calc_zone_actions();
+ handler = bwmon_intr_handler2;
+ break;
+ }
+
+ ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
IRQF_ONESHOT | IRQF_SHARED,
dev_name(m->dev), m);
if (ret) {
dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
- ret);
+ ret);
return ret;
}
- mon_disable(m);
+ mon_disable(m, type);
- mon_clear(m, true);
- limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
- if (has_hw_sampling(m)) {
+ mon_clear(m, false, type);
+
+ switch (type) {
+ case BWMON_1:
+ mon_set_limit(m, limit);
+ break;
+ case BWMON_2:
mon_set_zones(m, hw->df->profile->polling_ms);
/* Set the zone actions to increment appropriate counters */
writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
- } else {
- mon_set_limit(m, limit);
+ break;
}
- mon_irq_clear(m);
- mon_irq_enable(m);
- mon_enable(m);
+ mon_irq_clear(m, type);
+ mon_irq_enable(m, type);
+ mon_enable(m, type);
return 0;
}
-static void stop_bw_hwmon(struct bw_hwmon *hw)
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+ return __start_bw_hwmon(hw, mbps, BWMON_1);
+}
+
+static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
+{
+ return __start_bw_hwmon(hw, mbps, BWMON_2);
+}
+
+static __always_inline
+void __stop_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
{
struct bwmon *m = to_bwmon(hw);
- mon_irq_disable(m);
+ mon_irq_disable(m, type);
free_irq(m->irq, m);
- mon_disable(m);
- mon_clear(m, true);
- mon_irq_clear(m);
+ mon_disable(m, type);
+ mon_clear(m, true, type);
+ mon_irq_clear(m, type);
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+ return __stop_bw_hwmon(hw, BWMON_1);
+}
+
+static void stop_bw_hwmon2(struct bw_hwmon *hw)
+{
+ return __stop_bw_hwmon(hw, BWMON_2);
+}
+
+static __always_inline
+int __suspend_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ mon_irq_disable(m, type);
+ free_irq(m->irq, m);
+ mon_disable(m, type);
+ mon_irq_clear(m, type);
+
+ return 0;
}
static int suspend_bw_hwmon(struct bw_hwmon *hw)
{
- struct bwmon *m = to_bwmon(hw);
+ return __suspend_bw_hwmon(hw, BWMON_1);
+}
- mon_irq_disable(m);
- free_irq(m->irq, m);
- mon_disable(m);
- mon_irq_clear(m);
+static int suspend_bw_hwmon2(struct bw_hwmon *hw)
+{
+ return __suspend_bw_hwmon(hw, BWMON_2);
+}
+
+static int __resume_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+{
+ struct bwmon *m = to_bwmon(hw);
+ int ret;
+ irq_handler_t handler;
+
+ switch (type) {
+ case BWMON_1:
+ handler = bwmon_intr_handler;
+ break;
+ case BWMON_2:
+ handler = bwmon_intr_handler2;
+ break;
+ }
+
+ mon_clear(m, false, type);
+ ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(m->dev), m);
+ if (ret) {
+ dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+ ret);
+ return ret;
+ }
+
+ mon_irq_enable(m, type);
+ mon_enable(m, type);
return 0;
}
static int resume_bw_hwmon(struct bw_hwmon *hw)
{
- struct bwmon *m = to_bwmon(hw);
- int ret;
+ return __resume_bw_hwmon(hw, BWMON_1);
+}
- mon_clear(m, false);
- ret = request_threaded_irq(m->irq, bwmon_intr_handler,
- bwmon_intr_thread,
- IRQF_ONESHOT | IRQF_SHARED,
- dev_name(m->dev), m);
- if (ret) {
- dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
- ret);
- return ret;
- }
-
- mon_irq_enable(m);
- mon_enable(m);
-
- return 0;
+static int resume_bw_hwmon2(struct bw_hwmon *hw)
+{
+ return __resume_bw_hwmon(hw, BWMON_2);
}
/*************************************************************************/
static const struct bwmon_spec spec[] = {
- { .wrap_on_thres = true, .overflow = false, .throt_adj = false,
- .hw_sampling = false},
- { .wrap_on_thres = false, .overflow = true, .throt_adj = false,
- .hw_sampling = false},
- { .wrap_on_thres = false, .overflow = true, .throt_adj = true,
- .hw_sampling = false},
- { .wrap_on_thres = false, .overflow = true, .throt_adj = true,
- .hw_sampling = true},
+ [0] = {
+ .wrap_on_thres = true,
+ .overflow = false,
+ .throt_adj = false,
+ .hw_sampling = false
+ },
+ [1] = {
+ .wrap_on_thres = false,
+ .overflow = true,
+ .throt_adj = false,
+ .hw_sampling = false
+ },
+ [2] = {
+ .wrap_on_thres = false,
+ .overflow = true,
+ .throt_adj = true,
+ .hw_sampling = false
+ },
+ [3] = {
+ .wrap_on_thres = false,
+ .overflow = true,
+ .throt_adj = true,
+ .hw_sampling = true
+ },
};
static const struct of_device_id bimc_bwmon_match_table[] = {
@@ -577,7 +781,6 @@
struct device *dev = &pdev->dev;
struct resource *res;
struct bwmon *m;
- const struct of_device_id *id;
int ret;
u32 data;
@@ -593,22 +796,11 @@
}
m->mport = data;
- id = of_match_device(bimc_bwmon_match_table, dev);
- if (!id) {
+ m->spec = of_device_get_match_data(dev);
+ if (!m->spec) {
dev_err(dev, "Unknown device type!\n");
return -ENODEV;
}
- m->spec = id->data;
-
- if (has_hw_sampling(m)) {
- ret = of_property_read_u32(dev->of_node,
- "qcom,hw-timer-hz", &data);
- if (ret) {
- dev_err(dev, "HW sampling rate not specified!\n");
- return ret;
- }
- m->hw_timer_hz = data;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
if (!res) {
@@ -641,17 +833,33 @@
m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
if (!m->hw.of_node)
return -EINVAL;
- m->hw.start_hwmon = &start_bw_hwmon;
- m->hw.stop_hwmon = &stop_bw_hwmon;
- m->hw.suspend_hwmon = &suspend_bw_hwmon;
- m->hw.resume_hwmon = &resume_bw_hwmon;
- m->hw.get_bytes_and_clear = &get_bytes_and_clear;
- m->hw.set_thres = &set_thres;
- if (has_hw_sampling(m))
- m->hw.set_hw_events = &set_hw_events;
+
+ if (m->spec->hw_sampling) {
+ ret = of_property_read_u32(dev->of_node, "qcom,hw-timer-hz",
+ &m->hw_timer_hz);
+ if (ret) {
+ dev_err(dev, "HW sampling rate not specified!\n");
+ return ret;
+ }
+
+ m->hw.start_hwmon = start_bw_hwmon2;
+ m->hw.stop_hwmon = stop_bw_hwmon2;
+ m->hw.suspend_hwmon = suspend_bw_hwmon2;
+ m->hw.resume_hwmon = resume_bw_hwmon2;
+ m->hw.get_bytes_and_clear = get_bytes_and_clear2;
+ m->hw.set_hw_events = set_hw_events;
+ } else {
+ m->hw.start_hwmon = start_bw_hwmon;
+ m->hw.stop_hwmon = stop_bw_hwmon;
+ m->hw.suspend_hwmon = suspend_bw_hwmon;
+ m->hw.resume_hwmon = resume_bw_hwmon;
+ m->hw.get_bytes_and_clear = get_bytes_and_clear;
+ m->hw.set_thres = set_thres;
+ }
+
if (m->spec->throt_adj) {
- m->hw.set_throttle_adj = &mon_set_throttle_adj;
- m->hw.get_throttle_adj = &mon_get_throttle_adj;
+ m->hw.set_throttle_adj = mon_set_throttle_adj;
+ m->hw.get_throttle_adj = mon_get_throttle_adj;
}
ret = register_bw_hwmon(dev, &m->hw);
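Since the probe path now uses of_device_get_match_data(), each entry in
bimc_bwmon_match_table is expected to carry a pointer to one of the spec[]
variants in its .data field. A sketch of the shape such a table takes; the
compatible strings here are assumptions, not taken from this patch:

	static const struct of_device_id bimc_bwmon_match_table[] = {
		{ .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
		{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
		{ .compatible = "qcom,bimc-bwmon3", .data = &spec[2] },
		{ .compatible = "qcom,bimc-bwmon4", .data = &spec[3] },
		{}
	};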
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 5b85b8d..8f582f6 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -70,6 +70,29 @@
}
/**
+ * devfreq_set_freq_limits() - Set min and max frequency from freq_table
+ * @devfreq: the devfreq instance
+ */
+static void devfreq_set_freq_limits(struct devfreq *devfreq)
+{
+ int idx;
+ unsigned long min = ~0, max = 0;
+
+ if (!devfreq->profile->freq_table)
+ return;
+
+ for (idx = 0; idx < devfreq->profile->max_state; idx++) {
+ if (min > devfreq->profile->freq_table[idx])
+ min = devfreq->profile->freq_table[idx];
+ if (max < devfreq->profile->freq_table[idx])
+ max = devfreq->profile->freq_table[idx];
+ }
+
+ devfreq->min_freq = min;
+ devfreq->max_freq = max;
+}
+
+/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
* @freq: the target frequency
@@ -569,6 +592,7 @@
devfreq_set_freq_table(devfreq);
mutex_lock(&devfreq->lock);
}
+ devfreq_set_freq_limits(devfreq);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
diff --git a/drivers/devfreq/devfreq_trace.h b/drivers/devfreq/devfreq_trace.h
new file mode 100644
index 0000000..7dacc0e
--- /dev/null
+++ b/drivers/devfreq/devfreq_trace.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_DEVFREQ_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DEVFREQ_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM devfreq
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE devfreq_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(devfreq_msg,
+ TP_PROTO(const char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+ TP_printk(
+ "%s", __get_str(msg)
+ )
+);
+
+#endif /* _DEVFREQ_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
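The TRACE_EVENT(devfreq_msg, ...) definition above generates a
trace_devfreq_msg() emit helper. The usual kernel tracepoint pattern is to
instantiate the bodies in exactly one compilation unit and call the helper
wherever the header is visible; a minimal sketch, with an illustrative
message string:

	/* in one .c file only: instantiate the tracepoint bodies */
	#define CREATE_TRACE_POINTS
	#include "devfreq_trace.h"

	/* anywhere the header is included: */
	trace_devfreq_msg("bus bandwidth vote updated");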
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index d7cc425..53c0f8a 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,25 +143,27 @@
{ \
struct devfreq *df = to_devfreq(dev); \
struct hwmon_node *hw = df->data; \
- int ret; \
+ int ret, numvals; \
unsigned int i = 0, val; \
+ char **strlist; \
\
- do { \
- ret = kstrtoint(buf, 10, &val); \
+ strlist = argv_split(GFP_KERNEL, buf, &numvals); \
+ if (!strlist) \
+ return -ENOMEM; \
+ numvals = min(numvals, n - 1); \
+ for (i = 0; i < numvals; i++) { \
+ ret = kstrtouint(strlist[i], 10, &val); \
if (ret) \
- break; \
- buf = strnchr(buf, PAGE_SIZE, ' '); \
- if (buf) \
- buf++; \
+ goto out; \
val = max(val, _min); \
val = min(val, _max); \
hw->name[i] = val; \
- i++; \
- } while (buf && i < n - 1); \
- if (i < 1) \
- return -EINVAL; \
+ } \
+ ret = count; \
+out: \
+ argv_free(strlist); \
hw->name[i] = 0; \
- return count; \
+ return ret; \
}
#define gov_list_attr(__attr, n, min, max) \
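The store macro above now leans on argv_split()/argv_free() from
lib/argv_split.c instead of hand-rolled strnchr() walking. A standalone
sketch of the same parsing pattern, with a hypothetical helper name:

	static int parse_uint_list(const char *buf, unsigned int *out, int max)
	{
		char **tokens;
		int i, n, ret = 0;

		tokens = argv_split(GFP_KERNEL, buf, &n);
		if (!tokens)
			return -ENOMEM;

		n = min(n, max);
		for (i = 0; i < n; i++) {
			ret = kstrtouint(tokens[i], 10, &out[i]);
			if (ret)
				break;
		}
		argv_free(tokens);
		return ret ? ret : n;
	}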
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
new file mode 100644
index 0000000..9c24eef
--- /dev/null
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <linux/slab.h>
+
+#include "devfreq_trace.h"
+#include "governor.h"
+
+#define MIN_BUSY 1000
+#define LONG_FLOOR 50000
+#define HIST 5
+#define TARGET 80
+#define CAP 75
+/* AB vote is in multiples of BW_STEP megabytes */
+#define BW_STEP 160
+
+static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
+ unsigned int norm_max)
+{
+ int i;
+
+ priv->bus.max = norm_max;
+ for (i = 0; i < priv->bus.num; i++) {
+ priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
+ priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
+ }
+}
+
+static inline int devfreq_get_freq_level(struct devfreq *devfreq,
+ unsigned long freq)
+{
+ int lev;
+
+ for (lev = 0; lev < devfreq->profile->max_state; lev++)
+ if (freq == devfreq->profile->freq_table[lev])
+ return lev;
+
+ return -EINVAL;
+}
+
+static int devfreq_gpubw_get_target(struct devfreq *df,
+ unsigned long *freq)
+{
+
+ struct devfreq_msm_adreno_tz_data *priv = df->data;
+ struct msm_busmon_extended_profile *bus_profile = container_of(
+ (df->profile),
+ struct msm_busmon_extended_profile,
+ profile);
+ struct devfreq_dev_status stats;
+ struct xstats b;
+ int result;
+ int level = 0;
+ int act_level;
+ int norm_cycles;
+ int gpu_percent;
+ /*
+ * At max usage, normalized AB should equal the gpu_bimc frequency in MHz.
+ * Start with a reasonable value and let the system push it up to max.
+ */
+ static int norm_ab_max = 300;
+ int norm_ab;
+ unsigned long ab_mbytes = 0;
+
+ if (priv == NULL)
+ return 0;
+
+ stats.private_data = &b;
+
+ result = df->profile->get_dev_status(df->dev.parent, &stats);
+
+ *freq = stats.current_frequency;
+
+ priv->bus.total_time += stats.total_time;
+ priv->bus.gpu_time += stats.busy_time;
+ priv->bus.ram_time += b.ram_time;
+ priv->bus.ram_wait += b.ram_wait;
+
+ level = devfreq_get_freq_level(df, stats.current_frequency);
+
+ if (priv->bus.total_time < LONG_FLOOR)
+ return result;
+
+ norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
+ (unsigned int) priv->bus.total_time;
+ gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
+ (unsigned int) priv->bus.total_time;
+
+ /*
+ * If there's a new high watermark, update the cutoffs and send the
+ * FAST hint. Otherwise check the current value against the current
+ * cutoffs.
+ */
+ if (norm_cycles > priv->bus.max) {
+ _update_cutoff(priv, norm_cycles);
+ bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+ } else {
+ /* GPU votes for IB not AB so don't under vote the system */
+ norm_cycles = (100 * norm_cycles) / TARGET;
+ act_level = priv->bus.index[level] + b.mod;
+ act_level = (act_level < 0) ? 0 : act_level;
+ act_level = (act_level >= priv->bus.num) ?
+ (priv->bus.num - 1) : act_level;
+ if (norm_cycles > priv->bus.up[act_level] &&
+ gpu_percent > CAP)
+ bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+ else if (norm_cycles < priv->bus.down[act_level] && level)
+ bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
+ }
+
+ /* Calculate the AB vote based on bus width if defined */
+ if (priv->bus.width) {
+ norm_ab = (unsigned int)priv->bus.ram_time /
+ (unsigned int) priv->bus.total_time;
+ /* Calculate AB in megabytes and round up to a multiple of BW_STEP */
+ ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
+ bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
+ } else if (bus_profile->flag) {
+ /* Re-calculate the AB percentage for a new IB vote */
+ norm_ab = (unsigned int)priv->bus.ram_time /
+ (unsigned int) priv->bus.total_time;
+ if (norm_ab > norm_ab_max)
+ norm_ab_max = norm_ab;
+ bus_profile->percent_ab = (100 * norm_ab) / norm_ab_max;
+ }
+
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ priv->bus.ram_wait = 0;
+
+ return result;
+}
+
+static int gpubw_start(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv;
+
+ struct msm_busmon_extended_profile *bus_profile = container_of(
+ (devfreq->profile),
+ struct msm_busmon_extended_profile,
+ profile);
+ unsigned int t1, t2 = 2 * HIST;
+ int i, bus_size;
+
+ devfreq->data = bus_profile->private_data;
+ priv = devfreq->data;
+
+ bus_size = sizeof(u32) * priv->bus.num;
+ priv->bus.up = kzalloc(bus_size, GFP_KERNEL);
+ priv->bus.down = kzalloc(bus_size, GFP_KERNEL);
+ priv->bus.p_up = kzalloc(bus_size, GFP_KERNEL);
+ priv->bus.p_down = kzalloc(bus_size, GFP_KERNEL);
+ if (priv->bus.up == NULL || priv->bus.down == NULL ||
+ priv->bus.p_up == NULL || priv->bus.p_down == NULL)
+ return -ENOMEM;
+
+ /* Set up the cut-over percentages for the bus calculation. */
+ for (i = 0; i < priv->bus.num; i++) {
+ t1 = (u32)(100 * priv->bus.ib[i]) /
+ (u32)priv->bus.ib[priv->bus.num - 1];
+ priv->bus.p_up[i] = t1 - HIST;
+ priv->bus.p_down[i] = t2 - 2 * HIST;
+ t2 = t1;
+ }
+ /* Set the upper-most and lower-most bounds correctly. */
+ priv->bus.p_down[0] = 0;
+ priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
+ priv->bus.p_down[1] : (2 * HIST);
+ if (priv->bus.num >= 1)
+ priv->bus.p_up[priv->bus.num - 1] = 100;
+ _update_cutoff(priv, priv->bus.max);
+
+ return 0;
+}
+
+static int gpubw_stop(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ if (priv) {
+ kfree(priv->bus.up);
+ kfree(priv->bus.down);
+ kfree(priv->bus.p_up);
+ kfree(priv->bus.p_down);
+ }
+ devfreq->data = NULL;
+ return 0;
+}
+
+static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int result = 0;
+ unsigned long freq;
+
+ mutex_lock(&devfreq->lock);
+ freq = devfreq->previous_freq;
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ result = gpubw_start(devfreq);
+ break;
+ case DEVFREQ_GOV_STOP:
+ result = gpubw_stop(devfreq);
+ break;
+ case DEVFREQ_GOV_RESUME:
+ /* TODO ..... */
+ /* ret = update_devfreq(devfreq); */
+ break;
+ case DEVFREQ_GOV_SUSPEND:
+ {
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ }
+ break;
+ default:
+ result = 0;
+ break;
+ }
+ mutex_unlock(&devfreq->lock);
+ return result;
+}
+
+static struct devfreq_governor devfreq_gpubw = {
+ .name = "gpubw_mon",
+ .get_target_freq = devfreq_gpubw_get_target,
+ .event_handler = devfreq_gpubw_event_handler,
+};
+
+static int __init devfreq_gpubw_init(void)
+{
+ return devfreq_add_governor(&devfreq_gpubw);
+}
+subsys_initcall(devfreq_gpubw_init);
+
+static void __exit devfreq_gpubw_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_gpubw);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+}
+module_exit(devfreq_gpubw_exit);
+
+MODULE_DESCRIPTION("GPU bus bandwidth voting driver. Uses VBIF counters");
+MODULE_LICENSE("GPL v2");
+
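To make the AB-vote arithmetic in devfreq_gpubw_get_target() concrete, a
worked example with assumed inputs: take norm_ab = 200 (ram cycles per
microsecond of sample time, i.e. roughly MHz of bus demand) and
priv->bus.width = 32. Then

	ab_mbytes = (200 * 32 * 1000000ULL) >> 20 = 6103
	roundup(6103, BW_STEP) = roundup(6103, 160) = 6240

so bus_profile->ab_mbytes is voted as 6240 MB. The input values are
illustrative only.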
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index cf04d24..6b54e02 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -303,6 +303,9 @@
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
.unlocked_ioctl = dma_buf_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_buf_ioctl,
+#endif
};
/*
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 192080e..13b8b71 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -514,6 +514,17 @@
For debugging issues having to do with stability and overall system
health, you should probably say 'Y' here.
+config EDAC_LLCC_POLL
+ depends on EDAC_QCOM_LLCC
+ bool "Poll on LLCC ECC registers - LLCC"
+ help
+ This option enables polling of the LLCC ECC registers. When this is
+ enabled, the polling rate can be set as a module parameter; by
+ default, the polling function runs every five seconds.
+ This option should only be used if the associated interrupt lines
+ are not enabled.
+
config EDAC_QCOM_LLCC_PANIC_ON_CE
depends on EDAC_QCOM_LLCC
bool "panic on correctable errors - qcom llcc"
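Where the EDAC_LLCC_POLL mode above is enabled, the rate comes from the
poll_msec module parameter defined in qcom_llcc_edac.c; for example, a boot
argument such as qcom_llcc_edac.poll_msec=1000 would poll once per second.
The module-name prefix here is an assumption based on the source file name.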
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 7e2aadc..4ac880b 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,13 @@
asm volatile("msr s3_0_c5_c4_1, %0" : : "r" (val));
}
+static inline void set_errxmisc_overflow(void)
+{
+ u64 val = 0x7F7F00000000;
+
+ asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val));
+}
+
static inline void write_errselr_el1(u64 val)
{
asm volatile("msr s3_0_c5_c3_1, %0" : : "r" (val));
@@ -319,9 +326,7 @@
static irqreturn_t kryo3xx_l1_l2_handler(int irq, void *drvdata)
{
- struct erp_drvdata *drv = *(struct erp_drvdata **)(drvdata);
-
- kryo3xx_check_l1_l2_ecc(drv->edev_ctl);
+ kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
return IRQ_HANDLED;
}
@@ -334,14 +339,24 @@
return IRQ_HANDLED;
}
+static void initialize_registers(void *info)
+{
+ set_errxctlr_el1();
+ set_errxmisc_overflow();
+}
+
static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct erp_drvdata *drv;
int rc = 0;
int fail = 0;
+ int cpu;
- set_errxctlr_el1();
+ for_each_possible_cpu(cpu)
+ smp_call_function_single(cpu, initialize_registers, NULL, 1);
+
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index 6bec860..4403f86 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -78,10 +78,12 @@
#define DRP_TRP_INT_CLEAR 0x3
#define DRP_TRP_CNT_CLEAR 0x3
+#ifdef CONFIG_EDAC_LLCC_POLL
static int poll_msec = 5000;
module_param(poll_msec, int, 0444);
+#endif
-static int interrupt_mode;
+static int interrupt_mode = 1;
module_param(interrupt_mode, int, 0444);
MODULE_PARM_DESC(interrupt_mode,
"Controls whether to use interrupt or poll mode");
@@ -331,10 +333,12 @@
}
}
+#ifdef CONFIG_EDAC_LLCC_POLL
static void qcom_llcc_poll_cache_errors(struct edac_device_ctl_info *edev_ctl)
{
qcom_llcc_check_cache_errors(edev_ctl);
}
+#endif
static irqreturn_t llcc_ecc_irq_handler
(int irq, void *edev_ctl)
@@ -360,9 +364,11 @@
edev_ctl->mod_name = dev_name(dev);
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
+#ifdef CONFIG_EDAC_LLCC_POLL
edev_ctl->poll_msec = poll_msec;
edev_ctl->edac_check = qcom_llcc_poll_cache_errors;
edev_ctl->defer_work = 1;
+#endif
edev_ctl->panic_on_ce = LLCC_ERP_PANIC_ON_CE;
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
@@ -383,7 +389,7 @@
}
rc = devm_request_irq(dev, drv->ecc_irq, llcc_ecc_irq_handler,
- IRQF_TRIGGER_RISING, "llcc_ecc", edev_ctl);
+ IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
if (rc) {
dev_err(dev, "failed to request ecc irq\n");
goto out;
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a27d350..2e093c3 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
@@ -36,7 +35,9 @@
struct extcon_dev *edev;
struct gpio_desc *id_gpiod;
+ struct gpio_desc *vbus_gpiod;
int id_irq;
+ int vbus_irq;
unsigned long debounce_jiffies;
struct delayed_work wq_detcable;
@@ -48,31 +49,47 @@
EXTCON_NONE,
};
+/*
+ * "USB" = VBUS and "USB-HOST" = !ID, so we have:
+ * Both "USB" and "USB-HOST" can't be set as active at the
+ * same time, so if "USB-HOST" is active (i.e. ID is 0) we keep "USB" inactive
+ * even if VBUS is on.
+ *
+ * State | ID | VBUS
+ * ----------------------------------------
+ * [1] USB | H | H
+ * [2] none | H | L
+ * [3] USB-HOST | L | H
+ * [4] USB-HOST | L | L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
+ */
static void usb_extcon_detect_cable(struct work_struct *work)
{
- int id;
+ int id, vbus;
struct usb_extcon_info *info = container_of(to_delayed_work(work),
struct usb_extcon_info,
wq_detcable);
- /* check ID and update cable state */
- id = gpiod_get_value_cansleep(info->id_gpiod);
- if (id) {
- /*
- * ID = 1 means USB HOST cable detached.
- * As we don't have event for USB peripheral cable attached,
- * we simulate USB peripheral attach here.
- */
+ /* check ID and VBUS and update cable state */
+ id = info->id_gpiod ?
+ gpiod_get_value_cansleep(info->id_gpiod) : 1;
+ vbus = info->vbus_gpiod ?
+ gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+ /* at first we clean states which are no longer active */
+ if (id)
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
- extcon_set_state_sync(info->edev, EXTCON_USB, true);
- } else {
- /*
- * ID = 0 means USB HOST cable attached.
- * As we don't have event for USB peripheral cable detached,
- * we simulate USB peripheral detach here.
- */
+ if (!vbus)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+ if (!id) {
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ } else {
+ if (vbus)
+ extcon_set_state_sync(info->edev, EXTCON_USB, true);
}
}
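The rewritten handler encodes exactly the truth table above: host mode wins whenever ID is low, and peripheral mode additionally requires VBUS. Distilled into pure helpers (a sketch for clarity, not part of the patch):

/* ID low => host; otherwise peripheral iff VBUS is present. */
static inline bool cable_is_host(int id)
{
	return !id;
}

static inline bool cable_is_peripheral(int id, int vbus)
{
	return id && vbus;
}

Clearing the stale states before setting the new one also guarantees that the two cables are never reported active simultaneously, matching the comment's invariant.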
@@ -101,12 +118,21 @@
return -ENOMEM;
info->dev = dev;
- info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
- if (IS_ERR(info->id_gpiod)) {
- dev_err(dev, "failed to get ID GPIO\n");
- return PTR_ERR(info->id_gpiod);
+ info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
+ info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+ GPIOD_IN);
+
+ if (!info->id_gpiod && !info->vbus_gpiod) {
+ dev_err(dev, "failed to get gpios\n");
+ return -ENODEV;
}
+ if (IS_ERR(info->id_gpiod))
+ return PTR_ERR(info->id_gpiod);
+
+ if (IS_ERR(info->vbus_gpiod))
+ return PTR_ERR(info->vbus_gpiod);
+
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(dev, "failed to allocate extcon device\n");
@@ -119,32 +145,58 @@
return ret;
}
- ret = gpiod_set_debounce(info->id_gpiod,
- USB_GPIO_DEBOUNCE_MS * 1000);
+ if (info->id_gpiod)
+ ret = gpiod_set_debounce(info->id_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+ if (!ret && info->vbus_gpiod)
+ ret = gpiod_set_debounce(info->vbus_gpiod,
+ USB_GPIO_DEBOUNCE_MS * 1000);
+
if (ret < 0)
info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
- info->id_irq = gpiod_to_irq(info->id_gpiod);
- if (info->id_irq < 0) {
- dev_err(dev, "failed to get ID IRQ\n");
- return info->id_irq;
+ if (info->id_gpiod) {
+ info->id_irq = gpiod_to_irq(info->id_gpiod);
+ if (info->id_irq < 0) {
+ dev_err(dev, "failed to get ID IRQ\n");
+ return info->id_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for ID IRQ\n");
+ return ret;
+ }
}
- ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
- usb_irq_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- pdev->name, info);
- if (ret < 0) {
- dev_err(dev, "failed to request handler for ID IRQ\n");
- return ret;
+ if (info->vbus_gpiod) {
+ info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+ if (info->vbus_irq < 0) {
+ dev_err(dev, "failed to get VBUS IRQ\n");
+ return info->vbus_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+ usb_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for VBUS IRQ\n");
+ return ret;
+ }
}
platform_set_drvdata(pdev, info);
device_init_wakeup(dev, true);
- dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -157,8 +209,6 @@
struct usb_extcon_info *info = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&info->wq_detcable);
-
- dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return 0;
@@ -170,12 +220,32 @@
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = enable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = enable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ disable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
* accessible until resume completes. So disable IRQ.
*/
- disable_irq(info->id_irq);
+ if (info->id_gpiod)
+ disable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ disable_irq(info->vbus_irq);
return ret;
}
@@ -185,7 +255,28 @@
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- enable_irq(info->id_irq);
+ if (device_may_wakeup(dev)) {
+ if (info->id_gpiod) {
+ ret = disable_irq_wake(info->id_irq);
+ if (ret)
+ return ret;
+ }
+ if (info->vbus_gpiod) {
+ ret = disable_irq_wake(info->vbus_irq);
+ if (ret) {
+ if (info->id_gpiod)
+ enable_irq_wake(info->id_irq);
+
+ return ret;
+ }
+ }
+ }
+
+ if (info->id_gpiod)
+ enable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ enable_irq(info->vbus_irq);
+
if (!device_may_wakeup(dev))
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
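Because there are now two wake sources, the driver drops the single-IRQ dev_pm_set_wake_irq() helper and arms each interrupt by hand in suspend/resume; note the rollback when the second enable_irq_wake() fails, which keeps the wake accounting balanced. The same idiom reduced to a skeleton (illustrative only):

/* Arm two wake IRQs; undo the first if the second fails. */
static int arm_wake_irqs(unsigned int irq_a, unsigned int irq_b)
{
	int ret = enable_irq_wake(irq_a);

	if (ret)
		return ret;
	ret = enable_irq_wake(irq_b);
	if (ret)
		disable_irq_wake(irq_a);
	return ret;
}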
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d..8e2eb35 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -210,5 +210,6 @@
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/qcom/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41..b1c1b21 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -26,3 +26,4 @@
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
+obj-$(CONFIG_MSM_TZ_LOG) += qcom/
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e..24c461d 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@
status = __gop_query32(sys_table_arg, gop32, &info, &size,
&current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@
status = __gop_query64(sys_table_arg, gop64, &info, &size,
&current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
* provide multiple GOP devices, not all of which are
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
new file mode 100644
index 0000000..61c7974
--- /dev/null
+++ b/drivers/firmware/qcom/Kconfig
@@ -0,0 +1,7 @@
+config MSM_TZ_LOG
+ tristate "MSM Trust Zone (TZ) Log Driver"
+ depends on DEBUG_FS
+ help
+ This option enables a driver with a debugfs interface for messages
+ produced by the secure world (TrustZone). These messages provide
+ diagnostic information about TZ operation.
diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile
new file mode 100644
index 0000000..635f60c
--- /dev/null
+++ b/drivers/firmware/qcom/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
new file mode 100644
index 0000000..1b51d08
--- /dev/null
+++ b/drivers/firmware/qcom/tz_log.c
@@ -0,0 +1,1209 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+
+/* QSEE_LOG_BUF_SIZE = 32K */
+#define QSEE_LOG_BUF_SIZE 0x8000
+
+
+/* TZ Diagnostic Area legacy version number */
+#define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2
+/*
+ * Preprocessor Definitions and Constants
+ */
+#define TZBSP_MAX_CPU_COUNT 0x08
+/*
+ * Number of VMID Tables
+ */
+#define TZBSP_DIAG_NUM_OF_VMID 16
+/*
+ * VMID Description length
+ */
+#define TZBSP_DIAG_VMID_DESC_LEN 7
+/*
+ * Number of Interrupts
+ */
+#define TZBSP_DIAG_INT_NUM 32
+/*
+ * Length of descriptive name associated with Interrupt
+ */
+#define TZBSP_MAX_INT_DESC 16
+/*
+ * TZ 3.X version info
+ */
+#define QSEE_VERSION_TZ_3_X 0x800000
+/*
+ * TZ 4.X version info
+ */
+#define QSEE_VERSION_TZ_4_X 0x1000000
+
+#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
+#define TZBSP_NONCE_LEN 12
+#define TZBSP_TAG_LEN 16
+
+/*
+ * VMID Table
+ */
+struct tzdbg_vmid_t {
+ uint8_t vmid; /* Virtual Machine Identifier */
+ uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */
+};
+/*
+ * Boot Info Table
+ */
+struct tzdbg_boot_info_t {
+ uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
+ uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
+ uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
+ uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
+ uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */
+ uint32_t spare; /* Reserved for future use. */
+};
+/*
+ * Boot Info Table for 64-bit
+ */
+struct tzdbg_boot_info64_t {
+ uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */
+ uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */
+ uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */
+ uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */
+ uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
+ uint32_t psci_exit_cnt; /* PSCI syscall exit CPU Counter */
+ uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
+ uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
+};
+/*
+ * Reset Info Table
+ */
+struct tzdbg_reset_info_t {
+ uint32_t reset_type; /* Reset Reason */
+ uint32_t reset_cnt; /* Number of resets that occurred, per CPU */
+};
+/*
+ * Interrupt Info Table
+ */
+struct tzdbg_int_t {
+ /*
+ * Type of Interrupt/exception
+ */
+ uint16_t int_info;
+ /*
+ * Availability of the slot
+ */
+ uint8_t avail;
+ /*
+ * Reserved for future use
+ */
+ uint8_t spare;
+ /*
+ * Interrupt # for IRQ and FIQ
+ */
+ uint32_t int_num;
+ /*
+ * ASCII text describing the type of interrupt, e.g.
+ * Secure Timer, EBI XPU. This string is always null-terminated,
+ * supporting at most TZBSP_MAX_INT_DESC characters.
+ * Any additional characters are truncated.
+ */
+ uint8_t int_desc[TZBSP_MAX_INT_DESC];
+ uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
+};
+
+/*
+ * Interrupt Info Table used in tz version >=4.X
+ */
+struct tzdbg_int_t_tz40 {
+ uint16_t int_info;
+ uint8_t avail;
+ uint8_t spare;
+ uint32_t int_num;
+ uint8_t int_desc[TZBSP_MAX_INT_DESC];
+ uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
+};
+
+/* warm boot reason for cores */
+struct tzbsp_diag_wakeup_info_t {
+ /* Wake source info : APCS_GICC_HPPIR */
+ uint32_t HPPIR;
+ /* Wake source info : APCS_GICC_AHPPIR */
+ uint32_t AHPPIR;
+};
+
+/*
+ * Log ring buffer position
+ */
+struct tzdbg_log_pos_t {
+ uint16_t wrap;
+ uint16_t offset;
+};
+
+/*
+ * Log ring buffer
+ */
+struct tzdbg_log_t {
+ struct tzdbg_log_pos_t log_pos;
+ /* open ended array to the end of the 4K IMEM buffer */
+ uint8_t log_buf[];
+};
+
+/*
+ * Diagnostic Table
+ * Note: This is the reference data structure for tz diagnostic table
+ * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
+ * copied into buffer from i/o memory.
+ */
+struct tzdbg_t {
+ uint32_t magic_num;
+ uint32_t version;
+ /*
+ * Number of CPU's
+ */
+ uint32_t cpu_count;
+ /*
+ * Offset of VMID Table
+ */
+ uint32_t vmid_info_off;
+ /*
+ * Offset of Boot Table
+ */
+ uint32_t boot_info_off;
+ /*
+ * Offset of Reset info Table
+ */
+ uint32_t reset_info_off;
+ /*
+ * Offset of Interrupt info Table
+ */
+ uint32_t int_info_off;
+ /*
+ * Ring Buffer Offset
+ */
+ uint32_t ring_off;
+ /*
+ * Ring Buffer Length
+ */
+ uint32_t ring_len;
+
+ /* Offset for Wakeup info */
+ uint32_t wakeup_info_off;
+
+ /*
+ * VMID to EE Mapping
+ */
+ struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
+ /*
+ * Boot Info
+ */
+ struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
+ /*
+ * Reset Info
+ */
+ struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
+ uint32_t num_interrupts;
+ struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
+
+ /* Wake up info */
+ struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
+
+ uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+
+ uint8_t nonce[TZBSP_NONCE_LEN];
+
+ uint8_t tag[TZBSP_TAG_LEN];
+
+ /*
+ * We need at least 2K for the ring buffer
+ */
+ struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */
+};
+
+struct hypdbg_log_pos_t {
+ uint16_t wrap;
+ uint16_t offset;
+};
+
+struct hypdbg_boot_info_t {
+ uint32_t warm_entry_cnt;
+ uint32_t warm_exit_cnt;
+};
+
+struct hypdbg_t {
+ /* Magic Number */
+ uint32_t magic_num;
+
+ /* Number of CPU's */
+ uint32_t cpu_count;
+
+ /* Ring Buffer Offset */
+ uint32_t ring_off;
+
+ /* Ring buffer position mgmt */
+ struct hypdbg_log_pos_t log_pos;
+ uint32_t log_len;
+
+ /* S2 fault numbers */
+ uint32_t s2_fault_counter;
+
+ /* Boot Info */
+ struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
+
+ /* Ring buffer pointer */
+ uint8_t log_buf_p[];
+};
+
+/*
+ * Enumeration order for VMID's
+ */
+enum tzdbg_stats_type {
+ TZDBG_BOOT = 0,
+ TZDBG_RESET,
+ TZDBG_INTERRUPT,
+ TZDBG_VMID,
+ TZDBG_GENERAL,
+ TZDBG_LOG,
+ TZDBG_QSEE_LOG,
+ TZDBG_HYP_GENERAL,
+ TZDBG_HYP_LOG,
+ TZDBG_STATS_MAX
+};
+
+struct tzdbg_stat {
+ char *name;
+ char *data;
+};
+
+struct tzdbg {
+ void __iomem *virt_iobase;
+ void __iomem *hyp_virt_iobase;
+ struct tzdbg_t *diag_buf;
+ struct hypdbg_t *hyp_diag_buf;
+ char *disp_buf;
+ int debug_tz[TZDBG_STATS_MAX];
+ struct tzdbg_stat stat[TZDBG_STATS_MAX];
+ uint32_t hyp_debug_rw_buf_size;
+ bool is_hyplog_enabled;
+ uint32_t tz_version;
+};
+
+static struct tzdbg tzdbg = {
+ .stat[TZDBG_BOOT].name = "boot",
+ .stat[TZDBG_RESET].name = "reset",
+ .stat[TZDBG_INTERRUPT].name = "interrupt",
+ .stat[TZDBG_VMID].name = "vmid",
+ .stat[TZDBG_GENERAL].name = "general",
+ .stat[TZDBG_LOG].name = "log",
+ .stat[TZDBG_QSEE_LOG].name = "qsee_log",
+ .stat[TZDBG_HYP_GENERAL].name = "hyp_general",
+ .stat[TZDBG_HYP_LOG].name = "hyp_log",
+};
+
+static struct tzdbg_log_t *g_qsee_log;
+static uint32_t debug_rw_buf_size;
+
+/*
+ * Debugfs data structure and functions
+ */
+
+static int _disp_tz_general_stats(void)
+{
+ int len = 0;
+
+ len += snprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
+ " Version : 0x%x\n"
+ " Magic Number : 0x%x\n"
+ " Number of CPU : %d\n",
+ tzdbg.diag_buf->version,
+ tzdbg.diag_buf->magic_num,
+ tzdbg.diag_buf->cpu_count);
+ tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_tz_vmid_stats(void)
+{
+ int i, num_vmid;
+ int len = 0;
+ struct tzdbg_vmid_t *ptr;
+
+ ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
+ tzdbg.diag_buf->vmid_info_off);
+ num_vmid = ((tzdbg.diag_buf->boot_info_off -
+ tzdbg.diag_buf->vmid_info_off)/
+ (sizeof(struct tzdbg_vmid_t)));
+
+ for (i = 0; i < num_vmid; i++) {
+ if (ptr->vmid < 0xFF) {
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " 0x%x %s\n",
+ (uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
+ }
+ if (len > (debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into the buffer\n",
+ __func__);
+ break;
+ }
+ ptr++;
+ }
+
+ tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_tz_boot_stats(void)
+{
+ int i;
+ int len = 0;
+ struct tzdbg_boot_info_t *ptr = NULL;
+ struct tzdbg_boot_info64_t *ptr_64 = NULL;
+
+ pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+ if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+ ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
+ tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+ } else {
+ ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
+ tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+ }
+
+ for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+ if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " CPU #: %d\n"
+ " Warmboot jump address : 0x%llx\n"
+ " Warmboot entry CPU counter : 0x%x\n"
+ " Warmboot exit CPU counter : 0x%x\n"
+ " Power Collapse entry CPU counter : 0x%x\n"
+ " Power Collapse exit CPU counter : 0x%x\n"
+ " Psci entry CPU counter : 0x%x\n"
+ " Psci exit CPU counter : 0x%x\n"
+ " Warmboot Jump Address Instruction : 0x%x\n",
+ i, (uint64_t)ptr_64->warm_jmp_addr,
+ ptr_64->wb_entry_cnt,
+ ptr_64->wb_exit_cnt,
+ ptr_64->pc_entry_cnt,
+ ptr_64->pc_exit_cnt,
+ ptr_64->psci_entry_cnt,
+ ptr_64->psci_exit_cnt,
+ ptr_64->warm_jmp_instr);
+
+ if (len > (debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into the buffer\n",
+ __func__);
+ break;
+ }
+ ptr_64++;
+ } else {
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " CPU #: %d\n"
+ " Warmboot jump address : 0x%x\n"
+ " Warmboot entry CPU counter: 0x%x\n"
+ " Warmboot exit CPU counter : 0x%x\n"
+ " Power Collapse entry CPU counter: 0x%x\n"
+ " Power Collapse exit CPU counter : 0x%x\n",
+ i, ptr->warm_jmp_addr,
+ ptr->wb_entry_cnt,
+ ptr->wb_exit_cnt,
+ ptr->pc_entry_cnt,
+ ptr->pc_exit_cnt);
+
+ if (len > (debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into the buffer\n",
+ __func__);
+ break;
+ }
+ ptr++;
+ }
+ }
+ tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_tz_reset_stats(void)
+{
+ int i;
+ int len = 0;
+ struct tzdbg_reset_info_t *ptr;
+
+ ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
+ tzdbg.diag_buf->reset_info_off);
+
+ for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " CPU #: %d\n"
+ " Reset Type (reason) : 0x%x\n"
+ " Reset counter : 0x%x\n",
+ i, ptr->reset_type, ptr->reset_cnt);
+
+ if (len > (debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into the buffer\n",
+ __func__);
+ break;
+ }
+
+ ptr++;
+ }
+ tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_tz_interrupt_stats(void)
+{
+ int i, j, int_info_size;
+ int len = 0;
+ uint32_t *num_int;
+ unsigned char *ptr;
+ struct tzdbg_int_t *tzdbg_ptr;
+ struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
+
+ num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
+ (tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
+ ptr = ((unsigned char *)tzdbg.diag_buf +
+ tzdbg.diag_buf->int_info_off);
+ int_info_size = ((tzdbg.diag_buf->ring_off -
+ tzdbg.diag_buf->int_info_off)/(*num_int));
+
+ pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+
+ if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+ for (i = 0; i < (*num_int); i++) {
+ tzdbg_ptr = (struct tzdbg_int_t *)ptr;
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " Interrupt Number : 0x%x\n"
+ " Type of Interrupt : 0x%x\n"
+ " Description of interrupt : %s\n",
+ tzdbg_ptr->int_num,
+ (uint32_t)tzdbg_ptr->int_info,
+ (uint8_t *)tzdbg_ptr->int_desc);
+ for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " int_count on CPU # %d : %u\n",
+ (uint32_t)j,
+ (uint32_t)tzdbg_ptr->int_count[j]);
+ }
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len, "\n");
+
+ if (len > (debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into buf\n",
+ __func__);
+ break;
+ }
+ ptr += int_info_size;
+ }
+ } else {
+ for (i = 0; i < (*num_int); i++) {
+ tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr;
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " Interrupt Number : 0x%x\n"
+ " Type of Interrupt : 0x%x\n"
+ " Description of interrupt : %s\n",
+ tzdbg_ptr_tz40->int_num,
+ (uint32_t)tzdbg_ptr_tz40->int_info,
+ (uint8_t *)tzdbg_ptr_tz40->int_desc);
+ for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len,
+ " int_count on CPU # %d : %u\n",
+ (uint32_t)j,
+ (uint32_t)tzdbg_ptr_tz40->int_count[j]);
+ }
+ len += snprintf(tzdbg.disp_buf + len,
+ (debug_rw_buf_size - 1) - len, "\n");
+
+ if (len > (debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into buf\n",
+ __func__);
+ break;
+ }
+ ptr += int_info_size;
+ }
+ }
+
+ tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_tz_log_stats_legacy(void)
+{
+ int len = 0;
+ unsigned char *ptr;
+
+ ptr = (unsigned char *)tzdbg.diag_buf +
+ tzdbg.diag_buf->ring_off;
+ len += snprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
+ "%s\n", ptr);
+
+ tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_log_stats(struct tzdbg_log_t *log,
+ struct tzdbg_log_pos_t *log_start, uint32_t log_len,
+ size_t count, uint32_t buf_idx)
+{
+ uint32_t wrap_start;
+ uint32_t wrap_end;
+ uint32_t wrap_cnt;
+ int max_len;
+ int len = 0;
+ int i = 0;
+
+ wrap_start = log_start->wrap;
+ wrap_end = log->log_pos.wrap;
+
+ /* Calculate difference in # of buffer wrap-arounds */
+ if (wrap_end >= wrap_start) {
+ wrap_cnt = wrap_end - wrap_start;
+ } else {
+ /* wrap counter has wrapped around, invalidate start position */
+ wrap_cnt = 2;
+ }
+
+ if (wrap_cnt > 1) {
+ /* end position has wrapped around more than once, */
+ /* current start no longer valid */
+ log_start->wrap = log->log_pos.wrap - 1;
+ log_start->offset = (log->log_pos.offset + 1) % log_len;
+ } else if ((wrap_cnt == 1) &&
+ (log->log_pos.offset > log_start->offset)) {
+ /* end position has overwritten start */
+ log_start->offset = (log->log_pos.offset + 1) % log_len;
+ }
+
+ while (log_start->offset == log->log_pos.offset) {
+ /*
+ * No data in ring buffer,
+ * so we'll hang around until something happens
+ */
+ unsigned long t = msleep_interruptible(50);
+
+ if (t != 0) {
+ /* Some event woke us up, so let's quit */
+ return 0;
+ }
+
+ if (buf_idx == TZDBG_LOG)
+ memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+ debug_rw_buf_size);
+
+ }
+
+ max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
+
+ /*
+ * Read from ring buff while there is data and space in return buff
+ */
+ while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
+ tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
+ log_start->offset = (log_start->offset + 1) % log_len;
+ if (log_start->offset == 0)
+ ++log_start->wrap;
+ ++len;
+ }
+
+ /*
+ * return buffer to caller
+ */
+ tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+ return len;
+}
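The reader above keeps a private (wrap, offset) cursor and compares it with the writer's position in shared memory. The subtle case is a stale cursor: if the writer has lapped the reader more than once (or the 16-bit wrap counter itself overflowed, treated as wrap_cnt = 2), the oldest valid byte sits just past the writer's current offset. The recovery step in isolation, under the same assumptions:

/* Sketch: re-anchor a stale read cursor just past the writer. */
static void resync_cursor(struct tzdbg_log_pos_t *rd,
			  const struct tzdbg_log_pos_t *wr, uint32_t len)
{
	uint32_t laps = (wr->wrap >= rd->wrap) ? wr->wrap - rd->wrap : 2;

	if (laps > 1) {
		rd->wrap = wr->wrap - 1;
		rd->offset = (wr->offset + 1) % len;
	} else if (laps == 1 && wr->offset > rd->offset) {
		rd->offset = (wr->offset + 1) % len;	/* overwritten */
	}
}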
+
+static int __disp_hyp_log_stats(uint8_t *log,
+ struct hypdbg_log_pos_t *log_start, uint32_t log_len,
+ size_t count, uint32_t buf_idx)
+{
+ struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
+ unsigned long t = 0;
+ uint32_t wrap_start;
+ uint32_t wrap_end;
+ uint32_t wrap_cnt;
+ int max_len;
+ int len = 0;
+ int i = 0;
+
+ wrap_start = log_start->wrap;
+ wrap_end = hyp->log_pos.wrap;
+
+ /* Calculate difference in # of buffer wrap-arounds */
+ if (wrap_end >= wrap_start) {
+ wrap_cnt = wrap_end - wrap_start;
+ } else {
+ /* wrap counter has wrapped around, invalidate start position */
+ wrap_cnt = 2;
+ }
+
+ if (wrap_cnt > 1) {
+ /* end position has wrapped around more than once, */
+ /* current start no longer valid */
+ log_start->wrap = hyp->log_pos.wrap - 1;
+ log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+ } else if ((wrap_cnt == 1) &&
+ (hyp->log_pos.offset > log_start->offset)) {
+ /* end position has overwritten start */
+ log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+ }
+
+ while (log_start->offset == hyp->log_pos.offset) {
+ /*
+ * No data in ring buffer,
+ * so we'll hang around until something happens
+ */
+ t = msleep_interruptible(50);
+ if (t != 0) {
+ /* Some event woke us up, so let's quit */
+ return 0;
+ }
+
+ /* TZDBG_HYP_LOG */
+ memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+ tzdbg.hyp_debug_rw_buf_size);
+ }
+
+ max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
+ tzdbg.hyp_debug_rw_buf_size : count;
+
+ /*
+ * Read from ring buff while there is data and space in return buff
+ */
+ while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
+ tzdbg.disp_buf[i++] = log[log_start->offset];
+ log_start->offset = (log_start->offset + 1) % log_len;
+ if (log_start->offset == 0)
+ ++log_start->wrap;
+ ++len;
+ }
+
+ /*
+ * return buffer to caller
+ */
+ tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+ return len;
+}
+
+static int _disp_tz_log_stats(size_t count)
+{
+ static struct tzdbg_log_pos_t log_start = {0};
+ struct tzdbg_log_t *log_ptr;
+
+ log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
+ tzdbg.diag_buf->ring_off -
+ offsetof(struct tzdbg_log_t, log_buf));
+
+ return _disp_log_stats(log_ptr, &log_start,
+ tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
+}
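ring_off points at the log bytes themselves, not at the struct that wraps them, so the code backs up by offsetof(struct tzdbg_log_t, log_buf) to land on the (wrap, offset) header that precedes the ring. For instance:

/* Example: ring_off == 0x400, sizeof(struct tzdbg_log_pos_t) == 4.
 * log_ptr          -> diag_buf + 0x3fc (ring header)
 * log_ptr->log_buf -> diag_buf + 0x400 (ring bytes, as ring_off says)
 */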
+
+static int _disp_hyp_log_stats(size_t count)
+{
+ static struct hypdbg_log_pos_t log_start = {0};
+ uint8_t *log_ptr;
+
+ log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
+ tzdbg.hyp_diag_buf->ring_off);
+
+ return __disp_hyp_log_stats(log_ptr, &log_start,
+ tzdbg.hyp_debug_rw_buf_size, count, TZDBG_HYP_LOG);
+}
+
+static int _disp_qsee_log_stats(size_t count)
+{
+ static struct tzdbg_log_pos_t log_start = {0};
+
+ return _disp_log_stats(g_qsee_log, &log_start,
+ QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
+ count, TZDBG_QSEE_LOG);
+}
+
+static int _disp_hyp_general_stats(size_t count)
+{
+ int len = 0;
+ int i;
+ struct hypdbg_boot_info_t *ptr = NULL;
+
+ len += snprintf(tzdbg.disp_buf + len,
+ tzdbg.hyp_debug_rw_buf_size - 1,
+ " Magic Number : 0x%x\n"
+ " CPU Count : 0x%x\n"
+ " S2 Fault Counter: 0x%x\n",
+ tzdbg.hyp_diag_buf->magic_num,
+ tzdbg.hyp_diag_buf->cpu_count,
+ tzdbg.hyp_diag_buf->s2_fault_counter);
+
+ ptr = tzdbg.hyp_diag_buf->boot_info;
+ for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
+ len += snprintf(tzdbg.disp_buf + len,
+ (tzdbg.hyp_debug_rw_buf_size - 1) - len,
+ " CPU #: %d\n"
+ " Warmboot entry CPU counter: 0x%x\n"
+ " Warmboot exit CPU counter : 0x%x\n",
+ i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
+
+ if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
+ pr_warn("%s: Cannot fit all info into the buffer\n",
+ __func__);
+ break;
+ }
+ ptr++;
+ }
+
+ tzdbg.stat[TZDBG_HYP_GENERAL].data = tzdbg.disp_buf;
+ return len;
+}
+
+static ssize_t tzdbgfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offp)
+{
+ int len = 0;
+ int *tz_id = file->private_data;
+
+ if (*tz_id == TZDBG_BOOT || *tz_id == TZDBG_RESET ||
+ *tz_id == TZDBG_INTERRUPT || *tz_id == TZDBG_GENERAL ||
+ *tz_id == TZDBG_VMID || *tz_id == TZDBG_LOG)
+ memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+ debug_rw_buf_size);
+
+ if (*tz_id == TZDBG_HYP_GENERAL || *tz_id == TZDBG_HYP_LOG)
+ memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+ tzdbg.hyp_debug_rw_buf_size);
+
+ switch (*tz_id) {
+ case TZDBG_BOOT:
+ len = _disp_tz_boot_stats();
+ break;
+ case TZDBG_RESET:
+ len = _disp_tz_reset_stats();
+ break;
+ case TZDBG_INTERRUPT:
+ len = _disp_tz_interrupt_stats();
+ break;
+ case TZDBG_GENERAL:
+ len = _disp_tz_general_stats();
+ break;
+ case TZDBG_VMID:
+ len = _disp_tz_vmid_stats();
+ break;
+ case TZDBG_LOG:
+ if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
+ (tzdbg.diag_buf->version >> 16)) {
+ len = _disp_tz_log_stats(count);
+ *offp = 0;
+ } else {
+ len = _disp_tz_log_stats_legacy();
+ }
+ break;
+ case TZDBG_QSEE_LOG:
+ len = _disp_qsee_log_stats(count);
+ *offp = 0;
+ break;
+ case TZDBG_HYP_GENERAL:
+ len = _disp_hyp_general_stats(count);
+ break;
+ case TZDBG_HYP_LOG:
+ len = _disp_hyp_log_stats(count);
+ *offp = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (len > count)
+ len = count;
+
+ return simple_read_from_buffer(buf, len, offp,
+ tzdbg.stat[(*tz_id)].data, len);
+}
+
+static int tzdbgfs_open(struct inode *inode, struct file *pfile)
+{
+ pfile->private_data = inode->i_private;
+ return 0;
+}
+
+const struct file_operations tzdbg_fops = {
+ .owner = THIS_MODULE,
+ .read = tzdbgfs_read,
+ .open = tzdbgfs_open,
+};
+
+static struct ion_client *g_ion_clnt;
+static struct ion_handle *g_ihandle;
+
+/*
+ * Allocates log buffer from ION, registers the buffer at TZ
+ */
+static void tzdbg_register_qsee_log_buf(void)
+{
+ /* register log buffer scm request */
+ struct qseecom_reg_log_buf_ireq req;
+
+ /* scm response */
+ struct qseecom_command_scm_resp resp = {};
+ ion_phys_addr_t pa = 0;
+ size_t len;
+ int ret = 0;
+
+ /* Create ION msm client */
+ g_ion_clnt = msm_ion_client_create("qsee_log");
+ if (g_ion_clnt == NULL) {
+ pr_err("%s: Ion client cannot be created\n", __func__);
+ return;
+ }
+
+ g_ihandle = ion_alloc(g_ion_clnt, QSEE_LOG_BUF_SIZE,
+ 4096, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(g_ihandle)) {
+ pr_err("%s: Ion client could not retrieve the handle\n",
+ __func__);
+ goto err1;
+ }
+
+ ret = ion_phys(g_ion_clnt, g_ihandle, &pa, &len);
+ if (ret) {
+ pr_err("%s: Ion conversion to physical address failed\n",
+ __func__);
+ goto err2;
+ }
+
+ req.qsee_cmd_id = QSEOS_REGISTER_LOG_BUF_COMMAND;
+ req.phy_addr = (uint32_t)pa;
+ req.len = len;
+
+ if (!is_scm_armv8()) {
+ /* SCM_CALL to register the log buffer */
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
+ &resp, sizeof(resp));
+ } else {
+ struct scm_desc desc = {0};
+
+ desc.args[0] = pa;
+ desc.args[1] = len;
+ desc.arginfo = 0x22;
+ ret = scm_call2(SCM_QSEEOS_FNID(1, 6), &desc);
+ resp.result = desc.ret[0];
+ }
+
+ if (ret) {
+ pr_err("%s: scm_call to register log buffer failed\n",
+ __func__);
+ goto err2;
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err(
+ "%s: scm_call to register log buf failed, resp result =%d\n",
+ __func__, resp.result);
+ goto err2;
+ }
+
+ g_qsee_log =
+ (struct tzdbg_log_t *)ion_map_kernel(g_ion_clnt, g_ihandle);
+
+ if (IS_ERR(g_qsee_log)) {
+ pr_err("%s: Couldn't map ion buffer to kernel\n",
+ __func__);
+ goto err2;
+ }
+
+ g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
+ return;
+
+err2:
+ ion_free(g_ion_clnt, g_ihandle);
+ g_ihandle = NULL;
+err1:
+ ion_client_destroy(g_ion_clnt);
+ g_ion_clnt = NULL;
+}
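On ARMv8 the descriptor carries the argument layout in arginfo. The literal 0x22 appears to match the SCM_ARGS encoding for two arguments whose first is a read-write buffer; if that reading of soc/qcom/scm.h holds, the self-documenting form would be:

/* Assumption: SCM_ARGS(2, SCM_RW) == 2 | (SCM_RW << 4) == 0x22 */
desc.arginfo = SCM_ARGS(2, SCM_RW);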
+
+static int tzdbgfs_init(struct platform_device *pdev)
+{
+ int rc = 0;
+ int i;
+ struct dentry *dent_dir;
+ struct dentry *dent;
+
+ dent_dir = debugfs_create_dir("tzdbg", NULL);
+ if (dent_dir == NULL) {
+ dev_err(&pdev->dev, "tzdbg debugfs_create_dir failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TZDBG_STATS_MAX; i++) {
+ tzdbg.debug_tz[i] = i;
+ dent = debugfs_create_file(tzdbg.stat[i].name,
+ 0444, dent_dir,
+ &tzdbg.debug_tz[i], &tzdbg_fops);
+ if (dent == NULL) {
+ dev_err(&pdev->dev, "TZ debugfs_create_file failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+ tzdbg.disp_buf = kzalloc(max(debug_rw_buf_size,
+ tzdbg.hyp_debug_rw_buf_size), GFP_KERNEL);
+ if (tzdbg.disp_buf == NULL) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ platform_set_drvdata(pdev, dent_dir);
+ return 0;
+err:
+ debugfs_remove_recursive(dent_dir);
+
+ return rc;
+}
+
+static void tzdbgfs_exit(struct platform_device *pdev)
+{
+ struct dentry *dent_dir;
+
+ kzfree(tzdbg.disp_buf);
+ dent_dir = platform_get_drvdata(pdev);
+ debugfs_remove_recursive(dent_dir);
+ if (g_ion_clnt != NULL) {
+ if (!IS_ERR_OR_NULL(g_ihandle)) {
+ ion_unmap_kernel(g_ion_clnt, g_ihandle);
+ ion_free(g_ion_clnt, g_ihandle);
+ }
+ ion_client_destroy(g_ion_clnt);
+ }
+}
+
+static int __update_hypdbg_base(struct platform_device *pdev,
+ void __iomem *virt_iobase)
+{
+ phys_addr_t hypdiag_phy_iobase;
+ uint32_t hyp_address_offset;
+ uint32_t hyp_size_offset;
+ struct hypdbg_t *hyp;
+ uint32_t *ptr = NULL;
+
+ if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
+ &hyp_address_offset)) {
+ dev_err(&pdev->dev, "hyplog address offset is not defined\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
+ &hyp_size_offset)) {
+ dev_err(&pdev->dev, "hyplog size offset is not defined\n");
+ return -EINVAL;
+ }
+
+ hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
+ tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
+ hyp_size_offset);
+
+ tzdbg.hyp_virt_iobase = devm_ioremap_nocache(&pdev->dev,
+ hypdiag_phy_iobase,
+ tzdbg.hyp_debug_rw_buf_size);
+ if (!tzdbg.hyp_virt_iobase) {
+ dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
+ &hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
+ return -ENXIO;
+ }
+
+ ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
+ hyp = tzdbg.hyp_diag_buf;
+ hyp->log_pos.wrap = hyp->log_pos.offset = 0;
+ return 0;
+}
+
+static void tzdbg_get_tz_version(void)
+{
+ uint32_t smc_id = 0;
+ uint32_t feature = 10;
+ struct qseecom_command_scm_resp resp = {0};
+ struct scm_desc desc = {0};
+ int ret = 0;
+
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_INFO, SCM_SVC_UTIL, &feature,
+ sizeof(feature), &resp, sizeof(resp));
+ } else {
+ smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+ desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+ desc.args[0] = feature;
+ ret = scm_call2(smc_id, &desc);
+ resp.result = desc.ret[0];
+ }
+
+ if (ret)
+ pr_err("%s: scm_call to get tz version failed\n",
+ __func__);
+ else
+ tzdbg.tz_version = resp.result;
+
+}
+
+/*
+ * Driver functions
+ */
+static int tz_log_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ void __iomem *virt_iobase;
+ phys_addr_t tzdiag_phy_iobase;
+ uint32_t *ptr = NULL;
+ int ret = 0;
+
+ /*
+ * Get address that stores the physical location diagnostic data
+ */
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ dev_err(&pdev->dev,
+ "%s: ERROR Missing MEM resource\n", __func__);
+ return -ENXIO;
+ }
+
+ /*
+ * Get the debug buffer size
+ */
+ debug_rw_buf_size = resource->end - resource->start + 1;
+
+ /*
+ * Map address that stores the physical location diagnostic data
+ */
+ virt_iobase = devm_ioremap_nocache(&pdev->dev, resource->start,
+ debug_rw_buf_size);
+ if (!virt_iobase) {
+ dev_err(&pdev->dev,
+ "%s: ERROR could not ioremap: start=%pr, len=%u\n",
+ __func__, &resource->start,
+ (unsigned int)(debug_rw_buf_size));
+ return -ENXIO;
+ }
+
+ if (pdev->dev.of_node) {
+ tzdbg.is_hyplog_enabled = of_property_read_bool(
+ (&pdev->dev)->of_node, "qcom,hyplog-enabled");
+ if (tzdbg.is_hyplog_enabled) {
+ ret = __update_hypdbg_base(pdev, virt_iobase);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() failed to get device tree data ret = %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "Hyp log service is not supported\n");
+ }
+ } else {
+ dev_dbg(&pdev->dev, "Device tree data is not found\n");
+ }
+
+ /*
+ * Retrieve the address of diagnostic data
+ */
+ tzdiag_phy_iobase = readl_relaxed(virt_iobase);
+
+ /*
+ * Map the diagnostic information area
+ */
+ tzdbg.virt_iobase = devm_ioremap_nocache(&pdev->dev,
+ tzdiag_phy_iobase, debug_rw_buf_size);
+
+ if (!tzdbg.virt_iobase) {
+ dev_err(&pdev->dev,
+ "%s: ERROR could not ioremap: start=%pr, len=%u\n",
+ __func__, &tzdiag_phy_iobase,
+ debug_rw_buf_size);
+ return -ENXIO;
+ }
+
+ ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
+ if (ptr == NULL)
+ return -ENXIO;
+
+ tzdbg.diag_buf = (struct tzdbg_t *)ptr;
+
+ if (tzdbgfs_init(pdev))
+ goto err;
+
+ tzdbg_register_qsee_log_buf();
+
+ tzdbg_get_tz_version();
+
+ return 0;
+err:
+ kfree(tzdbg.diag_buf);
+ return -ENXIO;
+}
+
+
+static int tz_log_remove(struct platform_device *pdev)
+{
+ kzfree(tzdbg.diag_buf);
+ if (tzdbg.hyp_diag_buf)
+ kzfree(tzdbg.hyp_diag_buf);
+ tzdbgfs_exit(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id tzlog_match[] = {
+ { .compatible = "qcom,tz-log",
+ },
+ {}
+};
+
+static struct platform_driver tz_log_driver = {
+ .probe = tz_log_probe,
+ .remove = tz_log_remove,
+ .driver = {
+ .name = "tz_log",
+ .owner = THIS_MODULE,
+ .of_match_table = tzlog_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init tz_log_init(void)
+{
+ return platform_driver_register(&tz_log_driver);
+}
+
+static void __exit tz_log_exit(void)
+{
+ platform_driver_unregister(&tz_log_driver);
+}
+
+module_init(tz_log_init);
+module_exit(tz_log_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TZ Log driver");
+MODULE_ALIAS("platform:tz_log");
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 4a0f5ea..1e2e519 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -91,6 +91,7 @@
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
+ struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
@@ -131,10 +132,16 @@
qcom_smccc_convention,
ARM_SMCCC_OWNER_SIP, fn_id);
+ quirk.state.a6 = 0;
+
do {
- arm_smccc_smc(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5, 0, 0,
- res);
+ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5,
+ quirk.state.a6, 0, res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ cmd = res->a0;
+
} while (res->a0 == QCOM_SCM_INTERRUPTED);
mutex_unlock(&qcom_scm_lock);
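The reworked loop implements the firmware's resume protocol: when the secure world is preempted it returns QCOM_SCM_INTERRUPTED, with a0 carrying a resume token that must be re-issued as the next call's function ID, while the ARM_SMCCC_QUIRK_QCOM_A6 quirk captures register a6 (the session handle) after each trip so it can be passed back in. In outline:

/*
 * 1. smc(fn_id, args..., a6)            -> res.a0
 * 2. if res.a0 == QCOM_SCM_INTERRUPTED:
 *        fn_id = res.a0 (resume token), a6 = quirk.state.a6
 * 3. repeat until res.a0 != QCOM_SCM_INTERRUPTED
 */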
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 72a4b32..986248f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -571,8 +571,10 @@
}
desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ if (!IS_ERR(desc))
break;
+ if (PTR_ERR(desc) == -EPROBE_DEFER)
+ return ERR_CAST(desc);
}
/* Then from plain _CRS GPIOs */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7491180..0bc0afb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -90,7 +90,7 @@
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
-static struct edid_quirk {
+static const struct edid_quirk {
char vendor[4];
int product_id;
u32 quirks;
@@ -1449,7 +1449,7 @@
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, char *vendor)
+static bool edid_vendor(struct edid *edid, const char *vendor)
{
char edid_vendor[3];
@@ -1469,7 +1469,7 @@
*/
static u32 edid_get_quirks(struct edid *edid)
{
- struct edid_quirk *quirk;
+ const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f8..a336754 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1299,15 +1299,15 @@
goto out_pm_put;
}
+ mutex_lock(&gpu->lock);
+
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
- goto out_pm_put;
+ goto out_unlock;
}
- mutex_lock(&gpu->lock);
-
gpu->event[event].fence = fence;
submit->fence = fence->seqno;
gpu->active_fence = submit->fence;
@@ -1345,6 +1345,7 @@
hangcheck_timer_reset(gpu);
ret = 0;
+out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 670beeb..923150d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -240,6 +240,7 @@
case I915_PARAM_IRQ_ACTIVE:
case I915_PARAM_ALLOW_BATCHBUFFER:
case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
@@ -266,9 +267,6 @@
case I915_PARAM_HAS_BSD2:
value = intel_engine_initialized(&dev_priv->engine[VCS2]);
break;
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_GEN(dev_priv) >= 4;
- break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
break;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index da832d3..e0d7245 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1225,7 +1225,7 @@
unsigned boosts;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -1751,8 +1751,6 @@
const struct intel_device_info info;
- int relative_constants_mode;
-
void __iomem *regs;
struct intel_uncore uncore;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00eb481..7b203092 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4587,8 +4587,6 @@
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0c400f8..2117f17 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1454,10 +1454,7 @@
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas)
{
- struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
- int instp_mode;
- u32 instp_mask;
int ret;
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1468,56 +1465,11 @@
if (ret)
return ret;
- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- instp_mask = I915_EXEC_CONSTANTS_MASK;
- switch (instp_mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && params->engine->id != RCS) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (instp_mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev_priv)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev_priv)->gen > 5 &&
- instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev_priv)->gen >= 6)
- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+ DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
return -EINVAL;
}
- if (params->engine->id == RCS &&
- instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ring *ring = params->request->ring;
-
- ret = intel_ring_begin(params->request, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = instp_mode;
- }
-
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->request);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1c237d0..755d788 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -233,7 +233,7 @@
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
return freed;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fc286cd..02908e3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -990,68 +990,51 @@
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
+
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;
+
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
- }
-
+ dev_priv->rps.ei = now;
return events;
}
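Both directions are now decided from the single UP_EI_EXPIRED interrupt against one saved sample, instead of separate up/down EI bookkeeping. The comparison has the shape below; the percentages are the rps.up_threshold/down_threshold values, so, for example, 90% combined render+media busyness with up_threshold == 85 requests an up-clock:

/* c0   ~ (render + media busy cycles) * 100   (percent-scaled)
 * time ~ elapsed cz cycles * czclk_freq       (same scale)
 *
 * c0 > time * up_threshold    -> GEN6_PM_RP_UP_THRESHOLD
 * c0 < time * down_threshold  -> GEN6_PM_RP_DOWN_THRESHOLD
 * otherwise                   -> hold frequency
 */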
@@ -4490,7 +4473,7 @@
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4531,6 +4514,16 @@
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
+ /* Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block registers
+ * outside of the power domain. We defer setting up the display irqs
+ * in this case to the runtime pm.
+ */
+ dev_priv->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display_irqs_enabled = false;
+
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 31e6edd..9e94886 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -417,6 +417,7 @@
INTEL_VLV_IDS(&intel_valleyview_info),
INTEL_BDW_GT12_IDS(&intel_broadwell_info),
INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
+ INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b9be8a6..5dc6082 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3696,10 +3696,6 @@
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
crtc->base.mode = crtc->base.state->mode;
- DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
- old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
@@ -4832,23 +4828,17 @@
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;
- DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
if (crtc->config->pch_pfit.enabled) {
int id;
- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
- DRM_ERROR("Requesting pfit without getting a scaler first\n");
+ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
- }
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
- DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
}
}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 334d47b..db3afdf 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -219,7 +219,7 @@
}
}
}
- if (dev_priv->display.hpd_irq_setup)
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -425,7 +425,7 @@
}
}
- if (storm_detected)
+ if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
@@ -471,10 +471,12 @@
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
}
static void i915_hpd_poll_init_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4147e51..67db157 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2152,42 +2152,30 @@
void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
+ struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- /* Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
- for_each_engine(engine, dev_priv) {
- struct intel_context *ce = &ctx->engine[engine->id];
- u32 *reg;
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+ void *vaddr;
+ uint32_t *reg_state;
- if (!ce->state)
- continue;
+ if (!ce->state)
+ continue;
- reg = i915_gem_object_pin_map(ce->state->obj,
- I915_MAP_WB);
- if (WARN_ON(IS_ERR(reg)))
- continue;
+ vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+ if (WARN_ON(IS_ERR(vaddr)))
+ continue;
- reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
- reg[CTX_RING_HEAD+1] = 0;
- reg[CTX_RING_TAIL+1] = 0;
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ce->state->obj->dirty = true;
- i915_gem_object_unpin_map(ce->state->obj);
+ reg_state[CTX_RING_HEAD+1] = 0;
+ reg_state[CTX_RING_TAIL+1] = 0;
- ce->ring->head = ce->ring->tail = 0;
- ce->ring->last_retired_head = -1;
- intel_ring_update_space(ce->ring);
- }
+ ce->state->obj->dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
+
+ ce->ring->head = 0;
+ ce->ring->tail = 0;
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e559a45..2c6d59d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4903,6 +4903,12 @@
break;
}
+ /* Once BYT can survive dynamic SW frequency adjustments without
+ * hanging the system, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4923,6 +4929,7 @@
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4933,8 +4940,9 @@
{
u32 mask = 0;
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
@@ -5034,7 +5042,7 @@
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7960,10 +7968,10 @@
* @timeout_base_ms: timeout for polling with preemption enabled
*
* Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
* The request is acknowledged once the PCODE reply dword equals @reply after
* applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
* preemption disabled.
*
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7999,14 +8007,15 @@
* worst case) _and_ PCODE was busy for some reason even after a
* (queued) request and @timeout_base_ms delay. As a workaround retry
* the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
* account for interrupts that could reduce the number of these
- * requests.
+	 * requests, and for any quirks of the PCODE firmware that delay
+	 * the request completion.
*/
DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
- ret = wait_for_atomic(COND, 10);
+ ret = wait_for_atomic(COND, 50);
preempt_enable();
out:
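
The retry logic above first polls with preemption enabled for @timeout_base_ms and, on timeout, retries atomically for another 50 ms (the driver expresses this with its wait_for()/wait_for_atomic() macros and a COND predicate). A rough kernel-style sketch of the same two-phase shape, with a placeholder done() predicate standing in for COND:

    /* sketch only; done() and the sleep granularity are placeholders */
    static int poll_two_phase(bool (*done)(void), unsigned int base_ms)
    {
    	unsigned long timeout = jiffies + msecs_to_jiffies(base_ms);

    	/* phase 1: preemptible polling covers the common case */
    	while (!done() && !time_after(jiffies, timeout))
    		usleep_range(500, 1000);
    	if (done())
    		return 0;

    	/* phase 2: brief atomic retry to maximize the request rate */
    	preempt_disable();
    	timeout = jiffies + msecs_to_jiffies(50);
    	while (!done() && !time_after(jiffies, timeout))
    		cpu_relax();
    	preempt_enable();

    	return done() ? 0 : -ETIMEDOUT;
    }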
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 1f2f9ca..4556e2b 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -392,6 +392,24 @@
drm_mga_private_t *dev_priv;
int ret;
+ /* There are PCI versions of the G450. These cards have the
+ * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
+ * bridge chip. We detect these cards, which are not currently
+ * supported by this driver, by looking at the device ID of the
+ * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
+ * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
+ * device.
+ */
+ if ((dev->pdev->device == 0x0525) && dev->pdev->bus->self
+ && (dev->pdev->bus->self->vendor == 0x3388)
+ && (dev->pdev->bus->self->device == 0x0021)
+ && dev->agp) {
+ /* FIXME: This should be quirked in the pci core, but oh well
+ * the hw probably stopped existing. */
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
if (!dev_priv)
return -ENOMEM;
@@ -698,7 +716,7 @@
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && dev->agp;
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 25b2a1a..63ba0699 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -37,8 +37,6 @@
#include <drm/drm_pciids.h>
-static int mga_driver_device_is_agp(struct drm_device *dev);
-
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
@@ -66,7 +64,6 @@
.lastclose = mga_driver_lastclose,
.set_busid = drm_pci_set_busid,
.dma_quiescent = mga_driver_dma_quiescent,
- .device_is_agp = mga_driver_device_is_agp,
.get_vblank_counter = mga_get_vblank_counter,
.enable_vblank = mga_enable_vblank,
.disable_vblank = mga_disable_vblank,
@@ -107,37 +104,3 @@
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
-/**
- * Determine if the device really is AGP or not.
- *
- * In addition to the usual tests performed by \c drm_device_is_agp, this
- * function detects PCI G450 cards that appear to the system exactly like
- * AGP G450 cards.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
- */
-static int mga_driver_device_is_agp(struct drm_device *dev)
-{
- const struct pci_dev *const pdev = dev->pdev;
-
- /* There are PCI versions of the G450. These cards have the
- * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
- * bridge chip. We detect these cards, which are not currently
- * supported by this driver, by looking at the device ID of the
- * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
- * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
- * device.
- */
-
- if ((pdev->device == 0x0525) && pdev->bus->self
- && (pdev->bus->self->vendor == 0x3388)
- && (pdev->bus->self->device == 0x0021)) {
- return 0;
- }
-
- return 2;
-}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index b5d78b1..4112bef 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -109,6 +109,7 @@
dsi-staging/dsi_ctrl_hw_cmn.o \
dsi-staging/dsi_ctrl_hw_1_4.o \
dsi-staging/dsi_ctrl_hw_2_0.o \
+ dsi-staging/dsi_ctrl_hw_2_2.o \
dsi-staging/dsi_ctrl.o \
dsi-staging/dsi_catalog.o \
dsi-staging/dsi_drm.o \
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75..7250ffc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -25,9 +25,6 @@
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a54f6e0..07d99bd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -311,4 +311,7 @@
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 976be99..3625ed0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -83,6 +83,19 @@
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
break;
+ case DSI_CTRL_VERSION_2_2:
+ ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config;
+ ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
+ ctrl->ops.wait_for_lane_idle =
+ dsi_ctrl_hw_20_wait_for_lane_idle;
+ ctrl->ops.reg_dump_to_buffer =
+ dsi_ctrl_hw_20_reg_dump_to_buffer;
+ ctrl->ops.ulps_ops.ulps_request = NULL;
+ ctrl->ops.ulps_ops.ulps_exit = NULL;
+ ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
+ ctrl->ops.clamp_enable = NULL;
+ ctrl->ops.clamp_disable = NULL;
+ break;
default:
break;
}
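
DSI_CTRL_VERSION_2_2 shares the common 2.0 ops and overrides only phy_reset_config, because on 2.2 hardware the clamp control moved from the MMSS_MISC block to DISP_CC (see dsi_ctrl_hw_2_2.c below). A minimal standalone sketch of this version-keyed catalog pattern, with hypothetical names:

    enum ctrl_version { VER_2_0, VER_2_2 };

    struct ctrl_ops {
    	void (*phy_reset_config)(void *ctrl, int enable);
    };

    static void phy_reset_v20(void *ctrl, int enable) { /* MMSS_MISC path */ }
    static void phy_reset_v22(void *ctrl, int enable) { /* DISP_CC path */ }

    static void catalog_setup(struct ctrl_ops *ops, enum ctrl_version ver)
    {
    	switch (ver) {
    	case VER_2_0:
    		ops->phy_reset_config = phy_reset_v20;
    		break;
    	case VER_2_2:
    		/* everything else stays common; only the clamp block differs */
    		ops->phy_reset_config = phy_reset_v22;
    		break;
    	}
    }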
@@ -121,6 +134,7 @@
switch (version) {
case DSI_CTRL_VERSION_1_4:
case DSI_CTRL_VERSION_2_0:
+ case DSI_CTRL_VERSION_2_2:
dsi_catalog_cmn_init(ctrl, version);
break;
default:
@@ -167,6 +181,8 @@
dsi_phy_hw_v3_0_ulps_exit;
phy->ops.ulps_ops.get_lanes_in_ulps =
dsi_phy_hw_v3_0_get_lanes_in_ulps;
+ phy->ops.ulps_ops.is_lanes_in_ulps =
+ dsi_phy_hw_v3_0_is_lanes_in_ulps;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 4a6a934..5dcdf46 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -97,6 +97,7 @@
void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
+bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
@@ -157,6 +158,8 @@
void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl,
bool enable);
+void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
+ bool enable);
/* Definitions specific to 1.4 DSI controller hardware */
int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 2fcf10ba..560964e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -236,32 +236,52 @@
return rc;
error_disable_mmss_clk:
- clk_disable_unprepare(c_clks->clks.core_mmss_clk);
+ if (c_clks->clks.core_mmss_clk)
+ clk_disable_unprepare(c_clks->clks.core_mmss_clk);
error_disable_bus_clk:
- clk_disable_unprepare(c_clks->clks.bus_clk);
+ if (c_clks->clks.bus_clk)
+ clk_disable_unprepare(c_clks->clks.bus_clk);
error_disable_iface_clk:
- clk_disable_unprepare(c_clks->clks.iface_clk);
+ if (c_clks->clks.iface_clk)
+ clk_disable_unprepare(c_clks->clks.iface_clk);
error_disable_mnoc_clk:
if (c_clks->clks.mnoc_clk)
clk_disable_unprepare(c_clks->clks.mnoc_clk);
error_disable_core_clk:
- clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+ if (c_clks->clks.mdp_core_clk)
+ clk_disable_unprepare(c_clks->clks.mdp_core_clk);
error:
return rc;
}
int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
{
- if (msm_bus_scale_client_update_request(c_clks->bus_handle, 0))
- pr_err("bus scale client disable failed\n");
- clk_disable_unprepare(c_clks->clks.core_mmss_clk);
- clk_disable_unprepare(c_clks->clks.bus_clk);
- clk_disable_unprepare(c_clks->clks.iface_clk);
+ int rc = 0;
+
+ if (c_clks->bus_handle) {
+ rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 0);
+ if (rc) {
+ pr_err("bus scale client disable failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (c_clks->clks.core_mmss_clk)
+ clk_disable_unprepare(c_clks->clks.core_mmss_clk);
+
+ if (c_clks->clks.bus_clk)
+ clk_disable_unprepare(c_clks->clks.bus_clk);
+
+ if (c_clks->clks.iface_clk)
+ clk_disable_unprepare(c_clks->clks.iface_clk);
+
if (c_clks->clks.mnoc_clk)
clk_disable_unprepare(c_clks->clks.mnoc_clk);
- clk_disable_unprepare(c_clks->clks.mdp_core_clk);
- return 0;
+ if (c_clks->clks.mdp_core_clk)
+ clk_disable_unprepare(c_clks->clks.mdp_core_clk);
+
+ return rc;
}
static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
@@ -503,7 +523,7 @@
goto error_disable_master;
}
}
-
+ return rc;
error_disable_master:
(void)dsi_core_clk_stop(m_clks);
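
dsi_core_clk_stop() now guards every clk_disable_unprepare() with a NULL check so targets that lack some of the core clocks can share one teardown path (the common clock framework already treats a NULL clk as a no-op, so the guards mainly document that these handles are optional). A condensed sketch of the guarded unwind, with illustrative parameter names:

    /* sketch: any of these optional handles may legitimately be NULL */
    static void core_clks_teardown(struct clk *mmss, struct clk *bus,
    			       struct clk *iface)
    {
    	if (mmss)
    		clk_disable_unprepare(mmss);
    	if (bus)
    		clk_disable_unprepare(bus);
    	if (iface)
    		clk_disable_unprepare(iface);
    }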
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5df48c3..9a71ea0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -62,6 +62,7 @@
static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;
+static const enum dsi_ctrl_version dsi_ctrl_v2_2 = DSI_CTRL_VERSION_2_2;
static const struct of_device_id msm_dsi_of_match[] = {
{
@@ -72,6 +73,10 @@
.compatible = "qcom,dsi-ctrl-hw-v2.0",
.data = &dsi_ctrl_v2_0,
},
+ {
+ .compatible = "qcom,dsi-ctrl-hw-v2.2",
+ .data = &dsi_ctrl_v2_2,
+ },
{}
};
@@ -428,15 +433,34 @@
pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name,
ctrl->hw.base);
- ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
- if (IS_ERR(ptr)) {
- rc = PTR_ERR(ptr);
- return rc;
+ switch (ctrl->version) {
+ case DSI_CTRL_VERSION_1_4:
+ case DSI_CTRL_VERSION_2_0:
+ ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
+ if (IS_ERR(ptr)) {
+ pr_err("mmss_misc base address not found for [%s]\n",
+ ctrl->name);
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+ ctrl->hw.mmss_misc_base = ptr;
+ ctrl->hw.disp_cc_base = NULL;
+ break;
+ case DSI_CTRL_VERSION_2_2:
+ ptr = msm_ioremap(pdev, "disp_cc_base", ctrl->name);
+ if (IS_ERR(ptr)) {
+ pr_err("disp_cc base address not found for [%s]\n",
+ ctrl->name);
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+ ctrl->hw.disp_cc_base = ptr;
+ ctrl->hw.mmss_misc_base = NULL;
+ break;
+ default:
+ break;
}
- ctrl->hw.mmss_misc_base = ptr;
- pr_debug("[%s] map mmss_misc registers to %p\n", ctrl->name,
- ctrl->hw.mmss_misc_base);
return rc;
}
@@ -532,7 +556,7 @@
goto fail;
}
- link->esc_clk = devm_clk_get(&pdev->dev, "core_clk");
+ link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
if (IS_ERR(link->esc_clk)) {
rc = PTR_ERR(link->esc_clk);
pr_err("failed to get esc_clk, rc=%d\n", rc);
@@ -613,10 +637,8 @@
rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
&ctrl->pwr_info.digital,
"qcom,core-supply-entries");
- if (rc) {
- pr_err("failed to get digital supply, rc = %d\n", rc);
- goto error;
- }
+ if (rc)
+ pr_debug("failed to get digital supply, rc = %d\n", rc);
rc = dsi_pwr_get_dt_vreg_data(&pdev->dev,
&ctrl->pwr_info.host_pwr,
@@ -663,10 +685,10 @@
ctrl->pwr_info.host_pwr.vregs = NULL;
ctrl->pwr_info.host_pwr.count = 0;
error_digital:
- devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
+ if (ctrl->pwr_info.digital.vregs)
+ devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
ctrl->pwr_info.digital.vregs = NULL;
ctrl->pwr_info.digital.count = 0;
-error:
return rc;
}
@@ -1204,6 +1226,7 @@
}
dsi_ctrl->cell_index = index;
+ dsi_ctrl->version = version;
dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
if (!dsi_ctrl->name)
@@ -1227,7 +1250,6 @@
goto fail_clks;
}
- dsi_ctrl->version = version;
rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
dsi_ctrl->cell_index);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 161024a..859d707 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -41,6 +41,7 @@
DSI_CTRL_VERSION_UNKNOWN,
DSI_CTRL_VERSION_1_4,
DSI_CTRL_VERSION_2_0,
+ DSI_CTRL_VERSION_2_2,
DSI_CTRL_VERSION_MAX
};
@@ -575,18 +576,26 @@
/*
* struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
- * @base: VA for the DSI controller base address.
- * @length: Length of the DSI controller register map.
- * @index: Instance ID of the controller.
- * @feature_map: Features supported by the DSI controller.
- * @ops: Function pointers to the operations supported by the
- * controller.
+ * @base: VA for the DSI controller base address.
+ * @length: Length of the DSI controller register map.
+ * @mmss_misc_base: Base address of mmss_misc register map.
+ * @mmss_misc_length: Length of mmss_misc register map.
+ * @disp_cc_base: Base address of disp_cc register map.
+ * @disp_cc_length: Length of disp_cc register map.
+ * @index: Instance ID of the controller.
+ * @feature_map: Features supported by the DSI controller.
+ * @ops: Function pointers to the operations supported by the
+ * controller.
+ * @supported_interrupts: Number of supported interrupts.
+ * @supported_errors: Number of supported errors.
*/
struct dsi_ctrl_hw {
void __iomem *base;
u32 length;
void __iomem *mmss_misc_base;
u32 mmss_misc_length;
+ void __iomem *disp_cc_base;
+ u32 disp_cc_length;
u32 index;
/* features */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
new file mode 100644
index 0000000..1b1e811
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg.h"
+#include "dsi_hw.h"
+
+/* Equivalent to register DISP_CC_MISC_CMD */
+#define DISP_CC_CLAMP_REG_OFF 0x00
+
+/**
+ * dsi_ctrl_hw_22_phy_reset_config() - to configure clamp control during ulps
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: boolean to specify enable/disable.
+ */
+void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
+ bool enable)
+{
+ u32 reg = 0;
+
+ reg = DSI_DISP_CC_R32(ctrl, DISP_CC_CLAMP_REG_OFF);
+
+	/* Mask/unmask the disable-PHY-reset bit */
+ if (enable)
+ reg &= ~BIT(ctrl->index);
+ else
+ reg |= BIT(ctrl->index);
+ DSI_DISP_CC_W32(ctrl, DISP_CC_CLAMP_REG_OFF, reg);
+}
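
Each DSI controller owns a single bit in the shared DISP_CC clamp register, selected by BIT(ctrl->index), so the read-modify-write above changes one controller's clamp state without disturbing the other. A standalone model of that update:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* stand-in for the DISP_CC_MISC_CMD register */
    static uint32_t disp_cc_clamp;

    static void phy_reset_config(uint32_t ctrl_index, int enable)
    {
    	uint32_t reg = disp_cc_clamp;

    	if (enable)
    		reg &= ~BIT(ctrl_index);	/* release PHY reset disable */
    	else
    		reg |= BIT(ctrl_index);
    	disp_cc_clamp = reg;
    }

    int main(void)
    {
    	phy_reset_config(0, 0);
    	phy_reset_config(1, 0);
    	phy_reset_config(0, 1);
    	assert(disp_cc_clamp == BIT(1));	/* controller 1 untouched */
    	return 0;
    }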
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 8605338..122a63d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -320,8 +320,8 @@
reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
- /* Enable Timing double buffering */
- DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+ /* Disable Timing double buffering */
+ DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0);
pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index ee39ec7..563285d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -408,6 +408,7 @@
u32 pixel_clk_khz;
enum dsi_op_mode panel_mode;
u32 dsi_mode_flags;
+ struct msm_mode_info *mode_info;
};
#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index bcaf428..86db16e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -260,7 +260,8 @@
return rc;
}
- rc = dsi_phy_set_ulps(m_ctrl->phy, &display->config, enable);
+ rc = dsi_phy_set_ulps(m_ctrl->phy, &display->config, enable,
+ display->clamp_enabled);
if (rc) {
pr_err("Ulps PHY state change(%d) failed\n", enable);
return rc;
@@ -278,7 +279,8 @@
return rc;
}
- rc = dsi_phy_set_ulps(ctrl->phy, &display->config, enable);
+ rc = dsi_phy_set_ulps(ctrl->phy, &display->config, enable,
+ display->clamp_enabled);
if (rc) {
pr_err("Ulps PHY state change(%d) failed\n", enable);
return rc;
@@ -1105,7 +1107,7 @@
goto error_disable_clks;
}
- if (display->ctrl_count > 1) {
+ if (display->ctrl_count > 1 && !(msg->flags & MIPI_DSI_MSG_UNICAST)) {
rc = dsi_display_broadcast_cmd(display, msg);
if (rc) {
pr_err("[%s] cmd broadcast failed, rc=%d\n",
@@ -1113,7 +1115,10 @@
goto error_disable_cmd_engine;
}
} else {
- rc = dsi_ctrl_cmd_transfer(display->ctrl[0].ctrl, msg,
+ int ctrl_idx = (msg->flags & MIPI_DSI_MSG_UNICAST) ?
+ msg->ctrl : 0;
+
+ rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
DSI_CTRL_CMD_FIFO_STORE);
if (rc) {
pr_err("[%s] cmd transfer failed, rc=%d\n",
@@ -1362,13 +1367,17 @@
/*
* Enable DSI clamps only if entering idle power collapse.
*/
- if (dsi_panel_initialized(display->panel) &&
- dsi_panel_ulps_feature_enabled(display->panel)) {
+ if (dsi_panel_initialized(display->panel)) {
dsi_display_phy_idle_off(display);
rc = dsi_display_set_clamp(display, true);
if (rc)
pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
__func__, rc);
+
+ rc = dsi_display_phy_reset_config(display, false);
+ if (rc)
+ pr_err("%s: Failed to reset phy, rc=%d\n",
+ __func__, rc);
} else {
/* Make sure that controller is not in ULPS state when
* the DSI link is not active.
@@ -1424,6 +1433,13 @@
}
}
+ rc = dsi_display_phy_reset_config(display, true);
+ if (rc) {
+ pr_err("%s: Failed to reset phy, rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+
rc = dsi_display_set_clamp(display, false);
if (rc) {
pr_err("%s: Failed to disable dsi clamps. rc=%d\n",
@@ -2969,18 +2985,11 @@
goto error_phy_disable;
}
- rc = dsi_display_phy_reset_config(display, true);
- if (rc) {
- pr_err("[%s] failed to setup DSI controller, rc=%d\n",
- display->name, rc);
- goto error_ctrl_deinit;
- }
-
rc = dsi_display_set_clk_src(display);
if (rc) {
pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
display->name, rc);
- goto error_phy_reset_off;
+ goto error_ctrl_deinit;
}
rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
@@ -2988,7 +2997,7 @@
if (rc) {
pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
display->name, rc);
- goto error_phy_reset_off;
+ goto error_ctrl_deinit;
}
rc = dsi_display_ctrl_host_enable(display);
@@ -3011,8 +3020,6 @@
error_ctrl_link_off:
(void)dsi_display_clk_ctrl(display->dsi_clk_handle,
DSI_LINK_CLK, DSI_CLK_OFF);
-error_phy_reset_off:
- (void)dsi_display_phy_reset_config(display, false);
error_ctrl_deinit:
(void)dsi_display_ctrl_deinit(display);
error_phy_disable:
@@ -3027,6 +3034,12 @@
return rc;
}
+int dsi_display_pre_kickoff(struct dsi_display *display,
+ struct msm_display_kickoff_params *params)
+{
+ return 0;
+}
+
int dsi_display_enable(struct dsi_display *display)
{
int rc = 0;
@@ -3223,11 +3236,6 @@
pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
display->name, rc);
- rc = dsi_display_phy_reset_config(display, false);
- if (rc)
- pr_err("[%s] failed to disable DSI PHY reset config, rc=%d\n",
- display->name, rc);
-
rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
DSI_CORE_CLK, DSI_CLK_OFF);
if (rc)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index cfbb14ec..d2bc7d8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -417,4 +417,14 @@
* Return: error code
*/
int dsi_display_soft_reset(void *display);
+
+/*
+ * dsi_display_pre_kickoff - program kickoff-time features
+ * @display: Pointer to private display structure
+ * @params: Parameters for kickoff-time programming
+ * Returns: Zero on success
+ */
+int dsi_display_pre_kickoff(struct dsi_display *display,
+ struct msm_display_kickoff_params *params);
+
#endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 24a740b..3f4bb5a5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -50,6 +50,8 @@
dsi_mode->pixel_clk_khz = drm_mode->clock;
dsi_mode->panel_mode = 0; /* TODO: Panel Mode */
+ dsi_mode->mode_info = (struct msm_mode_info *)drm_mode->private;
+
if (msm_is_mode_seamless(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_SEAMLESS;
if (msm_is_mode_dynamic_fps(drm_mode))
@@ -81,6 +83,8 @@
drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
drm_mode->clock = dsi_mode->pixel_clk_khz;
+ drm_mode->private = (int *)dsi_mode->mode_info;
+
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)
drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DFPS)
@@ -255,6 +259,26 @@
return ret;
}
+int dsi_conn_get_topology(const struct drm_display_mode *drm_mode,
+ struct msm_display_topology *topology,
+ u32 max_mixer_width)
+{
+ struct dsi_display_mode dsi_mode;
+
+ if (!drm_mode || !topology)
+ return -EINVAL;
+
+ convert_to_dsi_mode(drm_mode, &dsi_mode);
+
+ if (!dsi_mode.mode_info)
+ return -EINVAL;
+
+ memcpy(topology, &dsi_mode.mode_info->topology,
+ sizeof(struct msm_display_topology));
+
+ return 0;
+}
+
static const struct drm_bridge_funcs dsi_bridge_ops = {
.attach = dsi_bridge_attach,
.mode_fixup = dsi_bridge_mode_fixup,
@@ -475,6 +499,18 @@
return MODE_OK;
}
+int dsi_conn_pre_kickoff(struct drm_connector *connector,
+ void *display,
+ struct msm_display_kickoff_params *params)
+{
+ if (!connector || !display || !params) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ return dsi_display_pre_kickoff(display, params);
+}
+
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 934899b..68520a8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -64,6 +64,17 @@
void *display);
/**
+ * dsi_conn_get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: Zero on success
+ */
+int dsi_conn_get_topology(const struct drm_display_mode *drm_mode,
+ struct msm_display_topology *topology,
+ u32 max_mixer_width);
+
+/**
* dsi_conn_mode_valid - callback to determine if specified mode is valid
* @connector: Pointer to drm connector structure
* @mode: Pointer to drm mode structure
@@ -80,4 +91,15 @@
void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge);
+/**
+ * dsi_conn_pre_kickoff - program kickoff-time features
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display structure
+ * @params: Parameters for kickoff-time programming
+ * Returns: Zero on success
+ */
+int dsi_conn_pre_kickoff(struct drm_connector *connector,
+ void *display,
+ struct msm_display_kickoff_params *params);
+
#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
index 447f613..174be9f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -33,6 +33,15 @@
writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
} while (0)
+#define DSI_DISP_CC_R32(dsi_hw, off) \
+ readl_relaxed((dsi_hw)->disp_cc_base + (off))
+#define DSI_DISP_CC_W32(dsi_hw, off, val) \
+ do {\
+ pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+ (dsi_hw)->index, #off, val); \
+ writel_relaxed((val), (dsi_hw)->disp_cc_base + (off)); \
+ } while (0)
+
#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index bda9c2d..cb4afe4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -20,6 +20,19 @@
#include "dsi_panel.h"
#include "dsi_ctrl_hw.h"
+#define MAX_CMDLINE_PARAM_LEN 256
+static char display_config[MAX_CMDLINE_PARAM_LEN];
+
+/**
+ * topology is currently defined by a set of the following 3 values:
+ * 1. num of layer mixers
+ * 2. num of compression encoders
+ * 3. num of interfaces
+ */
+#define TOPOLOGY_SET_LEN 3
+#define INT_BASE_10 10
+#define MAX_TOPOLOGY 5
+
#define DSI_PANEL_DEFAULT_LABEL "Default dsi panel"
#define DEFAULT_MDP_TRANSFER_TIME 14000
@@ -1512,6 +1525,17 @@
return rc;
}
+static int dsi_panel_parse_features(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ panel->ulps_enabled =
+ of_property_read_bool(of_node, "qcom,ulps-enabled");
+
+ pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+
+ return 0;
+}
+
static int dsi_panel_parse_jitter_config(struct dsi_panel *panel,
struct device_node *of_node)
{
@@ -1901,25 +1925,18 @@
u32 data;
int rc = -EINVAL;
int intf_width;
- struct device_node *dsc_np = NULL;
if (!panel->dsc_enabled)
return 0;
- dsc_np = of_parse_phandle(of_node, "qcom,config-select", 0);
- if (!dsc_np) {
- pr_err("no dsc config found\n");
- goto error;
- }
-
- rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-slice-height", &data);
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-height", &data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-slice-height\n");
goto error;
}
panel->dsc.slice_height = data;
- rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-slice-width", &data);
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-width", &data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-slice-width\n");
goto error;
@@ -1935,14 +1952,15 @@
panel->dsc.pic_width = panel->mode.timing.h_active;
panel->dsc.pic_height = panel->mode.timing.v_active;
- rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-slice-per-pkt", &data);
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-per-pkt",
+ &data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-slice-per-pkt\n");
goto error;
}
panel->dsc.slice_per_pkt = data;
- rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-bit-per-component",
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsc-bit-per-component",
&data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-bit-per-component\n");
@@ -1950,14 +1968,15 @@
}
panel->dsc.bpc = data;
- rc = of_property_read_u32(dsc_np, "qcom,mdss-dsc-bit-per-pixel", &data);
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsc-bit-per-pixel",
+ &data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-bit-per-pixel\n");
goto error;
}
panel->dsc.bpp = data;
- panel->dsc.block_pred_enable = of_property_read_bool(dsc_np,
+ panel->dsc.block_pred_enable = of_property_read_bool(of_node,
"qcom,mdss-dsc-block-prediction-enable");
panel->dsc.full_frame_slices = DIV_ROUND_UP(intf_width,
@@ -2016,6 +2035,112 @@
return 0;
}
+static int dsi_get_cmdline_top_override(void)
+{
+ char *str = display_config;
+ int top_index = -1;
+
+ /*
+	 * This module needs to be updated with the cmd line argument parsing
+ * for other dsi parameters.
+ */
+	if (strlcat(str, "\0", sizeof(display_config)) > sizeof(display_config))
+ return -EINVAL;
+
+ str = strnstr(display_config, "config", strlen(display_config));
+ if (!str)
+ return -EINVAL;
+
+	if (kstrtoint(str + strlen("config"), INT_BASE_10, &top_index))
+		return -EINVAL;
+
+ return top_index;
+}
+
+static int dsi_panel_parse_topology(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ struct msm_display_topology *topology;
+ u32 top_count, top_sel, *array = NULL;
+ int i, len = 0;
+ int rc = -EINVAL;
+
+ len = of_property_count_u32_elems(of_node, "qcom,display-topology");
+ if (len <= 0 || len % TOPOLOGY_SET_LEN ||
+ len > (TOPOLOGY_SET_LEN * MAX_TOPOLOGY)) {
+ pr_err("invalid topology list for the panel, rc = %d\n", rc);
+ return rc;
+ }
+
+ top_count = len / TOPOLOGY_SET_LEN;
+
+ array = kcalloc(len, sizeof(u32), GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,display-topology", array, len);
+ if (rc) {
+ pr_err("unable to read the display topologies, rc = %d\n", rc);
+ goto read_fail;
+ }
+
+ topology = kcalloc(top_count, sizeof(*topology), GFP_KERNEL);
+ if (!topology) {
+ rc = -ENOMEM;
+ goto read_fail;
+ }
+
+ for (i = 0; i < top_count; i++) {
+ struct msm_display_topology *top = &topology[i];
+
+ top->num_lm = array[i * TOPOLOGY_SET_LEN];
+ top->num_enc = array[i * TOPOLOGY_SET_LEN + 1];
+ top->num_intf = array[i * TOPOLOGY_SET_LEN + 2];
+	}
+
+ top_sel = dsi_get_cmdline_top_override();
+	/* top_sel is u32; cast so a failed (negative) override is rejected */
+	if ((int)top_sel >= 0 && top_sel < top_count) {
+		pr_info("overridden topology: lm: %d comp_enc:%d intf: %d\n",
+ topology[top_sel].num_lm,
+ topology[top_sel].num_enc,
+ topology[top_sel].num_intf);
+ goto parse_done;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,default-topology-index", &top_sel);
+ if (rc) {
+ pr_err("no default topology selected, rc = %d\n", rc);
+ goto parse_fail;
+ }
+
+ if (top_sel >= top_count) {
+ rc = -EINVAL;
+		pr_err("specified default topology index is not valid, rc = %d\n",
+			rc);
+ goto parse_fail;
+ }
+
+ pr_info("default topology: lm: %d comp_enc:%d intf: %d\n",
+ topology[top_sel].num_lm,
+ topology[top_sel].num_enc,
+ topology[top_sel].num_intf);
+
+parse_done:
+	panel->mode.mode_info = kzalloc(sizeof(struct msm_mode_info),
+				GFP_KERNEL);
+	if (!panel->mode.mode_info) {
+		rc = -ENOMEM;
+		goto parse_fail;
+	}
+	memcpy(&panel->mode.mode_info->topology, &topology[top_sel],
+		sizeof(struct msm_display_topology));
+parse_fail:
+ kfree(topology);
+read_fail:
+ kfree(array);
+
+ return rc;
+}
+
struct dsi_panel *dsi_panel_get(struct device *parent,
struct device_node *of_node)
{
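
For reference, dsi_get_cmdline_top_override() above accepts a string containing "config" followed by a decimal index into the qcom,display-topology list; anything else falls back to the DT default selected by qcom,default-topology-index. A userspace mirror of the parse, for illustration only:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* mirrors dsi_get_cmdline_top_override(): returns the index or -1 */
    static int parse_top_override(const char *display_config)
    {
    	const char *str = strstr(display_config, "config");
    	char *end;
    	long idx;

    	if (!str)
    		return -1;

    	idx = strtol(str + strlen("config"), &end, 10);
    	if (end == str + strlen("config") || idx < 0)
    		return -1;

    	return (int)idx;
    }

    int main(void)
    {
    	printf("%d\n", parse_top_override("someprefix:config2")); /* 2 */
    	printf("%d\n", parse_top_override("no-override"));        /* -1 */
    	return 0;
    }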
@@ -2073,6 +2198,13 @@
panel->mode.pixel_clk_khz = (DSI_H_TOTAL(&panel->mode.timing) *
DSI_V_TOTAL(&panel->mode.timing) *
panel->mode.timing.refresh_rate) / 1000;
+
+ rc = dsi_panel_parse_topology(panel, of_node);
+ if (rc) {
+ pr_err("failed to parse panel topology, rc=%d\n", rc);
+ goto error;
+ }
+
rc = dsi_panel_parse_host_config(panel, of_node);
if (rc) {
pr_err("failed to parse host configuration, rc=%d\n", rc);
@@ -2117,6 +2249,10 @@
if (rc)
pr_err("failed to parse panel jitter config, rc=%d\n", rc);
+ rc = dsi_panel_parse_features(panel, of_node);
+ if (rc)
+ pr_err("failed to parse panel features, rc=%d\n", rc);
+
rc = dsi_panel_parse_hdr_config(panel, of_node);
if (rc)
pr_err("failed to parse hdr config, rc=%d\n", rc);
@@ -2138,6 +2274,8 @@
for (i = 0; i < DSI_CMD_SET_MAX; i++)
dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
+ kfree(panel->mode.mode_info);
+
/* TODO: more free */
kfree(panel);
}
@@ -2490,7 +2628,6 @@
panel->name, rc);
goto error;
}
- panel->panel_initialized = false;
error:
mutex_unlock(&panel->panel_lock);
return rc;
@@ -2536,6 +2673,8 @@
panel->name, rc);
goto error;
}
+ panel->panel_initialized = false;
+
error:
mutex_unlock(&panel->panel_lock);
return rc;
@@ -2595,3 +2734,6 @@
mutex_unlock(&panel->panel_lock);
return rc;
}
+
+module_param_string(display_param, display_config, MAX_CMDLINE_PARAM_LEN, 0600);
+MODULE_PARM_DESC(display_param, "format: configx - x indexes the selected topology from the display topology list. Index 0 corresponds to the first topology in the list");
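
The display_param module parameter accepts the same "configx" syntax at load time; assuming dsi_panel.c is linked into a module named msm_drm, selecting the second topology in the list would look like:

    msm_drm.display_param=config1              (built-in, on the kernel command line)
    insmod msm_drm.ko display_param=config1    (when built as a module)

Both forms are illustrative; the actual prefix depends on the module this file is linked into.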
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 57226ba..9f63089 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -109,6 +109,7 @@
enum dsi_cmd_set_type type;
enum dsi_cmd_set_state state;
u32 count;
+ int ctrl_idx;
struct dsi_cmd_desc *cmds;
};
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 96a98bd..ebfb40b8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -388,7 +388,7 @@
/** TODO: initialize debugfs */
dsi_phy->pdev = pdev;
platform_set_drvdata(pdev, dsi_phy);
- pr_debug("Probe successful for %s\n", dsi_phy->name);
+ pr_info("Probe successful for %s\n", dsi_phy->name);
return 0;
fail_supplies:
@@ -669,7 +669,7 @@
}
static int dsi_phy_enable_ulps(struct msm_dsi_phy *phy,
- struct dsi_host_config *config)
+ struct dsi_host_config *config, bool clamp_enabled)
{
int rc = 0;
u32 lanes = 0;
@@ -679,17 +679,25 @@
lanes = config->common_config.data_lanes;
lanes |= DSI_CLOCK_LANE;
- rc = phy->hw.ops.ulps_ops.wait_for_lane_idle(&phy->hw, lanes);
- if (rc) {
- pr_err("lanes not entering idle, skip ULPS\n");
- return rc;
+ /*
+ * If DSI clamps are enabled, it means that the DSI lanes are
+ * already in idle state. Checking for lanes to be in idle state
+ * should be skipped during ULPS entry programming while coming
+ * out of idle screen.
+ */
+ if (!clamp_enabled) {
+ rc = phy->hw.ops.ulps_ops.wait_for_lane_idle(&phy->hw, lanes);
+ if (rc) {
+ pr_err("lanes not entering idle, skip ULPS\n");
+ return rc;
+ }
}
phy->hw.ops.ulps_ops.ulps_request(&phy->hw, &phy->cfg, lanes);
ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
- if ((lanes & ulps_lanes) != lanes) {
+ if (!phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
lanes, ulps_lanes);
rc = -EIO;
@@ -701,7 +709,6 @@
static int dsi_phy_disable_ulps(struct msm_dsi_phy *phy,
struct dsi_host_config *config)
{
- int rc = 0;
u32 ulps_lanes, lanes = 0;
if (config->panel_mode == DSI_OP_CMD_MODE)
@@ -710,25 +717,27 @@
ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
- if ((lanes & ulps_lanes) != lanes)
- pr_err("Mismatch between lanes in ULPS\n");
-
- lanes &= ulps_lanes;
+ if (!phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
+ pr_err("Mismatch in ULPS: lanes:%d, ulps_lanes:%d\n",
+ lanes, ulps_lanes);
+ return -EIO;
+ }
phy->hw.ops.ulps_ops.ulps_exit(&phy->hw, &phy->cfg, lanes);
ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw);
- if (ulps_lanes & lanes) {
+
+ if (phy->hw.ops.ulps_ops.is_lanes_in_ulps(lanes, ulps_lanes)) {
pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
- rc = -EIO;
+ return -EIO;
}
- return rc;
+ return 0;
}
int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
- bool enable)
+ bool enable, bool clamp_enabled)
{
int rc = 0;
@@ -738,7 +747,10 @@
}
if (!phy->hw.ops.ulps_ops.ulps_request ||
- !phy->hw.ops.ulps_ops.ulps_exit) {
+ !phy->hw.ops.ulps_ops.ulps_exit ||
+ !phy->hw.ops.ulps_ops.get_lanes_in_ulps ||
+ !phy->hw.ops.ulps_ops.is_lanes_in_ulps ||
+ !phy->hw.ops.ulps_ops.wait_for_lane_idle) {
pr_debug("DSI PHY ULPS ops not present\n");
return 0;
}
@@ -746,7 +758,7 @@
mutex_lock(&phy->phy_lock);
if (enable)
- rc = dsi_phy_enable_ulps(phy, config);
+ rc = dsi_phy_enable_ulps(phy, config, clamp_enabled);
else
rc = dsi_phy_disable_ulps(phy, config);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 4a64855..e721486 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -182,11 +182,12 @@
* @phy: DSI PHY handle
* @config: DSi host configuration information.
* @enable: Enable/Disable
+ * @clamp_enabled: mmss_clamp enabled/disabled
*
* Return: error code.
*/
int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
- bool enable);
+ bool enable, bool clamp_enabled);
/**
* dsi_phy_clk_cb_register() - Register PHY clock control callback
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index daaa78a..51c2f46 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -143,15 +143,22 @@
* @phy: Pointer to DSI PHY hardware instance.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
- * state. If 0 is returned, all the lanes are active.
+ * state.
*
* Return: List of lanes in ULPS state.
*/
u32 (*get_lanes_in_ulps)(struct dsi_phy_hw *phy);
+
+ /**
+ * is_lanes_in_ulps() - checks if the given lanes are in ulps
+ * @lanes: lanes to be checked.
+	 * @ulps_lanes: lanes in ulps currently.
+ *
+ * Return: true if all the given lanes are in ulps; false otherwise.
+ */
+	bool (*is_lanes_in_ulps)(u32 lanes, u32 ulps_lanes);
};
-
-
/**
* struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
* @regulator_enable: Enable PHY regulators.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index 96f5c19..371239d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -159,7 +159,7 @@
struct dsi_phy_cfg *cfg)
{
int i;
- u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x02, 0x01};
+ u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x04, 0x01};
/* Strength ctrl settings */
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
@@ -186,6 +186,10 @@
DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
}
+
+	/* Toggle BIT 0 to release freeze I/O */
+ DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x05);
+ DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x04);
}
/**
@@ -419,6 +423,14 @@
return lanes;
}
+bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
+{
+ if (lanes & ulps_lanes)
+ return false;
+
+ return true;
+}
+
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9d2e95b..4e0b678 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -15,6 +15,27 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
#include <linux/of_address.h>
#include <linux/kthread.h>
@@ -305,6 +326,7 @@
}
sde_dbg_destroy();
+ debugfs_remove_recursive(priv->debug_root);
component_unbind_all(dev, ddev);
sde_power_client_destroy(&priv->phandle, priv->pclient);
@@ -334,8 +356,8 @@
{
.compatible = "qcom,sde-kms",
.data = (void *)KMS_SDE,
- /* end node */
- } };
+ },
+ {} };
struct device *dev = &pdev->dev;
const struct of_device_id *match;
@@ -611,7 +633,16 @@
if (ret)
goto fail;
- ret = sde_dbg_debugfs_register(ddev->primary->debugfs_root);
+ priv->debug_root = debugfs_create_dir("debug",
+ ddev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(priv->debug_root)) {
+ pr_err("debugfs_root create_dir fail, error %ld\n",
+ PTR_ERR(priv->debug_root));
+ priv->debug_root = NULL;
+ goto fail;
+ }
+
+ ret = sde_dbg_debugfs_register(priv->debug_root);
if (ret) {
dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
goto fail;
@@ -1321,6 +1352,109 @@
return drm_release(inode, filp);
}
+/**
+ * msm_drv_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ */
+void msm_drv_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev;
+
+ if (!fb)
+ return;
+
+ dev = fb->dev;
+
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ drm_framebuffer_unreference(fb);
+}
+
+struct msm_drv_rmfb2_work {
+ struct work_struct work;
+ struct list_head fbs;
+};
+
+static void msm_drv_rmfb2_work_fn(struct work_struct *w)
+{
+ struct msm_drv_rmfb2_work *arg = container_of(w, typeof(*arg), work);
+
+ while (!list_empty(&arg->fbs)) {
+ struct drm_framebuffer *fb =
+ list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+ list_del_init(&fb->filp_head);
+ msm_drv_framebuffer_remove(fb);
+ }
+}
+
+/**
+ * msm_ioctl_rmfb2 - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int msm_ioctl_rmfb2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ return -ENOENT;
+
+ /* drop extra ref from traversing drm_framebuffer_lookup */
+ drm_framebuffer_unreference(fb);
+
+ mutex_lock(&file_priv->fbs_lock);
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+ if (!found) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -ENOENT;
+ }
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /*
+ * we now own the reference that was stored in the fbs list
+ *
+ * drm_framebuffer_remove may fail with -EINTR on pending signals,
+ * so run this in a separate stack as there's no way to correctly
+ * handle this after the fb is already removed from the lookup table.
+ */
+ if (drm_framebuffer_read_refcount(fb) > 1) {
+ struct msm_drv_rmfb2_work arg;
+
+ INIT_WORK_ONSTACK(&arg.work, msm_drv_rmfb2_work_fn);
+ INIT_LIST_HEAD(&arg.fbs);
+ list_add_tail(&fb->filp_head, &arg.fbs);
+
+ schedule_work(&arg.work);
+ flush_work(&arg.work);
+ destroy_work_on_stack(&arg.work);
+ } else
+ drm_framebuffer_unreference(fb);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_ioctl_rmfb2);
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -1335,6 +1469,8 @@
DRM_UNLOCKED|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event,
DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2,
+ DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
static const struct vm_operations_struct vm_ops = {
@@ -1696,6 +1832,13 @@
{ },
};
+#ifdef CONFIG_QCOM_KGSL
+static int add_gpu_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ return 0;
+}
+#else
static int add_gpu_components(struct device *dev,
struct component_match **matchptr)
{
@@ -1711,6 +1854,7 @@
return 0;
}
+#endif
static int msm_drm_bind(struct device *dev)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index f2fccd7..64e9544 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -138,6 +138,7 @@
CRTC_PROP_MEM_IB,
CRTC_PROP_ROT_PREFILL_BW,
CRTC_PROP_ROT_CLK,
+ CRTC_PROP_ROI_V1,
/* total # of properties */
CRTC_PROP_COUNT
@@ -158,10 +159,12 @@
CONNECTOR_PROP_DST_Y,
CONNECTOR_PROP_DST_W,
CONNECTOR_PROP_DST_H,
+ CONNECTOR_PROP_ROI_V1,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
CONNECTOR_PROP_TOPOLOGY_CONTROL,
+ CONNECTOR_PROP_AUTOREFRESH,
/* total # of properties */
CONNECTOR_PROP_COUNT
@@ -200,6 +203,38 @@
};
/**
+ * struct msm_roi_alignment - region of interest alignment restrictions
+ * @xstart_pix_align: left x offset alignment restriction
+ * @width_pix_align: width alignment restriction
+ * @ystart_pix_align: top y offset alignment restriction
+ * @height_pix_align: height alignment restriction
+ * @min_width: minimum width restriction
+ * @min_height: minimum height restriction
+ */
+struct msm_roi_alignment {
+ uint32_t xstart_pix_align;
+ uint32_t width_pix_align;
+ uint32_t ystart_pix_align;
+ uint32_t height_pix_align;
+ uint32_t min_width;
+ uint32_t min_height;
+};
+
+/**
+ * struct msm_roi_caps - display's region of interest capabilities
+ * @enabled: true if some region of interest is supported
+ * @merge_rois: merge rois before sending to display
+ * @num_roi: maximum number of rois supported
+ * @align: roi alignment restrictions
+ */
+struct msm_roi_caps {
+ bool enabled;
+ bool merge_rois;
+ uint32_t num_roi;
+ struct msm_roi_alignment align;
+};
+
+/**
* struct msm_display_dsc_info - defines dsc configuration
* @version: DSC version.
* @scr_rev: DSC revision.
@@ -319,6 +354,26 @@
};
/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+};
+
+/**
+ * struct msm_mode_info - defines all msm custom mode info
+ * @topology - supported topology for the mode
+ */
+struct msm_mode_info {
+ struct msm_display_topology topology;
+};
+
+/**
* struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_CONNECTOR_ display type
* @capabilities: Bitmask of display flags
@@ -338,6 +393,7 @@
* @vtotal: display vertical total
* @jitter: display jitter configuration
* @comp_info: Compression supported by the display
+ * @roi_caps: Region of interest capability info
*/
struct msm_display_info {
int intf_type;
@@ -361,6 +417,27 @@
uint32_t jitter;
struct msm_compression_info comp_info;
+ struct msm_roi_caps roi_caps;
+};
+
+#define MSM_MAX_ROI 4
+
+/**
+ * struct msm_roi_list - list of regions of interest for a drm object
+ * @num_rects: number of valid rectangles in the roi array
+ * @roi: list of roi rectangles
+ */
+struct msm_roi_list {
+ uint32_t num_rects;
+ struct drm_clip_rect roi[MSM_MAX_ROI];
+};
+
+/**
+ * struct msm_display_kickoff_params - info for display features at kickoff
+ * @rois: Regions of interest structure for mapping CRTC to Connector output
+ */
+struct msm_display_kickoff_params {
+ struct msm_roi_list *rois;
};
/**
@@ -492,6 +569,9 @@
/* whether registered and drm_dev_unregister should be called */
bool registered;
+
+ /* msm drv debug root node */
+ struct dentry *debug_root;
};
struct msm_format {
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index 5ed7ae2..4a664a8 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -52,6 +52,14 @@
};
/**
+ * enum ad_intr_resp_property - ad4 interrupt response enum
+ */
+enum ad_intr_resp_property {
+ AD4_BACKLIGHT,
+ AD4_RESPMAX,
+};
+
+/**
* struct sde_ad_hw_cfg - structure for setting the ad properties
* @prop: enum of ad property
* @hw_cfg: payload for the prop being set.
@@ -76,4 +84,13 @@
* @cfg: pointer to struct sde_ad_hw_cfg
*/
void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
+
+/**
+ * sde_read_intr_resp_ad4 - api to get ad4 interrupt status for event
+ * @dspp: pointer to dspp object
+ * @event: event for which response is needed
+ * @resp: value of event requested
+ */
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp);
+
#endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index cb6917a..79b39bd 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -22,6 +22,8 @@
#include "sde_hw_dspp.h"
#include "sde_hw_lm.h"
#include "sde_ad4.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
struct sde_cp_node {
u32 property_id;
@@ -35,6 +37,7 @@
struct list_head dirty_list;
bool is_dspp_feature;
u32 prop_blob_sz;
+ struct sde_irq_callback *irq;
};
struct sde_cp_prop_attach {
@@ -67,6 +70,8 @@
static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
struct sde_crtc *crtc);
+static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg);
+
#define setup_dspp_prop_install_funcs(func) \
do { \
func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -1316,3 +1321,122 @@
}
return ret;
}
+
+static void sde_cp_ad_interrupt_cb(void *arg, int irq_idx)
+{
+ struct sde_crtc *crtc = arg;
+
+ sde_crtc_event_queue(&crtc->base, sde_cp_notify_ad_event, NULL);
+}
+
+static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
+{
+ uint32_t bl = 0;
+ struct sde_hw_mixer *hw_lm = NULL;
+ struct sde_hw_dspp *hw_dspp = NULL;
+ u32 num_mixers;
+ struct sde_crtc *crtc;
+ struct drm_event event;
+ int i;
+
+ crtc = to_sde_crtc(crtc_drm);
+ num_mixers = crtc->num_mixers;
+ if (!num_mixers)
+ return;
+
+ for (i = 0; i < num_mixers; i++) {
+ hw_lm = crtc->mixers[i].hw_lm;
+ hw_dspp = crtc->mixers[i].hw_dspp;
+ if (!hw_lm->cfg.right_mixer)
+ break;
+ }
+
+ if (!hw_dspp)
+ return;
+
+ hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
+ event.length = sizeof(u32);
+ event.type = DRM_EVENT_AD_BACKLIGHT;
+ msm_send_crtc_notification(&crtc->base, &event, (u8 *)&bl);
+}
+
+int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
+ struct sde_irq_callback *ad_irq)
+{
+ struct sde_kms *kms = NULL;
+ u32 num_mixers;
+ struct sde_hw_mixer *hw_lm;
+ struct sde_hw_dspp *hw_dspp = NULL;
+ struct sde_crtc *crtc;
+ int i;
+ int irq_idx, ret;
+ struct sde_cp_node prop_node;
+
+ if (!crtc_drm || !ad_irq) {
+ DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, ad_irq);
+ return -EINVAL;
+ }
+
+ crtc = to_sde_crtc(crtc_drm);
+ if (!crtc) {
+ DRM_ERROR("invalid sde_crtc %pK\n", crtc);
+ return -EINVAL;
+ }
+
+ mutex_lock(&crtc->crtc_lock);
+ kms = get_kms(crtc_drm);
+ num_mixers = crtc->num_mixers;
+
+ memset(&prop_node, 0, sizeof(prop_node));
+ prop_node.feature = SDE_CP_CRTC_DSPP_AD_BACKLIGHT;
+ ret = sde_cp_ad_validate_prop(&prop_node, crtc);
+ if (ret) {
+ DRM_ERROR("Ad not supported ret %d\n", ret);
+ goto exit;
+ }
+
+ for (i = 0; i < num_mixers; i++) {
+ hw_lm = crtc->mixers[i].hw_lm;
+ hw_dspp = crtc->mixers[i].hw_dspp;
+ if (!hw_lm->cfg.right_mixer)
+ break;
+ }
+
+ if (!hw_dspp) {
+ DRM_ERROR("invalid dspp\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_AD4_BL_DONE,
+ hw_dspp->idx);
+ if (irq_idx < 0) {
+ DRM_ERROR("failed to get the irq idx ret %d\n", irq_idx);
+ ret = irq_idx;
+ goto exit;
+ }
+
+ if (!en) {
+ sde_core_irq_disable(kms, &irq_idx, 1);
+ sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+ ret = 0;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&ad_irq->list);
+ ad_irq->arg = crtc;
+ ad_irq->func = sde_cp_ad_interrupt_cb;
+ ret = sde_core_irq_register_callback(kms, irq_idx, ad_irq);
+ if (ret) {
+ DRM_ERROR("failed to register the callback ret %d\n", ret);
+ goto exit;
+ }
+ ret = sde_core_irq_enable(kms, &irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("failed to enable irq ret %d\n", ret);
+ sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+ }
+exit:
+ mutex_unlock(&crtc->crtc_lock);
+ return ret;
+}
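
sde_cp_ad_interrupt() is careful about ordering: on enable it registers the callback before unmasking the interrupt, and on disable it masks the interrupt before removing the callback, so the handler can never fire while unregistered. Condensed from the function above:

    /* enable path */
    ret = sde_core_irq_register_callback(kms, irq_idx, ad_irq);
    if (!ret)
    	ret = sde_core_irq_enable(kms, &irq_idx, 1);

    /* disable path mirrors the order */
    sde_core_irq_disable(kms, &irq_idx, 1);
    sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);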
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index 9fa63f8..e78f690 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -15,6 +15,8 @@
#define _SDE_COLOR_PROCESSING_H
#include <drm/drm_crtc.h>
+struct sde_irq_callback;
+
/*
* PA MEMORY COLOR types
* @MEMCOLOR_SKIN Skin memory color type
@@ -92,4 +94,13 @@
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_resume(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_ad_interrupt: Api to enable/disable ad interrupt
+ * @crtc: Pointer to crtc.
+ * @en: Variable to enable/disable interrupt.
+ * @irq: Pointer to irq callback
+ */
+int sde_cp_ad_interrupt(struct drm_crtc *crtc, bool en,
+ struct sde_irq_callback *irq);
#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 1f39180..9f8d7ee 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -21,6 +21,15 @@
#define BL_NODE_NAME_SIZE 32
+/* Autorefresh will occur after FRAME_CNT frames. Large values are unlikely */
+#define AUTOREFRESH_MAX_FRAME_CNT 6
+
+#define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\
+ (c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
+ (c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
static const struct drm_prop_enum_list e_topology_name[] = {
{SDE_RM_TOPOLOGY_UNKNOWN, "sde_unknown"},
{SDE_RM_TOPOLOGY_SINGLEPIPE, "sde_singlepipe"},
@@ -78,7 +87,8 @@
.get_brightness = sde_backlight_device_get_brightness,
};
-static int sde_backlight_setup(struct sde_connector *c_conn)
+static int sde_backlight_setup(struct sde_connector *c_conn,
+ struct drm_device *dev)
{
struct backlight_device *bl_device;
struct backlight_properties props;
@@ -87,7 +97,7 @@
static int display_count;
char bl_node_name[BL_NODE_NAME_SIZE];
- if (!c_conn) {
+ if (!c_conn || !dev || !dev->dev) {
SDE_ERROR("invalid param\n");
return -EINVAL;
} else if (c_conn->connector_type != DRM_MODE_CONNECTOR_DSI) {
@@ -104,7 +114,7 @@
props.brightness = bl_config->brightness_max_level;
snprintf(bl_node_name, BL_NODE_NAME_SIZE, "panel%u-backlight",
display_count);
- bl_device = backlight_device_register(bl_node_name, c_conn->base.kdev,
+ bl_device = backlight_device_register(bl_node_name, dev->dev,
c_conn, &sde_backlight_device_ops, &props);
if (IS_ERR_OR_NULL(bl_device)) {
SDE_ERROR("Failed to register backlight: %ld\n",
@@ -215,6 +225,57 @@
return c_conn->ops.get_info(info, c_conn->display);
}
+int sde_connector_pre_kickoff(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state;
+ struct msm_display_kickoff_params params;
+ int rc;
+
+ if (!connector) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(connector->state);
+
+ if (!c_conn->display) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ if (!c_conn->ops.pre_kickoff)
+ return 0;
+
+ params.rois = &c_state->rois;
+
+ SDE_EVT32_VERBOSE(connector->base.id);
+
+ rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
+
+ return rc;
+}
+
+void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
+{
+ struct sde_connector *c_conn;
+ struct dsi_display *display;
+ u32 state = enable ? DSI_CLK_ON : DSI_CLK_OFF;
+
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return;
+ }
+
+ c_conn = to_sde_connector(connector);
+ display = (struct dsi_display *) c_conn->display;
+
+ if (display && c_conn->ops.clk_ctrl)
+ c_conn->ops.clk_ctrl(display->mdp_clk_handle,
+ DSI_ALL_CLKS, state);
+}
+
static void sde_connector_destroy(struct drm_connector *connector)
{
struct sde_connector *c_conn;
@@ -364,6 +425,122 @@
return &c_state->base;
}
+static int _sde_connector_roi_v1_check_roi(
+ struct sde_connector *c_conn,
+ struct drm_clip_rect *roi_conn,
+ const struct msm_roi_caps *caps)
+{
+ const struct msm_roi_alignment *align = &caps->align;
+ int w = roi_conn->x2 - roi_conn->x1;
+ int h = roi_conn->y2 - roi_conn->y1;
+
+ if (w <= 0 || h <= 0) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n", w, h);
+ return -EINVAL;
+ }
+
+ if (w < align->min_width || w % align->width_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi width %d min %d align %d\n",
+ w, align->min_width, align->width_pix_align);
+ return -EINVAL;
+ }
+
+ if (h < align->min_height || h % align->height_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi height %d min %d align %d\n",
+ h, align->min_height, align->height_pix_align);
+ return -EINVAL;
+ }
+
+ if (roi_conn->x1 % align->xstart_pix_align) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi x1 %d align %d\n",
+ roi_conn->x1, align->xstart_pix_align);
+ return -EINVAL;
+ }
+
+ if (roi_conn->y1 % align->ystart_pix_align) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi y1 %d align %d\n",
+ roi_conn->y1, align->ystart_pix_align);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int _sde_connector_set_roi_v1(
+ struct sde_connector *c_conn,
+ struct sde_connector_state *c_state,
+ void *usr_ptr)
+{
+ struct sde_drm_roi_v1 roi_v1;
+ struct msm_display_info display_info;
+ struct msm_roi_caps *caps;
+ int i, rc;
+
+ if (!c_conn || !c_state) {
+ SDE_ERROR("invalid args\n");
+ return -EINVAL;
+ }
+
+ rc = sde_connector_get_info(&c_conn->base, &display_info);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn, "display get info error: %d\n", rc);
+ return rc;
+ }
+
+ caps = &display_info.roi_caps;
+ if (!caps->enabled) {
+ SDE_ERROR_CONN(c_conn, "display roi capability is disabled\n");
+ return -ENOTSUPP;
+ }
+
+ memset(&c_state->rois, 0, sizeof(c_state->rois));
+
+ if (!usr_ptr) {
+ SDE_DEBUG_CONN(c_conn, "rois cleared\n");
+ return 0;
+ }
+
+ if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
+ SDE_ERROR_CONN(c_conn, "failed to copy roi_v1 data\n");
+ return -EINVAL;
+ }
+
+ SDE_DEBUG_CONN(c_conn, "num_rects %d\n", roi_v1.num_rects);
+
+ if (roi_v1.num_rects == 0) {
+ SDE_DEBUG_CONN(c_conn, "rois cleared\n");
+ return 0;
+ }
+
+ if (roi_v1.num_rects > SDE_MAX_ROI_V1 ||
+ roi_v1.num_rects > caps->num_roi) {
+ SDE_ERROR_CONN(c_conn, "too many rects specified: %d\n",
+ roi_v1.num_rects);
+ return -EINVAL;
+ }
+
+ c_state->rois.num_rects = roi_v1.num_rects;
+ for (i = 0; i < roi_v1.num_rects; ++i) {
+ int rc;
+
+ rc = _sde_connector_roi_v1_check_roi(c_conn, &roi_v1.roi[i],
+ caps);
+ if (rc)
+ return rc;
+
+ c_state->rois.roi[i] = roi_v1.roi[i];
+ SDE_DEBUG_CONN(c_conn, "roi%d: roi 0x%x 0x%x 0x%x 0x%x\n", i,
+ c_state->rois.roi[i].x1,
+ c_state->rois.roi[i].y1,
+ c_state->rois.roi[i].x2,
+ c_state->rois.roi[i].y2);
+ }
+
+ return 0;
+}
+
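For reference, the connector ROI reaches this function through the volatile "sde_drm_roi_v1" property installed further down in this file; userspace passes a pointer to its sde_drm_roi_v1 struct as the property value and the kernel copies it in here. A hypothetical userspace sketch (req, conn_id and roi_prop_id are assumed to come from drmModeAtomicAlloc() and property enumeration):

    /* Hypothetical userspace sketch; the coordinates must satisfy the
     * msm_roi_alignment constraints checked above. */
    struct sde_drm_roi_v1 roi = { .num_rects = 1 };

    roi.roi[0].x1 = 0;
    roi.roi[0].y1 = 0;
    roi.roi[0].x2 = 720;
    roi.roi[0].y2 = 128;
    drmModeAtomicAddProperty(req, conn_id, roi_prop_id,
                    (uint64_t)(uintptr_t)&roi);
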
static int sde_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
@@ -429,6 +606,12 @@
SDE_ERROR("invalid topology_control: 0x%llX\n", val);
}
+ if (idx == CONNECTOR_PROP_ROI_V1) {
+ rc = _sde_connector_set_roi_v1(c_conn, c_state, (void *)val);
+ if (rc)
+ SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
+ }
+
/* check for custom property handling */
if (!rc && c_conn->ops.set_property) {
rc = c_conn->ops.set_property(connector,
@@ -478,13 +661,7 @@
idx = msm_property_index(&c_conn->property_info, property);
if (idx == CONNECTOR_PROP_RETIRE_FENCE)
- /*
- * Set a fence offset if not a virtual connector, so that the
- * fence signals after one additional commit rather than at the
- * end of the current one.
- */
- rc = sde_fence_create(&c_conn->retire_fence, val,
- c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+ rc = sde_fence_create(&c_conn->retire_fence, val, 0);
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
@@ -673,6 +850,7 @@
struct sde_kms_info *info;
struct sde_connector *c_conn = NULL;
struct dsi_display *dsi_display;
+ struct msm_display_info display_info;
int rc;
if (!dev || !dev->dev_private || !encoder) {
@@ -747,7 +925,7 @@
goto error_cleanup_fence;
}
- rc = sde_backlight_setup(c_conn);
+ rc = sde_backlight_setup(c_conn, dev);
if (rc) {
SDE_ERROR("failed to setup backlight, rc=%d\n", rc);
goto error_cleanup_fence;
@@ -805,9 +983,20 @@
}
}
+ rc = sde_connector_get_info(&c_conn->base, &display_info);
+ if (!rc && display_info.roi_caps.enabled) {
+ msm_property_install_volatile_range(
+ &c_conn->property_info, "sde_drm_roi_v1", 0x0,
+ 0, ~0, 0, CONNECTOR_PROP_ROI_V1);
+ }
+
msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+ msm_property_install_range(&c_conn->property_info, "autorefresh",
+ 0x0, 0, AUTOREFRESH_MAX_FRAME_CNT, 0,
+ CONNECTOR_PROP_AUTOREFRESH);
+
/* enum/bitmask properties */
msm_property_install_enum(&c_conn->property_info, "topology_name",
DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 9d36851..c8c0eed 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -121,6 +121,17 @@
int (*get_info)(struct msm_display_info *info, void *display);
/**
+ * get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: Zero on success
+ */
+ int (*get_topology)(const struct drm_display_mode *drm_mode,
+ struct msm_display_topology *topology,
+ u32 max_mixer_width);
+
+ /**
* enable_event - notify display of event registration/unregistration
* @connector: Pointer to drm connector structure
* @event_idx: SDE connector event index
@@ -138,6 +149,26 @@
* Return: Zero on success, -ERROR otherwise
*/
int (*soft_reset)(void *display);
+
+ /**
+ * pre_kickoff - trigger display to program kickoff-time features
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display structure
+ * @params: Parameter bundle of connector-stored information for
+ * kickoff-time programming into the display
+ * Returns: Zero on success
+ */
+ int (*pre_kickoff)(struct drm_connector *connector,
+ void *display,
+ struct msm_display_kickoff_params *params);
+
+ /**
+ * clk_ctrl - perform clk enable/disable on the connector
+ * @handle: Pointer to clk handle
+ * @type: Type of clks
+ * @state: State to set the clks to
+ * Returns: Zero on success
+ */
+ int (*clk_ctrl)(void *handle, u32 type, u32 state);
};
/**
@@ -253,12 +284,15 @@
* @out_fb: Pointer to output frame buffer, if applicable
* @mmu_id: MMU ID for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
+ * @rois: Regions of interest structure for mapping CRTC to Connector output
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
int mmu_id;
uint64_t property_values[CONNECTOR_PROP_COUNT];
+
+ struct msm_roi_list rois;
};
/**
@@ -351,6 +385,13 @@
struct msm_display_info *info);
/**
+ * sde_connector_clk_ctrl - enables/disables the connector clks
+ * @connector: Pointer to drm connector object
+ * @enable: true/false to enable/disable
+ */
+void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
+
+/**
* sde_connector_trigger_event - indicate that an event has occurred
* Any callbacks that have been registered against this event will
* be called from the same thread context.
@@ -403,5 +444,29 @@
int sde_connector_register_custom_event(struct sde_kms *kms,
struct drm_connector *conn_drm, u32 event, bool en);
+/**
+ * sde_connector_pre_kickoff - trigger kickoff time feature programming
+ * @connector: Pointer to drm connector object
+ * Returns: Zero on success
+ */
+int sde_connector_pre_kickoff(struct drm_connector *connector);
+
+/**
+ * sde_connector_needs_offset - check whether a retire fence offset is
+ * required, based on the display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector)
+ return false;
+
+ c_conn = to_sde_connector(connector);
+ return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
#endif /* _SDE_CONNECTOR_H_ */
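
A display backend opts into the new clk_ctrl hook by filling it in its sde_connector_ops. A rough sketch for the DSI case, assuming the DSI driver exposes a dsi_display_clk_ctrl() helper with a matching (handle, type, state) signature:

    /* Sketch only: dsi_display_clk_ctrl is an assumed helper name. */
    static const struct sde_connector_ops dsi_connector_ops = {
            .clk_ctrl = dsi_display_clk_ctrl,
            /* remaining ops elided */
    };

sde_connector_clk_ctrl() above then maps its boolean argument onto DSI_CLK_ON/DSI_CLK_OFF and forwards the display's mdp_clk_handle to this op.
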
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 71a8bdf..5adef2d 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -320,7 +320,7 @@
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
-static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
struct dentry *parent)
{
sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0644,
@@ -330,20 +330,20 @@
return 0;
}
-static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
{
debugfs_remove(sde_kms->irq_obj.debugfs_file);
sde_kms->irq_obj.debugfs_file = NULL;
}
#else
-static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
struct dentry *parent)
{
return 0;
}
-static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
{
}
#endif
@@ -385,8 +385,6 @@
atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
}
-
- sde_debugfs_core_irq_init(sde_kms, sde_debugfs_get_root(sde_kms));
}
int sde_core_irq_postinstall(struct sde_kms *sde_kms)
@@ -411,8 +409,6 @@
}
priv = sde_kms->dev->dev_private;
- sde_debugfs_core_irq_destroy(sde_kms);
-
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 92642e7..64f4160 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -135,4 +135,19 @@
int irq_idx,
struct sde_irq_callback *irq_cb);
+/**
+ * sde_debugfs_core_irq_init - register core irq debugfs
+ * @sde_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @Return: 0 on success
+ */
+int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+ struct dentry *parent);
+
+/**
+ * sde_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @sde_kms: pointer to kms
+ */
+void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms);
+
#endif /* __SDE_CORE_IRQ_H__ */
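
Exporting these two functions moves ownership of the core_irq debugfs node from the irq preinstall/uninstall path (see the deletions in sde_core_irq.c above) to the kms debugfs lifecycle. A sketch of the intended pairing, assuming a kms-level debugfs hook (the example_* names are hypothetical):

    static int example_kms_debugfs_init(struct sde_kms *sde_kms,
                    struct dentry *root)
    {
            return sde_debugfs_core_irq_init(sde_kms, root);
    }

    static void example_kms_debugfs_destroy(struct sde_kms *sde_kms)
    {
            sde_debugfs_core_irq_destroy(sde_kms);
    }
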
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index acb5695..f2d78cb 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -45,6 +45,16 @@
struct list_head list;
};
+struct sde_crtc_custom_events {
+ u32 event;
+ int (*func)(struct drm_crtc *crtc, bool en,
+ struct sde_irq_callback *irq);
+};
+
+static struct sde_crtc_custom_events custom_events[] = {
+ {DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt}
+};
+
/* default input fence timeout, in ms */
#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000
@@ -107,6 +117,374 @@
enable);
}
+/**
+ * _sde_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_sde_crtc_rp_to_crtc(struct sde_crtc_respool *rp)
+{
+ if (!rp)
+ return NULL;
+
+ return container_of(rp, struct sde_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _sde_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _sde_crtc_rp_reclaim(struct sde_crtc_respool *rp, bool force)
+{
+ struct sde_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ crtc = _sde_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ SDE_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+ force ? "destroy" : "free_unused");
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (!force && !(res->flags & SDE_CRTC_RES_FLAG_FREE))
+ continue;
+ SDE_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ list_del(&res->list);
+ if (res->ops.put)
+ res->ops.put(res->val);
+ kfree(res);
+ }
+}
+
+/**
+ * _sde_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _sde_crtc_rp_free_unused(struct sde_crtc_respool *rp)
+{
+ _sde_crtc_rp_reclaim(rp, false);
+}
+
+/**
+ * _sde_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_destroy(struct sde_crtc_respool *rp)
+{
+ _sde_crtc_rp_reclaim(rp, true);
+}
+
+/**
+ * _sde_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_sde_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+ SDE_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+ return sde_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _sde_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _sde_crtc_hw_blk_put(void *val)
+{
+ SDE_DEBUG("res://%pK\n", val);
+ sde_hw_blk_put(val);
+}
+
+/**
+ * _sde_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_duplicate(struct sde_crtc_respool *rp,
+ struct sde_crtc_respool *dup_rp)
+{
+ struct sde_crtc_res *res, *dup_res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !dup_rp) {
+ SDE_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _sde_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ SDE_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+ dup_rp->sequence_id = rp->sequence_id + 1;
+ INIT_LIST_HEAD(&dup_rp->res_list);
+ dup_rp->ops = rp->ops;
+ list_for_each_entry(res, &rp->res_list, list) {
+ dup_res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
+ if (!dup_res)
+ return;
+ INIT_LIST_HEAD(&dup_res->list);
+ atomic_set(&dup_res->refcount, 0);
+ dup_res->type = res->type;
+ dup_res->tag = res->tag;
+ dup_res->val = res->val;
+ dup_res->ops = res->ops;
+ dup_res->flags = SDE_CRTC_RES_FLAG_FREE;
+ SDE_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, dup_rp->sequence_id,
+ dup_res->type, dup_res->tag, dup_res->val,
+ atomic_read(&dup_res->refcount));
+ list_add_tail(&dup_res->list, &dup_rp->res_list);
+ if (dup_res->ops.get)
+ dup_res->ops.get(dup_res->val, 0, -1);
+ }
+}
+
+/**
+ * _sde_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_reset(struct sde_crtc_respool *rp)
+{
+ if (!rp) {
+ SDE_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ rp->sequence_id = 0;
+ INIT_LIST_HEAD(&rp->res_list);
+ rp->ops.get = _sde_crtc_hw_blk_get;
+ rp->ops.put = _sde_crtc_hw_blk_put;
+}
+
+/**
+ * _sde_crtc_rp_add - add given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+static int _sde_crtc_rp_add(struct sde_crtc_respool *rp, u32 type, u64 tag,
+ void *val, struct sde_crtc_res_ops *ops)
+{
+ struct sde_crtc_res *res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !ops) {
+ SDE_ERROR("invalid resource pool/ops\n");
+ return -EINVAL;
+ }
+
+ crtc = _sde_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(res, &rp->res_list, list) {
+ if (res->type != type || res->tag != tag)
+ continue;
+ SDE_ERROR("crtc%d.%u already exist res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ return -EEXIST;
+ }
+ res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&res->list);
+ atomic_set(&res->refcount, 1);
+ res->type = type;
+ res->tag = tag;
+ res->val = val;
+ res->ops = *ops;
+ list_add_tail(&res->list, &rp->res_list);
+ SDE_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
+ crtc->base.id, rp->sequence_id, type, tag);
+ return 0;
+}
+
+/**
+ * _sde_crtc_rp_get - lookup the resource from given resource pool and obtain
+ * if available; otherwise, obtain resource from global pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle if success; pointer error or null otherwise
+ */
+static void *_sde_crtc_rp_get(struct sde_crtc_respool *rp, u32 type, u64 tag)
+{
+ struct sde_crtc_res *res;
+ void *val = NULL;
+ int rc;
+ struct drm_crtc *crtc;
+
+ if (!rp) {
+ SDE_ERROR("invalid resource pool\n");
+ return NULL;
+ }
+
+ crtc = _sde_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return NULL;
+ }
+
+ list_for_each_entry(res, &rp->res_list, list) {
+ if (res->type != type || res->tag != tag)
+ continue;
+ SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ atomic_inc(&res->refcount);
+ res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
+ return res->val;
+ }
+ list_for_each_entry(res, &rp->res_list, list) {
+ if (res->type != type || !(res->flags & SDE_CRTC_RES_FLAG_FREE))
+ continue;
+ SDE_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ atomic_inc(&res->refcount);
+ res->tag = tag;
+ res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
+ return res->val;
+ }
+ if (rp->ops.get)
+ val = rp->ops.get(NULL, type, -1);
+ if (IS_ERR_OR_NULL(val)) {
+ SDE_ERROR("crtc%d.%u failed to get res:0x%x//\n",
+ crtc->base.id, rp->sequence_id, type);
+ return NULL;
+ }
+ rc = _sde_crtc_rp_add(rp, type, tag, val, &rp->ops);
+ if (rc) {
+ SDE_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
+ crtc->base.id, rp->sequence_id, type, tag);
+ if (rp->ops.put)
+ rp->ops.put(val);
+ val = NULL;
+ }
+ return val;
+}
+
+/**
+ * _sde_crtc_rp_put - return given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+static void _sde_crtc_rp_put(struct sde_crtc_respool *rp, u32 type, u64 tag)
+{
+ struct sde_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ if (!rp) {
+ SDE_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _sde_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (res->type != type || res->tag != tag)
+ continue;
+ SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ if (res->flags & SDE_CRTC_RES_FLAG_FREE)
+ SDE_ERROR(
+ "crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ else if (atomic_dec_return(&res->refcount) == 0)
+ res->flags |= SDE_CRTC_RES_FLAG_FREE;
+
+ return;
+ }
+ SDE_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
+ crtc->base.id, rp->sequence_id, type, tag);
+}
+
+int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+ void *val, struct sde_crtc_res_ops *ops)
+{
+ struct sde_crtc_respool *rp;
+
+ if (!state) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ rp = &to_sde_crtc_state(state)->rp;
+ return _sde_crtc_rp_add(rp, type, tag, val, ops);
+}
+
+void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+ struct sde_crtc_respool *rp;
+ void *val;
+
+ if (!state) {
+ SDE_ERROR("invalid parameters\n");
+ return NULL;
+ }
+
+ rp = &to_sde_crtc_state(state)->rp;
+ val = _sde_crtc_rp_get(rp, type, tag);
+ if (IS_ERR(val)) {
+ SDE_ERROR("failed to get res type:0x%x:0x%llx\n",
+ type, tag);
+ return NULL;
+ }
+
+ return val;
+}
+
+void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+ struct sde_crtc_respool *rp;
+
+ if (!state) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ rp = &to_sde_crtc_state(state)->rp;
+ _sde_crtc_rp_put(rp, type, tag);
+}
+
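Taken together, these wrappers give crtc states a commit-scoped cache of hardware blocks: _sde_crtc_rp_get() first reuses a live entry with a matching tag, then retags an entry marked free, and only then falls back to the global pool, while _sde_crtc_rp_put() drops the reference and marks the entry reusable rather than releasing it. A usage sketch against a crtc state (the tag value is arbitrary; it only needs to be unique per resource type):

    /* Sketch only: acquire and release a rotator plane resource. */
    u64 tag = 0x1;
    void *rot;

    rot = sde_crtc_res_get(crtc->state, SDE_CRTC_RES_ROT_PLANE, tag);
    if (rot) {
            /* ... use the resource for this commit ... */
            sde_crtc_res_put(crtc->state, SDE_CRTC_RES_ROT_PLANE, tag);
    }
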
static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
{
if (!sde_crtc)
@@ -214,8 +592,9 @@
lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
bg_alpha, blend_op);
- SDE_DEBUG("format 0x%x, alpha_enable %u fg alpha:0x%x bg alpha:0x%x \"\
- blend_op:0x%x\n", format->base.pixel_format,
+ SDE_DEBUG(
+ "format: %4.4s, alpha_enable %u fg alpha:0x%x bg alpha:0x%x blend_op:0x%x\n",
+ (char *) &format->base.pixel_format,
format->alpha_enable, fg_alpha, bg_alpha, blend_op);
}
@@ -223,10 +602,9 @@
struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer,
struct sde_hw_dim_layer *dim_layer)
{
+ struct sde_crtc_state *cstate;
struct sde_hw_mixer *lm;
- struct sde_rect mixer_rect;
struct sde_hw_dim_layer split_dim_layer;
- u32 mixer_width, mixer_height;
int i;
if (!dim_layer->rect.w || !dim_layer->rect.h) {
@@ -234,9 +612,7 @@
return;
}
- mixer_width = get_crtc_split_width(crtc);
- mixer_height = get_crtc_mixer_height(crtc);
- mixer_rect = (struct sde_rect) {0, 0, mixer_width, mixer_height};
+ cstate = to_sde_crtc_state(crtc->state);
split_dim_layer.stage = dim_layer->stage;
split_dim_layer.color_fill = dim_layer->color_fill;
@@ -247,17 +623,15 @@
*/
for (i = 0; i < sde_crtc->num_mixers; i++) {
split_dim_layer.flags = dim_layer->flags;
- mixer_rect.x = i * mixer_width;
- sde_kms_rect_intersect(&split_dim_layer.rect, &mixer_rect,
- &dim_layer->rect);
- if (!split_dim_layer.rect.w && !split_dim_layer.rect.h) {
+ sde_kms_rect_intersect(&cstate->lm_bounds[i], &dim_layer->rect,
+ &split_dim_layer.rect);
+ if (sde_kms_rect_is_null(&split_dim_layer.rect)) {
/*
* no extra programming required for non-intersecting
* layer mixers with INCLUSIVE dim layer
*/
- if (split_dim_layer.flags
- & SDE_DRM_DIM_LAYER_INCLUSIVE)
+ if (split_dim_layer.flags & SDE_DRM_DIM_LAYER_INCLUSIVE)
continue;
/*
@@ -267,12 +641,13 @@
*/
split_dim_layer.flags &= ~SDE_DRM_DIM_LAYER_EXCLUSIVE;
split_dim_layer.flags |= SDE_DRM_DIM_LAYER_INCLUSIVE;
- split_dim_layer.rect = (struct sde_rect) {0, 0,
- mixer_width, mixer_height};
+ memcpy(&split_dim_layer.rect, &cstate->lm_bounds[i],
+ sizeof(split_dim_layer.rect));
} else {
- split_dim_layer.rect.x = split_dim_layer.rect.x
- - (i * mixer_width);
+ split_dim_layer.rect.x =
+ split_dim_layer.rect.x -
+ cstate->lm_bounds[i].w;
}
lm = mixer[i].hw_lm;
@@ -281,23 +656,415 @@
}
}
+void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
+ const struct sde_rect **crtc_roi)
+{
+ struct sde_crtc_state *crtc_state;
+
+ if (!state || !crtc_roi)
+ return;
+
+ crtc_state = to_sde_crtc_state(state);
+ *crtc_roi = &crtc_state->crtc_roi;
+}
+
+static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
+ void *usr_ptr)
+{
+ struct drm_crtc *crtc;
+ struct sde_crtc_state *cstate;
+ struct sde_drm_roi_v1 roi_v1;
+ int i;
+
+ if (!state) {
+ SDE_ERROR("invalid args\n");
+ return -EINVAL;
+ }
+
+ cstate = to_sde_crtc_state(state);
+ crtc = cstate->base.crtc;
+
+ memset(&cstate->user_roi_list, 0, sizeof(cstate->user_roi_list));
+
+ if (!usr_ptr) {
+ SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
+ return 0;
+ }
+
+ if (copy_from_user(&roi_v1, usr_ptr, sizeof(roi_v1))) {
+ SDE_ERROR("crtc%d: failed to copy roi_v1 data\n", DRMID(crtc));
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("crtc%d: num_rects %d\n", DRMID(crtc), roi_v1.num_rects);
+
+ if (roi_v1.num_rects == 0) {
+ SDE_DEBUG("crtc%d: rois cleared\n", DRMID(crtc));
+ return 0;
+ }
+
+ if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
+ SDE_ERROR("crtc%d: too many rects specified: %d\n", DRMID(crtc),
+ roi_v1.num_rects);
+ return -EINVAL;
+ }
+
+ cstate->user_roi_list.num_rects = roi_v1.num_rects;
+ for (i = 0; i < roi_v1.num_rects; ++i) {
+ cstate->user_roi_list.roi[i] = roi_v1.roi[i];
+ SDE_DEBUG("crtc%d: roi%d: roi (%d,%d) (%d,%d)\n",
+ DRMID(crtc), i,
+ cstate->user_roi_list.roi[i].x1,
+ cstate->user_roi_list.roi[i].y1,
+ cstate->user_roi_list.roi[i].x2,
+ cstate->user_roi_list.roi[i].y2);
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ struct sde_rect *crtc_roi;
+ struct drm_clip_rect crtc_clip, *user_rect;
+ int i, num_attached_conns = 0;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+ crtc_roi = &crtc_state->crtc_roi;
+
+ /* init to invalid range maxes */
+ crtc_clip.x1 = ~0;
+ crtc_clip.y1 = ~0;
+ crtc_clip.x2 = 0;
+ crtc_clip.y2 = 0;
+
+ for_each_connector_in_state(state->state, conn, conn_state, i) {
+ struct sde_connector_state *sde_conn_state;
+
+ if (!conn_state || conn_state->crtc != crtc)
+ continue;
+
+ if (num_attached_conns) {
+ SDE_ERROR(
+ "crtc%d: unsupported: roi on crtc w/ >1 connectors\n",
+ DRMID(crtc));
+ return -EINVAL;
+ }
+ ++num_attached_conns;
+
+ sde_conn_state = to_sde_connector_state(conn_state);
+
+ if (memcmp(&sde_conn_state->rois, &crtc_state->user_roi_list,
+ sizeof(crtc_state->user_roi_list))) {
+ SDE_ERROR("%s: crtc -> conn roi scaling unsupported\n",
+ sde_crtc->name);
+ return -EINVAL;
+ }
+ }
+
+ /* aggregate all clipping rectangles together for overall crtc roi */
+ for (i = 0; i < crtc_state->user_roi_list.num_rects; i++) {
+ user_rect = &crtc_state->user_roi_list.roi[i];
+
+ crtc_clip.x1 = min(crtc_clip.x1, user_rect->x1);
+ crtc_clip.y1 = min(crtc_clip.y1, user_rect->y1);
+ crtc_clip.x2 = max(crtc_clip.x2, user_rect->x2);
+ crtc_clip.y2 = max(crtc_clip.y2, user_rect->y2);
+
+ SDE_DEBUG(
+ "%s: conn%d roi%d (%d,%d),(%d,%d) -> crtc (%d,%d),(%d,%d)\n",
+ sde_crtc->name, DRMID(crtc), i,
+ user_rect->x1, user_rect->y1,
+ user_rect->x2, user_rect->y2,
+ crtc_clip.x1, crtc_clip.y1,
+ crtc_clip.x2, crtc_clip.y2);
+
+ }
+
+ if (crtc_clip.x2 && crtc_clip.y2) {
+ crtc_roi->x = crtc_clip.x1;
+ crtc_roi->y = crtc_clip.y1;
+ crtc_roi->w = crtc_clip.x2 - crtc_clip.x1;
+ crtc_roi->h = crtc_clip.y2 - crtc_clip.y1;
+ } else {
+ crtc_roi->x = 0;
+ crtc_roi->y = 0;
+ crtc_roi->w = 0;
+ crtc_roi->h = 0;
+ }
+
+ SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
+ crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
+
+ return 0;
+}
+
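The aggregation above is a plain bounding-box union: for example, user ROIs (0,0)-(100,100) and (200,0)-(300,100) collapse into a crtc ROI of x=0, y=0, w=300, h=100, with the gap between them swallowed by the union. A standalone restatement of the reduction, under the same convention that an all-zero output means no ROI:

    /* Sketch only: the same min/max reduction in isolation. */
    static void example_union(const struct drm_clip_rect *rects, int n,
                    struct sde_rect *out)
    {
            struct drm_clip_rect clip;
            int i;

            clip.x1 = ~0;
            clip.y1 = ~0;
            clip.x2 = 0;
            clip.y2 = 0;
            memset(out, 0, sizeof(*out));
            for (i = 0; i < n; i++) {
                    clip.x1 = min(clip.x1, rects[i].x1);
                    clip.y1 = min(clip.y1, rects[i].y1);
                    clip.x2 = max(clip.x2, rects[i].x2);
                    clip.y2 = max(clip.y2, rects[i].y2);
            }
            if (clip.x2 && clip.y2) {
                    out->x = clip.x1;
                    out->y = clip.y1;
                    out->w = clip.x2 - clip.x1;
                    out->h = clip.y2 - clip.y1;
            }
    }
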
+static int _sde_crtc_check_autorefresh(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+
+ if (sde_kms_rect_is_null(&crtc_state->crtc_roi))
+ return 0;
+
+ /* partial update active, check if autorefresh is also requested */
+ for_each_connector_in_state(state->state, conn, conn_state, i) {
+ uint64_t autorefresh;
+
+ if (!conn_state || conn_state->crtc != crtc)
+ continue;
+
+ autorefresh = sde_connector_get_property(conn_state,
+ CONNECTOR_PROP_AUTOREFRESH);
+ if (autorefresh) {
+ SDE_ERROR(
+ "%s: autorefresh & partial crtc roi incompatible %llu\n",
+ sde_crtc->name, autorefresh);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_set_lm_roi(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, int lm_idx)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *crtc_roi;
+ const struct sde_rect *lm_bounds;
+ struct sde_rect *lm_roi;
+
+ if (!crtc || !state || lm_idx >= ARRAY_SIZE(crtc_state->lm_bounds))
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+ crtc_roi = &crtc_state->crtc_roi;
+ lm_bounds = &crtc_state->lm_bounds[lm_idx];
+ lm_roi = &crtc_state->lm_roi[lm_idx];
+
+ if (!sde_kms_rect_is_null(crtc_roi)) {
+ sde_kms_rect_intersect(crtc_roi, lm_bounds, lm_roi);
+ if (sde_kms_rect_is_null(lm_roi)) {
+ SDE_ERROR("unsupported R/L only partial update\n");
+ return -EINVAL;
+ }
+ } else {
+ memcpy(lm_roi, lm_bounds, sizeof(*lm_roi));
+ }
+
+ SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
+ lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
+
+ return 0;
+}
+
+static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *roi_prv, *roi_cur;
+ int lm_idx;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ /*
+ * On certain HW, ROIs must be centered on the split between LMs,
+ * and be of equal width.
+ */
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+
+ roi_prv = &crtc_state->lm_roi[0];
+ for (lm_idx = 1; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+ roi_cur = &crtc_state->lm_roi[lm_idx];
+
+ /* check lm rois are equal width & first roi ends at 2nd roi */
+ if (((roi_prv->x + roi_prv->w) != roi_cur->x) ||
+ (roi_prv->w != roi_cur->w)) {
+ SDE_ERROR("%s: roi lm%d x %d w %d lm%d x %d w %d\n",
+ sde_crtc->name,
+ lm_idx-1, roi_prv->x, roi_prv->w,
+ lm_idx, roi_cur->x, roi_cur->w);
+ return -EINVAL;
+ }
+ roi_prv = roi_cur;
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_check_planes_within_crtc_roi(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *crtc_roi;
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ /*
+ * Reject the commit if a plane's CRTC destination coordinates fall
+ * outside the partial CRTC ROI. LM output is determined via connector
+ * ROIs, if they are specified, not plane CRTC ROIs.
+ */
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(state);
+ crtc_roi = &crtc_state->crtc_roi;
+
+ if (sde_kms_rect_is_null(crtc_roi))
+ return 0;
+
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ struct sde_rect plane_roi, intersection;
+
+ pstate = drm_atomic_get_plane_state(state->state, plane);
+ if (IS_ERR_OR_NULL(pstate)) {
+ int rc = PTR_ERR(pstate);
+
+ SDE_ERROR("%s: failed to get plane%d state, %d\n",
+ sde_crtc->name, plane->base.id, rc);
+ return rc;
+ }
+
+ plane_roi.x = pstate->crtc_x;
+ plane_roi.y = pstate->crtc_y;
+ plane_roi.w = pstate->crtc_w;
+ plane_roi.h = pstate->crtc_h;
+ sde_kms_rect_intersect(crtc_roi, &plane_roi, &intersection);
+ if (!sde_kms_rect_is_equal(&plane_roi, &intersection)) {
+ SDE_ERROR(
+ "%s: plane%d crtc roi (%d,%d,%d,%d) outside crtc roi (%d,%d,%d,%d)\n",
+ sde_crtc->name, plane->base.id,
+ plane_roi.x, plane_roi.y,
+ plane_roi.w, plane_roi.h,
+ crtc_roi->x, crtc_roi->y,
+ crtc_roi->w, crtc_roi->h);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
+static int _sde_crtc_check_rois(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ int lm_idx;
+ int rc;
+
+ if (!crtc || !state)
+ return -EINVAL;
+
+ sde_crtc = to_sde_crtc(crtc);
+
+ rc = _sde_crtc_set_crtc_roi(crtc, state);
+ if (rc)
+ return rc;
+
+ rc = _sde_crtc_check_autorefresh(crtc, state);
+ if (rc)
+ return rc;
+
+ for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+ rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
+ if (rc)
+ return rc;
+ }
+
+ rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
+ if (rc)
+ return rc;
+
+ rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static void _sde_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *crtc_state;
+ const struct sde_rect *lm_roi;
+ struct sde_hw_mixer *hw_lm;
+ int lm_idx, lm_horiz_position;
+
+ if (!crtc)
+ return;
+
+ sde_crtc = to_sde_crtc(crtc);
+ crtc_state = to_sde_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+ struct sde_hw_mixer_cfg cfg;
+
+ lm_roi = &crtc_state->lm_roi[lm_idx];
+ hw_lm = sde_crtc->mixers[lm_idx].hw_lm;
+
+ SDE_EVT32(DRMID(crtc_state->base.crtc), lm_idx,
+ lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
+
+ if (sde_kms_rect_is_null(lm_roi))
+ continue;
+
+ cfg.out_width = lm_roi->w;
+ cfg.out_height = lm_roi->h;
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
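Worked example: on a dual-LM 1440-pixel-wide mode, lm_bounds are (0,0,720,h) and (720,0,720,h). A crtc ROI of (360,0,720,200) intersects to lm_roi[0] = (360,0,360,200) and lm_roi[1] = (720,0,360,200), which also satisfies the centered-and-symmetric check above (equal widths, first ROI ending where the second begins). The loop then programs each mixer with out_width 360, out_height 200 and consecutive right_mixer positions, while any mixer whose ROI intersected to null is skipped without consuming a position.
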
static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
{
struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
struct sde_crtc_state *cstate;
struct sde_plane_state *pstate = NULL;
struct sde_format *format;
struct sde_hw_ctl *ctl;
struct sde_hw_mixer *lm;
struct sde_hw_stage_cfg *stage_cfg;
+ struct sde_rect plane_crtc_roi;
- u32 flush_mask = 0, crtc_split_width;
- uint32_t lm_idx = LEFT_MIXER, idx;
+ u32 flush_mask = 0;
+ uint32_t lm_idx = LEFT_MIXER, stage_idx;
bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
- bool lm_right = false;
- int left_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
- int right_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+ int zpos_cnt[CRTC_DUAL_MIXERS][SDE_STAGE_MAX + 1] = { {0} };
int i;
bool sbuf_mode = false;
u32 prefill = 0;
@@ -310,84 +1077,82 @@
ctl = mixer->hw_ctl;
lm = mixer->hw_lm;
stage_cfg = &sde_crtc->stage_cfg;
- crtc_split_width = get_crtc_split_width(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
- pstate = to_sde_plane_state(plane->state);
+ plane_crtc_roi.x = state->crtc_x;
+ plane_crtc_roi.y = state->crtc_y;
+ plane_crtc_roi.w = state->crtc_w;
+ plane_crtc_roi.h = state->crtc_h;
+
+ pstate = to_sde_plane_state(state);
+ fb = state->fb;
if (sde_plane_is_sbuf_mode(plane, &prefill))
sbuf_mode = true;
sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
- /* always stage plane on either left or right lm */
- if (plane->state->crtc_x >= crtc_split_width) {
- lm_idx = RIGHT_MIXER;
- idx = right_crtc_zpos_cnt[pstate->stage]++;
- } else {
- lm_idx = LEFT_MIXER;
- idx = left_crtc_zpos_cnt[pstate->stage]++;
- }
-
- /* stage plane on right LM if it crosses the boundary */
- lm_right = (lm_idx == LEFT_MIXER) &&
- (plane->state->crtc_x + plane->state->crtc_w >
- crtc_split_width);
-
- stage_cfg->stage[lm_idx][pstate->stage][idx] =
- sde_plane_pipe(plane);
- stage_cfg->multirect_index
- [lm_idx][pstate->stage][idx] =
- pstate->multirect_index;
- mixer[lm_idx].flush_mask |= flush_mask;
SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
sde_plane_pipe(plane) - SSPP_VIG0,
- plane->state->fb ?
- plane->state->fb->base.id : -1);
+ state->fb ? state->fb->base.id : -1);
format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
- /* blend config update */
- if (pstate->stage != SDE_STAGE_BASE) {
- _sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
- format);
+ SDE_EVT32(DRMID(crtc), DRMID(plane),
+ state->fb ? state->fb->base.id : -1,
+ state->src_x >> 16, state->src_y >> 16,
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h);
- if (bg_alpha_enable[lm_idx] && !format->alpha_enable)
- mixer[lm_idx].mixer_op_mode = 0;
- else
- mixer[lm_idx].mixer_op_mode |=
- 1 << pstate->stage;
- } else if (format->alpha_enable) {
- bg_alpha_enable[lm_idx] = true;
- }
+ for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+ struct sde_rect intersect;
- if (lm_right) {
- idx = right_crtc_zpos_cnt[pstate->stage]++;
- stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx] =
- sde_plane_pipe(plane);
+ /* skip if the roi doesn't fall within LM's bounds */
+ sde_kms_rect_intersect(&plane_crtc_roi,
+ &cstate->lm_bounds[lm_idx],
+ &intersect);
+ if (sde_kms_rect_is_null(&intersect))
+ continue;
+
+ stage_idx = zpos_cnt[lm_idx][pstate->stage]++;
+ stage_cfg->stage[lm_idx][pstate->stage][stage_idx] =
+ sde_plane_pipe(plane);
stage_cfg->multirect_index
- [RIGHT_MIXER][pstate->stage][idx] =
- pstate->multirect_index;
- mixer[RIGHT_MIXER].flush_mask |= flush_mask;
+ [lm_idx][pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ SDE_EVT32(DRMID(plane), DRMID(crtc), lm_idx, stage_idx,
+ pstate->stage, pstate->multirect_index,
+ pstate->multirect_mode,
+ format->base.pixel_format,
+ fb ? fb->modifier[0] : 0);
/* blend config update */
if (pstate->stage != SDE_STAGE_BASE) {
- _sde_crtc_setup_blend_cfg(mixer + RIGHT_MIXER,
- pstate, format);
+ _sde_crtc_setup_blend_cfg(mixer + lm_idx,
+ pstate, format);
- if (bg_alpha_enable[RIGHT_MIXER] &&
+ if (bg_alpha_enable[lm_idx] &&
!format->alpha_enable)
- mixer[RIGHT_MIXER].mixer_op_mode = 0;
+ mixer[lm_idx].mixer_op_mode = 0;
else
- mixer[RIGHT_MIXER].mixer_op_mode |=
+ mixer[lm_idx].mixer_op_mode |=
1 << pstate->stage;
} else if (format->alpha_enable) {
- bg_alpha_enable[RIGHT_MIXER] = true;
+ bg_alpha_enable[lm_idx] = true;
}
}
}
@@ -413,6 +1178,8 @@
ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
}
+
+ _sde_crtc_program_lm_output_roi(crtc);
}
/**
@@ -478,6 +1245,8 @@
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&sde_crtc->stage_cfg, i);
}
+
+ _sde_crtc_program_lm_output_roi(crtc);
}
void sde_crtc_prepare_commit(struct drm_crtc *crtc,
@@ -494,7 +1263,7 @@
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(crtc->state);
- SDE_EVT32(DRMID(crtc));
+ SDE_EVT32_VERBOSE(DRMID(crtc));
/* identify connectors attached to this crtc */
cstate->num_connectors = 0;
@@ -576,7 +1345,7 @@
_sde_crtc_complete_flip(crtc, NULL);
drm_crtc_handle_vblank(crtc);
DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
- SDE_EVT32_IRQ(DRMID(crtc));
+ SDE_EVT32_VERBOSE(DRMID(crtc));
}
static void sde_crtc_frame_event_work(struct kthread_work *work)
@@ -622,7 +1391,8 @@
crtc->base.id,
ktime_to_ns(fevent->ts),
atomic_read(&sde_crtc->frame_pending));
- SDE_EVT32(DRMID(crtc), fevent->event, 0);
+ SDE_EVT32(DRMID(crtc), fevent->event,
+ SDE_EVTLOG_FUNC_CASE1);
/* don't propagate unexpected frame done events */
return;
@@ -631,16 +1401,18 @@
SDE_DEBUG("crtc%d ts:%lld last pending\n",
crtc->base.id,
ktime_to_ns(fevent->ts));
- SDE_EVT32(DRMID(crtc), fevent->event, 1);
+ SDE_EVT32(DRMID(crtc), fevent->event,
+ SDE_EVTLOG_FUNC_CASE2);
sde_core_perf_crtc_release_bw(crtc);
} else {
- SDE_EVT32(DRMID(crtc), fevent->event, 2);
+ SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
+ SDE_EVTLOG_FUNC_CASE3);
}
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
ktime_to_ns(fevent->ts),
fevent->event);
- SDE_EVT32(DRMID(crtc), fevent->event, 3);
+ SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE4);
}
if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
@@ -670,8 +1442,7 @@
pipe_id = drm_crtc_index(crtc);
SDE_DEBUG("crtc%d\n", crtc->base.id);
-
- SDE_EVT32(DRMID(crtc), event);
+ SDE_EVT32_VERBOSE(DRMID(crtc));
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
@@ -707,7 +1478,7 @@
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(crtc->state);
- SDE_EVT32(DRMID(crtc));
+ SDE_EVT32_VERBOSE(DRMID(crtc));
/* signal output fence(s) at end of commit */
sde_fence_signal(&sde_crtc->output_fence, 0);
@@ -903,9 +1674,48 @@
_sde_crtc_setup_mixer_for_encoder(crtc, enc);
}
+
mutex_unlock(&sde_crtc->crtc_lock);
}
+static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
+ struct drm_display_mode *adj_mode;
+ u32 crtc_split_width;
+ int i;
+
+ if (!crtc || !state) {
+ SDE_ERROR("invalid args\n");
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(state);
+
+ adj_mode = &state->adjusted_mode;
+ crtc_split_width = sde_crtc_mixer_width(sde_crtc, adj_mode);
+
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ cstate->lm_bounds[i].x = crtc_split_width * i;
+ cstate->lm_bounds[i].y = 0;
+ cstate->lm_bounds[i].w = crtc_split_width;
+ cstate->lm_bounds[i].h = adj_mode->vdisplay;
+ memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
+ sizeof(cstate->lm_roi[i]));
+ SDE_EVT32(DRMID(crtc), i,
+ cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
+ cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
+ SDE_DEBUG("%s: lm%d bnd&roi (%d,%d,%d,%d)\n", sde_crtc->name, i,
+ cstate->lm_roi[i].x, cstate->lm_roi[i].y,
+ cstate->lm_roi[i].w, cstate->lm_roi[i].h);
+ }
+
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -930,8 +1740,10 @@
sde_crtc = to_sde_crtc(crtc);
dev = crtc->dev;
- if (!sde_crtc->num_mixers)
+ if (!sde_crtc->num_mixers) {
_sde_crtc_setup_mixers(crtc);
+ _sde_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
if (sde_crtc->event) {
WARN_ON(sde_crtc->event);
@@ -1021,7 +1833,7 @@
continue;
cstate->rsc_client =
- sde_encoder_update_rsc_client(encoder, true);
+ sde_encoder_get_rsc_client(encoder);
}
cstate->rsc_update = true;
}
@@ -1061,6 +1873,8 @@
SDE_DEBUG("crtc%d\n", crtc->base.id);
+ _sde_crtc_rp_destroy(&cstate->rp);
+
__drm_atomic_helper_crtc_destroy_state(state);
/* destroy value helper */
@@ -1252,6 +2066,8 @@
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+ _sde_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
return &cstate->base;
}
@@ -1294,10 +2110,39 @@
_sde_crtc_set_input_fence_timeout(cstate);
+ _sde_crtc_rp_reset(&cstate->rp);
+
cstate->base.crtc = crtc;
crtc->state = &cstate->base;
}
+static int _sde_crtc_vblank_no_lock(struct sde_crtc *sde_crtc, bool en)
+{
+ if (!sde_crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return -EINVAL;
+ } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
+ SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id);
+ if (!sde_crtc->suspend)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, true);
+ } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
+ SDE_ERROR("crtc%d invalid vblank disable\n",
+ sde_crtc->base.base.id);
+ return -EINVAL;
+ } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
+ SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id);
+ if (!sde_crtc->suspend)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, false);
+ } else {
+ SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
+ sde_crtc->base.base.id,
+ en ? "enable" : "disable",
+ atomic_read(&sde_crtc->vblank_refcount));
+ }
+
+ return 0;
+}
+
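The helper keeps vblank strictly refcounted: only the 0-to-1 transition arms the hardware and only the 1-to-0 transition disarms it, with an explicit error (rather than a wrap-around) on disable underflow. Callers go through the locked sde_crtc_vblank() wrapper below and are expected to stay balanced:

    /* Sketch only: balanced usage through the public wrapper. */
    sde_crtc_vblank(crtc, true);    /* 0 -> 1: hw vblank enabled */
    sde_crtc_vblank(crtc, true);    /* 1 -> 2: refcount only */
    sde_crtc_vblank(crtc, false);   /* 2 -> 1: refcount only */
    sde_crtc_vblank(crtc, false);   /* 1 -> 0: hw vblank disabled */
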
static void sde_crtc_disable(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc;
@@ -1325,21 +2170,19 @@
if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
SDE_ERROR("crtc%d invalid vblank refcount\n",
crtc->base.id);
- SDE_EVT32(DRMID(crtc));
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
- sde_encoder_register_vblank_callback(encoder, NULL,
- NULL);
- }
- atomic_set(&sde_crtc->vblank_refcount, 0);
+ SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
+ SDE_EVTLOG_FUNC_CASE1);
+ while (atomic_read(&sde_crtc->vblank_refcount))
+ if (_sde_crtc_vblank_no_lock(sde_crtc, false))
+ break;
}
if (atomic_read(&sde_crtc->frame_pending)) {
/* release bandwidth and other resources */
SDE_ERROR("crtc%d invalid frame pending\n",
crtc->base.id);
- SDE_EVT32(DRMID(crtc));
+ SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
+ SDE_EVTLOG_FUNC_CASE2);
sde_core_perf_crtc_release_bw(crtc);
atomic_set(&sde_crtc->frame_pending, 0);
}
@@ -1350,7 +2193,6 @@
if (encoder->crtc != crtc)
continue;
sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
- sde_encoder_update_rsc_client(encoder, false);
cstate->rsc_client = NULL;
cstate->rsc_update = false;
}
@@ -1375,13 +2217,10 @@
static void sde_crtc_enable(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc;
- struct sde_crtc_mixer *mixer;
- struct sde_hw_mixer *lm;
- struct drm_display_mode *mode;
struct drm_encoder *encoder;
unsigned long flags;
struct sde_crtc_irq_info *node = NULL;
- int i, ret;
+ int ret;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -1390,16 +2229,7 @@
SDE_DEBUG("crtc%d\n", crtc->base.id);
SDE_EVT32(DRMID(crtc));
-
sde_crtc = to_sde_crtc(crtc);
- mixer = sde_crtc->mixers;
-
- if (WARN_ON(!crtc->state))
- return;
-
- mode = &crtc->state->adjusted_mode;
-
- drm_mode_debug_printmodeline(mode);
drm_for_each_encoder(encoder, crtc->dev) {
if (encoder->crtc != crtc)
@@ -1408,15 +2238,6 @@
sde_crtc_frame_event_cb, (void *)crtc);
}
- for (i = 0; i < sde_crtc->num_mixers; i++) {
- lm = mixer[i].hw_lm;
- lm->cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
- lm->cfg.out_height = mode->vdisplay;
- lm->cfg.right_mixer = (i == 0) ? false : true;
- lm->cfg.flags = 0;
- lm->ops.setup_mixer_out(lm, &lm->cfg);
- }
-
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_for_each_entry(node, &sde_crtc->user_event_list, list) {
ret = 0;
@@ -1477,14 +2298,15 @@
return -EINVAL;
}
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(state);
+
if (!state->enable || !state->active) {
SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, state->enable, state->active);
- return 0;
+ goto end;
}
- sde_crtc = to_sde_crtc(crtc);
- cstate = to_sde_crtc_state(state);
mode = &state->adjusted_mode;
SDE_DEBUG("%s: check", sde_crtc->name);
@@ -1492,6 +2314,8 @@
mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
+ _sde_crtc_setup_lm_bounds(crtc, state);
+
/* get plane state for all drm planes associated with crtc state */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (IS_ERR_OR_NULL(pstate)) {
@@ -1690,15 +2514,21 @@
}
}
+ rc = _sde_crtc_check_rois(crtc, state);
+ if (rc) {
+ SDE_ERROR("crtc%d failed roi check %d\n", crtc->base.id, rc);
+ goto end;
+ }
end:
+ _sde_crtc_rp_free_unused(&cstate->rp);
return rc;
}
int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct sde_crtc *sde_crtc;
- int rc = 0;
+ int rc;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -1707,25 +2537,9 @@
sde_crtc = to_sde_crtc(crtc);
mutex_lock(&sde_crtc->crtc_lock);
- if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
- SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
- if (!sde_crtc->suspend)
- _sde_crtc_vblank_enable_nolock(sde_crtc, true);
- } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
- SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
- rc = -EINVAL;
- } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
- SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
- if (!sde_crtc->suspend)
- _sde_crtc_vblank_enable_nolock(sde_crtc, false);
- } else {
- SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
- crtc->base.id,
- en ? "enable" : "disable",
- atomic_read(&sde_crtc->vblank_refcount));
- }
-
+ rc = _sde_crtc_vblank_no_lock(sde_crtc, en);
mutex_unlock(&sde_crtc->crtc_lock);
+
return rc;
}
@@ -1815,6 +2629,9 @@
"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
}
+ msm_property_install_volatile_range(&sde_crtc->property_info,
+ "sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
+
sde_kms_info_reset(info);
sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
@@ -1887,6 +2704,9 @@
case CRTC_PROP_DIM_LAYER_V1:
_sde_crtc_set_dim_layer_v1(cstate, (void *)val);
break;
+ case CRTC_PROP_ROI_V1:
+ ret = _sde_crtc_set_roi_v1(state, (void *)val);
+ break;
default:
/* nothing to do */
break;
@@ -1897,6 +2717,9 @@
}
if (ret)
DRM_ERROR("failed to set the property\n");
+
+ SDE_DEBUG("crtc%d %s[%d] <= 0x%llx ret=%d\n", crtc->base.id,
+ property->name, property->base.id, val, ret);
}
return ret;
@@ -1933,19 +2756,28 @@
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int i, ret = -EINVAL;
+ bool conn_offset = false;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
} else {
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
+
+ for (i = 0; i < cstate->num_connectors; ++i) {
+ conn_offset = sde_connector_needs_offset(
+ cstate->connectors[i]);
+ if (conn_offset)
+ break;
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
uint32_t offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
- ret = sde_fence_create(
- &sde_crtc->output_fence, val, offset);
+ ret = sde_fence_create(&sde_crtc->output_fence, val,
+ offset + conn_offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
@@ -2199,6 +3031,7 @@
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
+ struct sde_crtc_res *res;
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
@@ -2208,6 +3041,13 @@
seq_printf(s, "max_per_pipe_ib: %llu\n",
cstate->cur_perf.max_per_pipe_ib);
+ seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
+ list_for_each_entry(res, &cstate->rp.res_list, list)
+ seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ seq_puts(s, "\n");
+
return 0;
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
@@ -2238,7 +3078,7 @@
return -EINVAL;
sde_crtc->debugfs_root = debugfs_create_dir(sde_crtc->name,
- sde_debugfs_get_root(sde_kms));
+ crtc->dev->primary->debugfs_root);
if (!sde_crtc->debugfs_root)
return -ENOMEM;
@@ -2273,7 +3113,6 @@
static void _sde_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
- return 0;
}
#endif /* CONFIG_DEBUG_FS */
@@ -2322,21 +3161,22 @@
}
event = container_of(work, struct sde_crtc_event, kt_work);
- if (event->cb_func)
- event->cb_func(event->usr);
/* set sde_crtc to NULL for static work structures */
sde_crtc = event->sde_crtc;
if (!sde_crtc)
return;
+ if (event->cb_func)
+ event->cb_func(&sde_crtc->base, event->usr);
+
spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
list_add_tail(&event->list, &sde_crtc->event_free_list);
spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
}
int sde_crtc_event_queue(struct drm_crtc *crtc,
- void (*func)(void *usr), void *usr)
+ void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
{
unsigned long irq_flags;
struct sde_crtc *sde_crtc;
@@ -2346,6 +3186,8 @@
return -EINVAL;
sde_crtc = to_sde_crtc(crtc);
+ if (!sde_crtc->event_thread)
+ return -EINVAL;
/*
* Obtain an event struct from the private cache. This event
* queue may be called from ISR contexts, so use a private
@@ -2474,8 +3316,128 @@
return crtc;
}
-int sde_crtc_register_custom_event(struct sde_kms *kms,
- struct drm_crtc *crtc_drm, u32 event, bool val)
+static int _sde_crtc_event_enable(struct sde_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event)
{
- return -EINVAL;
+ struct sde_crtc *crtc = NULL;
+ struct sde_crtc_irq_info *node;
+ struct msm_drm_private *priv;
+ unsigned long flags;
+ bool found = false;
+ int ret, i = 0;
+
+ crtc = to_sde_crtc(crtc_drm);
+ spin_lock_irqsave(&crtc->spin_lock, flags);
+ list_for_each_entry(node, &crtc->user_event_list, list) {
+ if (node->event == event) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&crtc->spin_lock, flags);
+
+ /* event already enabled */
+ if (found)
+ return 0;
+
+ node = NULL;
+ for (i = 0; i < ARRAY_SIZE(custom_events); i++) {
+ if (custom_events[i].event == event &&
+ custom_events[i].func) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->event = event;
+ INIT_LIST_HEAD(&node->list);
+ node->func = custom_events[i].func;
+ break;
+ }
+ }
+
+ if (!node) {
+ SDE_ERROR("unsupported event %x\n", event);
+ return -EINVAL;
+ }
+
+ priv = kms->dev->dev_private;
+ ret = 0;
+ if (crtc_drm->enabled) {
+ sde_power_resource_enable(&priv->phandle, kms->core_client,
+ true);
+ ret = node->func(crtc_drm, true, &node->irq);
+ sde_power_resource_enable(&priv->phandle, kms->core_client,
+ false);
+ }
+
+ if (!ret) {
+ spin_lock_irqsave(&crtc->spin_lock, flags);
+ list_add_tail(&node->list, &crtc->user_event_list);
+ spin_unlock_irqrestore(&crtc->spin_lock, flags);
+ } else {
+ kfree(node);
+ }
+
+ return ret;
+}
+
+static int _sde_crtc_event_disable(struct sde_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event)
+{
+ struct sde_crtc *crtc = NULL;
+ struct sde_crtc_irq_info *node = NULL;
+ struct msm_drm_private *priv;
+ unsigned long flags;
+ bool found = false;
+ int ret;
+
+ crtc = to_sde_crtc(crtc_drm);
+ spin_lock_irqsave(&crtc->spin_lock, flags);
+ list_for_each_entry(node, &crtc->user_event_list, list) {
+ if (node->event == event) {
+ list_del(&node->list);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&crtc->spin_lock, flags);
+
+ /* event already disabled */
+ if (!found)
+ return 0;
+
+ /*
+ * If the crtc is disabled, the interrupts were already cleared, so
+ * removing the node from the list is enough; nothing needs to be
+ * disabled or de-registered.
+ */
+ if (!crtc_drm->enabled) {
+ kfree(node);
+ return 0;
+ }
+ priv = kms->dev->dev_private;
+ sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ ret = node->func(crtc_drm, false, &node->irq);
+ sde_power_resource_enable(&priv->phandle, kms->core_client, false);
+ return ret;
+}
+
+int sde_crtc_register_custom_event(struct sde_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en)
+{
+ struct sde_crtc *crtc = NULL;
+ int ret;
+
+ crtc = to_sde_crtc(crtc_drm);
+ if (!crtc || !kms || !kms->dev) {
+ DRM_ERROR("invalid sde_crtc %pK kms %pK dev %pK\n", crtc,
+ kms, ((kms) ? (kms->dev) : NULL));
+ return -EINVAL;
+ }
+
+ if (en)
+ ret = _sde_crtc_event_enable(kms, crtc_drm, event);
+ else
+ ret = _sde_crtc_event_disable(kms, crtc_drm, event);
+
+ return ret;
}
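
Adding another crtc-level custom event now only requires a new entry in the custom_events[] table at the top of this file; _sde_crtc_event_enable()/_sde_crtc_event_disable() handle duplicate detection, power votes and list management generically. A hypothetical second entry (DRM_EVENT_EXAMPLE and sde_cp_example_interrupt are placeholders, not existing symbols):

    static struct sde_crtc_custom_events custom_events[] = {
            {DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
            {DRM_EVENT_EXAMPLE, sde_cp_example_interrupt},  /* hypothetical */
    };
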
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 5934405..98ba711 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -25,6 +25,7 @@
#include "sde_fence.h"
#include "sde_kms.h"
#include "sde_core_perf.h"
+#include "sde_hw_blk.h"
#define SDE_CRTC_NAME_SIZE 12
@@ -92,7 +93,7 @@
struct kthread_work kt_work;
void *sde_crtc;
- void (*cb_func)(void *usr);
+ void (*cb_func)(struct drm_crtc *crtc, void *usr);
void *usr;
};
@@ -107,7 +108,7 @@
* @name : ASCII description of this crtc
* @num_ctls : Number of ctl paths in use
* @num_mixers : Number of mixers in use
- * @mixer : List of active mixers
+ * @mixers : List of active mixers
* @event : Pointer to last received drm vblank event. If there is a
* pending vblank event, this will be non-null.
* @vsync_count : Running count of received vsync events
@@ -191,12 +192,69 @@
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
/**
+ * struct sde_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct sde_crtc_res_ops {
+ void *(*get)(void *val, u32 type, u64 tag);
+ void (*put)(void *val);
+};
+
+/* crtc resource type (0x0-0xffff reserved for hw block types) */
+#define SDE_CRTC_RES_ROT_OUT_FBO 0x10000
+#define SDE_CRTC_RES_ROT_OUT_FB 0x10001
+#define SDE_CRTC_RES_ROT_PLANE 0x10002
+#define SDE_CRTC_RES_ROT_IN_FB 0x10003
+
+#define SDE_CRTC_RES_FLAG_FREE BIT(0)
+
+/**
+ * struct sde_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct sde_crtc_res {
+ struct list_head list;
+ u32 type;
+ u64 tag;
+ atomic_t refcount;
+ struct sde_crtc_res_ops ops;
+ void *val;
+ u32 flags;
+};
+
+/**
+ * struct sde_crtc_respool - crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct sde_crtc_respool {
+ u32 sequence_id;
+ struct list_head res_list;
+ struct sde_crtc_res_ops ops;
+};
+
+/**
* struct sde_crtc_state - sde container for atomic crtc state
* @base: Base drm crtc state structure
* @connectors : Currently associated drm connectors
* @num_connectors: Number of associated drm connectors
* @intf_mode : Interface mode of the primary connector
* @rsc_client : sde rsc client when mode is valid
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @crtc_roi : Current CRTC ROI. Possibly sub-rectangle of mode.
+ * Origin top left of CRTC.
+ * @lm_roi : Current LM ROI, possibly sub-rectangle of mode.
+ * Origin top left of CRTC.
+ * @user_roi_list : List of user's requested ROIs as from set property
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
@@ -216,6 +274,11 @@
struct sde_rsc_client *rsc_client;
bool rsc_update;
+ struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
+ struct sde_rect crtc_roi;
+ struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
+ struct msm_roi_list user_roi_list;
+
uint64_t property_values[CRTC_PROP_COUNT];
uint64_t input_fence_timeout_ns;
struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
@@ -226,6 +289,8 @@
struct sde_core_perf_params new_perf;
struct sde_ctl_sbuf_cfg sbuf_cfg;
u64 sbuf_prefill_line;
+
+ struct sde_crtc_respool rp;
};
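
A worked example of how the new ROI fields relate (numbers are illustrative
only): with a 1080x1920 mode split across dual mixers, each LM covers half
the width, and each lm_roi is the intersection of crtc_roi with the matching
lm_bounds, all in CRTC coordinates:

	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS] = {
		{   0, 0, 540, 1920 },	/* left mixer */
		{ 540, 0, 540, 1920 },	/* right mixer */
	};
	struct sde_rect crtc_roi = { 100, 200, 600, 300 };
	/* intersections of crtc_roi with lm_bounds: */
	struct sde_rect lm_roi[CRTC_DUAL_MIXERS] = {
		{ 100, 200, 440, 300 },
		{ 540, 200, 160, 300 },
	};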
#define to_sde_crtc_state(x) \
@@ -250,28 +315,19 @@
mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay;
}
-static inline uint32_t get_crtc_split_width(struct drm_crtc *crtc)
+/**
+ * sde_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ * return: Number of pending frames; -EINVAL if crtc is invalid
+ */
+static inline int sde_crtc_frame_pending(struct drm_crtc *crtc)
{
- struct drm_display_mode *mode;
struct sde_crtc *sde_crtc;
- if (!crtc || !crtc->state)
- return 0;
+ if (!crtc)
+ return -EINVAL;
sde_crtc = to_sde_crtc(crtc);
- mode = &crtc->state->adjusted_mode;
- return sde_crtc_mixer_width(sde_crtc, mode);
-}
-
-static inline uint32_t get_crtc_mixer_height(struct drm_crtc *crtc)
-{
- struct drm_display_mode *mode;
-
- if (!crtc || !crtc->state)
- return 0;
-
- mode = &crtc->state->adjusted_mode;
- return mode->vdisplay;
+ return atomic_read(&sde_crtc->frame_pending);
}
/**
@@ -368,6 +424,46 @@
* Returns: Zero on success
*/
int sde_crtc_event_queue(struct drm_crtc *crtc,
- void (*func)(void *usr), void *usr);
+ void (*func)(struct drm_crtc *crtc, void *usr), void *usr);
+
+/**
+ * sde_crtc_res_add - add given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+ void *val, struct sde_crtc_res_ops *ops);
+
+/**
+ * sde_crtc_res_get - get given resource from resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle if success; error pointer or NULL otherwise
+ */
+void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);
+
+/**
+ * sde_crtc_res_put - return given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);
+
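A minimal sketch of the intended add/get/put flow (the tag value and the
get/put callback bodies are illustrative assumptions, not part of this
patch):

	static void *example_res_get(void *val, u32 type, u64 tag)
	{
		return val;	/* e.g. take a reference on the handle */
	}

	static void example_res_put(void *val)
	{
		/* e.g. drop the reference taken in get() */
	}

	static struct sde_crtc_res_ops example_res_ops = {
		.get = example_res_get,
		.put = example_res_put,
	};

	/* stash a handle in the crtc state, keyed by (type, tag) */
	rc = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FB, 0, fb,
			&example_res_ops);
	/* later lookups bump the refcount; puts drop it */
	fb = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FB, 0);
	sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB, 0);
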
+/**
+ * sde_crtc_get_crtc_roi - retrieve the crtc_roi from the given state object;
+ * used to allow the planes to adjust their final lm out_xy value in the
+ * case of partial update
+ * @state: Pointer to crtc state
+ * @crtc_roi: Output pointer to crtc roi in the given state
+ */
+void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
+ const struct sde_rect **crtc_roi);
#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 7ab4f8d..3357642 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -32,9 +32,9 @@
#include "sde_hw_ctl.h"
#include "sde_formats.h"
#include "sde_encoder_phys.h"
-#include "sde_color_processing.h"
#include "sde_power_handle.h"
#include "sde_hw_dsc.h"
+#include "sde_crtc.h"
#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -59,6 +59,56 @@
#define MISR_BUFF_SIZE 256
+#define IDLE_TIMEOUT	64	/* idle timeout, in ms */
+
+/**
+ * enum sde_enc_rc_events - events for resource control state machine
+ * @SDE_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks and request RSC with CMD state.
+ * Regardless of the previous state, the resource should be in ON state
+ * at the end of this event.
+ * @SDE_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @SDE_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks
+ * and request RSC with IDLE state. Resource state should be in OFF
+ * at the end of the event.
+ * @SDE_ENC_RC_EVENT_EARLY_WAKE_UP:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there will be a frame update soon and the driver should
+ * wake up early to update the frame with minimum latency.
+ * @SDE_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and request RSC with IDLE state
+ * and change the resource state to IDLE.
+ */
+enum sde_enc_rc_events {
+ SDE_ENC_RC_EVENT_KICKOFF = 1,
+ SDE_ENC_RC_EVENT_FRAME_DONE,
+ SDE_ENC_RC_EVENT_STOP,
+ SDE_ENC_RC_EVENT_EARLY_WAKE_UP,
+ SDE_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/**
+ * enum sde_enc_rc_states - states that the resource control maintains
+ * @SDE_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @SDE_ENC_RC_STATE_ON: Resource is in ON state
+ * @SDE_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum sde_enc_rc_states {
+ SDE_ENC_RC_STATE_OFF,
+ SDE_ENC_RC_STATE_ON,
+ SDE_ENC_RC_STATE_IDLE
+};
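
Taken together, the events above drive a simple ON/IDLE/OFF lifecycle. A
sketch of the expected call sequence (illustrative; the surrounding caller
is an assumption, and the actual wiring is done in the kickoff, frame-done
and disable paths later in this patch):

	/* commit path: power everything up before programming HW */
	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
	/* program and flush hardware */

	/* frame-done irq: arms the delayed off work */
	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_FRAME_DONE);

	/*
	 * if no new kickoff arrives within IDLE_TIMEOUT ms, the delayed
	 * work issues SDE_ENC_RC_EVENT_ENTER_IDLE and gates the clocks
	 */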
+
/**
* struct sde_encoder_virt - virtual encoder. Container of one or more physical
* encoders. Virtual encoder manages one "logical" display. Physical
@@ -92,7 +142,16 @@
* @crtc_frame_event: callback event
* @frame_done_timeout: frame done timeout in Hz
* @frame_done_timer: watchdog timer for frame done event
+ * @rsc_client: rsc client pointer
+ * @rsc_state_init: boolean to indicate rsc config init
+ * @disp_info: local copy of msm_display_info struct
* @misr_enable: misr enable/disable status
+ * @idle_pc_supported: indicates if idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
*/
struct sde_encoder_virt {
struct drm_encoder base;
@@ -121,9 +180,14 @@
struct timer_list frame_done_timer;
struct sde_rsc_client *rsc_client;
+ bool rsc_state_init;
struct msm_display_info disp_info;
- bool rsc_state_update;
bool misr_enable;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum sde_enc_rc_states rc_state;
+ struct delayed_work delayed_off_work;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -285,6 +349,24 @@
}
}
+static void _sde_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags = cur_mode->private_flags;
+ }
+ }
+}
+
static int sde_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
@@ -313,6 +395,15 @@
adj_mode = &crtc_state->adjusted_mode;
SDE_EVT32(DRMID(drm_enc));
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+ * These private fields are not populated back while DRM invokes
+ * the mode_set callbacks. This module retrieves and populates the
+ * private fields of the given mode.
+ */
+ _sde_encoder_adjust_mode(conn_state->connector, adj_mode);
+
/* perform atomic check on the first physical encoder (master) */
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -657,6 +748,347 @@
return ret;
}
+static int sde_encoder_update_rsc_client(
+ struct drm_encoder *drm_enc, bool enable)
+{
+ struct sde_encoder_virt *sde_enc;
+ enum sde_rsc_state rsc_state;
+ struct sde_rsc_cmd_config rsc_config;
+ int ret;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ disp_info = &sde_enc->disp_info;
+
+ if (!sde_enc->rsc_client) {
+ SDE_DEBUG("rsc client not created\n");
+ return 0;
+ }
+
+	/*
+	 * Only the primary command mode panel can request CMD state;
+	 * all other panels/displays can request VID state, including a
+	 * secondary command mode panel.
+	 */
+ rsc_state = enable ?
+ (((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
+ disp_info->is_primary) ? SDE_RSC_CMD_STATE :
+ SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+
+ if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
+ && disp_info->is_primary) {
+ rsc_config.fps = disp_info->frame_rate;
+ rsc_config.vtotal = disp_info->vtotal;
+ rsc_config.prefill_lines = disp_info->prefill_lines;
+ rsc_config.jitter = disp_info->jitter;
+ /* update it only once */
+ sde_enc->rsc_state_init = true;
+
+ ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+ rsc_state, &rsc_config,
+ drm_enc->crtc ? drm_enc->crtc->index : -1);
+ } else {
+ ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+ rsc_state, NULL,
+ drm_enc->crtc ? drm_enc->crtc->index : -1);
+ }
+
+ if (ret)
+ SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
+
+ return ret;
+}
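
Restating the nested ternary above as a decision table for readability
(derived directly from the code):

	/*
	 * enable && CMD-capable && primary  -> SDE_RSC_CMD_STATE
	 * enable && anything else           -> SDE_RSC_VID_STATE
	 * !enable                           -> SDE_RSC_IDLE_STATE
	 */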
+
+struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc)
+ return NULL;
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ disp_info = &sde_enc->disp_info;
+
+ return disp_info->is_primary ? sde_enc->rsc_client : NULL;
+}
+
+static void _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_encoder_virt *sde_enc;
+ int i;
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
+ SDE_EVT32(DRMID(drm_enc), enable);
+
+ if (!sde_enc->cur_master) {
+ SDE_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable SDE core clks */
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, true);
+
+ /* enable DSI clks */
+ sde_connector_clk_ctrl(sde_enc->cur_master->connector, true);
+
+ /* enable all the irq */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, true);
+ }
+
+ /* enable RSC */
+ sde_encoder_update_rsc_client(drm_enc, true);
+
+ } else {
+
+ /* disable RSC */
+ sde_encoder_update_rsc_client(drm_enc, false);
+
+ /* disable all the irq */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys =
+ sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, false);
+ }
+
+ /* disable DSI clks */
+ sde_connector_clk_ctrl(sde_enc->cur_master->connector, false);
+
+ /* disable SDE core clks */
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+ }
+
+}
+
+static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ bool schedule_off = false;
+ struct sde_encoder_virt *sde_enc;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+	/*
+	 * When idle_pc is not supported, process only the KICKOFF and STOP
+	 * events and return early for all other events (i.e. video mode).
+	 */
+ if (!sde_enc->idle_pc_supported &&
+ (sw_event != SDE_ENC_RC_EVENT_KICKOFF &&
+ sw_event != SDE_ENC_RC_EVENT_STOP))
+ return 0;
+
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, idle_pc_supported:%d\n", sw_event,
+ sde_enc->idle_pc_supported);
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+ sde_enc->rc_state, SDE_EVTLOG_FUNC_ENTRY);
+
+ switch (sw_event) {
+ case SDE_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&sde_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in ON state\n",
+ sw_event);
+ mutex_unlock(&sde_enc->rc_lock);
+ return 0;
+ }
+
+ /* enable all the clks and resources */
+ _sde_encoder_resource_control_helper(drm_enc, true);
+
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+ SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE1);
+ sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
+
+ mutex_unlock(&sde_enc->rc_lock);
+ break;
+
+ case SDE_ENC_RC_EVENT_FRAME_DONE:
+		/*
+		 * The mutex lock is not taken here because this event arrives
+		 * in interrupt context. Locking is also not required since the
+		 * other events, such as KICKOFF and STOP, perform a
+		 * wait-for-idle before executing the resource control.
+		 */
+ if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
+ SDE_ERROR_ENC(sde_enc, "sw_event:%d,rc:%d-unexpected\n",
+ sw_event, sde_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (sde_crtc_frame_pending(drm_enc->crtc) > 1) {
+ SDE_DEBUG_ENC(sde_enc, "skip schedule work");
+ return 0;
+ }
+
+ /* schedule delayed off work */
+ schedule_delayed_work(&sde_enc->delayed_off_work,
+ msecs_to_jiffies(IDLE_TIMEOUT));
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+ SDE_EVTLOG_FUNC_CASE2);
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
+ sw_event);
+ break;
+
+ case SDE_ENC_RC_EVENT_STOP:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&sde_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
+ sw_event);
+ mutex_unlock(&sde_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * disable the clks and resources only if the resource control
+ * is in ON state, otherwise the clks and resources would have
+ * been disabled while going into IDLE state
+ */
+ if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON)
+ _sde_encoder_resource_control_helper(drm_enc, false);
+
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+ SDE_ENC_RC_STATE_OFF, SDE_EVTLOG_FUNC_CASE3);
+ sde_enc->rc_state = SDE_ENC_RC_STATE_OFF;
+
+ mutex_unlock(&sde_enc->rc_lock);
+ break;
+
+ case SDE_ENC_RC_EVENT_EARLY_WAKE_UP:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&sde_enc->delayed_off_work)) {
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+ schedule_off = true;
+ }
+
+ mutex_lock(&sde_enc->rc_lock);
+
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+ schedule_off, SDE_EVTLOG_FUNC_CASE4);
+
+ /* return if the resource control is in OFF state */
+ if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
+ sw_event);
+ mutex_unlock(&sde_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * enable all the clks and resources if resource control is
+ * coming out of IDLE state
+ */
+ if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
+ _sde_encoder_resource_control_helper(drm_enc, true);
+ sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
+ schedule_off = true;
+ }
+
+		/*
+		 * Schedule the off work when there are no frames pending and
+		 * either:
+		 * 1. early wakeup cancelled the off work, or
+		 * 2. early wakeup changed the rc_state to ON - this handles
+		 *    cases where early wakeup is called but no frame update
+		 *    follows.
+		 */
+ if (schedule_off && !sde_crtc_frame_pending(drm_enc->crtc)) {
+ /* schedule delayed off work */
+ schedule_delayed_work(&sde_enc->delayed_off_work,
+ msecs_to_jiffies(IDLE_TIMEOUT));
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
+ sw_event);
+ }
+
+ mutex_unlock(&sde_enc->rc_lock);
+ break;
+
+ case SDE_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&sde_enc->rc_lock);
+
+ if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
+ SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
+ sw_event, sde_enc->rc_state);
+ mutex_unlock(&sde_enc->rc_lock);
+ return 0;
+ }
+
+ /* disable all the clks and resources */
+ _sde_encoder_resource_control_helper(drm_enc, false);
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+ SDE_ENC_RC_STATE_IDLE, SDE_EVTLOG_FUNC_CASE5);
+ sde_enc->rc_state = SDE_ENC_RC_STATE_IDLE;
+
+ mutex_unlock(&sde_enc->rc_lock);
+ break;
+
+ default:
+ SDE_ERROR("unexpected sw_event: %d\n", sw_event);
+ break;
+ }
+
+ SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+ sde_enc->rc_state, SDE_EVTLOG_FUNC_EXIT);
+ return 0;
+}
+
+static void sde_encoder_off_work(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct sde_encoder_virt *sde_enc = container_of(dw,
+ struct sde_encoder_virt, delayed_off_work);
+
+ if (!sde_enc) {
+ SDE_ERROR("invalid sde encoder\n");
+ return;
+ }
+
+ sde_encoder_resource_control(&sde_enc->base,
+ SDE_ENC_RC_EVENT_ENTER_IDLE);
+}
+
static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -740,6 +1172,8 @@
static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
int i = 0;
int ret = 0;
@@ -755,35 +1189,56 @@
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc));
- ret = _sde_encoder_power_enable(sde_enc, true);
- if (ret)
- return;
-
sde_enc->cur_master = NULL;
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+ SDE_DEBUG_ENC(sde_enc, "master is now idx %d\n", i);
+ sde_enc->cur_master = phys;
+ break;
+ }
+ }
+
+ if (!sde_enc->cur_master) {
+ SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
+ return;
+ }
+
+ ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n",
+ ret);
+ return;
+ }
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- if (phys) {
- if (phys->ops.is_master && phys->ops.is_master(phys)) {
- SDE_DEBUG_ENC(sde_enc,
- "master is now idx %d\n", i);
- sde_enc->cur_master = phys;
- } else if (phys->ops.enable) {
- phys->ops.enable(phys);
- }
- }
+ if (phys && (phys != sde_enc->cur_master) && phys->ops.enable)
+ phys->ops.enable(phys);
}
- if (!sde_enc->cur_master)
- SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
- else if (sde_enc->cur_master->ops.enable)
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.enable)
sde_enc->cur_master->ops.enable(sde_enc->cur_master);
+ if (sde_enc->cur_master && sde_enc->cur_master->hw_mdptop &&
+ sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+ sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+ sde_enc->cur_master->hw_mdptop,
+ sde_kms->catalog);
+
if (_sde_is_dsc_enabled(sde_enc)) {
ret = _sde_encoder_dsc_setup(sde_enc);
if (ret)
@@ -821,9 +1276,8 @@
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- if (phys) {
- if (phys->ops.disable && !phys->ops.is_master(phys))
- phys->ops.disable(phys);
+ if (phys && phys->ops.disable && !phys->ops.is_master(phys)) {
+ phys->ops.disable(phys);
phys->connector = NULL;
}
}
@@ -837,12 +1291,16 @@
if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
sde_enc->cur_master->ops.disable(sde_enc->cur_master);
- sde_enc->cur_master = NULL;
- SDE_DEBUG_ENC(sde_enc, "cleared master\n");
+ sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
+
+ if (sde_enc->cur_master) {
+ sde_enc->cur_master->connector = NULL;
+ sde_enc->cur_master = NULL;
+ }
+
+ SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
sde_rm_release(&sde_kms->rm, drm_enc);
-
- _sde_encoder_power_enable(sde_enc, false);
}
static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
@@ -928,57 +1386,6 @@
}
}
-struct sde_rsc_client *sde_encoder_update_rsc_client(
- struct drm_encoder *drm_enc, bool enable)
-{
- struct sde_encoder_virt *sde_enc;
- enum sde_rsc_state rsc_state;
- struct sde_rsc_cmd_config rsc_config;
- int ret;
- struct msm_display_info *disp_info;
-
- if (!drm_enc) {
- SDE_ERROR("invalid encoder\n");
- return NULL;
- }
-
- sde_enc = to_sde_encoder_virt(drm_enc);
- disp_info = &sde_enc->disp_info;
-
- /**
- * only primary command mode panel can request CMD state.
- * all other panels/displays can request for VID state including
- * secondary command mode panel.
- */
- rsc_state = enable ?
- (((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
- disp_info->is_primary) ? SDE_RSC_CMD_STATE :
- SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
-
- if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
- && disp_info->is_primary) {
- rsc_config.fps = disp_info->frame_rate;
- rsc_config.vtotal = disp_info->vtotal;
- rsc_config.prefill_lines = disp_info->prefill_lines;
- rsc_config.jitter = disp_info->jitter;
- /* update it only once */
- sde_enc->rsc_state_update = true;
-
- ret = sde_rsc_client_state_update(sde_enc->rsc_client,
- rsc_state, &rsc_config,
- drm_enc->crtc ? drm_enc->crtc->index : -1);
- } else {
- ret = sde_rsc_client_state_update(sde_enc->rsc_client,
- rsc_state, NULL,
- drm_enc->crtc ? drm_enc->crtc->index : -1);
- }
-
- if (ret)
- SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
-
- return sde_enc->disp_info.is_primary ? sde_enc->rsc_client : NULL;
-}
-
void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
void (*frame_event_cb)(void *, u32 event),
void *frame_event_cb_data)
@@ -1014,7 +1421,7 @@
if (sde_enc->phys_encs[i] == ready_phys) {
clear_bit(i, sde_enc->frame_busy_mask);
sde_enc->crtc_frame_event |= event;
- SDE_EVT32(DRMID(drm_enc), i,
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
sde_enc->frame_busy_mask[0]);
}
@@ -1022,6 +1429,9 @@
atomic_set(&sde_enc->frame_done_timeout, 0);
del_timer(&sde_enc->frame_done_timer);
+ sde_encoder_resource_control(drm_enc,
+ SDE_ENC_RC_EVENT_FRAME_DONE);
+
if (sde_enc->crtc_frame_event_cb)
sde_enc->crtc_frame_event_cb(
sde_enc->crtc_frame_event_cb_data,
@@ -1054,14 +1464,18 @@
}
pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
- SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
- phys->intf_idx, pending_kickoff_cnt);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
ctl->ops.trigger_flush(ctl);
- SDE_EVT32(DRMID(drm_enc), ctl->idx);
+
+ if (ctl->ops.get_pending_flush)
+ SDE_EVT32(DRMID(drm_enc), phys->intf_idx, pending_kickoff_cnt,
+ ctl->idx, ctl->ops.get_pending_flush(ctl));
+ else
+ SDE_EVT32(DRMID(drm_enc), phys->intf_idx, ctl->idx,
+ pending_kickoff_cnt);
}
/**
@@ -1082,7 +1496,6 @@
void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
{
struct sde_hw_ctl *ctl;
- int ctl_idx = -1;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -1092,11 +1505,8 @@
ctl = phys_enc->hw_ctl;
if (ctl && ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
- ctl_idx = ctl->idx;
+ SDE_EVT32(DRMID(phys_enc->parent), ctl->idx);
}
-
- if (phys_enc && phys_enc->parent)
- SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
}
int sde_encoder_helper_wait_event_timeout(
@@ -1242,6 +1652,7 @@
struct sde_encoder_phys *phys;
bool needs_hw_reset = false;
unsigned int i;
+ int rc;
if (!drm_enc) {
SDE_ERROR("invalid encoder\n");
@@ -1263,6 +1674,8 @@
}
}
+ sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+
/* if any phys needs reset, reset all phys, in-order */
if (needs_hw_reset) {
for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -1271,6 +1684,14 @@
phys->ops.hw_reset(phys);
}
}
+
+ if (sde_enc->cur_master && sde_enc->cur_master->connector) {
+ rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
+ if (rc)
+ SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
+ sde_enc->cur_master->connector->base.id,
+ rc);
+ }
}
void sde_encoder_kickoff(struct drm_encoder *drm_enc)
@@ -1369,6 +1790,7 @@
return 0;
}
+#ifdef CONFIG_DEBUG_FS
static int _sde_encoder_status_show(struct seq_file *s, void *data)
{
struct sde_encoder_virt *sde_enc;
@@ -1414,7 +1836,6 @@
return 0;
}
-#ifdef CONFIG_DEBUG_FS
static int _sde_encoder_debugfs_status_open(struct inode *inode,
struct file *file)
{
@@ -1531,6 +1952,7 @@
struct sde_encoder_virt *sde_enc;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
+ int i;
static const struct file_operations debugfs_status_fops = {
.open = _sde_encoder_debugfs_status_open,
@@ -1560,7 +1982,7 @@
/* create overall sub-directory for the encoder */
sde_enc->debugfs_root = debugfs_create_dir(name,
- sde_debugfs_get_root(sde_kms));
+ drm_enc->dev->primary->debugfs_root);
if (!sde_enc->debugfs_root)
return -ENOMEM;
@@ -1571,6 +1993,13 @@
debugfs_create_file("misr_data", 0644,
sde_enc->debugfs_root, sde_enc, &debugfs_misr_fops);
+ for (i = 0; i < sde_enc->num_phys_encs; i++)
+ if (sde_enc->phys_encs[i] &&
+ sde_enc->phys_encs[i]->ops.late_register)
+ sde_enc->phys_encs[i]->ops.late_register(
+ sde_enc->phys_encs[i],
+ sde_enc->debugfs_root);
+
return 0;
}
@@ -1590,7 +2019,7 @@
return 0;
}
-static _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
{
}
#endif
@@ -1737,6 +2166,9 @@
phys_params.comp_type = disp_info->comp_info.comp_type;
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ sde_enc->idle_pc_supported = sde_kms->catalog->has_idle_pc;
+
mutex_lock(&sde_enc->enc_lock);
for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
/*
@@ -1897,6 +2329,9 @@
sde_enc->rsc_client = NULL;
}
+ mutex_init(&sde_enc->rc_lock);
+ INIT_DELAYED_WORK(&sde_enc->delayed_off_work, sde_encoder_off_work);
+
memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
SDE_DEBUG_ENC(sde_enc, "created\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index cdecd08..5795e04 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -83,13 +83,11 @@
void (*cb)(void *, u32), void *data);
/**
- * sde_encoder_update_rsc_client - updates the rsc client state for primary
+ * sde_encoder_get_rsc_client - gets the rsc client
* for primary display.
* @encoder: encoder pointer
- * @enable: enable/disable the client
*/
-struct sde_rsc_client *sde_encoder_update_rsc_client(
- struct drm_encoder *encoder, bool enable);
+struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
/**
* sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index da155b0..6942292 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -117,6 +117,7 @@
* @collect_misr: Collects MISR data on frame update
* @hw_reset: Issue HW recovery such as CTL reset and clear
* SDE_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control: Handler to enable/disable all the encoder IRQs
*/
struct sde_encoder_phys_ops {
@@ -150,6 +151,7 @@
bool enable, u32 frame_count);
u32 (*collect_misr)(struct sde_encoder_phys *phys_enc);
void (*hw_reset)(struct sde_encoder_phys *phys_enc);
+ void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 86e292f..a4f40f2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -135,8 +135,7 @@
phys_enc);
}
-static bool _sde_encoder_phys_is_ppsplit_slave(
- struct sde_encoder_phys *phys_enc)
+static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
{
enum sde_rm_topology_name topology;
@@ -144,8 +143,7 @@
return false;
topology = sde_connector_get_topology_name(phys_enc->connector);
- if (topology == SDE_RM_TOPOLOGY_PPSPLIT &&
- phys_enc->split_role == ENC_ROLE_SLAVE)
+ if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
return true;
return false;
@@ -167,6 +165,10 @@
do_log = true;
}
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
/* to avoid flooding, only log first time, and "dead" time */
if (do_log) {
SDE_ERROR_CMDENC(cmd_enc,
@@ -176,10 +178,7 @@
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
- SDE_EVT32(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0,
- 0xbad, cmd_enc->pp_timeout_report_cnt,
- atomic_read(&phys_enc->pending_kickoff_cnt));
+ SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
"dsi1_phy", "vbif", "dbg_bus",
@@ -198,6 +197,16 @@
return -ETIMEDOUT;
}
+static bool _sde_encoder_phys_is_ppsplit_slave(
+ struct sde_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return false;
+
+ return _sde_encoder_phys_is_ppsplit(phys_enc) &&
+ phys_enc->split_role == ENC_ROLE_SLAVE;
+}
+
static int _sde_encoder_phys_cmd_wait_for_idle(
struct sde_encoder_phys *phys_enc)
{
@@ -354,6 +363,78 @@
return 0;
}
+static int sde_encoder_phys_cmd_control_vblank_irq(
+ struct sde_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ int ret = 0;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ /* Slave encoders don't report vblank */
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
+ __builtin_return_address(0),
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+ SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+ INTR_IDX_RDPTR,
+ sde_encoder_phys_cmd_pp_rd_ptr_irq,
+ "pp_rd_ptr");
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret)
+ SDE_ERROR_CMDENC(cmd_enc,
+ "control vblank irq error %d, enable %d\n",
+ ret, enable);
+
+ return ret;
+}
+
+void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
+ bool enable)
+{
+ if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
+ return;
+
+ if (enable) {
+ sde_encoder_phys_cmd_register_irq(phys_enc,
+ SDE_IRQ_TYPE_PING_PONG_COMP,
+ INTR_IDX_PINGPONG,
+ sde_encoder_phys_cmd_pp_tx_done_irq,
+ "pp_tx_done");
+
+ sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ sde_encoder_phys_cmd_register_irq(phys_enc,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN,
+ INTR_IDX_UNDERRUN,
+ sde_encoder_phys_cmd_underrun_irq,
+ "underrun");
+ } else {
+ sde_encoder_phys_cmd_unregister_irq(
+ phys_enc, INTR_IDX_UNDERRUN);
+ sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ sde_encoder_phys_cmd_unregister_irq(
+ phys_enc, INTR_IDX_PINGPONG);
+ }
+}
+
static void sde_encoder_phys_cmd_tearcheck_config(
struct sde_encoder_phys *phys_enc)
{
@@ -462,56 +543,10 @@
static bool sde_encoder_phys_cmd_needs_single_flush(
struct sde_encoder_phys *phys_enc)
{
- enum sde_rm_topology_name topology;
-
if (!phys_enc)
return false;
- topology = sde_connector_get_topology_name(phys_enc->connector);
- return topology == SDE_RM_TOPOLOGY_PPSPLIT;
-}
-
-static int sde_encoder_phys_cmd_control_vblank_irq(
- struct sde_encoder_phys *phys_enc,
- bool enable)
-{
- struct sde_encoder_phys_cmd *cmd_enc =
- to_sde_encoder_phys_cmd(phys_enc);
- int ret = 0;
-
- if (!phys_enc) {
- SDE_ERROR("invalid encoder\n");
- return -EINVAL;
- }
-
- /* Slave encoders don't report vblank */
- if (!sde_encoder_phys_cmd_is_master(phys_enc))
- goto end;
-
- SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
- __builtin_return_address(0),
- enable, atomic_read(&phys_enc->vblank_refcount));
-
- SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
- enable, atomic_read(&phys_enc->vblank_refcount));
-
- if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = sde_encoder_phys_cmd_register_irq(phys_enc,
- SDE_IRQ_TYPE_PING_PONG_RD_PTR,
- INTR_IDX_RDPTR,
- sde_encoder_phys_cmd_pp_rd_ptr_irq,
- "pp_rd_ptr");
- else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
- INTR_IDX_RDPTR);
-
-end:
- if (ret)
- SDE_ERROR_CMDENC(cmd_enc,
- "control vblank irq error %d, enable %d\n",
- ret, enable);
-
- return ret;
+ return _sde_encoder_phys_is_ppsplit(phys_enc);
}
static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
@@ -520,7 +555,6 @@
to_sde_encoder_phys_cmd(phys_enc);
struct sde_hw_ctl *ctl;
u32 flush_mask = 0;
- int ret;
if (!phys_enc || !phys_enc->hw_ctl) {
SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
@@ -537,38 +571,6 @@
sde_encoder_phys_cmd_pingpong_config(phys_enc);
- if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
- goto update_flush;
-
- /* Both master and slave need to register for pp_tx_done */
- ret = sde_encoder_phys_cmd_register_irq(phys_enc,
- SDE_IRQ_TYPE_PING_PONG_COMP,
- INTR_IDX_PINGPONG,
- sde_encoder_phys_cmd_pp_tx_done_irq,
- "pp_tx_done");
- if (ret)
- return;
-
- ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
- if (ret) {
- sde_encoder_phys_cmd_unregister_irq(phys_enc,
- INTR_IDX_PINGPONG);
- return;
- }
-
- ret = sde_encoder_phys_cmd_register_irq(phys_enc,
- SDE_IRQ_TYPE_INTF_UNDER_RUN,
- INTR_IDX_UNDERRUN,
- sde_encoder_phys_cmd_underrun_irq,
- "underrun");
- if (ret) {
- sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
- sde_encoder_phys_cmd_unregister_irq(phys_enc,
- INTR_IDX_PINGPONG);
- return;
- }
-
-update_flush:
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
@@ -607,21 +609,9 @@
SDE_EVT32(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret);
}
-
- sde_encoder_phys_cmd_unregister_irq(
- phys_enc, INTR_IDX_UNDERRUN);
- sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
- sde_encoder_phys_cmd_unregister_irq(
- phys_enc, INTR_IDX_PINGPONG);
}
phys_enc->enable_state = SDE_ENC_DISABLED;
-
- if (atomic_read(&phys_enc->vblank_refcount))
- SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
- phys_enc->parent->base.id,
- phys_enc->split_role,
- atomic_read(&phys_enc->vblank_refcount));
}
static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
@@ -717,6 +707,7 @@
ops->trigger_start = sde_encoder_helper_trigger_start;
ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
ops->hw_reset = sde_encoder_helper_hw_reset;
+ ops->irq_control = sde_encoder_phys_cmd_irq_control;
}
struct sde_encoder_phys *sde_encoder_phys_cmd_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 39dfd5d..5cb84b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -227,16 +227,18 @@
struct sde_encoder_phys_vid *vid_enc =
to_sde_encoder_phys_vid(phys_enc);
struct intf_prog_fetch f = { 0 };
- struct intf_timing_params *timing = &vid_enc->timing_params;
+ struct intf_timing_params *timing;
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
u32 vert_total = 0;
u32 rot_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
- if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_rot_start))
+ if (!phys_enc || !vid_enc->hw_intf ||
+ !vid_enc->hw_intf->ops.setup_rot_start)
return;
+ timing = &vid_enc->timing_params;
vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
if (vfp_fetch_lines && rot_fetch_lines) {
vert_total = get_vertical_total(timing);
@@ -337,23 +339,40 @@
{
struct sde_encoder_phys_vid *vid_enc = arg;
struct sde_encoder_phys *phys_enc;
+ struct sde_hw_ctl *hw_ctl;
unsigned long lock_flags;
- int new_cnt;
+ u32 flush_register = 0;
+ int new_cnt = -1, old_cnt = -1;
if (!vid_enc)
return;
phys_enc = &vid_enc->base;
+ hw_ctl = phys_enc->hw_ctl;
+
if (phys_enc->parent_ops.handle_vblank_virt)
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
phys_enc);
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * Only decrement the pending flush count if we've actually flushed
+	 * the hardware. Due to SW IRQ latency, the vblank may have already
+	 * happened, so double-check with the HW that it accepted the flush
+	 * bits.
+	 */
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
- SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
- new_cnt);
+ if (hw_ctl && hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (flush_register == 0)
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+ old_cnt, new_cnt, flush_register);
+
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
}
@@ -372,10 +391,24 @@
phys_enc);
}
+static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
+{
+ enum sde_rm_topology_name topology;
+
+ if (!phys_enc)
+ return false;
+
+ topology = sde_connector_get_topology_name(phys_enc->connector);
+ if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+ return true;
+
+ return false;
+}
+
static bool sde_encoder_phys_vid_needs_single_flush(
struct sde_encoder_phys *phys_enc)
{
- return phys_enc && phys_enc->split_role != ENC_ROLE_SOLO;
+ return phys_enc && _sde_encoder_phys_is_ppsplit(phys_enc);
}
static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
@@ -661,7 +694,7 @@
KICKOFF_TIMEOUT_MS);
if (ret <= 0) {
irq_status = sde_core_irq_read(phys_enc->sde_kms,
- INTR_IDX_VSYNC, true);
+ vid_enc->irq_idx[INTR_IDX_VSYNC], true);
if (irq_status) {
SDE_EVT32(DRMID(phys_enc->parent),
vid_enc->hw_intf->idx - INTF_0);
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 46823b6..5f257bb 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -16,6 +16,8 @@
#include "sde_kms.h"
#include "sde_fence.h"
+#define TIMELINE_VAL_LENGTH 128
+
void *sde_sync_get(uint64_t fd)
{
/* force signed compare, fdget accepts an int argument */
@@ -31,14 +33,31 @@
signed long sde_sync_wait(void *fnc, long timeout_ms)
{
struct fence *fence = fnc;
+ int rc;
+ char timeline_str[TIMELINE_VAL_LENGTH];
if (!fence)
return -EINVAL;
else if (fence_is_signaled(fence))
return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;
- return fence_wait_timeout(fence, true,
+ rc = fence_wait_timeout(fence, true,
msecs_to_jiffies(timeout_ms));
+ if (!rc || (rc == -EINVAL)) {
+ if (fence->ops->timeline_value_str)
+ fence->ops->timeline_value_str(fence,
+ timeline_str, TIMELINE_VAL_LENGTH);
+
+ SDE_ERROR(
+ "fence driver name:%s timeline name:%s seqno:0x%x timeline:%s signaled:0x%x\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->seqno, timeline_str,
+ fence->ops->signaled ?
+ fence->ops->signaled(fence) : 0xffffffff);
+ }
+
+ return rc;
}
uint32_t sde_sync_get_name_prefix(void *fence)
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 01d0d20..c3477b5 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -636,8 +636,8 @@
color = _sde_format_get_media_color_ubwc(fmt);
if (color < 0) {
- DRM_ERROR("UBWC format not supported for fmt:0x%X\n",
- fmt->base.pixel_format);
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
return -EINVAL;
}
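
These logging changes rely on the fact that a DRM fourcc is four packed
ASCII bytes; printing the address of the u32 with a fixed "%4.4s" precision
renders the readable code instead of a hex value. A standalone illustration
(assuming a little-endian machine):

	u32 format = DRM_FORMAT_NV12;

	pr_debug("fmt: %4.4s\n", (char *)&format);	/* prints "fmt: NV12" */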
@@ -1072,7 +1072,8 @@
DRM_ERROR("invalid handle for plane %d\n", i);
return -EINVAL;
}
- bos_total_size += bos[i]->size;
+		/* count a bo shared across planes only once */
+		if ((i == 0) || (bos[i] != bos[0]))
+			bos_total_size += bos[i]->size;
}
if (bos_total_size < layout.total_size) {
@@ -1123,21 +1124,23 @@
case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
map = sde_format_map_ubwc;
map_size = ARRAY_SIZE(sde_format_map_ubwc);
- SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
- format);
+ SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
break;
case DRM_FORMAT_MOD_QCOM_DX:
map = sde_format_map_p010;
map_size = ARRAY_SIZE(sde_format_map_p010);
- SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format);
+ SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_DX\n",
+ (char *)&format);
break;
case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
DRM_FORMAT_MOD_QCOM_TILE):
map = sde_format_map_p010_ubwc;
map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
- SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
- format);
+ SDE_DEBUG(
+ "found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+ (char *)&format);
break;
case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
DRM_FORMAT_MOD_QCOM_TIGHT):
@@ -1146,26 +1149,28 @@
map = sde_format_map_tp10_ubwc;
map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
SDE_DEBUG(
- "found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
- format);
+ "found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
+ (char *)&format);
break;
case DRM_FORMAT_MOD_QCOM_TILE:
map = sde_format_map_tile;
map_size = ARRAY_SIZE(sde_format_map_tile);
- SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format);
+ SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE\n",
+ (char *)&format);
break;
case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
map = sde_format_map_p010_tile;
map_size = ARRAY_SIZE(sde_format_map_p010_tile);
- SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n",
- format);
+ SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+ (char *)&format);
break;
case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
DRM_FORMAT_MOD_QCOM_TIGHT):
map = sde_format_map_tp10_tile;
map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
- SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
- format);
+ SDE_DEBUG(
+ "found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+ (char *)&format);
break;
default:
SDE_ERROR("unsupported format modifier %llX\n", mod0);
@@ -1180,11 +1185,11 @@
}
if (fmt == NULL)
- SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
- format, mod0);
+ SDE_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, mod0);
else
- SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n",
- drm_get_format_name(format), mod0,
+ SDE_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, mod0,
SDE_FORMAT_IS_UBWC(fmt),
SDE_FORMAT_IS_YUV(fmt));
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index 78fa634..7d2f67d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -881,3 +881,20 @@
(*val & (BIT(16) - 1)));
return 0;
}
+
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp)
+{
+ if (!dspp || !resp) {
+ DRM_ERROR("invalid params dspp %pK resp %pK\n", dspp, resp);
+ return;
+ }
+
+ switch (event) {
+ case AD4_BACKLIGHT:
+ *resp = SDE_REG_READ(&dspp->hw,
+ dspp->cap->sblk->ad.base + 0x48);
+ break;
+ default:
+ break;
+ }
+}
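
A hypothetical caller of the new helper (the surrounding context is an
assumption; AD4_BACKLIGHT is the only event handled above):

	u32 backlight = 0;

	sde_read_intr_resp_ad4(dspp, AD4_BACKLIGHT, &backlight);
	/* backlight now holds the AD4 backlight register value */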
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.c b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
index 5ac017c..f59864d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_blk.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
@@ -29,9 +29,11 @@
* sde_hw_blk_init - initialize hw block object
* @type: hw block type - enum sde_hw_blk_type
* @id: instance id of the hw block
+ * @ops: Pointer to block operations
* return: 0 if success; error code otherwise
*/
-int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id)
+int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
+ struct sde_hw_blk_ops *ops)
{
if (!hw_blk) {
pr_err("invalid parameters\n");
@@ -42,7 +44,9 @@
hw_blk->type = type;
hw_blk->id = id;
atomic_set(&hw_blk->refcount, 0);
- INIT_LIST_HEAD(&hw_blk->attach_list);
+
+ if (ops)
+ hw_blk->ops = *ops;
mutex_lock(&sde_hw_blk_lock);
list_add(&hw_blk->list, &sde_hw_blk_list);
@@ -58,8 +62,6 @@
*/
void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk)
{
- struct sde_hw_blk_attachment *curr, *next;
-
if (!hw_blk) {
pr_err("invalid parameters\n");
return;
@@ -69,14 +71,6 @@
pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
hw_blk->id);
- list_for_each_entry_safe(curr, next, &hw_blk->attach_list, list) {
- pr_err("hw_blk:%d.%d tag:0x%x/0x%llx still attached\n",
- hw_blk->type, hw_blk->id,
- curr->tag, (u64) curr->value);
- list_del_init(&curr->list);
- kfree(curr);
- }
-
mutex_lock(&sde_hw_blk_lock);
list_del(&hw_blk->list);
mutex_unlock(&sde_hw_blk_lock);
@@ -92,6 +86,7 @@
struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id)
{
struct sde_hw_blk *curr;
+ int rc, refcount;
if (!hw_blk) {
mutex_lock(&sde_hw_blk_lock);
@@ -108,16 +103,28 @@
mutex_unlock(&sde_hw_blk_lock);
}
- if (hw_blk) {
- int refcount = atomic_inc_return(&hw_blk->refcount);
-
- pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
- hw_blk->id, refcount);
- } else {
- pr_err("no hw_blk:%d\n", type);
+ if (!hw_blk) {
+ pr_debug("no hw_blk:%d\n", type);
+ return NULL;
}
+ refcount = atomic_inc_return(&hw_blk->refcount);
+
+ if (refcount == 1 && hw_blk->ops.start) {
+ rc = hw_blk->ops.start(hw_blk);
+ if (rc) {
+ pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+ goto error_start;
+ }
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+ hw_blk->id, refcount);
return hw_blk;
+
+error_start:
+ sde_hw_blk_put(hw_blk);
+ return ERR_PTR(rc);
}
/**
@@ -125,11 +132,8 @@
* @hw_blk: hw block to be freed
* @free_blk: function to be called when reference count goes to zero
*/
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk,
- void (*free_blk)(struct sde_hw_blk *))
+void sde_hw_blk_put(struct sde_hw_blk *hw_blk)
{
- struct sde_hw_blk_attachment *curr, *next;
-
if (!hw_blk) {
pr_err("invalid parameters\n");
return;
@@ -146,122 +150,6 @@
if (atomic_dec_return(&hw_blk->refcount))
return;
- if (free_blk)
- free_blk(hw_blk);
-
- /* report any residual attachments */
- list_for_each_entry_safe(curr, next, &hw_blk->attach_list, list) {
- pr_err("hw_blk:%d.%d tag:0x%x/0x%llx still attached\n",
- hw_blk->type, hw_blk->id,
- curr->tag, (u64) curr->value);
- list_del_init(&curr->list);
- kfree(curr);
- }
-}
-
-/**
- * sde_hw_blk_lookup_blk - lookup hardware block that matches tag/value/type
- * tuple and increment reference count
- * @tag: search tag
- * @value: value associated with search tag
- * @type: hardware block type
- * return: Pointer to hardware block
- */
-struct sde_hw_blk *sde_hw_blk_lookup_blk(u32 tag, void *value, u32 type)
-{
- struct sde_hw_blk *hw_blk = NULL, *curr;
- struct sde_hw_blk_attachment *attach;
-
- pr_debug("hw_blk:%d tag:0x%x/0x%llx\n", type, tag, (u64) value);
-
- mutex_lock(&sde_hw_blk_lock);
- list_for_each_entry(curr, &sde_hw_blk_list, list) {
- if ((curr->type != type) || !atomic_read(&curr->refcount))
- continue;
-
- list_for_each_entry(attach, &curr->attach_list, list) {
- if ((attach->tag != tag) || (attach->value != value))
- continue;
-
- hw_blk = curr;
- break;
- }
-
- if (hw_blk)
- break;
- }
- mutex_unlock(&sde_hw_blk_lock);
-
- if (hw_blk)
- sde_hw_blk_get(hw_blk, 0, -1);
-
- return hw_blk;
-}
-
-/**
- * sde_hw_blk_attach - attach given tag/value pair to hardware block
- * and increment reference count
- * @hw_blk: Pointer hardware block
- * @tag: search tag
- * @value: value associated with search tag
- * return: 0 if success; error code otherwise
- */
-int sde_hw_blk_attach(struct sde_hw_blk *hw_blk, u32 tag, void *value)
-{
- struct sde_hw_blk_attachment *attach;
-
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return -EINVAL;
- }
-
- pr_debug("hw_blk:%d.%d tag:0x%x/0x%llx\n", hw_blk->type, hw_blk->id,
- tag, (u64) value);
-
- attach = kzalloc(sizeof(struct sde_hw_blk_attachment), GFP_KERNEL);
- if (!attach)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&attach->list);
- attach->tag = tag;
- attach->value = value;
- /* always add to the front so latest shows up first in search */
- list_add(&attach->list, &hw_blk->attach_list);
- sde_hw_blk_get(hw_blk, 0, -1);
-
- return 0;
-}
-
-/**
- * sde_hw_blk_detach - detach given tag/value pair from hardware block
- * and decrement reference count
- * @hw_blk: Pointer hardware block
- * @tag: search tag
- * @value: value associated with search tag
- * return: none
- */
-void sde_hw_blk_detach(struct sde_hw_blk *hw_blk, u32 tag, void *value)
-{
- struct sde_hw_blk_attachment *curr, *next;
-
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return;
- }
-
- pr_debug("hw_blk:%d.%d tag:0x%x/0x%llx\n", hw_blk->type, hw_blk->id,
- tag, (u64) value);
-
- list_for_each_entry_safe(curr, next, &hw_blk->attach_list, list) {
- if ((curr->tag != tag) || (curr->value != value))
- continue;
-
- list_del_init(&curr->list);
- kfree(curr);
- sde_hw_blk_put(hw_blk, NULL);
- return;
- }
-
- pr_err("hw_blk:%d.%d tag:0x%x/0x%llx not found\n", hw_blk->type,
- hw_blk->id, tag, (u64) value);
+ if (hw_blk->ops.stop)
+ hw_blk->ops.stop(hw_blk);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.h b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
index ea4ba08..d979091 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_blk.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
@@ -17,16 +17,16 @@
#include <linux/list.h>
#include <linux/atomic.h>
+struct sde_hw_blk;
+
/**
- * struct sde_hw_blk_attachment - hardware block attachment
- * @list: list of attachment
- * @tag: search tag
- * @value: value associated with the given tag
+ * struct sde_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
*/
-struct sde_hw_blk_attachment {
- struct list_head list;
- u32 tag;
- void *value;
+struct sde_hw_blk_ops {
+ int (*start)(struct sde_hw_blk *);
+ void (*stop)(struct sde_hw_blk *);
};
/**
@@ -35,53 +35,19 @@
* @type: hardware block type
* @id: instance id
* @refcount: reference/usage count
- * @attachment_list: list of attachment
*/
struct sde_hw_blk {
struct list_head list;
u32 type;
int id;
atomic_t refcount;
- struct list_head attach_list;
+ struct sde_hw_blk_ops ops;
};
-int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id);
+int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
+ struct sde_hw_blk_ops *ops);
void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk);
struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id);
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk,
- void (*blk_free)(struct sde_hw_blk *));
-
-struct sde_hw_blk *sde_hw_blk_lookup_blk(u32 tag, void *value, u32 type);
-int sde_hw_blk_attach(struct sde_hw_blk *hw_blk, u32 tag, void *value);
-void sde_hw_blk_detach(struct sde_hw_blk *hw_blk, u32 tag, void *value);
-
-/**
- * sde_hw_blk_lookup_value - return value associated with the given tag
- * @hw_blk: Pointer to hardware block
- * @tag: tag to find
- * @idx: index if more than one value found, with 0 being first
- * return: value associated with the given tag
- */
-static inline void *sde_hw_blk_lookup_value(struct sde_hw_blk *hw_blk,
- u32 tag, u32 idx)
-{
- struct sde_hw_blk_attachment *attach;
-
- if (!hw_blk)
- return NULL;
-
- list_for_each_entry(attach, &hw_blk->attach_list, list) {
- if (attach->tag != tag)
- continue;
-
- if (idx == 0)
- return attach->value;
-
- idx--;
- }
-
- return NULL;
-}
-
+void sde_hw_blk_put(struct sde_hw_blk *hw_blk);
#endif /*_SDE_HW_BLK_H */
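
A minimal sketch of the new start/stop contract replacing the attach list
(the clock-control bodies and the init call site are illustrative
assumptions): start() runs on the first sde_hw_blk_get() and stop() on the
last sde_hw_blk_put(), so a block can tie power/clock state to its
reference count:

	static int example_blk_start(struct sde_hw_blk *hw_blk)
	{
		/* e.g. enable the block's clocks on first get */
		return 0;
	}

	static void example_blk_stop(struct sde_hw_blk *hw_blk)
	{
		/* e.g. disable the block's clocks on last put */
	}

	static struct sde_hw_blk_ops example_ops = {
		.start = example_blk_start,
		.stop = example_blk_stop,
	};

	sde_hw_blk_init(&blk->base, type, id, &example_ops);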
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 9285487..b8ab066 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -120,6 +120,7 @@
SRC_SPLIT,
DIM_LAYER,
SMART_DMA_REV,
+ IDLE_PC,
SDE_PROP_MAX,
};
@@ -313,6 +314,7 @@
{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
+ {IDLE_PC, "qcom,sde-has-idle-pc", false, PROP_TYPE_BOOL},
};
static struct sde_prop_type sde_perf_prop[] = {
@@ -490,8 +492,8 @@
return 0;
for (i = 0, cur_pos = dst_list_pos;
- (cur_pos < (dst_list_size - 1)) && src_list[i].fourcc_format
- && (i < src_list_size); ++i, ++cur_pos)
+ (cur_pos < (dst_list_size - 1)) && (i < src_list_size)
+ && src_list[i].fourcc_format; ++i, ++cur_pos)
dst_list[cur_pos] = src_list[i];
dst_list[cur_pos].fourcc_format = 0;
@@ -565,7 +567,7 @@
rc = -EINVAL;
}
*off_count = 0;
- memset(prop_count, 0, sizeof(int *) * prop_size);
+ memset(prop_count, 0, sizeof(int) * prop_size);
return rc;
}
}
@@ -752,7 +754,8 @@
sblk->maxupscale = MAX_SSPP_UPSCALE;
sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
sspp->id = SSPP_VIG0 + *vig_count;
- snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+ snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+ sspp->id - SSPP_VIG0);
sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
sspp->type = SSPP_TYPE_VIG;
set_bit(SDE_SSPP_QOS, &sspp->features);
@@ -769,7 +772,7 @@
sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
VIG_QSEED_LEN, 0);
snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_scaler%u", sspp->id);
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
@@ -778,7 +781,7 @@
sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
VIG_QSEED_LEN, 0);
snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_scaler%u", sspp->id);
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
}
if (sde_cfg->has_sbuf)
@@ -786,7 +789,7 @@
sblk->csc_blk.id = SDE_SSPP_CSC;
snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_csc%u", sspp->id);
+ "sspp_csc%u", sspp->id - SSPP_VIG0);
if (sde_cfg->csc_type == SDE_SSPP_CSC) {
set_bit(SDE_SSPP_CSC, &sspp->features);
sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
@@ -799,7 +802,7 @@
sblk->hsic_blk.id = SDE_SSPP_HSIC;
snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_hsic%u", sspp->id);
+ "sspp_hsic%u", sspp->id - SSPP_VIG0);
if (prop_exists[VIG_HSIC_PROP]) {
sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
VIG_HSIC_PROP, 0);
@@ -811,7 +814,7 @@
sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_memcolor%u", sspp->id);
+ "sspp_memcolor%u", sspp->id - SSPP_VIG0);
if (prop_exists[VIG_MEMCOLOR_PROP]) {
sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
VIG_MEMCOLOR_PROP, 0);
@@ -823,7 +826,7 @@
sblk->pcc_blk.id = SDE_SSPP_PCC;
snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_pcc%u", sspp->id);
+ "sspp_pcc%u", sspp->id - SSPP_VIG0);
if (prop_exists[VIG_PCC_PROP]) {
sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
VIG_PCC_PROP, 0);
@@ -843,7 +846,8 @@
sblk->maxupscale = MAX_SSPP_UPSCALE;
sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
sspp->id = SSPP_RGB0 + *rgb_count;
- snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+ snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+ sspp->id - SSPP_VIG0);
sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
sspp->type = SSPP_TYPE_RGB;
set_bit(SDE_SSPP_QOS, &sspp->features);
@@ -860,7 +864,7 @@
sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
RGB_SCALER_LEN, 0);
snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_scaler%u", sspp->id);
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
@@ -869,7 +873,7 @@
sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
SSPP_SCALE_SIZE, 0);
snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
- "sspp_scaler%u", sspp->id);
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
}
sblk->pcc_blk.id = SDE_SSPP_PCC;
@@ -897,7 +901,8 @@
sblk->maxdwnscale = SSPP_UNITY_SCALE;
sblk->format_list = sde_cfg->cursor_formats;
sspp->id = SSPP_CURSOR0 + *cursor_count;
- snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+ snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+ sspp->id - SSPP_VIG0);
sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
sspp->type = SSPP_TYPE_CURSOR;
(*cursor_count)++;
@@ -912,7 +917,8 @@
sblk->format_list = sde_cfg->dma_formats;
sspp->id = SSPP_DMA0 + *dma_count;
sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
- snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+ snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+ sspp->id - SSPP_VIG0);
sspp->type = SSPP_TYPE_DMA;
set_bit(SDE_SSPP_QOS, &sspp->features);
(*dma_count)++;
@@ -1023,8 +1029,6 @@
set_bit(sde_cfg->smart_dma_rev, &sspp->features);
sblk->src_blk.id = SDE_SSPP_SRC;
- snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
- sblk->src_blk.id);
of_property_read_string_index(np,
sspp_prop[SSPP_TYPE].prop_name, i, &type);
@@ -1048,6 +1052,9 @@
goto end;
}
+ snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
+ sspp->id - SSPP_VIG0);
+
sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
sblk->maxvdeciexp = MAX_VERT_DECIMATION;
@@ -1142,7 +1149,8 @@
ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
ctl->id = CTL_0 + i;
- snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u", ctl->id);
+ snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u",
+ ctl->id - CTL_0);
if (i < MAX_SPLIT_DISPLAY_CTL)
set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
@@ -1255,7 +1263,8 @@
mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
mixer->id = LM_0 + i;
- snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u", mixer->id);
+ snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u",
+ mixer->id - LM_0);
if (!prop_exists[MIXER_LEN])
mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
@@ -1351,7 +1360,8 @@
intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i);
intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0);
intf->id = INTF_0 + i;
- snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u", intf->id);
+ snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u",
+ intf->id - INTF_0);
if (!prop_exists[INTF_LEN])
intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
@@ -1434,7 +1444,8 @@
wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
- snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u", wb->id);
+ snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u",
+ wb->id - WB_0);
wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
PROP_VALUE_ACCESS(prop_value, WB_ID, i);
wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
@@ -1733,7 +1744,8 @@
dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0);
dspp->id = DSPP_0 + i;
- snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u", dspp->id);
+ snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u",
+ dspp->id - DSPP_0);
sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
if (!sblk) {
@@ -1805,6 +1817,9 @@
dsc->base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
dsc->id = DSC_0 + i;
dsc->len = PROP_VALUE_ACCESS(prop_value, DSC_LEN, 0);
+ snprintf(dsc->name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
+ dsc->id - DSC_0);
+
if (!prop_exists[DSC_LEN])
dsc->len = DEFAULT_SDE_HW_BLOCK_LEN;
}
@@ -1852,7 +1867,8 @@
cdm = sde_cfg->cdm + i;
cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
cdm->id = CDM_0 + i;
- snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u", cdm->id);
+ snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u",
+ cdm->id - CDM_0);
cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
/* intf3 and wb2 for cdm block */
@@ -1918,6 +1934,8 @@
vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
vbif->len = vbif_len;
vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);
+ snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u",
+ vbif->id - VBIF_0);
SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
@@ -2044,19 +2062,21 @@
pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
pp->id = PINGPONG_0 + i;
- snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u", pp->id);
+ snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
+ pp->id - PINGPONG_0);
pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
sblk->te.id = SDE_PINGPONG_TE;
- snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u", pp->id);
+ snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u",
+ pp->id - PINGPONG_0);
set_bit(SDE_PINGPONG_TE, &pp->features);
sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
if (sblk->te2.base) {
sblk->te2.id = SDE_PINGPONG_TE2;
snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u",
- pp->id);
+ pp->id - PINGPONG_0);
set_bit(SDE_PINGPONG_TE2, &pp->features);
set_bit(SDE_PINGPONG_SPLIT, &pp->features);
}
@@ -2068,7 +2088,7 @@
if (sblk->dsc.base) {
sblk->dsc.id = SDE_PINGPONG_DSC;
snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
- pp->id);
+ pp->id - PINGPONG_0);
set_bit(SDE_PINGPONG_DSC, &pp->features);
}
}
@@ -2112,12 +2132,12 @@
cfg->mdss[0].base = MDSS_BASE_OFFSET;
cfg->mdss[0].id = MDP_TOP;
snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u",
- cfg->mdss[0].id);
+ cfg->mdss[0].id - MDP_TOP);
cfg->mdp_count = 1;
cfg->mdp[0].id = MDP_TOP;
snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u",
- cfg->mdp[0].id);
+ cfg->mdp[0].id - MDP_TOP);
cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
if (!prop_exists[SDE_LEN])
@@ -2196,6 +2216,7 @@
cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
cfg->has_dim_layer = PROP_VALUE_ACCESS(prop_value, DIM_LAYER, 0);
+ cfg->has_idle_pc = PROP_VALUE_ACCESS(prop_value, IDLE_PC, 0);
end:
kfree(prop_value);
return rc;
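The snprintf changes throughout this file normalize debug block names to
zero-based instance indices (sspp_0, ctl_0, lm_0, ...) instead of leaking raw
enum values into the names, and they also move the sspp_src name assignment to
after sspp->id is known. A standalone sketch of the effect (the enum values
here are assumptions for illustration only):

	#include <stdio.h>

	enum sde_sspp { SSPP_NONE, SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3 };
	#define SDE_HW_BLK_NAME_LEN 16

	int main(void)
	{
		char name[SDE_HW_BLK_NAME_LEN];
		enum sde_sspp id = SSPP_VIG2;

		/* old scheme: the raw enum value leaks into the name */
		snprintf(name, sizeof(name), "sspp_%u", (unsigned)id);
		printf("%s\n", name);                         /* "sspp_3" */

		/* new scheme: normalize to a zero-based instance index */
		snprintf(name, sizeof(name), "sspp_%u",
			 (unsigned)(id - SSPP_VIG0));
		printf("%s\n", name);                         /* "sspp_2" */
		return 0;
	}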
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 97da08f..b5f83ad 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -713,6 +713,7 @@
* @ubwc_version UBWC feature version (0x0 for not supported)
* @has_sbuf indicate if stream buffer is available
* @sbuf_headroom stream buffer headroom in lines
+ * @has_idle_pc indicate if idle power collapse feature is supported
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
@@ -735,6 +736,7 @@
u32 ubwc_version;
bool has_sbuf;
u32 sbuf_headroom;
+ bool has_idle_pc;
u32 mdss_count;
struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index 18893af..ad2910e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -56,6 +56,19 @@
*/
static u32 offsite_v_coeff[] = {0x00060002};
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -279,6 +292,11 @@
sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
c->hw.blk_off + c->hw.length, c->hw.xin_id);
+ /*
+ * Perform default initialization for the chroma down module:
+ * set up the default CSC coefficients.
+ */
+ sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
return c;
}
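The rgb2yuv_cfg table added above packs five groups of values; a hedged
annotation of the same data (the field grouping is assumed from the usual
sde_csc_cfg layout of matrix, pre/post bias, pre/post clamp, and is not spelled
out in the diff):

	static const struct sde_csc_cfg rgb2yuv_annotated = {
		/* 3x3 RGB->YUV coefficient matrix, fixed point */
		{
			0x0083, 0x0102, 0x0032,
			0x1fb5, 0x1f6c, 0x00e1,
			0x00e1, 0x1f45, 0x1fdc
		},
		{ 0x00, 0x00, 0x00 },         /* pre-bias, applied before the matrix */
		{ 0x0040, 0x0200, 0x0200 },   /* post-bias: Y/Cb/Cr offsets */
		{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff }, /* pre-clamp: full range */
		{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 }, /* post-clamp: limited range */
	};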
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index f7bdc96..82f1c09 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -107,6 +107,12 @@
SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
+static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ return SDE_REG_READ(c, CTL_FLUSH);
+}
static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
enum sde_sspp sspp)
@@ -529,6 +535,7 @@
ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
ops->trigger_flush = sde_hw_ctl_trigger_flush;
+ ops->get_flush_register = sde_hw_ctl_get_flush_register;
ops->trigger_start = sde_hw_ctl_trigger_start;
ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
ops->reset = sde_hw_ctl_reset_control;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index a4e3bfe..7ae43b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -127,6 +127,13 @@
void (*trigger_flush)(struct sde_hw_ctl *ctx);
/**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * @Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct sde_hw_ctl *ctx);
+
+ /**
* Setup ctl_path interface config
* @ctx
* @cfg : interface config structure pointer
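get_flush_register complements trigger_flush/get_pending_flush by exposing the
live hardware value, so a caller can confirm that a programmed flush has
actually been consumed. A hedged caller-side sketch (the polling interval and
timeout policy are assumptions, not from the patch):

	static int wait_for_flush_done(struct sde_hw_ctl *ctl, u32 flush_mask,
				       int retries)
	{
		while (retries--) {
			/* hardware clears the bits once the flush completes */
			if (!(ctl->ops.get_flush_register(ctl) & flush_mask))
				return 0;
			udelay(10);    /* assumed polling interval */
		}
		return -ETIMEDOUT;
	}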
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
index f546710..62193f9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
@@ -182,6 +182,7 @@
if (dsc == m->dsc[i].id) {
b->base_off = addr;
b->blk_off = m->dsc[i].base;
+ b->length = m->dsc[i].len;
b->hwversion = m->hwversion;
b->log_mask = SDE_DBG_MASK_DSC;
return &m->dsc[i];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 51680d3..8df4de2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -25,6 +25,9 @@
{
int i;
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < m->dspp_count; i++) {
if (dspp == m->dspp[i].id) {
b->base_off = addr;
@@ -43,6 +46,9 @@
{
int i = 0, ret;
+ if (!c || !c->cap || !c->cap->sblk)
+ return;
+
for (i = 0; i < SDE_DSPP_MAX; i++) {
if (!test_bit(i, &features))
continue;
@@ -101,6 +107,8 @@
if (c->cap->sblk->ad.version ==
SDE_COLOR_PROCESS_VER(4, 0)) {
c->ops.setup_ad = sde_setup_dspp_ad4;
+ c->ops.ad_read_intr_resp =
+ sde_read_intr_resp_ad4;
c->ops.validate_ad = sde_validate_dspp_ad4;
}
break;
@@ -117,6 +125,9 @@
struct sde_hw_dspp *c;
struct sde_dspp_cfg *cfg;
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 455daa4..70b3e56 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -153,6 +153,15 @@
* @cfg: Pointer to ad configuration
*/
void (*setup_ad)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * ad_read_intr_resp - read the AD interrupt response for an event
+ * @event: event for which the response is read
+ * @resp: pointer to u32 where the response value is written
+ */
+ void (*ad_read_intr_resp)(struct sde_hw_dspp *ctx, u32 event,
+ u32 *resp);
+
};
/**
@@ -183,6 +192,7 @@
* should be called once before accessing every dspp.
* @idx: DSPP index for which driver object is required
* @addr: Mapped register io address of MDP
+ * @Return: pointer to structure or ERR_PTR
*/
struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_hwio.h b/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
deleted file mode 100644
index e69de29..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
+++ /dev/null
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index e68e3c9..24f16c6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -29,6 +29,11 @@
#define MDP_INTF_2_OFF 0x6C000
#define MDP_INTF_3_OFF 0x6C800
#define MDP_INTF_4_OFF 0x6D000
+#define MDP_AD4_0_OFF 0x7D000
+#define MDP_AD4_1_OFF 0x7E000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
/**
* WB interrupt status bit definitions
@@ -82,7 +87,7 @@
* Pingpong Secondary interrupt status bit definitions
*/
#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
-#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
@@ -104,6 +109,15 @@
#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
/**
+ * Ctl start interrupt status bit definitions
+ */
+#define SDE_INTR_CTL_0_START BIT(9)
+#define SDE_INTR_CTL_1_START BIT(10)
+#define SDE_INTR_CTL_2_START BIT(11)
+#define SDE_INTR_CTL_3_START BIT(12)
+#define SDE_INTR_CTL_4_START BIT(13)
+
+/**
* Concurrent WB overflow interrupt status bit definitions
*/
#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
@@ -155,6 +169,14 @@
#define SDE_INTR_PROG_LINE BIT(8)
/**
+ * AD4 interrupt status bit definitions
+ */
+#define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
+#define SDE_INTR_DARKENH_UPDATED BIT(3)
+#define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define SDE_INTR_STREN_INROI_UPDATED BIT(1)
+#define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
+/**
* struct sde_intr_reg - array of SDE register sets
* @clr_off: offset to CLEAR reg
* @en_off: offset to ENABLE reg
@@ -223,6 +245,16 @@
MDP_INTF_4_OFF+INTF_INTR_CLEAR,
MDP_INTF_4_OFF+INTF_INTR_EN,
MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
}
};
@@ -302,15 +334,21 @@
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* irq_idx: 40-43 */
+ /* irq_idx: 40 */
{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
SDE_INTR_PING_PONG_S0_RD_PTR, 1},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
- /* irq_idx: 44-47 */
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
- { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 41-45 */
+ { SDE_IRQ_TYPE_CTL_START, CTL_0,
+ SDE_INTR_CTL_0_START, 1},
+ { SDE_IRQ_TYPE_CTL_START, CTL_1,
+ SDE_INTR_CTL_1_START, 1},
+ { SDE_IRQ_TYPE_CTL_START, CTL_2,
+ SDE_INTR_CTL_2_START, 1},
+ { SDE_IRQ_TYPE_CTL_START, CTL_3,
+ SDE_INTR_CTL_3_START, 1},
+ { SDE_IRQ_TYPE_CTL_START, CTL_4,
+ SDE_INTR_CTL_4_START, 1},
+ /* irq_idx: 46-47 */
{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
/* irq_idx: 48-51 */
@@ -648,6 +686,10 @@
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+ /* irq_idx: 256-257 */
+ { SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, 8},
+ { SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, 9}
};
static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
@@ -669,6 +711,9 @@
static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
uint32_t mask)
{
+ if (!intr)
+ return;
+
SDE_REG_WRITE(&intr->hw, reg_off, mask);
}
@@ -683,6 +728,9 @@
u32 irq_status;
unsigned long irq_flags;
+ if (!intr)
+ return;
+
/*
* The dispatcher will save the IRQ status before calling here.
* Now need to go through each IRQ status and find matching
@@ -699,6 +747,10 @@
start_idx = reg_idx * 32;
end_idx = start_idx + 32;
+ if (start_idx >= ARRAY_SIZE(sde_irq_map) ||
+ end_idx > ARRAY_SIZE(sde_irq_map))
+ continue;
+
/*
* Search through matching intr status from irq map.
* start_idx and end_idx defined the search range in
@@ -742,6 +794,9 @@
const char *dbgstr = NULL;
uint32_t cache_irq_mask;
+ if (!intr)
+ return -EINVAL;
+
if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
@@ -783,6 +838,9 @@
const char *dbgstr = NULL;
uint32_t cache_irq_mask;
+ if (!intr)
+ return -EINVAL;
+
if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
@@ -817,26 +875,50 @@
static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
return 0;
}
static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
return 0;
}
static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
uint32_t *mask)
{
+ if (!intr || !mask)
+ return -EINVAL;
+
*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
return 0;
}
static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
uint32_t *sources)
{
+ if (!intr || !sources)
+ return -EINVAL;
+
*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
+
return 0;
}
@@ -846,6 +928,9 @@
u32 enable_mask;
unsigned long irq_flags;
+ if (!intr)
+ return;
+
spin_lock_irqsave(&intr->status_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
/* Read interrupt status */
@@ -872,6 +957,9 @@
int reg_idx;
unsigned long irq_flags;
+ if (!intr)
+ return;
+
spin_lock_irqsave(&intr->mask_lock, irq_flags);
reg_idx = sde_irq_map[irq_idx].reg_idx;
@@ -888,6 +976,9 @@
unsigned long irq_flags;
u32 intr_status;
+ if (!intr)
+ return 0;
+
spin_lock_irqsave(&intr->mask_lock, irq_flags);
reg_idx = sde_irq_map[irq_idx].reg_idx;
@@ -922,7 +1013,7 @@
static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
void __iomem *addr, struct sde_hw_blk_reg_map *hw)
{
- if (m->mdp_count == 0)
+ if (!m || !addr || !hw || m->mdp_count == 0)
return NULL;
hw->base_off = addr;
@@ -934,9 +1025,13 @@
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
struct sde_mdss_cfg *m)
{
- struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ struct sde_hw_intr *intr;
struct sde_mdss_base_cfg *cfg;
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
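The AD4 additions above extend both sde_intr_set (two new register sets) and
sde_irq_map. Since each register set owns a 32-entry window in the flat map,
the decode is irq_idx = reg_idx * 32 + bit, which is also what the new
start_idx/end_idx bounds check relies on; the two appended AD4 sets therefore
own irq_idx 256 (reg_idx 8, AD4_0) and 257 (reg_idx 9, AD4_1). A small sketch
of the decode:

	static inline void decode_irq_idx(int irq_idx, int *reg_idx, int *bit)
	{
		*reg_idx = irq_idx / 32;   /* index into sde_intr_set[] */
		*bit = irq_idx % 32;       /* bit within that STATUS register */
	}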
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index 261ef64..aaba1be 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
#define IRQ_SOURCE_DSI1 BIT(5)
#define IRQ_SOURCE_HDMI BIT(8)
#define IRQ_SOURCE_EDP BIT(12)
-#define IRQ_SOURCE_MHL BIT(16)
+#define IRQ_SOURCE_MHL BIT(16)
/**
* sde_intr_type - HW Interrupt Type
@@ -54,6 +54,8 @@
* @SDE_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
* @SDE_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
* @SDE_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @SDE_IRQ_TYPE_AD4_BL_DONE: AD4 backlight update done
+ * @SDE_IRQ_TYPE_CTL_START: CTL path start
* @SDE_IRQ_TYPE_RESERVED: Reserved for expansion
*/
enum sde_intr_type {
@@ -82,6 +84,8 @@
SDE_IRQ_TYPE_SFI_CMD_2_IN,
SDE_IRQ_TYPE_SFI_CMD_2_OUT,
SDE_IRQ_TYPE_PROG_LINE,
+ SDE_IRQ_TYPE_AD4_BL_DONE,
+ SDE_IRQ_TYPE_CTL_START,
SDE_IRQ_TYPE_RESERVED,
};
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index f79dc08..d15b804 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -120,37 +120,37 @@
return -EINVAL;
switch (drm_pixfmt) {
- case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
if (SDE_MODIFIER_IS_UBWC(drm_modifier))
*pixfmt = SDE_PIX_FMT_RGB_565_UBWC;
else
*pixfmt = SDE_PIX_FMT_RGB_565;
break;
- case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_BGRA8888:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
else
*pixfmt = SDE_PIX_FMT_ARGB_8888;
break;
- case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
else
*pixfmt = SDE_PIX_FMT_XRGB_8888;
break;
- case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
else
*pixfmt = SDE_PIX_FMT_ABGR_8888;
break;
- case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
else
*pixfmt = SDE_PIX_FMT_XBGR_8888;
break;
- case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_ABGR8888:
if (SDE_MODIFIER_IS_UBWC(drm_modifier))
*pixfmt = SDE_PIX_FMT_RGBA_8888_UBWC;
else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -158,7 +158,7 @@
else
*pixfmt = SDE_PIX_FMT_RGBA_8888;
break;
- case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_XBGR8888:
if (SDE_MODIFIER_IS_UBWC(drm_modifier))
*pixfmt = SDE_PIX_FMT_RGBX_8888_UBWC;
else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -166,13 +166,13 @@
else
*pixfmt = SDE_PIX_FMT_RGBX_8888;
break;
- case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_ARGB8888:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_BGRA_8888_TILE;
else
*pixfmt = SDE_PIX_FMT_BGRA_8888;
break;
- case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_XRGB8888:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_BGRX_8888_TILE;
else
@@ -220,43 +220,43 @@
else
*pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2;
break;
- case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_BGRA1010102:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
else
*pixfmt = SDE_PIX_FMT_ARGB_2101010;
break;
- case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_BGRX1010102:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
else
*pixfmt = SDE_PIX_FMT_XRGB_2101010;
break;
- case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
else
*pixfmt = SDE_PIX_FMT_ABGR_2101010;
break;
- case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
else
*pixfmt = SDE_PIX_FMT_XBGR_2101010;
break;
- case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB2101010:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
else
*pixfmt = SDE_PIX_FMT_BGRA_1010102;
break;
- case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_XRGB2101010:
if (SDE_MODIFIER_IS_TILE(drm_modifier))
*pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
else
*pixfmt = SDE_PIX_FMT_BGRX_1010102;
break;
- case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_ABGR2101010:
if (SDE_MODIFIER_IS_UBWC(drm_modifier))
*pixfmt = SDE_PIX_FMT_RGBA_1010102_UBWC;
else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -264,7 +264,7 @@
else
*pixfmt = SDE_PIX_FMT_RGBA_1010102;
break;
- case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_XBGR2101010:
if (SDE_MODIFIER_IS_UBWC(drm_modifier))
*pixfmt = SDE_PIX_FMT_RGBX_1010102_UBWC;
else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -298,28 +298,28 @@
switch (pixfmt) {
case SDE_PIX_FMT_RGB_565:
- *drm_pixfmt = DRM_FORMAT_RGB565;
+ *drm_pixfmt = DRM_FORMAT_BGR565;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_RGB_565_UBWC:
- *drm_pixfmt = DRM_FORMAT_RGB565;
+ *drm_pixfmt = DRM_FORMAT_BGR565;
*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_RGBA_8888:
- *drm_pixfmt = DRM_FORMAT_RGBA8888;
+ *drm_pixfmt = DRM_FORMAT_ABGR8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_RGBX_8888:
- *drm_pixfmt = DRM_FORMAT_RGBX8888;
+ *drm_pixfmt = DRM_FORMAT_XBGR8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_BGRA_8888:
- *drm_pixfmt = DRM_FORMAT_BGRA8888;
+ *drm_pixfmt = DRM_FORMAT_ARGB8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_BGRX_8888:
- *drm_pixfmt = DRM_FORMAT_BGRX8888;
+ *drm_pixfmt = DRM_FORMAT_XRGB8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
@@ -332,12 +332,12 @@
*drm_modifier = 0;
break;
case SDE_PIX_FMT_RGBA_8888_UBWC:
- *drm_pixfmt = DRM_FORMAT_RGBA8888;
+ *drm_pixfmt = DRM_FORMAT_ABGR8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_RGBX_8888_UBWC:
- *drm_pixfmt = DRM_FORMAT_RGBX8888;
+ *drm_pixfmt = DRM_FORMAT_XBGR8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
DRM_FORMAT_MOD_QCOM_TILE;
break;
@@ -346,59 +346,59 @@
*drm_modifier = 0;
break;
case SDE_PIX_FMT_ARGB_8888:
- *drm_pixfmt = DRM_FORMAT_ARGB8888;
+ *drm_pixfmt = DRM_FORMAT_BGRA8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_XRGB_8888:
- *drm_pixfmt = DRM_FORMAT_XRGB8888;
+ *drm_pixfmt = DRM_FORMAT_BGRX8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_ABGR_8888:
- *drm_pixfmt = DRM_FORMAT_ABGR8888;
+ *drm_pixfmt = DRM_FORMAT_RGBA8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_XBGR_8888:
- *drm_pixfmt = DRM_FORMAT_XBGR8888;
+ *drm_pixfmt = DRM_FORMAT_RGBX8888;
*drm_modifier = 0;
break;
case SDE_PIX_FMT_ARGB_2101010:
- *drm_pixfmt = DRM_FORMAT_ARGB2101010;
- *drm_modifier = 0;
- break;
- case SDE_PIX_FMT_XRGB_2101010:
- *drm_pixfmt = DRM_FORMAT_XRGB2101010;
- *drm_modifier = 0;
- break;
- case SDE_PIX_FMT_ABGR_2101010:
- *drm_pixfmt = DRM_FORMAT_ABGR2101010;
- *drm_modifier = 0;
- break;
- case SDE_PIX_FMT_XBGR_2101010:
- *drm_pixfmt = DRM_FORMAT_XBGR2101010;
- *drm_modifier = 0;
- break;
- case SDE_PIX_FMT_BGRA_1010102:
*drm_pixfmt = DRM_FORMAT_BGRA1010102;
*drm_modifier = 0;
break;
- case SDE_PIX_FMT_BGRX_1010102:
+ case SDE_PIX_FMT_XRGB_2101010:
*drm_pixfmt = DRM_FORMAT_BGRX1010102;
*drm_modifier = 0;
break;
+ case SDE_PIX_FMT_ABGR_2101010:
+ *drm_pixfmt = DRM_FORMAT_RGBA1010102;
+ *drm_modifier = 0;
+ break;
+ case SDE_PIX_FMT_XBGR_2101010:
+ *drm_pixfmt = DRM_FORMAT_RGBX1010102;
+ *drm_modifier = 0;
+ break;
+ case SDE_PIX_FMT_BGRA_1010102:
+ *drm_pixfmt = DRM_FORMAT_ARGB2101010;
+ *drm_modifier = 0;
+ break;
+ case SDE_PIX_FMT_BGRX_1010102:
+ *drm_pixfmt = DRM_FORMAT_XRGB2101010;
+ *drm_modifier = 0;
+ break;
case SDE_PIX_FMT_RGBA_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_RGBA8888;
+ *drm_pixfmt = DRM_FORMAT_ABGR8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_RGBX_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_RGBX8888;
+ *drm_pixfmt = DRM_FORMAT_XBGR8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_BGRA_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_BGRA8888;
+ *drm_pixfmt = DRM_FORMAT_ARGB8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_BGRX_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_BGRX8888;
+ *drm_pixfmt = DRM_FORMAT_XRGB8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
@@ -410,45 +410,55 @@
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_ARGB_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_ARGB8888;
+ *drm_pixfmt = DRM_FORMAT_BGRA8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_XRGB_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_XRGB8888;
+ *drm_pixfmt = DRM_FORMAT_BGRX8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_ABGR_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_ABGR8888;
+ *drm_pixfmt = DRM_FORMAT_RGBA8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_XBGR_8888_TILE:
- *drm_pixfmt = DRM_FORMAT_XBGR8888;
+ *drm_pixfmt = DRM_FORMAT_RGBX8888;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
case SDE_PIX_FMT_ARGB_2101010_TILE:
- *drm_pixfmt = DRM_FORMAT_ARGB2101010;
- *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
- break;
- case SDE_PIX_FMT_XRGB_2101010_TILE:
- *drm_pixfmt = DRM_FORMAT_XRGB2101010;
- *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
- break;
- case SDE_PIX_FMT_ABGR_2101010_TILE:
- *drm_pixfmt = DRM_FORMAT_ABGR2101010;
- *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
- break;
- case SDE_PIX_FMT_XBGR_2101010_TILE:
- *drm_pixfmt = DRM_FORMAT_XBGR2101010;
- *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
- break;
- case SDE_PIX_FMT_BGRA_1010102_TILE:
*drm_pixfmt = DRM_FORMAT_BGRA1010102;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
- case SDE_PIX_FMT_BGRX_1010102_TILE:
+ case SDE_PIX_FMT_XRGB_2101010_TILE:
*drm_pixfmt = DRM_FORMAT_BGRX1010102;
*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
break;
+ case SDE_PIX_FMT_ABGR_2101010_TILE:
+ *drm_pixfmt = DRM_FORMAT_RGBA1010102;
+ *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+ break;
+ case SDE_PIX_FMT_XBGR_2101010_TILE:
+ *drm_pixfmt = DRM_FORMAT_RGBX1010102;
+ *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+ break;
+ case SDE_PIX_FMT_BGRA_1010102_TILE:
+ *drm_pixfmt = DRM_FORMAT_ARGB2101010;
+ *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+ break;
+ case SDE_PIX_FMT_BGRX_1010102_TILE:
+ *drm_pixfmt = DRM_FORMAT_XRGB2101010;
+ *drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+ break;
+ case SDE_PIX_FMT_RGBA_1010102_UBWC:
+ *drm_pixfmt = DRM_FORMAT_ABGR2101010;
+ *drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+ DRM_FORMAT_MOD_QCOM_TILE;
+ break;
+ case SDE_PIX_FMT_RGBX_1010102_UBWC:
+ *drm_pixfmt = DRM_FORMAT_XBGR2101010;
+ *drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+ DRM_FORMAT_MOD_QCOM_TILE;
+ break;
case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
*drm_pixfmt = DRM_FORMAT_NV12;
*drm_modifier = DRM_FORMAT_MOD_QCOM_DX;
@@ -564,9 +574,16 @@
rot_cmd.video_mode = data->video_mode;
rot_cmd.fps = data->fps;
+
+ /*
+ * The DRM rotation property is specified in the counter-clockwise
+ * direction, whereas the rotator h/w rotates clockwise. Convert the
+ * property to a clockwise rotation by toggling the h/v flips when
+ * rot90 is set.
+ */
rot_cmd.rot90 = data->rot90;
- rot_cmd.hflip = data->hflip;
- rot_cmd.vflip = data->vflip;
+ rot_cmd.hflip = data->rot90 ? !data->hflip : data->hflip;
+ rot_cmd.vflip = data->rot90 ? !data->vflip : data->vflip;
+
rot_cmd.secure = data->secure;
rot_cmd.clkrate = data->clkrate;
rot_cmd.data_bw = 0;
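The flip toggling above rests on a simple identity: a 90-degree
counter-clockwise rotation equals a 90-degree clockwise rotation composed with
a 180-degree rotation, and a 180-degree rotation is hflip + vflip. A
self-contained sketch of that conversion (struct and function names are
illustrative):

	#include <stdbool.h>

	struct rot_flags { bool rot90, hflip, vflip; };

	/* convert a DRM-style CCW request into a CW rotator command */
	static struct rot_flags ccw_to_cw(struct rot_flags in)
	{
		struct rot_flags out = in;

		if (in.rot90) {
			out.hflip = !in.hflip;   /* CW90 + hflip + vflip ... */
			out.vflip = !in.vflip;   /* ... == CCW90 */
		}
		return out;
	}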
@@ -779,6 +796,25 @@
}
/**
+ * sde_hw_rot_get_maxlinewidth - get maximum line width of rotator
+ * @hw: Pointer to rotator hardware driver
+ * return: maximum line width
+ */
+static int sde_hw_rot_get_maxlinewidth(struct sde_hw_rot *hw)
+{
+ struct platform_device *pdev;
+
+ if (!hw || !hw->caps || !hw->caps->pdev) {
+ SDE_ERROR("invalid rotator hw\n");
+ return 0;
+ }
+
+ pdev = hw->caps->pdev;
+
+ return sde_rotator_inline_get_maxlinewidth(pdev);
+}
+
+/**
* _setup_rot_ops - setup rotator operations
* @ops: Pointer to operation table
* @features: available feature bitmask
@@ -790,64 +826,7 @@
ops->get_format_caps = sde_hw_rot_get_format_caps;
ops->get_downscale_caps = sde_hw_rot_get_downscale_caps;
ops->get_cache_size = sde_hw_rot_get_cache_size;
-}
-
-/**
- * sde_hw_rot_init - create/initialize given rotator instance
- * @idx: index of given rotator
- * @addr: i/o address mapping
- * @m: Pointer to mdss catalog
- * return: Pointer to hardware rotator driver of the given instance
- */
-struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
- void __iomem *addr,
- struct sde_mdss_cfg *m)
-{
- struct sde_hw_rot *c;
- struct sde_rot_cfg *cfg;
- int rc;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return ERR_PTR(-ENOMEM);
-
- cfg = _rot_offset(idx, m, addr, &c->hw);
- if (IS_ERR(cfg)) {
- WARN(1, "Unable to find rot idx=%d\n", idx);
- kfree(c);
- return ERR_PTR(-EINVAL);
- }
-
- /* Assign ops */
- c->idx = idx;
- c->caps = cfg;
- _setup_rot_ops(&c->ops, c->caps->features);
-
- rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx);
- if (rc) {
- SDE_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
-}
-
-/**
- * sde_hw_rot_destroy - destroy given hardware rotator driver
- * @hw_rot: Pointer to hardware rotator driver
- * return: none
- */
-void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot)
-{
- sde_hw_blk_destroy(&hw_rot->base);
- kfree(hw_rot->downscale_caps);
- kfree(hw_rot->format_caps);
- kfree(hw_rot);
+ ops->get_maxlinewidth = sde_hw_rot_get_maxlinewidth;
}
/**
@@ -881,26 +860,81 @@
return rc;
}
+static struct sde_hw_blk_ops sde_hw_rot_ops = {
+ .start = sde_hw_rot_blk_start,
+ .stop = sde_hw_rot_blk_stop,
+};
+
+/**
+ * sde_hw_rot_init - create/initialize given rotator instance
+ * @idx: index of given rotator
+ * @addr: i/o address mapping
+ * @m: Pointer to mdss catalog
+ * return: Pointer to hardware rotator driver of the given instance
+ */
+struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_rot *c;
+ struct sde_rot_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _rot_offset(idx, m, addr, &c->hw);
+ if (IS_ERR(cfg)) {
+ WARN(1, "Unable to find rot idx=%d\n", idx);
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_rot_ops(&c->ops, c->caps->features);
+
+ rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx,
+ &sde_hw_rot_ops);
+ if (rc) {
+ SDE_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+/**
+ * sde_hw_rot_destroy - destroy given hardware rotator driver
+ * @hw_rot: Pointer to hardware rotator driver
+ * return: none
+ */
+void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot)
+{
+ sde_hw_blk_destroy(&hw_rot->base);
+ kfree(hw_rot->downscale_caps);
+ kfree(hw_rot->format_caps);
+ kfree(hw_rot);
+}
+
struct sde_hw_rot *sde_hw_rot_get(struct sde_hw_rot *hw_rot)
{
struct sde_hw_blk *hw_blk = sde_hw_blk_get(hw_rot ? &hw_rot->base :
NULL, SDE_HW_BLK_ROT, -1);
- int rc = 0;
- if (!hw_rot && hw_blk)
- rc = sde_hw_rot_blk_start(hw_blk);
-
- if (rc) {
- sde_hw_blk_put(hw_blk, NULL);
- return NULL;
- }
-
- return hw_blk ? to_sde_hw_rot(hw_blk) : NULL;
+ return IS_ERR_OR_NULL(hw_blk) ? NULL : to_sde_hw_rot(hw_blk);
}
void sde_hw_rot_put(struct sde_hw_rot *hw_rot)
{
struct sde_hw_blk *hw_blk = hw_rot ? &hw_rot->base : NULL;
- sde_hw_blk_put(hw_blk, sde_hw_rot_blk_stop);
+ sde_hw_blk_put(hw_blk);
}
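With start/stop registered as sde_hw_blk_ops at init time, sde_hw_rot_get/put
collapse to plain reference operations; the generic block layer is assumed to
invoke .start on the first reference and .stop on the last. A hedged
client-side sketch:

	static void rot_client_example(struct sde_hw_rot *rot)
	{
		/* takes a reference; block layer may call .start */
		struct sde_hw_rot *hw = sde_hw_rot_get(rot);

		if (!hw)
			return;

		/* ... issue rotator commands via hw->ops ... */

		/* drops the reference; .stop may run on the last put */
		sde_hw_rot_put(hw);
	}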
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index 949f9bd..a4f5b49 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -20,12 +20,6 @@
struct sde_hw_rot;
-/* tags for attachment */
-#define SDE_TAG_ROT_OUT_FBO 0x1000
-#define SDE_TAG_ROT_OUT_FB 0x1001
-#define SDE_TAG_ROT_PLANE 0x1002
-#define SDE_TAG_ROT_IN_FB 0x1003
-
/**
* enum sde_hw_rot_cmd_type - type of rotator hardware command
* @SDE_HW_ROT_CMD_VALDIATE: validate rotator command; do not commit
@@ -124,6 +118,7 @@
struct sde_hw_rot *hw);
const char *(*get_downscale_caps)(struct sde_hw_rot *hw);
size_t (*get_cache_size)(struct sde_hw_rot *hw);
+ int (*get_maxlinewidth)(struct sde_hw_rot *hw);
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 37fb81d..c045067 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -1156,8 +1156,9 @@
if (cfg->sblk->scaler_blk.len)
sde_dbg_reg_register_dump_range(SDE_DBG_NAME,
cfg->sblk->scaler_blk.name,
- cfg->sblk->scaler_blk.base,
- cfg->sblk->scaler_blk.base + cfg->sblk->scaler_blk.len,
+ hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base,
+ hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base +
+ cfg->sblk->scaler_blk.len,
hw_pipe->hw.xin_id);
return hw_pipe;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index a7bebc2..cf54611 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -39,13 +39,15 @@
static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
struct split_pipe_cfg *cfg)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
+ struct sde_hw_blk_reg_map *c;
u32 upper_pipe = 0;
u32 lower_pipe = 0;
if (!mdp || !cfg)
return;
+ c = &mdp->hw;
+
if (cfg->en) {
if (cfg->mode == INTF_MODE_CMD) {
lower_pipe = FLD_SPLIT_DISPLAY_CMD;
@@ -107,9 +109,14 @@
static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
struct cdm_output_cfg *cfg)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
+ struct sde_hw_blk_reg_map *c;
u32 out_ctl = 0;
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
if (cfg->wb_en)
out_ctl |= BIT(24);
else if (cfg->intf_en)
@@ -121,11 +128,16 @@
static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
enum sde_clk_ctrl_type clk_ctrl, bool enable)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
+ struct sde_hw_blk_reg_map *c;
u32 reg_off, bit_off;
u32 reg_val, new_val;
bool clk_forced_on;
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
return false;
@@ -150,9 +162,14 @@
static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
struct sde_danger_safe_status *status)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
+ struct sde_hw_blk_reg_map *c;
u32 value;
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
value = SDE_REG_READ(c, DANGER_STATUS);
status->mdp = (value >> 0) & 0x3;
status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
@@ -178,9 +195,14 @@
static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
struct sde_danger_safe_status *status)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
+ struct sde_hw_blk_reg_map *c;
u32 value;
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
value = SDE_REG_READ(c, SAFE_STATUS);
status->mdp = (value >> 0) & 0x1;
status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
@@ -205,11 +227,32 @@
static void sde_hw_setup_dce(struct sde_hw_mdp *mdp, u32 dce_sel)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
+ struct sde_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
SDE_REG_WRITE(c, DCE_SEL, dce_sel);
}
+void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
+{
+ struct sde_hw_blk_reg_map c;
+
+ if (!mdp || !m)
+ return;
+
+ if (!IS_UBWC_20_SUPPORTED(m->ubwc_version))
+ return;
+
+ /* force blk offset to zero to access beginning of register region */
+ c = mdp->hw;
+ c.blk_off = 0x0;
+ SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
unsigned long cap)
{
@@ -220,6 +263,7 @@
ops->get_danger_status = sde_hw_get_danger_status;
ops->get_safe_status = sde_hw_get_safe_status;
ops->setup_dce = sde_hw_setup_dce;
+ ops->reset_ubwc = sde_hw_reset_ubwc;
}
static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
@@ -229,6 +273,9 @@
{
int i;
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < m->mdp_count; i++) {
if (mdp == m->mdp[i].id) {
b->base_off = addr;
@@ -243,25 +290,6 @@
return ERR_PTR(-EINVAL);
}
-static inline void _sde_hw_mdptop_init_ubwc(void __iomem *addr,
- const struct sde_mdss_cfg *m)
-{
- struct sde_hw_blk_reg_map hw;
-
- if (!addr || !m)
- return;
-
- if (!IS_UBWC_20_SUPPORTED(m->ubwc_version))
- return;
-
- memset(&hw, 0, sizeof(hw));
- hw.base_off = addr;
- hw.blk_off = 0x0;
- hw.hwversion = m->hwversion;
- hw.log_mask = SDE_DBG_MASK_TOP;
- SDE_REG_WRITE(&hw, UBWC_STATIC, m->mdp[0].ubwc_static);
-}
-
struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
void __iomem *addr,
const struct sde_mdss_cfg *m)
@@ -292,8 +320,7 @@
sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
mdp->hw.xin_id);
-
- _sde_hw_mdptop_init_ubwc(addr, m);
+ sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
return mdp;
}
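sde_hw_reset_ubwc replaces the one-shot _sde_hw_mdptop_init_ubwc so the
UBWC_STATIC programming can be reissued through an op whenever the register
needs to be restored (plausibly after the idle power collapse support added
earlier in this patch, though the diff does not say so). The shallow-copy
trick it uses is worth noting: the copy keeps base_off, hwversion and log_mask
but rebases blk_off to zero, so the write lands at the start of the mapped
region rather than inside the MDP_TOP sub-block. A sketch of the pattern:

	static void write_at_region_start(struct sde_hw_mdp *mdp, u32 off, u32 val)
	{
		struct sde_hw_blk_reg_map c = mdp->hw;  /* shallow copy */

		c.blk_off = 0x0;                /* rebase to start of region */
		SDE_REG_WRITE(&c, off, val);
	}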
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 9cb0c55..7511358 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -148,6 +148,13 @@
*/
void (*get_safe_status)(struct sde_hw_mdp *mdp,
struct sde_danger_safe_status *status);
+
+ /**
+ * reset_ubwc - reset top level UBWC configuration
+ * @mdp: mdp top context driver
+ * @m: pointer to mdss catalog data
+ */
+ void (*reset_ubwc)(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m);
};
struct sde_hw_mdp {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index e9f54d0..048ec47 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -158,8 +158,7 @@
c->cap = cfg;
_setup_vbif_ops(&c->ops, c->cap->features);
- sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
- c->hw.blk_off + c->hw.length, c->hw.xin_id);
+ /* no need to register sub-range in sde dbg, dump entire vbif io base */
return c;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index b6a9f42..4a5479d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -57,7 +57,7 @@
* # echo 0x2 > /sys/module/drm/parameters/debug
*
* To enable DRM driver h/w logging
- * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
*
* See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
*/
@@ -275,7 +275,13 @@
void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
- return sde_kms ? sde_kms->dev->primary->debugfs_root : 0;
+ struct msm_drm_private *priv;
+
+ if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
+ return NULL;
+
+ priv = sde_kms->dev->dev_private;
+ return priv->debug_root;
}
static int _sde_debugfs_init(struct sde_kms *sde_kms)
@@ -296,8 +302,9 @@
/* allow debugfs_root to be NULL */
debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0644, debugfs_root, p);
- sde_debugfs_danger_init(sde_kms, debugfs_root);
- sde_debugfs_vbif_init(sde_kms, debugfs_root);
+ (void) sde_debugfs_danger_init(sde_kms, debugfs_root);
+ (void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
+ (void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);
rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
if (rc) {
@@ -314,6 +321,7 @@
if (sde_kms) {
sde_debugfs_vbif_destroy(sde_kms);
sde_debugfs_danger_destroy(sde_kms);
+ sde_debugfs_core_irq_destroy(sde_kms);
}
}
#else
@@ -324,18 +332,6 @@
static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
{
- return 0;
-}
-
-static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms,
- struct dentry *parent)
-{
-}
-
-static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
- struct dentry *parent)
-{
- return 0;
}
#endif
@@ -352,9 +348,16 @@
static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
- struct sde_kms *sde_kms = to_sde_kms(kms);
- struct drm_device *dev = sde_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
+ struct sde_kms *sde_kms;
+ struct msm_drm_private *priv;
+
+ if (!kms)
+ return;
+ sde_kms = to_sde_kms(kms);
+
+ if (!sde_kms->dev || !sde_kms->dev->dev_private)
+ return;
+ priv = sde_kms->dev->dev_private;
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
}
@@ -377,13 +380,20 @@
static void sde_kms_complete_commit(struct msm_kms *kms,
struct drm_atomic_state *old_state)
{
- struct sde_kms *sde_kms = to_sde_kms(kms);
- struct drm_device *dev = sde_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
+ struct sde_kms *sde_kms;
+ struct msm_drm_private *priv;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
+ if (!kms || !old_state)
+ return;
+ sde_kms = to_sde_kms(kms);
+
+ if (!sde_kms->dev || !sde_kms->dev->dev_private)
+ return;
+ priv = sde_kms->dev->dev_private;
+
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
sde_crtc_complete_commit(crtc, old_crtc_state);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
@@ -417,11 +427,11 @@
if (encoder->crtc != crtc)
continue;
/*
- * Wait post-flush if necessary to delay before plane_cleanup
- * For example, wait for vsync in case of video mode panels
- * This should be a no-op for command mode panels
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
*/
- SDE_EVT32(DRMID(crtc));
+ SDE_EVT32_VERBOSE(DRMID(crtc));
ret = sde_encoder_wait_for_commit_done(encoder);
if (ret && ret != -EWOULDBLOCK) {
SDE_ERROR("wait for commit done returned %d\n", ret);
@@ -555,7 +565,9 @@
.mode_valid = dsi_conn_mode_valid,
.get_info = dsi_display_get_info,
.set_backlight = dsi_display_set_backlight,
- .soft_reset = dsi_display_soft_reset
+ .soft_reset = dsi_display_soft_reset,
+ .pre_kickoff = dsi_conn_pre_kickoff,
+ .clk_ctrl = dsi_display_clk_ctrl
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
@@ -1238,7 +1250,7 @@
/* the caller api needs to turn on clock before calling it */
static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
{
- return;
+ sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
}
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
@@ -1518,7 +1530,7 @@
}
rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
- &priv->phandle, priv->pclient, "core_clk_src");
+ &priv->phandle, priv->pclient, "core_clk");
if (rc) {
SDE_ERROR("failed to init perf %d\n", rc);
goto perf_err;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index ebc277e..d38a6b9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -397,13 +397,47 @@
void sde_kms_info_stop(struct sde_kms_info *info);
/**
- * sde_kms_rect_intersect() - find the intersecting region between two rects
- * @res: Intersecting region between the two rectangles
- * @rect1: first rectangle coordinates
- * @rect2: second rectangle coordinates
+ * sde_kms_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: scissor rectangle
+ * @result: result rectangle; all zeros when no intersection is found
*/
-void sde_kms_rect_intersect(struct sde_rect *res,
- const struct sde_rect *rect1, const struct sde_rect *rect2);
+void sde_kms_rect_intersect(const struct sde_rect *r1,
+ const struct sde_rect *r2,
+ struct sde_rect *result);
+
+/**
+ * sde_kms_rect_is_equal - compares two rects
+ * @r1: rect value to compare
+ * @r2: rect value to compare
+ *
+ * Returns true if the rects are equal, false otherwise.
+ */
+static inline bool sde_kms_rect_is_equal(struct sde_rect *r1,
+ struct sde_rect *r2)
+{
+ if ((!r1 && r2) || (r1 && !r2))
+ return false;
+
+ if (!r1 && !r2)
+ return true;
+
+ return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
+ r1->h == r2->h;
+}
+
+/**
+ * sde_kms_rect_is_null - returns true if the width or height of a rect is 0
+ * @rect: rectangle to check for zero size
+ * @Return: True if width or height of rectangle is 0
+ */
+static inline bool sde_kms_rect_is_null(const struct sde_rect *r)
+{
+ if (!r)
+ return true;
+
+ return (!r->w || !r->h);
+}
/**
* Vblank enable/disable functions
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
index f95f5df..30e12c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -152,18 +152,26 @@
}
}
-void sde_kms_rect_intersect(struct sde_rect *res,
- const struct sde_rect *rect1, const struct sde_rect *rect2)
+void sde_kms_rect_intersect(const struct sde_rect *r1,
+ const struct sde_rect *r2,
+ struct sde_rect *result)
{
int l, t, r, b;
- l = max(rect1->x, rect2->x);
- t = max(rect1->y, rect2->y);
- r = min((rect1->x + rect1->w), (rect2->x + rect2->w));
- b = min((rect1->y + rect1->h), (rect2->y + rect2->h));
+ if (!r1 || !r2 || !result)
+ return;
- if (r < l || b < t)
- *res = (struct sde_rect) {0, 0, 0, 0};
- else
- *res = (struct sde_rect) {l, t, (r - l), (b - t)};
+ l = max(r1->x, r2->x);
+ t = max(r1->y, r2->y);
+ r = min((r1->x + r1->w), (r2->x + r2->w));
+ b = min((r1->y + r1->h), (r2->y + r2->h));
+
+ if (r < l || b < t) {
+ memset(result, 0, sizeof(*result));
+ } else {
+ result->x = l;
+ result->y = t;
+ result->w = r - l;
+ result->h = b - t;
+ }
}
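The reworked intersect takes its inputs first and writes the result last,
guarding all pointers. A worked example of the max/min arithmetic (values are
illustrative):

	struct sde_rect r1 = { 10, 10, 100, 50 };
	struct sde_rect r2 = { 80,  0, 100, 40 };
	struct sde_rect res;

	sde_kms_rect_intersect(&r1, &r2, &res);
	/* l = max(10,80) = 80, t = max(10,0) = 10            */
	/* r = min(110,180) = 110, b = min(60,40) = 40        */
	/* res = {80, 10, 30, 30}                             */
	/* disjoint inputs zero the result, which              */
	/* sde_kms_rect_is_null(&res) then reports as empty    */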
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 0be17e4..93268be 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -253,9 +253,10 @@
((src_width + 32) * fmt->bpp);
}
- SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
+ SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
plane->base.id, psde->pipe - SSPP_VIG0,
- fmt->base.pixel_format, src_width, total_fl);
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
return total_fl;
}
@@ -365,10 +366,10 @@
psde->is_rt_pipe, total_fl, qos_lut,
(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
- SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
+ SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%x\n",
plane->base.id,
psde->pipe - SSPP_VIG0,
- (fmt) ? fmt->base.pixel_format : 0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
psde->is_rt_pipe, total_fl, qos_lut);
psde->pipe_hw->ops.setup_creq_lut(psde->pipe_hw, &psde->pipe_qos_cfg);
@@ -427,10 +428,10 @@
psde->pipe_qos_cfg.danger_lut,
psde->pipe_qos_cfg.safe_lut);
- SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
+ SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
plane->base.id,
psde->pipe - SSPP_VIG0,
- fmt ? fmt->base.pixel_format : 0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
fmt ? fmt->fetch_mode : -1,
psde->pipe_qos_cfg.danger_lut,
psde->pipe_qos_cfg.safe_lut);
@@ -620,8 +621,6 @@
prefix = sde_sync_get_name_prefix(input_fence);
rc = sde_sync_wait(input_fence, wait_ms);
- SDE_EVT32(DRMID(plane), -ret, prefix);
-
switch (rc) {
case 0:
SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
@@ -648,6 +647,8 @@
ret = 0;
break;
}
+
+ SDE_EVT32_VERBOSE(DRMID(plane), -ret, prefix);
} else {
ret = 0;
}
@@ -1220,6 +1221,40 @@
}
/**
+ * _sde_plane_fb_get/put - framebuffer callback for crtc res ops
+ */
+static void *_sde_plane_fb_get(void *fb, u32 type, u64 tag)
+{
+ drm_framebuffer_reference(fb);
+ return fb;
+}
+static void _sde_plane_fb_put(void *fb)
+{
+ drm_framebuffer_unreference(fb);
+}
+static struct sde_crtc_res_ops fb_res_ops = {
+ .put = _sde_plane_fb_put,
+ .get = _sde_plane_fb_get,
+};
+
+/**
+ * _sde_plane_fbo_get/put - framebuffer object callback for crtc res ops
+ */
+static void *_sde_plane_fbo_get(void *fbo, u32 type, u64 tag)
+{
+ sde_kms_fbo_reference(fbo);
+ return fbo;
+}
+static void _sde_plane_fbo_put(void *fbo)
+{
+ sde_kms_fbo_unreference(fbo);
+}
+static struct sde_crtc_res_ops fbo_res_ops = {
+ .put = _sde_plane_fbo_put,
+ .get = _sde_plane_fbo_get,
+};
+
+/**
* sde_plane_rot_calc_prefill - calculate rotator start prefill
* @plane: Pointer to drm plane
* return: prefill time in line
@@ -1294,17 +1329,26 @@
struct sde_plane_state *pstate;
struct sde_plane_rot_state *rstate;
struct sde_hw_blk *hw_blk;
- struct sde_hw_blk_attachment *attach;
+ struct drm_crtc_state *cstate;
struct drm_rect *in_rot, *out_rot;
+ struct drm_plane *attached_plane;
u32 dst_x, dst_y, dst_w, dst_h;
int found = 0;
int xpos = 0;
+ int ret;
if (!plane || !state || !state->state) {
SDE_ERROR("invalid parameters\n");
return;
}
+ cstate = _sde_plane_get_crtc_state(state);
+ if (IS_ERR_OR_NULL(cstate)) {
+ ret = PTR_ERR(cstate);
+ SDE_ERROR("invalid crtc state %d\n", ret);
+ return;
+ }
+
pstate = to_sde_plane_state(state);
rstate = &pstate->rot;
@@ -1341,24 +1385,12 @@
hw_blk = &rstate->rot_hw->base;
/* enumerating over all planes attached to the same rotator */
- list_for_each_entry(attach, &hw_blk->attach_list, list) {
- struct drm_plane *attached_plane;
+ drm_atomic_crtc_state_for_each_plane(attached_plane, cstate) {
struct drm_plane_state *attached_state;
struct sde_plane_state *attached_pstate;
struct sde_plane_rot_state *attached_rstate;
struct drm_rect attached_out_rect;
- if (attach->tag != SDE_TAG_ROT_PLANE)
- continue;
-
- attached_plane = attach->value;
-
- found++;
-
- /* skip itself */
- if (attached_plane == plane)
- continue;
-
attached_state = drm_atomic_get_existing_plane_state(
state->state, attached_plane);
@@ -1368,6 +1400,15 @@
attached_pstate = to_sde_plane_state(attached_state);
attached_rstate = &attached_pstate->rot;
+ if (attached_rstate->rot_hw != rstate->rot_hw)
+ continue;
+
+ found++;
+
+ /* skip itself */
+ if (attached_plane == plane)
+ continue;
+
/* find bounding rotator source roi */
if (attached_state->src_x < in_rot->x1)
in_rot->x1 = attached_state->src_x;
@@ -1583,31 +1624,25 @@
rstate->out_rotation &= ~DRM_REFLECT_Y;
SDE_DEBUG(
- "plane%d.%d rot:%d/%c%c%c%c/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+ "plane%d.%d rot:%d/%c%c%c%c/%dx%d/%4.4s/%llx/%dx%d+%d+%d\n",
plane->base.id, rstate->sequence_id, hw_cmd,
rot_cmd->rot90 ? 'r' : '_',
rot_cmd->hflip ? 'h' : '_',
rot_cmd->vflip ? 'v' : '_',
rot_cmd->video_mode ? 'V' : 'C',
state->fb->width, state->fb->height,
- state->fb->pixel_format >> 0,
- state->fb->pixel_format >> 8,
- state->fb->pixel_format >> 16,
- state->fb->pixel_format >> 24,
+ (char *) &state->fb->pixel_format,
state->fb->modifier[0],
drm_rect_width(&rstate->in_rot_rect) >> 16,
drm_rect_height(&rstate->in_rot_rect) >> 16,
rstate->in_rot_rect.x1 >> 16,
rstate->in_rot_rect.y1 >> 16);
- SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+ SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%4.4s/%llx/%dx%d+%d+%d\n",
plane->base.id, rstate->sequence_id, hw_cmd,
rstate->out_rotation,
rstate->out_fb_width, rstate->out_fb_height,
- rstate->out_fb_pixel_format >> 0,
- rstate->out_fb_pixel_format >> 8,
- rstate->out_fb_pixel_format >> 16,
- rstate->out_fb_pixel_format >> 24,
+ (char *) &rstate->out_fb_pixel_format,
rstate->out_fb_modifier[0],
rstate->out_src_w >> 16, rstate->out_src_h >> 16,
rstate->out_src_x >> 16, rstate->out_src_y >> 16);
@@ -1628,6 +1663,7 @@
struct drm_framebuffer *fb = new_state->fb;
struct sde_plane_state *new_pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *new_rstate = &new_pstate->rot;
+ struct drm_crtc_state *cstate;
int ret;
SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n",
@@ -1639,6 +1675,13 @@
if (!new_rstate->out_sbuf || !new_rstate->rot_hw)
return 0;
+ cstate = _sde_plane_get_crtc_state(new_state);
+ if (IS_ERR(cstate)) {
+ ret = PTR_ERR(cstate);
+ SDE_ERROR("invalid crtc state %d\n", ret);
+ return ret;
+ }
+
/* need to re-calc based on all newly validated plane states */
sde_plane_rot_calc_cfg(plane, new_state);
@@ -1647,26 +1690,25 @@
struct sde_kms_fbo *fbo;
struct drm_framebuffer *fb;
- fbo = sde_hw_blk_lookup_value(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FBO, 0);
- fb = sde_hw_blk_lookup_value(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FB, 0);
+ fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &new_rstate->rot_hw->base);
+ fb = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &new_rstate->rot_hw->base);
if (fb && fbo) {
SDE_DEBUG("plane%d.%d get fb/fbo\n", plane->base.id,
new_rstate->sequence_id);
-
- new_rstate->out_fbo = fbo;
- sde_kms_fbo_reference(new_rstate->out_fbo);
- sde_hw_blk_attach(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FBO,
- new_rstate->out_fbo);
-
- new_rstate->out_fb = fb;
- drm_framebuffer_reference(new_rstate->out_fb);
- sde_hw_blk_attach(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FB,
- new_rstate->out_fb);
+ } else if (fbo) {
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &new_rstate->rot_hw->base);
+ fbo = NULL;
+ } else if (fb) {
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &new_rstate->rot_hw->base);
+ fb = NULL;
}
+
+ new_rstate->out_fbo = fbo;
+ new_rstate->out_fb = fb;
}
/* release buffer if output format configuration changes */
@@ -1682,13 +1724,11 @@
SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
new_rstate->sequence_id);
- sde_hw_blk_detach(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FB, new_rstate->out_fb);
- drm_framebuffer_unreference(new_rstate->out_fb);
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &new_rstate->rot_hw->base);
new_rstate->out_fb = NULL;
- sde_hw_blk_detach(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FBO, new_rstate->out_fbo);
- sde_kms_fbo_unreference(new_rstate->out_fbo);
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &new_rstate->rot_hw->base);
new_rstate->out_fbo = NULL;
}
@@ -1716,8 +1756,13 @@
goto error_create_fbo;
}
- sde_hw_blk_attach(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FBO, new_rstate->out_fbo);
+ ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &new_rstate->rot_hw->base,
+ new_rstate->out_fbo, &fbo_res_ops);
+ if (ret) {
+ SDE_ERROR("failed to add crtc resource\n");
+ goto error_create_fbo_res;
+ }
new_rstate->out_fb = sde_kms_fbo_create_fb(plane->dev,
new_rstate->out_fbo);
@@ -1727,8 +1772,13 @@
goto error_create_fb;
}
- sde_hw_blk_attach(&new_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FB, new_rstate->out_fb);
+ ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &new_rstate->rot_hw->base,
+ new_rstate->out_fb, &fb_res_ops);
+ if (ret) {
+ SDE_ERROR("failed to add crtc resource %d\n", ret);
+ goto error_create_fb_res;
+ }
}
/* prepare rotator input buffer */
@@ -1756,14 +1806,14 @@
error_prepare_output_buffer:
msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
error_prepare_input_buffer:
- sde_hw_blk_detach(&new_rstate->rot_hw->base, SDE_TAG_ROT_OUT_FB,
- new_rstate->out_fb);
- drm_framebuffer_unreference(new_rstate->out_fb);
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &new_rstate->rot_hw->base);
+error_create_fb_res:
new_rstate->out_fb = NULL;
error_create_fb:
- sde_hw_blk_detach(&new_rstate->rot_hw->base, SDE_TAG_ROT_OUT_FBO,
- new_rstate->out_fbo);
- sde_kms_fbo_unreference(new_rstate->out_fbo);
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &new_rstate->rot_hw->base);
+error_create_fbo_res:
new_rstate->out_fbo = NULL;
error_create_fbo:
return ret;
@@ -1782,6 +1832,7 @@
struct sde_plane_state *old_pstate = to_sde_plane_state(old_state);
struct sde_plane_rot_state *old_rstate = &old_pstate->rot;
struct sde_hw_rot_cmd *cmd = &old_rstate->rot_cmd;
+ struct drm_crtc_state *cstate;
int ret;
SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", plane->base.id,
@@ -1792,6 +1843,13 @@
if (!old_rstate->out_sbuf || !old_rstate->rot_hw)
return;
+ cstate = _sde_plane_get_crtc_state(old_state);
+ if (IS_ERR(cstate)) {
+ ret = PTR_ERR(cstate);
+ SDE_ERROR("invalid crtc state %d\n", ret);
+ return;
+ }
+
if (sde_plane_crtc_enabled(old_state)) {
ret = old_rstate->rot_hw->ops.commit(old_rstate->rot_hw, cmd,
SDE_HW_ROT_CMD_CLEANUP);
@@ -1803,15 +1861,11 @@
if (old_rstate->out_fb) {
msm_framebuffer_cleanup(old_rstate->out_fb,
old_rstate->mmu_id);
- sde_hw_blk_detach(&old_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FB,
- old_rstate->out_fb);
- drm_framebuffer_unreference(old_rstate->out_fb);
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &old_rstate->rot_hw->base);
old_rstate->out_fb = NULL;
- sde_hw_blk_detach(&old_rstate->rot_hw->base,
- SDE_TAG_ROT_OUT_FBO,
- old_rstate->out_fbo);
- sde_kms_fbo_unreference(old_rstate->out_fbo);
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &old_rstate->rot_hw->base);
old_rstate->out_fbo = NULL;
}
@@ -1831,6 +1885,7 @@
struct sde_plane *psde;
struct sde_plane_state *pstate, *old_pstate;
struct sde_plane_rot_state *rstate, *old_rstate;
+ struct drm_crtc_state *cstate;
struct sde_hw_blk *hw_blk;
int i, ret = 0;
@@ -1845,6 +1900,14 @@
rstate = &pstate->rot;
old_rstate = &old_pstate->rot;
+ /* cstate will be null if crtc is disconnected from plane */
+ cstate = _sde_plane_get_crtc_state(state);
+ if (IS_ERR(cstate)) {
+ ret = PTR_ERR(cstate);
+ SDE_ERROR("invalid crtc state %d\n", ret);
+ return ret;
+ }
+
SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", plane->base.id,
rstate->sequence_id, state->fb ? state->fb->base.id : 0,
!!rstate->out_sbuf, !!rstate->rot_hw,
@@ -1852,70 +1915,40 @@
rstate->in_rotation = drm_rotation_simplify(
sde_plane_get_property(pstate, PLANE_PROP_ROTATION),
- DRM_ROTATE_90 | DRM_REFLECT_X | DRM_REFLECT_Y);
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_REFLECT_X | DRM_REFLECT_Y);
rstate->rot90 = rstate->in_rotation & DRM_ROTATE_90 ? true : false;
rstate->hflip = rstate->in_rotation & DRM_REFLECT_X ? true : false;
rstate->vflip = rstate->in_rotation & DRM_REFLECT_Y ? true : false;
rstate->out_sbuf = psde->sbuf_mode || rstate->rot90;
- if ((!sde_plane_enabled(state) || !rstate->out_sbuf) &&
- rstate->rot_hw) {
-
- SDE_DEBUG("plane%d.%d release rotator\n",
+ if (sde_plane_enabled(state) && rstate->out_sbuf) {
+ SDE_DEBUG("plane%d.%d acquire rotator\n",
plane->base.id, rstate->sequence_id);
- sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
- rstate->in_fb);
- rstate->in_fb = NULL;
- sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
- plane);
- sde_hw_rot_put(rstate->rot_hw);
- rstate->rot_hw = NULL;
-
- } else if (sde_plane_enabled(state) && rstate->out_sbuf &&
- !rstate->rot_hw) {
-
- SDE_DEBUG("plane%d.%d allocate rotator\n",
- plane->base.id, rstate->sequence_id);
-
- hw_blk = sde_hw_blk_lookup_blk(SDE_TAG_ROT_IN_FB, state->fb,
- SDE_HW_BLK_ROT);
- if (hw_blk)
- rstate->rot_hw = to_sde_hw_rot(hw_blk);
- else
- rstate->rot_hw = sde_hw_rot_get(NULL);
-
- if (!rstate->rot_hw) {
+ hw_blk = sde_crtc_res_get(cstate, SDE_HW_BLK_ROT,
+ (u64) state->fb);
+ if (!hw_blk) {
SDE_ERROR("plane%d no available rotator\n",
plane->base.id);
return -EINVAL;
}
+ rstate->rot_hw = to_sde_hw_rot(hw_blk);
+
if (!rstate->rot_hw->ops.commit) {
SDE_ERROR("plane%d invalid rotator ops\n",
plane->base.id);
- sde_hw_rot_put(rstate->rot_hw);
+ sde_crtc_res_put(cstate,
+ SDE_HW_BLK_ROT, (u64) state->fb);
rstate->rot_hw = NULL;
return -EINVAL;
}
rstate->in_fb = state->fb;
- sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
- rstate->in_fb);
- sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
- plane);
-
- } else if (sde_plane_enabled(state) && rstate->out_sbuf &&
- (rstate->in_fb != state->fb)) {
-
- SDE_DEBUG("plane%d.%d update fb\n",
- plane->base.id, rstate->sequence_id);
-
- sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
- rstate->in_fb);
- rstate->in_fb = state->fb;
- sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
- rstate->in_fb);
+ } else {
+ rstate->in_fb = NULL;
+ rstate->rot_hw = NULL;
}
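(The rotator is now acquired through the crtc resource manager, keyed by the framebuffer pointer widened to a u64 tag, so atomic-check and commit paths holding the same fb resolve to the same shared hw block. A standalone sketch of the tagging scheme; make_tag() and struct blk are names invented for the example.)

#include <stdio.h>
#include <stdint.h>

struct blk { int id; };

/* the lookup key is simply the pointer value widened to 64 bits,
 * mirroring the (u64) state->fb casts passed to sde_crtc_res_get() */
static uint64_t make_tag(const void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}

int main(void)
{
	struct blk rot = { .id = 3 };
	uint64_t tag = make_tag(&rot);

	/* the same pointer always yields the same key, so repeated
	 * lookups land on one shared resource entry */
	printf("tag=0x%llx stable=%d\n",
	       (unsigned long long)tag, tag == make_tag(&rot));
	return 0;
}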
if (sde_plane_enabled(state) && rstate->out_sbuf && rstate->rot_hw) {
@@ -1928,21 +1961,29 @@
ret = sde_plane_rot_submit_command(plane, state,
SDE_HW_ROT_CMD_VALIDATE);
- } else if (sde_plane_enabled(state)) {
+ } else {
SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
rstate->sequence_id);
/* bypass rotator - initialize output setting as input */
+ for (i = 0; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
+ rstate->out_fb_modifier[i] = state->fb ?
+ state->fb->modifier[i] : 0x0;
+
+ if (state->fb) {
+ rstate->out_fb_pixel_format = state->fb->pixel_format;
+ rstate->out_fb_flags = state->fb->flags;
+ rstate->out_fb_width = state->fb->width;
+ rstate->out_fb_height = state->fb->height;
+ } else {
+ rstate->out_fb_pixel_format = 0x0;
+ rstate->out_fb_flags = 0x0;
+ rstate->out_fb_width = 0;
+ rstate->out_fb_height = 0;
+ }
+
rstate->out_rotation = rstate->in_rotation;
- rstate->out_fb_pixel_format = state->fb->pixel_format;
-
- for (i = 0.; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
- rstate->out_fb_modifier[i] = state->fb->modifier[i];
-
- rstate->out_fb_flags = state->fb->flags;
- rstate->out_fb_width = state->fb->width;
- rstate->out_fb_height = state->fb->height;
rstate->out_src_x = state->src_x;
rstate->out_src_y = state->src_y;
rstate->out_src_w = state->src_w;
@@ -2008,16 +2049,6 @@
rstate->sequence_id,
!!rstate->out_sbuf, !!rstate->rot_hw,
sde_plane_crtc_enabled(state));
-
- if (rstate->rot_hw) {
- sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
- rstate->in_fb);
- rstate->in_fb = NULL;
- sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
- plane);
- sde_hw_rot_put(rstate->rot_hw);
- rstate->rot_hw = NULL;
- }
}
/**
@@ -2031,6 +2062,8 @@
{
struct sde_plane_state *pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *rstate = &pstate->rot;
+ struct drm_crtc_state *cstate;
+ int ret;
rstate->sequence_id++;
@@ -2038,14 +2071,19 @@
rstate->sequence_id,
!!rstate->out_sbuf, !!rstate->rot_hw);
- if (rstate->rot_hw) {
- sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
- rstate->in_fb);
- sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
- plane);
- sde_hw_rot_get(rstate->rot_hw);
+ cstate = _sde_plane_get_crtc_state(new_state);
+ if (IS_ERR(cstate)) {
+ ret = PTR_ERR(cstate);
+ SDE_ERROR("invalid crtc state %d\n", ret);
+ return -EINVAL;
}
+ if (rstate->rot_hw && cstate)
+ sde_crtc_res_get(cstate, SDE_HW_BLK_ROT, (u64) rstate->in_fb);
+ else if (rstate->rot_hw && !cstate)
+ SDE_ERROR("plane%d.%d zombie rotator hw\n",
+ plane->base.id, rstate->sequence_id);
+
rstate->out_fb = NULL;
rstate->out_fbo = NULL;
@@ -2108,6 +2146,10 @@
sde_kms_info_add_keyint(info, "cache_size",
rot_hw->ops.get_cache_size(rot_hw));
+ if (rot_hw->ops.get_maxlinewidth)
+ sde_kms_info_add_keyint(info, "max_linewidth",
+ rot_hw->ops.get_maxlinewidth(rot_hw));
+
msm_property_set_blob(&psde->property_info, &psde->blob_rot_caps,
info->data, info->len, PLANE_PROP_ROT_CAPS_V1);
@@ -2126,7 +2168,8 @@
struct sde_mdss_cfg *catalog)
{
struct sde_plane *psde = to_sde_plane(plane);
- unsigned long supported_rotations = DRM_REFLECT_X | DRM_REFLECT_Y;
+ unsigned long supported_rotations = DRM_ROTATE_0 | DRM_REFLECT_X |
+ DRM_REFLECT_Y;
if (!plane || !psde) {
SDE_ERROR("invalid plane\n");
@@ -2169,6 +2212,7 @@
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct sde_rect src, dst;
+ const struct sde_rect *crtc_roi;
bool q16_data = true;
int idx;
@@ -2195,13 +2239,10 @@
nplanes = fmt->num_planes;
SDE_DEBUG(
- "plane%d.%d sspp:%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
+ "plane%d.%d sspp:%dx%d/%4.4s/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
plane->base.id, rstate->sequence_id,
rstate->out_fb_width, rstate->out_fb_height,
- rstate->out_fb_pixel_format >> 0,
- rstate->out_fb_pixel_format >> 8,
- rstate->out_fb_pixel_format >> 16,
- rstate->out_fb_pixel_format >> 24,
+ (char *) &rstate->out_fb_pixel_format,
rstate->out_fb_modifier[0],
rstate->out_src_w >> 16, rstate->out_src_h >> 16,
rstate->out_src_x >> 16, rstate->out_src_y >> 16,
@@ -2251,6 +2292,11 @@
}
}
+ /* always re-program the output rects when a partial update is active */
+ sde_crtc_get_crtc_roi(crtc->state, &crtc_roi);
+ if (!sde_kms_rect_is_null(crtc_roi))
+ pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+
if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
@@ -2272,13 +2318,10 @@
state->crtc_w, state->crtc_h, !q16_data);
SDE_DEBUG_PLANE(psde,
- "FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %c%c%c%c ubwc %d\n",
+ "FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %4.4s ubwc %d\n",
fb->base.id, src.x, src.y, src.w, src.h,
crtc->base.id, dst.x, dst.y, dst.w, dst.h,
- fmt->base.pixel_format >> 0,
- fmt->base.pixel_format >> 8,
- fmt->base.pixel_format >> 16,
- fmt->base.pixel_format >> 24,
+ (char *)&fmt->base.pixel_format,
SDE_FORMAT_IS_UBWC(fmt));
if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
@@ -2291,6 +2334,13 @@
src.y &= ~0x1;
}
+ /*
+ * when a partial update is active, shift the sspp destination to
+ * the origin of the active layer mixer (crtc roi)
+ */
+ dst.x -= crtc_roi->x;
+ dst.y -= crtc_roi->y;
+
psde->pipe_cfg.src_rect = src;
psde->pipe_cfg.dst_rect = dst;
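(The dst.x/dst.y adjustment above translates the plane destination from full-screen coordinates into the coordinate space of the active layer mixer, whose origin is the crtc ROI. A standalone check of that translation; the sample rectangle values are assumptions.)

#include <stdio.h>

struct rect { int x, y, w, h; };

int main(void)
{
	struct rect dst = { .x = 500, .y = 300, .w = 256, .h = 128 };
	struct rect crtc_roi = { .x = 400, .y = 200, .w = 960, .h = 540 };

	/* shift the plane destination into ROI-relative coordinates */
	dst.x -= crtc_roi.x;
	dst.y -= crtc_roi.y;

	printf("dst in lm space: %d,%d %dx%d\n", dst.x, dst.y, dst.w, dst.h);
	return 0;
}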
@@ -2731,14 +2781,11 @@
goto modeset_update;
SDE_DEBUG(
- "plane%d.%u sspp:%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
+ "plane%d.%u sspp:%x/%dx%d/%4.4s/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
plane->base.id, rstate->sequence_id,
rstate->out_rotation,
rstate->out_fb_width, rstate->out_fb_height,
- rstate->out_fb_pixel_format >> 0,
- rstate->out_fb_pixel_format >> 8,
- rstate->out_fb_pixel_format >> 16,
- rstate->out_fb_pixel_format >> 24,
+ (char *) &rstate->out_fb_pixel_format,
rstate->out_fb_modifier[0],
rstate->out_src_w >> 16, rstate->out_src_h >> 16,
rstate->out_src_x >> 16, rstate->out_src_y >> 16,
@@ -2827,14 +2874,14 @@
* Cropping is not required as hardware will consider only the
* intersecting region with the src rect.
*/
- sde_kms_rect_intersect(&intersect, &src, &pstate->excl_rect);
+ sde_kms_rect_intersect(&src, &pstate->excl_rect, &intersect);
if (!intersect.w || !intersect.h || SDE_FORMAT_IS_YUV(fmt)) {
SDE_ERROR_PLANE(psde,
- "invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt:%s\n",
+ "invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt: %4.4s\n",
pstate->excl_rect.x, pstate->excl_rect.y,
pstate->excl_rect.w, pstate->excl_rect.h,
src.x, src.y, src.w, src.h,
- drm_get_format_name(fmt->base.pixel_format));
+ (char *)&fmt->base.pixel_format);
ret = -EINVAL;
}
}
@@ -3531,10 +3578,6 @@
msm_property_duplicate_state(&psde->property_info, old_state, pstate,
pstate->property_values, pstate->property_blobs);
- /* add ref count for frame buffer */
- if (pstate->base.fb)
- drm_framebuffer_reference(pstate->base.fb);
-
/* clear out any input fence */
pstate->input_fence = 0;
input_fence_default = msm_property_get_default(
@@ -3545,6 +3588,8 @@
pstate->dirty = 0x0;
pstate->pending = false;
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
sde_plane_rot_duplicate_state(plane, &pstate->base);
return &pstate->base;
@@ -3710,7 +3755,7 @@
/* create overall sub-directory for the pipe */
psde->debugfs_root =
debugfs_create_dir(psde->pipe_name,
- sde_debugfs_get_root(kms));
+ plane->dev->primary->debugfs_root);
if (!psde->debugfs_root)
return -ENOMEM;
@@ -3929,7 +3974,7 @@
if (master_plane_id)
format_list = plane_formats;
- psde->nformats = sde_populate_formats(plane_formats,
+ psde->nformats = sde_populate_formats(format_list,
psde->formats,
0,
ARRAY_SIZE(psde->formats));
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 2220925..b2665be 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -286,6 +286,27 @@
return 0;
}
+int sde_wb_get_topology(const struct drm_display_mode *drm_mode,
+ struct msm_display_topology *topology, u32 max_mixer_width)
+{
+ const u32 dual_lm = 2;
+ const u32 single_lm = 1;
+ const u32 single_intf = 1;
+ const u32 no_enc = 0;
+
+ if (!drm_mode || !topology || !max_mixer_width) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
+ dual_lm : single_lm;
+ topology->num_enc = no_enc;
+ topology->num_intf = single_intf;
+
+ return 0;
+}
+
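(A standalone sketch of the mixer-count decision in sde_wb_get_topology(): writeback takes two layer mixers once the mode width reaches the single-mixer limit, no encoder block, and one interface. The sample widths are assumptions.)

#include <stdio.h>

int main(void)
{
	const unsigned int max_mixer_width = 2560;
	const unsigned int hdisplay[] = { 1920, 2560, 3840 };

	for (int i = 0; i < 3; i++) {
		unsigned int num_lm =
			(max_mixer_width <= hdisplay[i]) ? 2 : 1;

		printf("hdisplay=%u -> num_lm=%u num_enc=0 num_intf=1\n",
		       hdisplay[i], num_lm);
	}
	return 0;
}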
int sde_wb_connector_post_init(struct drm_connector *connector,
void *info,
void *display)
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index 4e33595..205ff24 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
* @wb_lock Serialization lock for writeback context structure
* @connector: Connector associated with writeback device
* @encoder: Encoder associated with writeback device
+ * @max_mixer_width: Max width supported by SDE LM HW block
* @count_modes: Length of writeback connector modes array
* @modes: Writeback connector modes array
*/
@@ -49,6 +50,8 @@
struct drm_encoder *encoder;
enum drm_connector_status detect_status;
+ u32 max_mixer_width;
+
u32 count_modes;
struct drm_mode_modeinfo *modes;
};
@@ -183,6 +186,17 @@
int sde_wb_get_info(struct msm_display_info *info, void *display);
/**
+ * sde_wb_get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: zero on success
+ */
+int sde_wb_get_topology(const struct drm_display_mode *drm_mode,
+ struct msm_display_topology *topology,
+ u32 max_mixer_width);
+
+/**
* sde_wb_connector_get_wb - retrieve writeback device of the given connector
* @connector: Pointer to drm connector
* Returns: Pointer to writeback device on success; NULL otherwise
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 9977d10..a4b918e 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -19,6 +19,7 @@
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
+#include <linux/list_sort.h>
#include "sde_dbg.h"
#include "sde/sde_hw_catalog.h"
@@ -41,7 +42,7 @@
#define DBGBUS_NAME_SDE "sde"
#define DBGBUS_NAME_VBIF_RT "vbif_rt"
-/* offsets from sde_base address for the debug buses */
+/* offsets from sde top address for the debug buses */
#define DBGBUS_SSPP0 0x188
#define DBGBUS_SSPP1 0x298
#define DBGBUS_DSPP 0x348
@@ -54,6 +55,9 @@
#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
#define MMSS_VBIF_TEST_BUS_OUT 0x230
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN 16
+
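(REG_DUMP_ALIGN feeds the ceil-division used later in _sde_dump_reg() to round dump lengths up to whole 16-byte rows of four u32s. A quick standalone check of that arithmetic:)

#include <stdio.h>

#define REG_DUMP_ALIGN 16	/* four u32 registers per printed row */

int main(void)
{
	const unsigned int len_bytes[] = { 1, 16, 17, 100 };

	for (int i = 0; i < 4; i++) {
		unsigned int len_align =
			(len_bytes[i] + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;

		printf("len=%u -> rows=%u padded=%u\n", len_bytes[i],
		       len_align, len_align * REG_DUMP_ALIGN);
	}
	return 0;
}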
/**
* struct sde_dbg_reg_offset - tracking for start and end of region
* @start: start offset
@@ -135,6 +139,7 @@
struct sde_dbg_sde_debug_bus {
struct sde_dbg_debug_bus_common cmn;
struct sde_debug_bus_entry *entries;
+ u32 top_blk_off;
};
struct sde_dbg_vbif_debug_bus {
@@ -146,7 +151,6 @@
* struct sde_dbg_base - global sde debug base structure
* @evtlog: event log instance
* @reg_base_list: list of register dumping regions
- * @root: base debugfs root
* @dev: device pointer
* @power_ctrl: callback structure for enabling power for reading hw registers
* @req_dump_blks: list of blocks requested for dumping
@@ -160,7 +164,6 @@
static struct sde_dbg_base {
struct sde_dbg_evtlog *evtlog;
struct list_head reg_base_list;
- struct dentry *root;
struct device *dev;
struct sde_dbg_power_ctrl power_ctrl;
@@ -1961,15 +1964,17 @@
* _sde_dump_reg - helper function for dumping rotator register set content
* @dump_name: register set name
* @reg_dump_flag: dumping flag controlling in-log/memory dump location
+ * @base_addr: starting address of io region for calculating offsets to print
* @addr: starting address offset for dumping
* @len_bytes: range of the register set
* @dump_mem: output buffer for memory dump location option
* @from_isr: whether being called from isr context
*/
-static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
- size_t len_bytes, u32 **dump_mem, bool from_isr)
+static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
+ char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem,
+ bool from_isr)
{
- u32 in_log, in_mem, len_align_16, len_bytes_aligned;
+ u32 in_log, in_mem, len_align, len_padded;
u32 *dump_addr = NULL;
char *end_addr;
int i;
@@ -1980,28 +1985,33 @@
in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
- pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
- reg_dump_flag, in_log, in_mem);
+ pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n",
+ dump_name, reg_dump_flag, in_log, in_mem);
if (!in_log && !in_mem)
return;
- len_align_16 = (len_bytes + 15) / 16;
- len_bytes_aligned = len_align_16 * 16;
+ if (in_log)
+ dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
+ dump_name, addr - base_addr, len_bytes);
+
+ len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
+ len_padded = len_align * REG_DUMP_ALIGN;
end_addr = addr + len_bytes;
if (in_mem) {
if (dump_mem && !(*dump_mem)) {
phys_addr_t phys = 0;
*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
- len_bytes_aligned, &phys, GFP_KERNEL);
+ len_padded, &phys, GFP_KERNEL);
}
if (dump_mem && *dump_mem) {
dump_addr = *dump_mem;
- pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%pK\n",
- dump_name, dump_addr,
- dump_addr + len_bytes_aligned, addr);
+ dev_info(sde_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
+ dump_name, dump_addr, len_padded,
+ addr - base_addr);
} else {
in_mem = 0;
pr_err("dump_mem: kzalloc fails!\n");
@@ -2011,7 +2021,7 @@
if (!from_isr)
_sde_dbg_enable_power(true);
- for (i = 0; i < len_align_16; i++) {
+ for (i = 0; i < len_align; i++) {
u32 x0, x4, x8, xc;
x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
@@ -2020,8 +2030,9 @@
xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
if (in_log)
- pr_info("%pK : %08x %08x %08x %08x\n", addr, x0, x4, x8,
- xc);
+ dev_info(sde_dbg_base.dev,
+ "0x%lx : %08x %08x %08x %08x\n",
+ addr - base_addr, x0, x4, x8, xc);
if (dump_addr) {
dump_addr[i * 4] = x0;
@@ -2030,7 +2041,7 @@
dump_addr[i * 4 + 3] = xc;
}
- addr += 16;
+ addr += REG_DUMP_ALIGN;
}
if (!from_isr)
@@ -2059,6 +2070,20 @@
return length;
}
+static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct sde_dbg_reg_range *ar, *br;
+
+ if (!a || !b)
+ return 0;
+
+ ar = container_of(a, struct sde_dbg_reg_range, head);
+ br = container_of(b, struct sde_dbg_reg_range, head);
+
+ return ar->offset.start - br->offset.start;
+}
+
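(The comparator above follows the list_sort() contract of negative/zero/positive, ordering sub-ranges by start offset so the dump walks the block in ascending address order. The same comparator shape, exercised with qsort() in user space:)

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned int start; };

/* same three-way contract as the list_sort() callback */
static int range_cmp(const void *a, const void *b)
{
	const struct range *ar = a, *br = b;

	/* fine for register offsets, which are far below INT_MAX */
	return (int)(ar->start - br->start);
}

int main(void)
{
	struct range r[] = { { 0x400 }, { 0x100 }, { 0x200 } };

	qsort(r, 3, sizeof(r[0]), range_cmp);
	for (int i = 0; i < 3; i++)
		printf("0x%x\n", r[i].start);
	return 0;
}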
/**
* _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base
* @dbg: register blk base structure
@@ -2076,10 +2101,13 @@
return;
}
- pr_info("%s:=========%s DUMP=========\n", __func__, dbg->name);
+ dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__,
+ dbg->name);
/* If there is a list to dump the registers by ranges, use the ranges */
if (!list_empty(&dbg->sub_range_list)) {
+ /* sort the list by start address first */
+ list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp);
list_for_each_entry(range_node, &dbg->sub_range_list, head) {
len = _sde_dbg_get_dump_range(&range_node->offset,
dbg->max_offset);
@@ -2089,18 +2117,20 @@
addr, range_node->offset.start,
range_node->offset.end);
- _sde_dump_reg((const char *)range_node->range_name,
- reg_dump_flag, addr, len, &range_node->reg_dump,
- false);
+ _sde_dump_reg(range_node->range_name, reg_dump_flag,
+ dbg->base, addr, len,
+ &range_node->reg_dump, false);
}
} else {
/* If there is no list to dump ranges, dump all registers */
- pr_info("Ranges not found, will dump full registers\n");
- pr_info("base:0x%pK len:0x%zx\n", dbg->base, dbg->max_offset);
+ dev_info(sde_dbg_base.dev,
+ "Ranges not found, will dump full registers\n");
+ dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base,
+ dbg->max_offset);
addr = dbg->base;
len = dbg->max_offset;
- _sde_dump_reg((const char *)dbg->name, reg_dump_flag, addr,
- len, &dbg->reg_dump, false);
+ _sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
+ &dbg->reg_dump, false);
}
}
@@ -2180,7 +2210,7 @@
reg_base_head)
if (strlen(reg_base->name) &&
!strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base;
+ mem_base = reg_base->base + bus->top_blk_off;
if (!mem_base) {
pr_err("unable to find mem_base for %s\n", bus->cmn.name);
@@ -2198,7 +2228,8 @@
if (!in_log && !in_mem)
return;
- pr_info("======== start %s dump =========\n", bus->cmn.name);
+ dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
if (in_mem) {
if (!(*dump_mem))
@@ -2207,8 +2238,9 @@
if (*dump_mem) {
dump_addr = *dump_mem;
- pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
- __func__, dump_addr, dump_addr + list_size);
+ dev_info(sde_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
} else {
in_mem = false;
pr_err("dump_mem: allocation fails\n");
@@ -2230,9 +2262,10 @@
status = readl_relaxed(mem_base + offset);
if (in_log)
- pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
- head->wr_addr, head->block_id, head->test_id,
- status);
+ dev_info(sde_dbg_base.dev,
+ "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id,
+ head->test_id, status);
if (dump_addr && in_mem) {
dump_addr[i*4] = head->wr_addr;
@@ -2247,7 +2280,8 @@
}
_sde_dbg_enable_power(false);
- pr_info("======== end %s dump =========\n", bus->cmn.name);
+ dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
}
static void _sde_dbg_dump_vbif_debug_bus_entry(
@@ -2277,7 +2311,8 @@
*dump_addr++ = val;
}
if (in_log)
- pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ dev_info(sde_dbg_base.dev,
+ "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
head->block_bus_addr, i, j, val);
}
}
@@ -2316,7 +2351,8 @@
list_size = bus->cmn.entries_size;
dump_mem = &bus->cmn.dumped_content;
- pr_info("======== start %s dump =========\n", bus->cmn.name);
+ dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
if (!dump_mem || !dbg_bus || !bus_size || !list_size)
return;
@@ -2343,8 +2379,9 @@
if (*dump_mem) {
dump_addr = *dump_mem;
- pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
- __func__, dump_addr, dump_addr + list_size);
+ dev_info(sde_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
} else {
in_mem = false;
pr_err("dump_mem: allocation fails\n");
@@ -2375,7 +2412,8 @@
_sde_dbg_enable_power(false);
- pr_info("======== end %s dump =========\n", bus->cmn.name);
+ dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
}
/**
@@ -2863,24 +2901,19 @@
struct sde_dbg_reg_base *blk_base;
char debug_name[80] = "";
- sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root);
- if (IS_ERR_OR_NULL(sde_dbg_base.root)) {
- pr_err("debugfs_create_dir fail, error %ld\n",
- PTR_ERR(sde_dbg_base.root));
- sde_dbg_base.root = NULL;
- return -ENODEV;
- }
+ if (!debugfs_root)
+ return -EINVAL;
- debugfs_create_file("dump", 0644, sde_dbg_base.root, NULL,
+ debugfs_create_file("dump", 0644, debugfs_root, NULL,
&sde_evtlog_fops);
- debugfs_create_u32("enable", 0644, sde_dbg_base.root,
+ debugfs_create_u32("enable", 0644, debugfs_root,
&(sde_dbg_base.evtlog->enable));
- debugfs_create_file("filter", 0644, sde_dbg_base.root,
+ debugfs_create_file("filter", 0644, debugfs_root,
sde_dbg_base.evtlog,
&sde_evtlog_filter_fops);
- debugfs_create_u32("panic", 0644, sde_dbg_base.root,
+ debugfs_create_u32("panic", 0644, debugfs_root,
&sde_dbg_base.panic_on_err);
- debugfs_create_u32("reg_dump", 0644, sde_dbg_base.root,
+ debugfs_create_u32("reg_dump", 0644, debugfs_root,
&sde_dbg_base.enable_reg_dump);
if (dbg->dbgbus_sde.entries) {
@@ -2888,7 +2921,7 @@
snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
dbg->dbgbus_sde.cmn.name);
dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
- debugfs_create_u32(debug_name, 0644, dbg->root,
+ debugfs_create_u32(debug_name, 0644, debugfs_root,
&dbg->dbgbus_sde.cmn.enable_mask);
}
@@ -2897,36 +2930,28 @@
snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
dbg->dbgbus_vbif_rt.cmn.name);
dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
- debugfs_create_u32(debug_name, 0644, dbg->root,
+ debugfs_create_u32(debug_name, 0644, debugfs_root,
&dbg->dbgbus_vbif_rt.cmn.enable_mask);
}
list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
snprintf(debug_name, sizeof(debug_name), "%s_off",
blk_base->name);
- debugfs_create_file(debug_name, 0644, dbg->root, blk_base,
+ debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
&sde_off_fops);
snprintf(debug_name, sizeof(debug_name), "%s_reg",
blk_base->name);
- debugfs_create_file(debug_name, 0644, dbg->root, blk_base,
+ debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
&sde_reg_fops);
}
return 0;
}
-#if defined(CONFIG_DEBUG_FS)
-static void _sde_dbg_debugfs_destroy(void)
-{
- debugfs_remove_recursive(sde_dbg_base.root);
- sde_dbg_base.root = 0;
-}
-#else
static void _sde_dbg_debugfs_destroy(void)
{
}
-#endif
void sde_dbg_init_dbg_buses(u32 hwversion)
{
@@ -3051,6 +3076,21 @@
return;
}
+ if (!range_name || strlen(range_name) == 0) {
+ pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n",
+ __builtin_return_address(0), base_name,
+ offset_start, offset_end);
+ return;
+ }
+
+ if (offset_end - offset_start < REG_DUMP_ALIGN ||
+ offset_start > offset_end) {
+ pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n",
+ __builtin_return_address(0), base_name,
+ range_name, offset_start, offset_end);
+ return;
+ }
+
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range)
return;
@@ -3061,6 +3101,12 @@
range->xin_id = xin_id;
list_add_tail(&range->head, ®_base->sub_range_list);
- pr_debug("%s start: 0x%X end: 0x%X\n", range->range_name,
+ pr_debug("base %s, range %s, start 0x%X, end 0x%X\n",
+ base_name, range->range_name,
range->offset.start, range->offset.end);
}
+
+void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+ sde_dbg_base.dbgbus_sde.top_blk_off = blk_off;
+}
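(sde_dbg_set_sde_top_offset() records where the TOP block sits inside the mdss region; the debug-bus dump then addresses its control registers from reg_base->base + top_blk_off rather than assuming they start at the region base. The pointer arithmetic, modelled standalone; region size and offset are assumptions.)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mmio[0x2000];		/* stand-in for the ioremapped region */
	uint32_t top_blk_off = 0x1000;	/* assumed per-target TOP offset */

	/* debug bus controls are addressed relative to the TOP block */
	uint8_t *mem_base = mmio + top_blk_off;

	printf("bus ctrl at base + 0x%x (%p)\n",
	       top_blk_off, (void *)mem_base);
	return 0;
}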
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 4344eb8..02d46c7 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -20,6 +20,13 @@
#define SDE_EVTLOG_DATA_LIMITER (-1)
#define SDE_EVTLOG_FUNC_ENTRY 0x1111
#define SDE_EVTLOG_FUNC_EXIT 0x2222
+#define SDE_EVTLOG_FUNC_CASE1 0x3333
+#define SDE_EVTLOG_FUNC_CASE2 0x4444
+#define SDE_EVTLOG_FUNC_CASE3 0x5555
+#define SDE_EVTLOG_FUNC_CASE4 0x6666
+#define SDE_EVTLOG_FUNC_CASE5 0x7777
+#define SDE_EVTLOG_PANIC 0xdead
+#define SDE_EVTLOG_FATAL 0xbad
#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
@@ -36,7 +43,7 @@
};
#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
-#define SDE_EVTLOG_DEFAULT_ENABLE SDE_EVTLOG_CRITICAL
+#define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ)
#else
#define SDE_EVTLOG_DEFAULT_ENABLE 0
#endif
@@ -258,6 +265,13 @@
uint32_t xin_id);
/**
+ * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void sde_dbg_set_sde_top_offset(u32 blk_off);
+
+/**
* sde_evtlog_set_filter - update evtlog filtering
* @evtlog: pointer to evtlog
* @filter: pointer to optional function name filter, set to NULL to disable
@@ -341,6 +355,10 @@
{
}
+static inline void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+}
+
static inline void sde_evtlog_set_filter(
struct sde_dbg_evtlog *evtlog, char *filter)
{
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 62efe8e..1e4f6b1 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -25,10 +25,48 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/sde_io_util.h>
+#include <linux/sde_rsc.h>
#include "sde_power_handle.h"
#include "sde_trace.h"
+static void sde_power_event_trigger_locked(struct sde_power_handle *phandle,
+ u32 event_type)
+{
+ struct sde_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
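(Event delivery in sde_power_event_trigger_locked() is a masked walk: each registration carries a bitmask, and a trigger fires every callback whose mask contains the event bit. A compact standalone model; an array stands in for the kernel list and the names are local to the example.)

#include <stdio.h>

#define EVT_PRE_DISABLE		0x1
#define EVT_POST_DISABLE	0x2
#define EVT_PRE_ENABLE		0x4
#define EVT_POST_ENABLE		0x8

struct event {
	unsigned int mask;
	void (*cb)(unsigned int evt, void *usr);
	void *usr;
};

static void on_evt(unsigned int evt, void *usr)
{
	printf("%s got event 0x%x\n", (const char *)usr, evt);
}

int main(void)
{
	struct event list[] = {
		{ EVT_PRE_ENABLE | EVT_POST_ENABLE, on_evt, "clk-client" },
		{ EVT_POST_DISABLE, on_evt, "bw-client" },
	};
	unsigned int trigger = EVT_POST_ENABLE;

	/* only the first entry matches; the second stays silent */
	for (int i = 0; i < 2; i++)
		if (list[i].mask & trigger)
			list[i].cb(trigger, list[i].usr);
	return 0;
}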
+static int sde_power_rsc_update(struct sde_power_handle *phandle, bool enable)
+{
+ u32 rsc_state;
+ int ret = 0;
+
+ /* creates the rsc client on the first enable */
+ if (!phandle->rsc_client_init) {
+ phandle->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX,
+ "sde_power_handle", false);
+ if (IS_ERR_OR_NULL(phandle->rsc_client)) {
+ pr_debug("sde rsc client create failed :%ld\n",
+ PTR_ERR(phandle->rsc_client));
+ phandle->rsc_client = NULL;
+ }
+ phandle->rsc_client_init = true;
+ }
+
+ rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
+
+ if (phandle->rsc_client)
+ ret = sde_rsc_client_state_update(phandle->rsc_client,
+ rsc_state, NULL, -1);
+
+ return ret;
+}
+
struct sde_power_client *sde_power_client_create(
struct sde_power_handle *phandle, char *client_name)
{
@@ -48,6 +86,7 @@
strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
client->usecase_ndx = VOTE_INDEX_DISABLE;
client->id = id;
+ client->active = true;
pr_debug("client %s created:%pK id :%d\n", client_name,
client, id);
id++;
@@ -62,6 +101,9 @@
{
if (!client || !phandle) {
pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("sde power deinit already done\n");
+ kfree(client);
} else {
pr_debug("bus vote client %s destroyed:%pK id:%u\n",
client->name, client, client->id);
@@ -661,6 +703,11 @@
}
INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ phandle->rsc_client = NULL;
+ phandle->rsc_client_init = false;
+
mutex_init(&phandle->phandle_lock);
return rc;
@@ -672,10 +719,12 @@
clk_err:
msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
vreg_err:
- devm_kfree(&pdev->dev, mp->vreg_config);
+ if (mp->vreg_config)
+ devm_kfree(&pdev->dev, mp->vreg_config);
mp->num_vreg = 0;
parse_vreg_err:
- devm_kfree(&pdev->dev, mp->clk_config);
+ if (mp->clk_config)
+ devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
end:
return rc;
@@ -685,6 +734,8 @@
struct sde_power_handle *phandle)
{
struct dss_module_power *mp;
+ struct sde_power_client *curr_client, *next_client;
+ struct sde_power_event *curr_event, *next_event;
if (!phandle || !pdev) {
pr_err("invalid input param\n");
@@ -692,6 +743,26 @@
}
mp = &phandle->mp;
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("cliend:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+
sde_power_data_bus_unregister(&phandle->data_bus_handle);
sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
@@ -708,6 +779,9 @@
mp->num_vreg = 0;
mp->num_clk = 0;
+
+ if (phandle->rsc_client)
+ sde_rsc_client_destroy(phandle->rsc_client);
}
int sde_power_resource_enable(struct sde_power_handle *phandle,
@@ -757,6 +831,9 @@
goto end;
if (enable) {
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_PRE_ENABLE);
+
rc = sde_power_data_bus_update(&phandle->data_bus_handle,
enable);
if (rc) {
@@ -764,10 +841,21 @@
goto data_bus_hdl_err;
}
- rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
- if (rc) {
- pr_err("failed to enable vregs rc=%d\n", rc);
- goto vreg_err;
+ /*
+ * - When the target is RSCC enabled, regulator should
+ * be enabled by the s/w only for the first time during
+ * bootup. After that, RSCC hardware takes care of enabling/
+ * disabling it.
+ * - When the target is not RSCC enabled, regulator should
+ * be totally handled by the software.
+ */
+ if (!phandle->rsc_client) {
+ rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
+ enable);
+ if (rc) {
+ pr_err("failed to enable vregs rc=%d\n", rc);
+ goto vreg_err;
+ }
}
rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
@@ -777,20 +865,39 @@
goto reg_bus_hdl_err;
}
+ rc = sde_power_rsc_update(phandle, true);
+ if (rc) {
+ pr_err("failed to update rsc\n");
+ goto rsc_err;
+ }
+
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
if (rc) {
pr_err("clock enable failed rc:%d\n", rc);
goto clk_err;
}
+
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_POST_ENABLE);
+
} else {
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_PRE_DISABLE);
+
msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+ sde_power_rsc_update(phandle, false);
+
sde_power_reg_bus_update(phandle->reg_bus_hdl,
max_usecase_ndx);
- msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
-
+ if (!phandle->rsc_client)
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
+ enable);
sde_power_data_bus_update(&phandle->data_bus_handle, enable);
+
+ sde_power_event_trigger_locked(phandle,
+ SDE_POWER_EVENT_POST_DISABLE);
}
end:
@@ -798,9 +905,12 @@
return rc;
clk_err:
+ sde_power_rsc_update(phandle, false);
+rsc_err:
sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
reg_bus_hdl_err:
- msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+ if (!phandle->rsc_client)
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
vreg_err:
sde_power_data_bus_update(&phandle->data_bus_handle, 0);
data_bus_hdl_err:
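(The error labels above unwind in exact reverse of the enable order (data bus, regulators, register bus, RSC, clocks), which is why the new rsc_err label lands between clk_err and reg_bus_hdl_err. The goto-unwind idiom in miniature:)

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("enable %s\n", name);
	return fail ? -1 : 0;
}

int main(void)
{
	int rc;

	if ((rc = step("data bus", 0)))
		goto data_bus_err;
	if ((rc = step("reg bus", 0)))
		goto reg_bus_err;
	if ((rc = step("rsc", 0)))
		goto rsc_err;
	if ((rc = step("clk", 1)))	/* simulate a clock failure */
		goto clk_err;
	return 0;

clk_err:
	printf("undo rsc\n");
rsc_err:
	printf("undo reg bus\n");
reg_bus_err:
	printf("undo data bus\n");
data_bus_err:
	return rc;
}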
@@ -903,3 +1013,52 @@
return clk;
}
+
+struct sde_power_event *sde_power_handle_register_event(
+ struct sde_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct sde_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct sde_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
+
+void sde_power_handle_unregister_event(
+ struct sde_power_handle *phandle,
+ struct sde_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
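(Intended call pattern for the event API, sketched in kernel style; this is not standalone code, phandle and ctx are assumed to be owned by the caller, and the callback body is illustrative.)

static void power_event_cb(u32 event_type, void *usr)
{
	/* event_type carries one of the SDE_POWER_EVENT_* bits */
}

	struct sde_power_event *ev;

	ev = sde_power_handle_register_event(phandle,
			SDE_POWER_EVENT_PRE_DISABLE | SDE_POWER_EVENT_POST_ENABLE,
			power_event_cb, ctx, "example_client");
	if (IS_ERR(ev))
		return PTR_ERR(ev);

	/* ... hardware access that must react to power transitions ... */

	sde_power_handle_unregister_event(phandle, ev);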
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 4e262a3..d753f0a 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,13 +16,25 @@
#define MAX_CLIENT_NAME_LEN 128
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 64000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 2000000000
#define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 64000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 2000000000
#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
#include <linux/sde_io_util.h>
+/* event will be triggered before power handler disable */
+#define SDE_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define SDE_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define SDE_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define SDE_POWER_EVENT_POST_ENABLE 0x8
+
/**
* mdss_bus_vote_type: register bus vote type
* VOTE_INDEX_DISABLE: removes the client vote
@@ -59,6 +71,7 @@
* @list: list to attach power handle master list
* @ab: arbitrated bandwidth for each bus client
* @ib: instantaneous bandwidth for each bus client
+ * @active: indicates the state of sde power handle
*/
struct sde_power_client {
char name[MAX_CLIENT_NAME_LEN];
@@ -68,6 +81,7 @@
struct list_head list;
u64 ab[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
u64 ib[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
};
/**
@@ -90,6 +104,24 @@
u32 ao_bw_uc_idx;
};
+/**
+ * struct sde_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to SDE_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of sde power handle
+ */
+struct sde_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
/**
* struct sde_power_handle: power handle main struct
* @mp: module power for clock and regulator
@@ -99,6 +131,9 @@
* @usecase_ndx: current usecase index
* @reg_bus_hdl: current register bus handle
* @data_bus_handle: context structure for data bus control
+ * @event_list: current power handle event list
+ * @rsc_client: sde rsc client pointer
+ * @rsc_client_init: boolean to control rsc client create
*/
struct sde_power_handle {
struct dss_module_power mp;
@@ -108,6 +143,9 @@
u32 current_usecase_ndx;
u32 reg_bus_hdl;
struct sde_power_data_bus_handle data_bus_handle;
+ struct list_head event_list;
+ struct sde_rsc_client *rsc_client;
+ bool rsc_client_init;
};
/**
@@ -226,4 +264,28 @@
void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
struct sde_power_client *pclient, int enable);
+/**
+ * sde_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single register.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer SDE_POWER_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ *
+ * Return: event pointer if success, or error code otherwise
+ */
+struct sde_power_event *sde_power_handle_register_event(
+ struct sde_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
+
+/**
+ * sde_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void sde_power_handle_unregister_event(struct sde_power_handle *phandle,
+ struct sde_power_event *event);
+
#endif /* _SDE_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index a9a7d4f..1f770c3 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -28,16 +28,19 @@
#include <drm/drmP.h>
#include <drm/drm_irq.h>
#include "sde_rsc_priv.h"
+#include "sde_dbg.h"
-/* this time is ~0.02ms */
-#define RSC_BACKOFF_TIME_NS 20000
+/* worst case time to execute the one tcs vote(sleep/wake) - ~1ms */
+#define TCS_CASE_EXECUTION_TIME 1064000
-/* next two values should be same based on doc */
+/* this time is ~1ms - only wake tcs in any mode */
+#define RSC_BACKOFF_TIME_NS (TCS_CASE_EXECUTION_TIME + 100)
-/* this time is ~0.2ms */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS 200000
-/* this time is ~0.2ms */
-#define RSC_TIME_SLOT_0_NS 200000
+/* this time is ~0.5ms - only wake TCS in mode-0 */
+#define RSC_MODE_THRESHOLD_TIME_IN_NS ((TCS_CASE_EXECUTION_TIME >> 1) + 100)
+
+/* this time is ~2ms - sleep+ wake TCS in mode-1 */
+#define RSC_TIME_SLOT_0_NS ((TCS_CASE_EXECUTION_TIME * 2) + 100)
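(All three timers now derive from the single worst-case TCS vote time, so the intended ratios, one vote for the backoff, half a vote for the mode threshold, and two votes for time slot 0, can be checked standalone:)

#include <stdio.h>

#define TCS_CASE_EXECUTION_TIME 1064000

int main(void)
{
	unsigned int backoff = TCS_CASE_EXECUTION_TIME + 100;
	unsigned int mode_thresh = (TCS_CASE_EXECUTION_TIME >> 1) + 100;
	unsigned int slot0 = (TCS_CASE_EXECUTION_TIME * 2) + 100;

	printf("backoff        = %u ns (~%.2f ms)\n", backoff, backoff / 1e6);
	printf("mode threshold = %u ns (~%.2f ms)\n", mode_thresh,
	       mode_thresh / 1e6);
	printf("time slot 0    = %u ns (~%.2f ms)\n", slot0, slot0 / 1e6);
	return 0;
}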
#define DEFAULT_PANEL_FPS 60
#define DEFAULT_PANEL_JITTER 5
@@ -74,6 +77,7 @@
{
struct sde_rsc_client *client;
struct sde_rsc_priv *rsc;
+ static int id;
if (!client_name) {
pr_err("client name is null- not supported\n");
@@ -83,7 +87,7 @@
return ERR_PTR(-EINVAL);
} else if (!rsc_prv_list[rsc_index]) {
pr_err("rsc not probed yet or not available\n");
- return ERR_PTR(-EINVAL);
+ return NULL;
}
rsc = rsc_prv_list[rsc_index];
@@ -95,12 +99,14 @@
strlcpy(client->name, client_name, MAX_RSC_CLIENT_NAME_LEN);
client->current_state = SDE_RSC_IDLE_STATE;
client->rsc_index = rsc_index;
+ client->id = id;
if (is_primary_client)
rsc->primary_client = client;
pr_debug("client %s rsc index:%d primary:%d\n", client_name,
rsc_index, is_primary_client);
list_add(&client->list, &rsc->client_list);
+ id++;
mutex_unlock(&rsc->client_lock);
return client;
@@ -381,6 +387,8 @@
} else if (rsc->hw_ops.state_update) {
rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
+ if (!rc)
+ rpmh_mode_solver_set(rsc->disp_rsc, false);
}
return rc;
@@ -388,7 +396,7 @@
static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
struct sde_rsc_cmd_config *config,
- struct sde_rsc_client *caller_client, bool wait_req)
+ struct sde_rsc_client *caller_client)
{
struct sde_rsc_client *client;
int rc = STATE_UPDATE_NOT_ALLOWED;
@@ -413,17 +421,14 @@
if (client->current_state == SDE_RSC_VID_STATE)
goto end;
- /* no need to enable solver again */
- if (rsc->current_state == SDE_RSC_CLK_STATE) {
- rc = 0;
- goto end;
+ if (rsc->hw_ops.state_update) {
+ rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+ if (!rc)
+ rpmh_mode_solver_set(rsc->disp_rsc, true);
}
- if (rsc->hw_ops.state_update)
- rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
-
- /* wait for vsync */
- if (!rc && wait_req)
+ /* wait for vsync for vid to cmd state switch */
+ if (!rc && (rsc->current_state == SDE_RSC_VID_STATE))
drm_wait_one_vblank(rsc->master_drm,
rsc->primary_client->crtc_id);
end:
@@ -440,21 +445,24 @@
(client->current_state == SDE_RSC_CMD_STATE))
goto end;
- /* no need to enable the solver again */
- if (rsc->current_state == SDE_RSC_CMD_STATE) {
- rc = 0;
- goto end;
+ if (rsc->hw_ops.state_update) {
+ rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
+ if (!rc)
+ rpmh_mode_solver_set(rsc->disp_rsc, false);
}
- if (rsc->hw_ops.state_update)
- rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+ /* wait for vsync for cmd to clk state switch */
+ if (!rc && rsc->primary_client &&
+ (rsc->current_state == SDE_RSC_CMD_STATE))
+ drm_wait_one_vblank(rsc->master_drm,
+ rsc->primary_client->crtc_id);
end:
return rc;
}
static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
struct sde_rsc_cmd_config *config,
- struct sde_rsc_client *caller_client, bool wait_req)
+ struct sde_rsc_client *caller_client)
{
int rc = 0;
@@ -463,11 +471,15 @@
sde_rsc_timer_calculate(rsc, config);
/* video state switch should be done immediately */
- if (rsc->hw_ops.state_update)
+ if (rsc->hw_ops.state_update) {
rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
+ if (!rc)
+ rpmh_mode_solver_set(rsc->disp_rsc, false);
+ }
- /* wait for vsync */
- if (!rc && rsc->primary_client && wait_req)
+ /* wait for vsync for cmd to vid state switch */
+ if (!rc && rsc->primary_client &&
+ (rsc->current_state == SDE_RSC_CMD_STATE))
drm_wait_one_vblank(rsc->master_drm,
rsc->primary_client->crtc_id);
return rc;
@@ -493,7 +505,6 @@
{
int rc = 0;
struct sde_rsc_priv *rsc;
- bool wait_requested = false;
if (!caller_client) {
pr_err("invalid client for rsc state update\n");
@@ -508,6 +519,8 @@
return -EINVAL;
mutex_lock(&rsc->client_lock);
+ SDE_EVT32(caller_client->id, caller_client->current_state,
+ state, rsc->current_state, SDE_EVTLOG_FUNC_ENTRY);
caller_client->crtc_id = crtc_id;
caller_client->current_state = state;
@@ -524,11 +537,7 @@
__builtin_return_address(0), rsc->current_state,
caller_client->name, state);
- /* only switch state needs vsync wait */
- wait_requested = (rsc->current_state == SDE_RSC_VID_STATE) ||
- (rsc->current_state == SDE_RSC_CMD_STATE);
-
- if (rsc->power_collapse)
+ if (rsc->current_state == SDE_RSC_IDLE_STATE)
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
switch (state) {
@@ -538,7 +547,7 @@
/* video state client might be exiting; try cmd state switch */
if (rc == TRY_CMD_MODE_SWITCH) {
rc = sde_rsc_switch_to_cmd(rsc, NULL,
- rsc->primary_client, wait_requested);
+ rsc->primary_client);
if (!rc)
state = SDE_RSC_CMD_STATE;
@@ -551,13 +560,11 @@
break;
case SDE_RSC_CMD_STATE:
- rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
- wait_requested);
+ rc = sde_rsc_switch_to_cmd(rsc, config, caller_client);
break;
case SDE_RSC_VID_STATE:
- rc = sde_rsc_switch_to_vid(rsc, config, caller_client,
- wait_requested);
+ rc = sde_rsc_switch_to_vid(rsc, config, caller_client);
break;
case SDE_RSC_CLK_STATE:
@@ -571,17 +578,23 @@
if (rc == STATE_UPDATE_NOT_ALLOWED) {
rc = 0;
+ SDE_EVT32(caller_client->id, caller_client->current_state,
+ state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE1);
goto clk_disable;
} else if (rc) {
- pr_err("state update failed rc:%d\n", rc);
+ pr_debug("state:%d update failed rc:%d\n", state, rc);
+ SDE_EVT32(caller_client->id, caller_client->current_state,
+ state, rsc->current_state, rc, SDE_EVTLOG_FUNC_CASE2);
goto clk_disable;
}
pr_debug("state switch successfully complete: %d\n", state);
rsc->current_state = state;
+ SDE_EVT32(caller_client->id, caller_client->current_state,
+ state, rsc->current_state, SDE_EVTLOG_FUNC_EXIT);
clk_disable:
- if (rsc->power_collapse)
+ if (rsc->current_state == SDE_RSC_IDLE_STATE)
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
end:
mutex_unlock(&rsc->client_lock);
@@ -627,14 +640,9 @@
caller_client->name, ab_vote, ib_vote);
mutex_lock(&rsc->client_lock);
- if ((caller_client->current_state == SDE_RSC_IDLE_STATE) ||
- (rsc->current_state == SDE_RSC_IDLE_STATE)) {
-
- pr_err("invalid state: client state:%d rsc state:%d\n",
- caller_client->current_state, rsc->current_state);
- rc = -EINVAL;
- goto end;
- }
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto clk_enable_fail;
if (rsc->hw_ops.is_amc_mode)
amc_mode = rsc->hw_ops.is_amc_mode(rsc);
@@ -656,14 +664,19 @@
}
}
+ rpmh_invalidate(rsc->disp_rsc);
sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient,
SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, ab_vote, ib_vote);
+ rpmh_flush(rsc->disp_rsc);
if (rsc->hw_ops.tcs_use_ok)
rsc->hw_ops.tcs_use_ok(rsc);
end:
+ sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+clk_enable_fail:
mutex_unlock(&rsc->client_lock);
+
return rc;
}
EXPORT_SYMBOL(sde_rsc_client_vote);
@@ -680,6 +693,10 @@
rsc = s->private;
mutex_lock(&rsc->client_lock);
+ ret = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (ret)
+ goto end;
+
seq_printf(s, "rsc current state:%d\n", rsc->current_state);
seq_printf(s, "wraper backoff time(ns):%d\n",
rsc->timer_config.static_wakeup_time_ns);
@@ -703,17 +720,15 @@
seq_printf(s, "\t client:%s state:%d\n",
client->name, client->current_state);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-
if (rsc->hw_ops.debug_show) {
ret = rsc->hw_ops.debug_show(s, rsc);
if (ret)
pr_err("sde rsc: hw debug failed ret:%d\n", ret);
}
-
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
return 0;
}
@@ -734,20 +749,23 @@
{
struct sde_rsc_priv *rsc = file->private_data;
char buffer[MAX_BUFFER_SIZE];
- int blen = 0;
+ int blen = 0, rc;
if (*ppos || !rsc || !rsc->hw_ops.mode_ctrl)
return 0;
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto end;
blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
MAX_BUFFER_SIZE, 0);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
if (blen < 0)
return 0;
@@ -764,6 +782,7 @@
struct sde_rsc_priv *rsc = file->private_data;
char *input, *mode;
u32 mode0_state = 0, mode1_state = 0, mode2_state = 0;
+ int rc;
if (!rsc || !rsc->hw_ops.mode_ctrl)
return 0;
@@ -779,7 +798,9 @@
input[count - 1] = '\0';
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto clk_enable_fail;
mode = strnstr(input, "mode0=", strlen("mode0="));
if (mode) {
@@ -806,9 +827,10 @@
end:
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+clk_enable_fail:
mutex_unlock(&rsc->client_lock);
- pr_err("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
+ pr_info("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
mode2_state);
kfree(input);
return count;
@@ -826,20 +848,23 @@
{
struct sde_rsc_priv *rsc = file->private_data;
char buffer[MAX_BUFFER_SIZE];
- int blen = 0;
+ int blen = 0, rc;
if (*ppos || !rsc || !rsc->hw_ops.hw_vsync)
return 0;
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto end;
blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
MAX_BUFFER_SIZE, 0);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
if (blen < 0)
return 0;
@@ -856,6 +881,7 @@
struct sde_rsc_priv *rsc = file->private_data;
char *input, *vsync_mode;
u32 vsync_state = 0;
+ int rc;
if (!rsc || !rsc->hw_ops.hw_vsync)
return 0;
@@ -877,7 +903,9 @@
}
mutex_lock(&rsc->client_lock);
- sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
+ if (rc)
+ goto end;
if (vsync_state)
rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -886,8 +914,9 @@
rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
- mutex_unlock(&rsc->client_lock);
+end:
+ mutex_unlock(&rsc->client_lock);
kfree(input);
return count;
}
@@ -942,6 +971,8 @@
msm_dss_iounmap(&rsc->wrapper_io);
if (rsc->drv_io.base)
msm_dss_iounmap(&rsc->drv_io);
+ if (rsc->disp_rsc)
+ rpmh_release(rsc->disp_rsc);
if (rsc->pclient)
sde_power_client_destroy(&rsc->phandle, rsc->pclient);
@@ -1050,6 +1081,14 @@
goto sde_rsc_fail;
}
+ rsc->disp_rsc = rpmh_get_byname(pdev, "disp_rsc");
+ if (IS_ERR_OR_NULL(rsc->disp_rsc)) {
+ ret = PTR_ERR(rsc->disp_rsc);
+ rsc->disp_rsc = NULL;
+ pr_err("sde rsc:get display rsc failed ret:%d\n", ret);
+ goto sde_rsc_fail;
+ }
+
ret = msm_dss_ioremap_byname(pdev, &rsc->wrapper_io, "wrapper");
if (ret) {
pr_err("sde rsc: wrapper io data mapping failed ret=%d\n", ret);
@@ -1086,6 +1125,7 @@
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
INIT_LIST_HEAD(&rsc->client_list);
+ INIT_LIST_HEAD(&rsc->event_list);
mutex_init(&rsc->client_lock);
pr_info("sde rsc index:%d probed successfully\n",
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index fb963ee..c87dac4 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -36,6 +36,7 @@
#define SDE_RSCC_AMC_TCS_MODE_IRQ_STATUS_DRV0 0x1c00
#define SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0 0xc04
+#define SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0 0xc08
#define SDE_RSCC_MAX_IDLE_DURATION_DRV0 0xc0c
#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0 0x1000
#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0 0x1004
@@ -94,6 +95,7 @@
#define SDE_RSCC_F1_QTMR_V1_CNTP_CTL 0x302C
#define MAX_CHECK_LOOPS 500
+#define POWER_CTRL_BIT_12 12
static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
{
@@ -190,27 +192,27 @@
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
0xa138ebaa, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
- 0xe0a581e1, rsc->debug_mode);
+ 0xaca581e1, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
- 0x82e2a2ed, rsc->debug_mode);
+ 0xe2a2ede0, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
- 0x88ea8a39, rsc->debug_mode);
+ 0xea8a3982, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
- 0xa6e9a920, rsc->debug_mode);
+ 0xa920888c, rsc->debug_mode);
/* tcs sleep sequence */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
- 0xa92089e6, rsc->debug_mode);
+ 0x89e6a6e9, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
- 0x89e7a7e9, rsc->debug_mode);
+ 0xa7e9a920, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
- 0x00000020, rsc->debug_mode);
+ 0x002079e7, rsc->debug_mode);
/* branch address */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
- 0x29, rsc->debug_mode);
+ 0x2b, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
- 0x2f, rsc->debug_mode);
+ 0x31, rsc->debug_mode);
return 0;
}
@@ -224,7 +226,9 @@
pr_debug("rsc solver init\n");
dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0,
- 0x7FFFFFFF, rsc->debug_mode);
+ 0xFFFFFFFF, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_HI_DRV0,
+ 0xFFFFFFFF, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_MAX_IDLE_DURATION_DRV0,
0xEFFFFFFF, rsc->debug_mode);
@@ -263,7 +267,7 @@
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0,
mode_0_start_addr, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0,
- 0x80000010, rsc->debug_mode);
+ 0x80000000, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
@@ -272,7 +276,7 @@
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1,
mode_1_start_addr, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1,
- 0x80000010, rsc->debug_mode);
+ 0x80000000, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
@@ -281,9 +285,9 @@
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2,
mode_2_start_addr, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2,
- 0x80000010, rsc->debug_mode);
+ 0x80000000, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2,
- rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+ 0x0, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
@@ -294,6 +298,7 @@
{
int rc;
int count, wrapper_status;
+ unsigned long reg;
if (rsc->power_collapse_block)
return -EINVAL;
@@ -308,12 +313,35 @@
rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+ /* program the qtimers to their max value during clk & video mode states */
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ }
+
wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
rsc->debug_mode);
wrapper_status |= BIT(3);
wrapper_status |= BIT(0);
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
wrapper_status, rsc->debug_mode);
+
+ /*
+ * force busy and idle during clk & video mode states because the
+ * RSC may try to enter mode-2 without turning on the vsync.
+ */
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(0) | BIT(1), rsc->debug_mode);
+ wmb(); /* force busy guarantee */
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(0) | BIT(9), rsc->debug_mode);
+ }
+
/* make sure that mode-2 is triggered before wait*/
wmb();
@@ -329,6 +357,25 @@
if (rc) {
pr_err("vdd fs is still enabled\n");
goto end;
+ } else {
+ rc = -EINVAL;
+ /* this wait is required to turn off the rscc clocks */
+ for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_PWR_CTRL, rsc->debug_mode);
+ if (test_bit(POWER_CTRL_BIT_12, &reg)) {
+ rc = 0;
+ break;
+ }
+ usleep_range(1, 2);
+ }
+ }
+
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(0) | BIT(8), rsc->debug_mode);
+ wmb(); /* force busy on vsync */
}
rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
@@ -336,20 +383,30 @@
return 0;
end:
- regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
-
rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
return rc;
}
-int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc)
+int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
{
int rc = -EBUSY;
int count, reg;
rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
+ /*
+ * force busy and idle during clk & video mode states because the
+ * RSC may try to enter mode-2 without turning on the vsync.
+ */
+ if ((state == SDE_RSC_VID_STATE) || (state == SDE_RSC_CLK_STATE)) {
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
+ reg &= ~(BIT(8) | BIT(0));
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ reg, rsc->debug_mode);
+ }
+
// needs review with HPG sequence
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
0x0, rsc->debug_mode);
@@ -377,7 +434,7 @@
rc = 0;
break;
}
- usleep_range(1, 2);
+ usleep_range(10, 100);
}
reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
@@ -385,14 +442,9 @@
reg &= ~BIT(13);
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
reg, rsc->debug_mode);
-
if (rc)
pr_err("vdd reg is not enabled yet\n");
- rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
- if (rc)
- pr_err("vdd reg normal mode set failed rc:%d\n", rc);
-
rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
return rc;
@@ -405,7 +457,7 @@
int reg;
if (rsc->power_collapse) {
- rc = sde_rsc_mode2_exit(rsc);
+ rc = sde_rsc_mode2_exit(rsc, state);
if (rc)
pr_err("power collapse: mode2 exit failed\n");
else
@@ -443,14 +495,24 @@
reg &= ~(BIT(1) | BIT(0));
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
reg, rsc->debug_mode);
- dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
- 0x1, rsc->debug_mode);
/* make sure that solver mode is override */
wmb();
rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_DISABLED);
break;
+ case SDE_RSC_CLK_STATE:
+ pr_debug("clk state handling\n");
+
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
+ reg &= ~(BIT(8) | BIT(0));
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ reg, rsc->debug_mode);
+ /* make sure that solver mode is disabled */
+ wmb();
+ break;
+
case SDE_RSC_IDLE_STATE:
rc = sde_rsc_mode2_entry(rsc);
if (rc)
@@ -694,9 +756,6 @@
rsc->hw_ops.tcs_use_ok = rsc_hw_tcs_use_ok;
rsc->hw_ops.is_amc_mode = rsc_hw_is_amc_mode;
- rsc->hw_ops.mode2_entry = sde_rsc_mode2_entry;
- rsc->hw_ops.mode2_exit = sde_rsc_mode2_exit;
-
rsc->hw_ops.hw_vsync = rsc_hw_vsync;
rsc->hw_ops.state_update = sde_rsc_state_update;
rsc->hw_ops.debug_show = sde_rsc_debug_show;
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index 2563c85..b83a866 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -61,10 +61,6 @@
* TCS command.
* @hw_vsync: Enables the vsync on RSC block.
* @tcs_use_ok: set TCS set to high to allow RSC to use it.
- * @mode2_entry: Request to entry mode2 when all clients are
- * requesting power collapse.
- * @mode2_exit: Request to exit mode2 when one of the client
- * is requesting against the power collapse
* @is_amc_mode: Check current amc mode status
* @state_update: Enable/override the solver based on rsc state
* status (command/video)
@@ -78,8 +74,6 @@
int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
char *buffer, int buffer_size, u32 mode);
int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
- int (*mode2_entry)(struct sde_rsc_priv *rsc);
- int (*mode2_exit)(struct sde_rsc_priv *rsc);
bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
@@ -117,6 +111,7 @@
* @pclient: module power client of phandle
* @fs: "MDSS GDSC" handle
*
+ * @disp_rsc: display rsc handle
* @drv_io: sde drv io data mapping
* @wrapper_io: wrapper io data mapping
*
@@ -147,6 +142,7 @@
struct sde_power_client *pclient;
struct regulator *fs;
+ struct rpmh_client *disp_rsc;
struct dss_io_data drv_io;
struct dss_io_data wrapper_io;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e0d7f84..d741ff8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
+ .mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fbb8c7d..0d65e7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@
case 0x94:
case 0x96:
case 0x98:
- case 0xaa:
- case 0xac:
return true;
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac91..8a88952 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@
}
if (type == 0x00000010) {
- if (!nv31_mpeg_mthd(mpeg, mthd, data))
+ if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37..c3cf02e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@
}
if (type == 0x00000010) {
- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+ if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e..4ce04e0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
- if (rbo->placements[0].fpfn < fpfn)
- rbo->placements[0].fpfn = fpfn;
+ if (rbo->placements[i].fpfn < fpfn)
+ rbo->placements[i].fpfn = fpfn;
} else {
rbo->placement.busy_placement =
&rbo->placements[i];
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 32c0584..6e6c59a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -408,6 +408,7 @@
static const struct of_device_id sun4i_backend_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-backend" },
+ { .compatible = "allwinner,sun6i-a31-display-backend" },
{ .compatible = "allwinner,sun8i-a33-display-backend" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 70e9fd5..c3b2186 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -201,12 +201,15 @@
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
}
@@ -322,6 +325,8 @@
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-engine" },
+ { .compatible = "allwinner,sun6i-a31-display-engine" },
+ { .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index cadacb5..c6afb24 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -62,7 +63,7 @@
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE, 0);
clk_disable_unprepare(tcon->sclk1);
@@ -80,7 +81,7 @@
return;
}
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
SUN4I_TCON1_CTL_TCON_ENABLE,
SUN4I_TCON1_CTL_TCON_ENABLE);
@@ -202,7 +203,7 @@
u8 clk_delay;
u32 val;
- WARN_ON(!tcon->has_channel_1);
+ WARN_ON(!tcon->quirks->has_channel_1);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -266,7 +267,7 @@
/*
* FIXME: Undocumented bits
*/
- if (tcon->has_mux)
+ if (tcon->quirks->has_unknown_mux)
regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
}
EXPORT_SYMBOL(sun4i_tcon1_mode_set);
@@ -327,7 +328,7 @@
return PTR_ERR(tcon->sclk0);
}
- if (tcon->has_channel_1) {
+ if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
if (IS_ERR(tcon->sclk1)) {
dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
@@ -487,14 +488,7 @@
drv->tcon = tcon;
tcon->drm = drm;
tcon->dev = dev;
-
- if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
- tcon->has_mux = true;
- tcon->has_channel_1 = true;
- } else {
- tcon->has_mux = false;
- tcon->has_channel_1 = false;
- }
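+ /* note: per-SoC quirks come from the .data of the matched OF table entry */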
+ tcon->quirks = of_device_get_match_data(dev);
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
@@ -588,9 +582,28 @@
return 0;
}
+static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
+ .has_unknown_mux = true,
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
+ .has_channel_1 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
+ /* nothing is supported */
+};
+
static const struct of_device_id sun4i_tcon_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-tcon" },
- { .compatible = "allwinner,sun8i-a33-tcon" },
+ { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
+ { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
+ { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 12bd489..166064b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -142,6 +142,11 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon_quirks {
+ bool has_unknown_mux; /* sun5i has undocumented mux */
+ bool has_channel_1; /* a33 does not have channel 1 */
+};
+
struct sun4i_tcon {
struct device *dev;
struct drm_device *drm;
@@ -160,12 +165,10 @@
/* Reset control */
struct reset_control *lcd_rst;
- /* Platform adjustments */
- bool has_mux;
-
struct drm_panel *panel;
- bool has_channel_1;
+ /* Platform adjustments */
+ const struct sun4i_tcon_quirks *quirks;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 4f5fa8d..144367c 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -318,7 +318,8 @@
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@
}
rcu_read_unlock();
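+ /*
+ * note (comment added for clarity, not in the original patch):
+ * with require_existed set, only an already-existing reference may
+ * be returned; creating a fresh first reference is refused, so a
+ * plain lookup cannot mint new references to the object.
+ */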
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -635,7 +639,7 @@
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7aadce1..c7e6c98 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -842,6 +842,17 @@
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
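+/*
+ * note (comment added for clarity, not in the original patch): vc4
+ * wraps drm_crtc_state in a larger vc4_crtc_state, so the generic
+ * drm_atomic_helper_crtc_reset() would allocate too small a state;
+ * this custom reset allocates the full vc4_crtc_state instead.
+ */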
+static void
+vc4_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = vc4_crtc_destroy,
@@ -849,7 +860,7 @@
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.gamma_set = vc4_crtc_gamma_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e8..967450d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -726,13 +761,9 @@
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -771,13 +802,9 @@
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1037,24 +1065,18 @@
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
+ vmw_fence_obj_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1097,8 +1119,7 @@
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03..5ec24fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -186,7 +184,7 @@
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9..bc354f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -589,7 +589,7 @@
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -773,7 +773,7 @@
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b445ce9..05fa092 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
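+ /*
+ * note (added for clarity): render clients are no longer rejected
+ * outright; they may reference a surface only if they already hold
+ * a reference to it (require_exist).
+ */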
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
@@ -929,17 +931,14 @@
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 2709aca..14a19a4 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -111,14 +111,6 @@
#define A6XX_VSC_ADDR_MODE_CNTL 0xC01
/* RBBM registers */
-#define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x10
-#define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x1f
-#define A6XX_RBBM_INT_CLEAR_CMD 0x37
-#define A6XX_RBBM_INT_0_MASK 0x38
-#define A6XX_RBBM_SW_RESET_CMD 0x43
-#define A6XX_RBBM_BLOCK_SW_RESET_CMD 0x45
-#define A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x46
-#define A6XX_RBBM_CLOCK_CNTL 0xAE
#define A6XX_RBBM_INT_0_STATUS 0x201
#define A6XX_RBBM_STATUS 0x210
#define A6XX_RBBM_STATUS3 0x213
@@ -390,6 +382,8 @@
#define A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x509
#define A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x50A
+#define A6XX_RBBM_ISDB_CNT 0x533
+
#define A6XX_RBBM_SECVID_TRUST_CNTL 0xF400
#define A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0xF800
#define A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0xF801
@@ -397,6 +391,122 @@
#define A6XX_RBBM_SECVID_TSB_CNTL 0xF803
#define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0xF810
+#define A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00010
+#define A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0001f
+#define A6XX_RBBM_INT_CLEAR_CMD 0x00037
+#define A6XX_RBBM_INT_0_MASK 0x00038
+#define A6XX_RBBM_SP_HYST_CNT 0x00042
+#define A6XX_RBBM_SW_RESET_CMD 0x00043
+#define A6XX_RBBM_RAC_THRESHOLD_CNT 0x00044
+#define A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00045
+#define A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00046
+#define A6XX_RBBM_CLOCK_CNTL 0x000ae
+#define A6XX_RBBM_CLOCK_CNTL_SP0 0x000b0
+#define A6XX_RBBM_CLOCK_CNTL_SP1 0x000b1
+#define A6XX_RBBM_CLOCK_CNTL_SP2 0x000b2
+#define A6XX_RBBM_CLOCK_CNTL_SP3 0x000b3
+#define A6XX_RBBM_CLOCK_CNTL2_SP0 0x000b4
+#define A6XX_RBBM_CLOCK_CNTL2_SP1 0x000b5
+#define A6XX_RBBM_CLOCK_CNTL2_SP2 0x000b6
+#define A6XX_RBBM_CLOCK_CNTL2_SP3 0x000b7
+#define A6XX_RBBM_CLOCK_DELAY_SP0 0x000b8
+#define A6XX_RBBM_CLOCK_DELAY_SP1 0x000b9
+#define A6XX_RBBM_CLOCK_DELAY_SP2 0x000ba
+#define A6XX_RBBM_CLOCK_DELAY_SP3 0x000bb
+#define A6XX_RBBM_CLOCK_HYST_SP0 0x000bc
+#define A6XX_RBBM_CLOCK_HYST_SP1 0x000bd
+#define A6XX_RBBM_CLOCK_HYST_SP2 0x000be
+#define A6XX_RBBM_CLOCK_HYST_SP3 0x000bf
+#define A6XX_RBBM_CLOCK_CNTL_TP0 0x000c0
+#define A6XX_RBBM_CLOCK_CNTL_TP1 0x000c1
+#define A6XX_RBBM_CLOCK_CNTL_TP2 0x000c2
+#define A6XX_RBBM_CLOCK_CNTL_TP3 0x000c3
+#define A6XX_RBBM_CLOCK_CNTL2_TP0 0x000c4
+#define A6XX_RBBM_CLOCK_CNTL2_TP1 0x000c5
+#define A6XX_RBBM_CLOCK_CNTL2_TP2 0x000c6
+#define A6XX_RBBM_CLOCK_CNTL2_TP3 0x000c7
+#define A6XX_RBBM_CLOCK_CNTL3_TP0 0x000c8
+#define A6XX_RBBM_CLOCK_CNTL3_TP1 0x000c9
+#define A6XX_RBBM_CLOCK_CNTL3_TP2 0x000ca
+#define A6XX_RBBM_CLOCK_CNTL3_TP3 0x000cb
+#define A6XX_RBBM_CLOCK_CNTL4_TP0 0x000cc
+#define A6XX_RBBM_CLOCK_CNTL4_TP1 0x000cd
+#define A6XX_RBBM_CLOCK_CNTL4_TP2 0x000ce
+#define A6XX_RBBM_CLOCK_CNTL4_TP3 0x000cf
+#define A6XX_RBBM_CLOCK_DELAY_TP0 0x000d0
+#define A6XX_RBBM_CLOCK_DELAY_TP1 0x000d1
+#define A6XX_RBBM_CLOCK_DELAY_TP2 0x000d2
+#define A6XX_RBBM_CLOCK_DELAY_TP3 0x000d3
+#define A6XX_RBBM_CLOCK_DELAY2_TP0 0x000d4
+#define A6XX_RBBM_CLOCK_DELAY2_TP1 0x000d5
+#define A6XX_RBBM_CLOCK_DELAY2_TP2 0x000d6
+#define A6XX_RBBM_CLOCK_DELAY2_TP3 0x000d7
+#define A6XX_RBBM_CLOCK_DELAY3_TP0 0x000d8
+#define A6XX_RBBM_CLOCK_DELAY3_TP1 0x000d9
+#define A6XX_RBBM_CLOCK_DELAY3_TP2 0x000da
+#define A6XX_RBBM_CLOCK_DELAY3_TP3 0x000db
+#define A6XX_RBBM_CLOCK_DELAY4_TP0 0x000dc
+#define A6XX_RBBM_CLOCK_DELAY4_TP1 0x000dd
+#define A6XX_RBBM_CLOCK_DELAY4_TP2 0x000de
+#define A6XX_RBBM_CLOCK_DELAY4_TP3 0x000df
+#define A6XX_RBBM_CLOCK_HYST_TP0 0x000e0
+#define A6XX_RBBM_CLOCK_HYST_TP1 0x000e1
+#define A6XX_RBBM_CLOCK_HYST_TP2 0x000e2
+#define A6XX_RBBM_CLOCK_HYST_TP3 0x000e3
+#define A6XX_RBBM_CLOCK_HYST2_TP0 0x000e4
+#define A6XX_RBBM_CLOCK_HYST2_TP1 0x000e5
+#define A6XX_RBBM_CLOCK_HYST2_TP2 0x000e6
+#define A6XX_RBBM_CLOCK_HYST2_TP3 0x000e7
+#define A6XX_RBBM_CLOCK_HYST3_TP0 0x000e8
+#define A6XX_RBBM_CLOCK_HYST3_TP1 0x000e9
+#define A6XX_RBBM_CLOCK_HYST3_TP2 0x000ea
+#define A6XX_RBBM_CLOCK_HYST3_TP3 0x000eb
+#define A6XX_RBBM_CLOCK_HYST4_TP0 0x000ec
+#define A6XX_RBBM_CLOCK_HYST4_TP1 0x000ed
+#define A6XX_RBBM_CLOCK_HYST4_TP2 0x000ee
+#define A6XX_RBBM_CLOCK_HYST4_TP3 0x000ef
+#define A6XX_RBBM_CLOCK_CNTL_RB0 0x000f0
+#define A6XX_RBBM_CLOCK_CNTL_RB1 0x000f1
+#define A6XX_RBBM_CLOCK_CNTL_RB2 0x000f2
+#define A6XX_RBBM_CLOCK_CNTL_RB3 0x000f3
+#define A6XX_RBBM_CLOCK_CNTL2_RB0 0x000f4
+#define A6XX_RBBM_CLOCK_CNTL2_RB1 0x000f5
+#define A6XX_RBBM_CLOCK_CNTL2_RB2 0x000f6
+#define A6XX_RBBM_CLOCK_CNTL2_RB3 0x000f7
+#define A6XX_RBBM_CLOCK_CNTL_CCU0 0x000f8
+#define A6XX_RBBM_CLOCK_CNTL_CCU1 0x000f9
+#define A6XX_RBBM_CLOCK_CNTL_CCU2 0x000fa
+#define A6XX_RBBM_CLOCK_CNTL_CCU3 0x000fb
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00100
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00101
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00102
+#define A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00103
+#define A6XX_RBBM_CLOCK_CNTL_RAC 0x00104
+#define A6XX_RBBM_CLOCK_CNTL2_RAC 0x00105
+#define A6XX_RBBM_CLOCK_DELAY_RAC 0x00106
+#define A6XX_RBBM_CLOCK_HYST_RAC 0x00107
+#define A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00108
+#define A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00109
+#define A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0010a
+#define A6XX_RBBM_CLOCK_CNTL_UCHE 0x0010b
+#define A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0010c
+#define A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0010d
+#define A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0010e
+#define A6XX_RBBM_CLOCK_DELAY_UCHE 0x0010f
+#define A6XX_RBBM_CLOCK_HYST_UCHE 0x00110
+#define A6XX_RBBM_CLOCK_MODE_VFD 0x00111
+#define A6XX_RBBM_CLOCK_DELAY_VFD 0x00112
+#define A6XX_RBBM_CLOCK_HYST_VFD 0x00113
+#define A6XX_RBBM_CLOCK_MODE_GPC 0x00114
+#define A6XX_RBBM_CLOCK_DELAY_GPC 0x00115
+#define A6XX_RBBM_CLOCK_HYST_GPC 0x00116
+#define A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00117
+#define A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00118
+#define A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00119
+#define A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0011a
+#define A6XX_RBBM_CLOCK_MODE_HLSQ 0x0011b
+#define A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0011c
+
/* DBGC_CFG registers */
#define A6XX_DBGC_CFG_DBGBUS_SEL_A 0x600
#define A6XX_DBGC_CFG_DBGBUS_SEL_B 0x601
@@ -537,6 +647,8 @@
#define A6XX_UCHE_GMEM_RANGE_MAX_HI 0xE0E
#define A6XX_UCHE_CACHE_WAYS 0xE17
#define A6XX_UCHE_FILTER_CNTL 0xE18
+#define A6XX_UCHE_CLIENT_PF 0xE19
+#define A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK 0x7
#define A6XX_UCHE_PERFCTR_UCHE_SEL_0 0xE1C
#define A6XX_UCHE_PERFCTR_UCHE_SEL_1 0xE1D
#define A6XX_UCHE_PERFCTR_UCHE_SEL_2 0xE1E
@@ -664,6 +776,7 @@
#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT 0x8
/* GMU control registers */
+#define A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x1A880
#define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x1A881
#define A6XX_GMU_CM3_ITCM_START 0x1B400
#define A6XX_GMU_CM3_DTCM_START 0x1C400
@@ -676,6 +789,7 @@
#define A6XX_GMU_DCVS_RETURN 0x1CBFF
#define A6XX_GMU_CM3_SYSRESET 0x1F800
#define A6XX_GMU_CM3_BOOT_CONFIG 0x1F801
+#define A6XX_GMU_CM3_FW_BUSY 0x1F81A
#define A6XX_GMU_CM3_FW_INIT_RESULT 0x1F81C
#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x1F8C0
#define A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x1F8C1
@@ -685,6 +799,7 @@
#define A6XX_GMU_RPMH_CTRL 0x1F8E8
#define A6XX_GMU_RPMH_HYST_CTRL 0x1F8E9
#define A6XX_GMU_RPMH_POWER_STATE 0x1F8EC
+#define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x1F9F0
/* HFI registers*/
#define A6XX_GMU_ALWAYS_ON_COUNTER_L 0x1F888
@@ -715,9 +830,12 @@
#define A6XX_GMU_GENERAL_7 0x1F9CC
#define A6XX_GMU_AO_INTERRUPT_EN 0x23B03
-#define A6XX_GMU_HOST_INTERRUPT_CLR 0x23B04
-#define A6XX_GMU_HOST_INTERRUPT_STATUS 0x23B05
-#define A6XX_GMU_HOST_INTERRUPT_MASK 0x23B06
+#define A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x23B04
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x23B05
+#define A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x23B06
+#define A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x23B09
+#define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x23B0A
+#define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x23B0B
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x23B0C
#define A6XX_GMU_AHB_FENCE_STATUS 0x23B13
#define A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x23B15
@@ -761,7 +879,6 @@
#define PDC_GPU_TCS1_CMD0_MSGID 0x21575
#define PDC_GPU_TCS1_CMD0_ADDR 0x21576
#define PDC_GPU_TCS1_CMD0_DATA 0x21577
-#define PDC_GPU_TIMESTAMP_UNIT1_EN_DRV0 0x23489
#define PDC_GPU_SEQ_MEM_0 0xA0000
#endif /* _A6XX_REG_H */
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 876ff0c..9a44f34 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -326,8 +326,7 @@
.major = 3,
.minor = 0,
.patchid = ANY_ID,
- .features = ADRENO_64BIT |
- ADRENO_GPMU | ADRENO_RPMH,
+ .features = ADRENO_64BIT | ADRENO_RPMH,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a630_zap",
.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 1c37978..68d7653 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -3063,6 +3063,7 @@
.regulator_disable_poll = adreno_regulator_disable_poll,
.clk_set_options = adreno_clk_set_options,
.gpu_model = adreno_gpu_model,
+ .stop_fault_timer = adreno_dispatcher_stop_fault_timer,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index e23d6a0..530529f 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -618,9 +618,9 @@
ADRENO_REG_VBIF_XIN_HALT_CTRL1,
ADRENO_REG_VBIF_VERSION,
ADRENO_REG_GMU_AO_INTERRUPT_EN,
- ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
- ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
- ADRENO_REG_GMU_HOST_INTERRUPT_MASK,
+ ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+ ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
+ ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
ADRENO_REG_GMU_AHB_FENCE_STATUS,
ADRENO_REG_GMU_RPMH_POWER_STATE,
@@ -629,6 +629,7 @@
ADRENO_REG_GMU_HFI_SFR_ADDR,
ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
+ ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
ADRENO_REG_GMU_HOST2GMU_INTR_SET,
ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
@@ -856,6 +857,8 @@
unsigned int arg1, unsigned int arg2);
bool (*hw_isidle)(struct adreno_device *);
int (*wait_for_gmu_idle)(struct adreno_device *);
+ const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+ unsigned int fsynr1);
};
/**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 0211a17..b5546ef 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -59,6 +59,127 @@
{ adreno_is_a630, a630_vbif },
};
+
+struct kgsl_hwcg_reg {
+ unsigned int off;
+ unsigned int val;
+};
+static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
+ {A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_DELAY_SP3, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_HYST_SP0, 0x00000080},
+ {A6XX_RBBM_CLOCK_HYST_SP1, 0x00000080},
+ {A6XX_RBBM_CLOCK_HYST_SP2, 0x00000080},
+ {A6XX_RBBM_CLOCK_HYST_SP3, 0x00000080},
+ {A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP0, 0x07777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP1, 0x07777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP2, 0x07777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP3, 0x07777777},
+ {A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+ {A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+ {A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+ {A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {A6XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+static const struct {
+ int (*devfunc)(struct adreno_device *adreno_dev);
+ const struct kgsl_hwcg_reg *regs;
+ unsigned int count;
+} a6xx_hwcg_registers[] = {
+ {adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}
+};
+
static struct a6xx_protected_regs {
unsigned int base;
unsigned int count;
@@ -125,7 +246,7 @@
unsigned int mmu_base = 0, mmu_range = 0, cur_range;
/* enable access protection to privileged registers */
- kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);
+ kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
if (mmu_prot) {
mmu_base = mmu_prot->base;
@@ -181,6 +302,48 @@
kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
+
+static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ const struct kgsl_hwcg_reg *regs;
+ int i, j;
+
+ if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
+ return;
+
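+ /*
+ * note (added for clarity): pick the clock-gating table matching
+ * this GPU; enabling programs the tuned values, disabling writes
+ * zeroes to every listed register.
+ */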
+ for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
+ if (a6xx_hwcg_registers[i].devfunc(adreno_dev))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(a6xx_hwcg_registers))
+ return;
+
+ regs = a6xx_hwcg_registers[i].regs;
+
+ /* Disable SP clock before programming HWCG registers */
+ kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 0);
+
+ for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
+ kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
+
+ if (kgsl_gmu_isenabled(device)) {
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ 0x00020222);
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ 0x00010111);
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ 0x00050555);
+ }
+ /* Enable SP clock */
+ kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ /* enable top level HWCG */
+ kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, on ? 0x8AA8AA02 : 0);
+ kgsl_regwrite(device, A6XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
+}
+
/*
* a6xx_start() - Device start
* @adreno_dev: Pointer to adreno device
@@ -197,6 +360,8 @@
if (!kgsl_gmu_isenabled(device))
/* Legacy idle management if gmu is disabled */
ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
+ /* enable hardware clockgating */
+ a6xx_hwcg_set(adreno_dev, true);
adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
ARRAY_SIZE(a6xx_vbif_platforms));
@@ -235,6 +400,9 @@
/* Set the AHB default slave response to "ERROR" */
kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);
+ /* Turn on performance counters */
+ kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
if (of_property_read_u32(device->pdev->dev.of_node,
"qcom,highest-bank-bit", &bit))
bit = MIN_HBB;
@@ -282,8 +450,11 @@
kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) |
(mal << 23) | (bit << 21));
+ /* Set hang detection threshold to 4 million cycles (0x3FFFF*16) */
kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
- (1 << 30) | 0x4000);
+ (1 << 30) | 0x3ffff);
+
+ kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
@@ -502,15 +673,6 @@
__raw_writel(value, reg);
}
-static void _gmu_regrmw(struct kgsl_device *device,
- unsigned int offsetwords, unsigned int mask)
-{
- unsigned int value;
-
- kgsl_gmu_regread(device, offsetwords, &value);
- kgsl_gmu_regwrite(device, offsetwords, value | mask);
-}
-
/*
* _load_gmu_rpmh_ucode() - Load the ucode into the GPU PDC/RSC blocks
* PDC and RSC execute GPU power on/off RPMh sequence
@@ -646,38 +808,45 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct gmu_device *gmu = &device->gmu;
- if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC)) {
- kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
- 0x000A0080);
- _gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
- SPTP_ENABLE_MASK);
- gmu->idle_level = GPU_HW_SPTP_PC;
- }
-
- if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) {
+ /* Configure registers for idle setting. The setting is cumulative */
+ switch (gmu->idle_level) {
+ case GPU_HW_MIN_VOLT:
+ kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
+ MIN_BW_ENABLE_MASK);
+ kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, 0,
+ MIN_BW_HYST);
+ /* fall through */
+ case GPU_HW_NAP:
+ kgsl_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, 0,
+ HW_NAP_ENABLE_MASK);
+ /* fall through */
+ case GPU_HW_IFPC:
kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
- 0x000A0080);
- _gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
- IFPC_ENABLE_MASK);
- gmu->idle_level = GPU_HW_IFPC;
+ 0x000A0080);
+ kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ IFPC_ENABLE_MASK);
+ /* fall through */
+ case GPU_HW_SPTP_PC:
+ kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ 0x000A0080);
+ kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ SPTP_ENABLE_MASK);
+ /* fall through */
+ default:
+ break;
}
- if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP)) {
- _gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL,
- HW_NAP_ENABLE_MASK);
- gmu->idle_level = GPU_HW_NAP;
- }
-
- if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT)) {
- _gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, MIN_BW_ENABLE_MASK);
- _gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, MIN_BW_HYST);
- gmu->idle_level = GPU_HW_MIN_VOLT;
- }
+ /* ACD feature enablement */
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
+ BIT(10));
/* Enable RPMh GPU client */
if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
- _gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, RPMH_ENABLE_MASK);
+ kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
+ RPMH_ENABLE_MASK);
+ /* Disable reference bandgap voltage */
kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
}
@@ -717,9 +886,8 @@
{
struct gmu_device *gmu = &device->gmu;
- kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
- (HFI_IRQ_MASK & (~HFI_IRQ_MSGQ_MASK)));
-
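+ /* note: clear only the MSGQ mask bit, preserving the other mask bits */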
+ kgsl_gmu_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK,
+ HFI_IRQ_MSGQ_MASK, 0);
kgsl_gmu_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);
if (timed_poll_check(device,
@@ -806,7 +974,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = &device->gmu;
- if (!kgsl_gmu_isenabled(device))
+ if (!gmu->pdev)
return -EINVAL;
kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
@@ -833,7 +1001,7 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = &device->gmu;
- if (!kgsl_gmu_isenabled(device))
+ if (!gmu->pdev)
return;
kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
@@ -858,13 +1026,14 @@
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
- if (!IS_ERR_OR_NULL(gmu->gx_gdsc)) {
- ret = regulator_enable(gmu->gx_gdsc);
- if (ret) {
- dev_err(&gmu->pdev->dev,
- "Failed to turn on GPU HM HS\n");
- return ret;
- }
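+ /* note: skip if the GDSC is already on to keep enable counts balanced */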
+ if (regulator_is_enabled(gmu->gx_gdsc))
+ return 0;
+
+ ret = regulator_enable(gmu->gx_gdsc);
+ if (ret) {
+ dev_err(&gmu->pdev->dev,
+ "Failed to turn on GPU HM HS\n");
+ return ret;
}
ret = clk_set_rate(pwr->grp_clks[0],
@@ -886,19 +1055,85 @@
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
+ if (!regulator_is_enabled(gmu->gx_gdsc))
+ return 0;
+
clk_disable_unprepare(pwr->grp_clks[0]);
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
- if (IS_ERR_OR_NULL(gmu->gx_gdsc))
- return 0;
-
return regulator_disable(gmu->gx_gdsc);
}
/*
+ * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC
+ * @device: Pointer to KGSL device
+ */
+static int a6xx_hm_sptprac_enable(struct kgsl_device *device)
+{
+ int ret = 0;
+ struct gmu_device *gmu = &device->gmu;
+
+ /* If GMU does not control HM we must */
+ if (gmu->idle_level < GPU_HW_IFPC) {
+ ret = a6xx_hm_enable(ADRENO_DEVICE(device));
+ if (ret) {
+ dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
+ return ret;
+ }
+ }
+
+ /* If GMU does not control SPTPRAC we must */
+ if (gmu->idle_level < GPU_HW_SPTP_PC) {
+ ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
+ if (ret) {
+ a6xx_hm_disable(ADRENO_DEVICE(device));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * a6xx_hm_sptprac_disable() - Turn off SPTPRAC and HM
+ * @device: Pointer to KGSL device
+ */
+static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
+{
+ int ret = 0;
+ struct gmu_device *gmu = &device->gmu;
+
+ /* If GMU does not control SPTPRAC we must */
+ if (gmu->idle_level < GPU_HW_SPTP_PC)
+ a6xx_sptprac_disable(ADRENO_DEVICE(device));
+
+ /* If GMU does not control HM we must */
+ if (gmu->idle_level < GPU_HW_IFPC) {
+ ret = a6xx_hm_disable(ADRENO_DEVICE(device));
+ if (ret)
+ dev_err(&gmu->pdev->dev, "Failed to power off GPU HM\n");
+ }
+
+ return ret;
+}
+
+/*
+ * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
+ * @device: Pointer to KGSL device
+ * @on: True to turn on or false to turn off
+ */
+static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
+{
+ if (on)
+ return a6xx_hm_sptprac_enable(device);
+ else
+ return a6xx_hm_sptprac_disable(device);
+}
+
+/*
* a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
* @device: Pointer to KGSL device
*
@@ -976,7 +1211,7 @@
{
struct gmu_device *gmu = &device->gmu;
struct device *dev = &gmu->pdev->dev;
- int ret;
+ int ret = 0;
if (device->state != KGSL_STATE_INIT &&
device->state != KGSL_STATE_SUSPEND) {
@@ -1002,26 +1237,11 @@
0xFFFFFFFF))
goto error_rsc;
- /* If GMU does not control HM we must */
- if (gmu->idle_level < GPU_HW_IFPC) {
- ret = a6xx_hm_enable(ADRENO_DEVICE(device));
- if (ret) {
- dev_err(dev, "Failed to power on GPU HM\n");
- return ret;
- }
- }
-
- /* If GMU does not control SPTP we must */
- if (gmu->idle_level < GPU_HW_SPTP_PC) {
- ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
- if (ret) {
- a6xx_hm_disable(ADRENO_DEVICE(device));
- return ret;
- }
- }
+ /* Turn on the HM and SPTP head switches */
+ ret = a6xx_hm_sptprac_control(device, true);
}
- return 0;
+ return ret;
error_rsc:
dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
@@ -1031,22 +1251,13 @@
static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
{
struct gmu_device *gmu = &device->gmu;
- struct device *dev = &gmu->pdev->dev;
- int val, ret;
+ int val, ret = 0;
- /* If GMU does not control SPTP we must */
- if (gmu->idle_level < GPU_HW_SPTP_PC)
- a6xx_sptprac_disable(ADRENO_DEVICE(device));
-
- /* If GMU does not control HM we must */
- if (gmu->idle_level < GPU_HW_IFPC) {
- ret = a6xx_hm_disable(ADRENO_DEVICE(device));
- if (ret)
- dev_err(dev, "Failed to power off GPU HM\n");
- }
+ /* Turn off the SPTP and HM head switches */
+ ret = a6xx_hm_sptprac_control(device, false);
/* RSC sleep sequence */
- _regwrite(gmu->pdc_reg_virt, PDC_GPU_TIMESTAMP_UNIT1_EN_DRV0, 1);
+ kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
wmb();
@@ -1069,7 +1280,7 @@
/* FIXME: v2 has different procedure to trigger sequence */
- return 0;
+ return ret;
}
/*
@@ -1083,30 +1294,14 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct gmu_device *gmu = &device->gmu;
struct gmu_memdesc *mem_addr = gmu->hfi_mem;
- struct device *dev = &gmu->pdev->dev;
int ret, i;
- a6xx_gmu_power_config(device);
-
- /* If GMU does not control HM then we must */
- if (gmu->idle_level < GPU_HW_IFPC) {
- ret = a6xx_hm_enable(adreno_dev);
- if (ret) {
- dev_err(dev, "Failed to power on GPU HM\n");
- return ret;
- }
- }
-
- /* If GMU does not control SPTP then we must */
- if (gmu->idle_level < GPU_HW_SPTP_PC) {
- ret = a6xx_sptprac_enable(adreno_dev);
- if (ret) {
- a6xx_hm_disable(adreno_dev);
- return ret;
- }
- }
-
if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
+ /* Turn on the HM and SPTP head switches */
+ ret = a6xx_hm_sptprac_control(device, true);
+ if (ret)
+ return ret;
+
/* Turn on TCM retention */
kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
@@ -1145,6 +1340,8 @@
kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
FENCE_RANGE_MASK);
+ /* Configure power control and bring the GMU out of reset */
+ a6xx_gmu_power_config(device);
ret = a6xx_gmu_start(device);
if (ret)
return ret;
@@ -1275,16 +1472,11 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct gmu_device *gmu = &device->gmu;
- if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
- gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
- dev_err(&gmu->pdev->dev,
- "GMU is not going to powerstate %d\n",
- gmu->idle_level);
- return -ETIMEDOUT;
- }
+ /* TODO: Remove this register write when firmware is updated */
+ kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_BUSY, 0);
if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
- 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
+ 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
dev_err(&gmu->pdev->dev, "GMU is not idling\n");
return -ETIMEDOUT;
}
@@ -1494,6 +1686,49 @@
iounmap(gpu_cx_reg);
}
+static const char *fault_block[8] = {
+ [0] = "CP",
+ [1] = "UCHE",
+ [2] = "VFD",
+ [3] = "UCHE",
+ [4] = "CCU",
+ [5] = "unknown",
+ [6] = "CDP Prefetch",
+ [7] = "GPMU",
+};
+
+static const char *uche_client[8] = {
+ [0] = "VFD",
+ [1] = "SP",
+ [2] = "VSC",
+ [3] = "VPC",
+ [4] = "HLSQ",
+ [5] = "PC",
+ [6] = "LRZ",
+ [7] = "unknown",
+};
+
+static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
+ unsigned int fsynr1)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int client_id;
+ unsigned int uche_client_id;
+
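+ /*
+ * note (added for clarity): the low byte of FSYNR1 identifies the
+ * faulting client; client 3 (UCHE) multiplexes several sub-clients
+ * and is resolved further via A6XX_UCHE_CLIENT_PF below.
+ */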
+ client_id = fsynr1 & 0xff;
+
+ if (client_id >= ARRAY_SIZE(fault_block))
+ return "unknown";
+ else if (client_id != 3)
+ return fault_block[client_id];
+
+ mutex_lock(&device->mutex);
+ kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
+ mutex_unlock(&device->mutex);
+
+ return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+}
+
#define A6XX_INT_MASK \
((1 << A6XX_INT_CP_AHB_ERROR) | \
(1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
@@ -1981,12 +2216,12 @@
A6XX_GMU_ALWAYS_ON_COUNTER_H),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
A6XX_GMU_AO_INTERRUPT_EN),
- ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
- A6XX_GMU_HOST_INTERRUPT_CLR),
- ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
- A6XX_GMU_HOST_INTERRUPT_STATUS),
- ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_MASK,
- A6XX_GMU_HOST_INTERRUPT_MASK),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+ A6XX_GMU_AO_HOST_INTERRUPT_CLR),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+ A6XX_GMU_AO_HOST_INTERRUPT_MASK),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
@@ -2003,6 +2238,8 @@
A6XX_GMU_GMU2HOST_INTR_CLR),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
A6XX_GMU_GMU2HOST_INTR_INFO),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+ A6XX_GMU_GMU2HOST_INTR_MASK),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
A6XX_GMU_HOST2GMU_INTR_SET),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
@@ -2050,5 +2287,6 @@
.oob_clear = a6xx_oob_clear,
.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
- .wait_for_gmu_idle = a6xx_wait_for_gmu_idle
+ .wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
+ .iommu_fault_block = a6xx_iommu_fault_block,
};
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index f2a7963..01ecb01 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -121,36 +121,38 @@
unsigned int statetype;
const unsigned int *regs;
unsigned int num_sets;
+ unsigned int offset0;
+ unsigned int offset1;
} a6xx_dbgahb_ctx_clusters[] = {
{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
- { CP_CLUSTER_SP_VS, 0x0002EC00, 0x41, a6xx_hlsq_duplicate_cluster,
+ { CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_hlsq_duplicate_cluster,
ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
- { CP_CLUSTER_SP_VS, 0x0002AC00, 0x21, a6xx_sp_duplicate_cluster,
+ { CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_duplicate_cluster,
ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
- { CP_CLUSTER_SP_VS, 0x0002CC00, 0x1, a6xx_tp_duplicate_cluster,
+ { CP_CLUSTER_SP_VS, 0x0002C000, 0x1, a6xx_tp_duplicate_cluster,
ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002E600, 0x42, a6xx_sp_ps_hlsq_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_sp_ps_hlsq_cluster,
ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002F300, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002F000, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002A600, 0x22, a6xx_sp_ps_sp_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002A000, 0x22, a6xx_sp_ps_sp_cluster,
ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002B300, 0x26, a6xx_sp_ps_sp_2d_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002B000, 0x26, a6xx_sp_ps_sp_2d_cluster,
ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002C600, 0x2, a6xx_sp_ps_tp_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002C000, 0x2, a6xx_sp_ps_tp_cluster,
ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002D300, 0x6, a6xx_sp_ps_tp_2d_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002D000, 0x6, a6xx_sp_ps_tp_2d_cluster,
ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
- { CP_CLUSTER_SP_PS, 0x0002EC00, 0x42, a6xx_hlsq_duplicate_cluster,
+ { CP_CLUSTER_SP_PS, 0x0002E000, 0x42, a6xx_hlsq_duplicate_cluster,
ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
- { CP_CLUSTER_SP_VS, 0x0002AC00, 0x22, a6xx_sp_duplicate_cluster,
+ { CP_CLUSTER_SP_VS, 0x0002A000, 0x22, a6xx_sp_duplicate_cluster,
ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
- { CP_CLUSTER_SP_VS, 0x0002CC00, 0x2, a6xx_tp_duplicate_cluster,
+ { CP_CLUSTER_SP_VS, 0x0002C000, 0x2, a6xx_tp_duplicate_cluster,
ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
};
@@ -221,12 +223,12 @@
static const unsigned int a6xx_registers[] = {
/* RBBM */
- 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0014, 0x0014,
- 0x0018, 0x001B, 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042,
- 0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE,
- 0x00B0, 0x00FB, 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213,
- 0x0218, 0x023D, 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B,
- 0x050E, 0x0511, 0x0533, 0x0533, 0x0540, 0x0555,
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
+ 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
+ 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
+ 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
+ 0x0533, 0x0533, 0x0540, 0x0555,
/* CP */
0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
@@ -240,8 +242,8 @@
0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
0x0E38, 0x0E39,
/* GRAS */
- 0x8600, 0x8601, 0x8604, 0x8605, 0x8610, 0x861B, 0x8620, 0x8620,
- 0x8628, 0x862B, 0x8630, 0x8637,
+ 0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
+ 0x8630, 0x8637,
/* RB */
0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
@@ -254,7 +256,7 @@
0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
/* VFD */
0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
- 0xA630, 0xA630, 0xD200, 0xD263,
+ 0xA630, 0xA630,
};
enum a6xx_debugbus_id {
@@ -275,11 +277,12 @@
A6XX_DBGBUS_LRZ = 0x10,
A6XX_DBGBUS_A2D = 0x11,
A6XX_DBGBUS_CCUFCHE = 0x12,
- A6XX_DBGBUS_GMU = 0x13,
+ A6XX_DBGBUS_GMU_CX = 0x13,
A6XX_DBGBUS_RBP = 0x14,
A6XX_DBGBUS_DCS = 0x15,
A6XX_DBGBUS_RBBM_CFG = 0x16,
A6XX_DBGBUS_CX = 0x17,
+ A6XX_DBGBUS_GMU_GX = 0x18,
A6XX_DBGBUS_TPFCHE = 0x19,
A6XX_DBGBUS_GPC = 0x1d,
A6XX_DBGBUS_LARC = 0x1e,
@@ -321,6 +324,7 @@
{ A6XX_DBGBUS_RBP, 0x100, },
{ A6XX_DBGBUS_DCS, 0x100, },
{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
+ { A6XX_DBGBUS_GMU_GX, 0x100, },
{ A6XX_DBGBUS_TPFCHE, 0x100, },
{ A6XX_DBGBUS_GPC, 0x100, },
{ A6XX_DBGBUS_LARC, 0x100, },
@@ -345,7 +349,7 @@
static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
{ A6XX_DBGBUS_VBIF, 0x100, },
- { A6XX_DBGBUS_GMU, 0x100, },
+ { A6XX_DBGBUS_GMU_CX, 0x100, },
{ A6XX_DBGBUS_CX, 0x100, },
};
@@ -581,8 +585,9 @@
struct kgsl_snapshot *snapshot)
{
unsigned int pool_size;
+ u8 *buf = snapshot->ptr;
- /* Save the mempool size to 0 to stabilize it while dumping */
+ /* Set the mempool size to 0 to stabilize it while dumping */
kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
@@ -590,6 +595,22 @@
A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
0, 0x2060);
+ /*
+ * Data at offset 0x2000 in the mempool section is the mempool size.
+ * Since we set it to 0, patch in the original size so that the data
+ * is consistent.
+ */
+ if (buf < snapshot->ptr) {
+ unsigned int *data;
+
+ /* Skip over the headers */
+ buf += sizeof(struct kgsl_snapshot_section_header) +
+ sizeof(struct kgsl_snapshot_indexed_regs);
+
+ data = (unsigned int *)buf + 0x2000;
+ *data = pool_size;
+ }
+
/* Restore the saved mempool size */
kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
}
@@ -605,8 +626,8 @@
return val;
}
-static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
- size_t remain, void *priv)
+static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
{
struct kgsl_snapshot_mvc_regs *header =
(struct kgsl_snapshot_mvc_regs *)buf;
@@ -659,6 +680,63 @@
return data_size + sizeof(*header);
}
+static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
+ size_t remain, void *priv)
+{
+ struct kgsl_snapshot_mvc_regs *header =
+ (struct kgsl_snapshot_mvc_regs *)buf;
+ struct a6xx_cluster_dbgahb_regs_info *info =
+ (struct a6xx_cluster_dbgahb_regs_info *)priv;
+ struct a6xx_cluster_dbgahb_registers *cluster = info->cluster;
+ unsigned int data_size = 0;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ int i, j;
+ unsigned int *src;
+
+ if (crash_dump_valid == false)
+ return a6xx_legacy_snapshot_cluster_dbgahb(device, buf, remain,
+ info);
+
+ if (remain < sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ return 0;
+ }
+
+ remain -= sizeof(*header);
+
+ header->ctxt_id = info->ctxt_id;
+ header->cluster_id = cluster->id;
+
+ src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
+ (header->ctxt_id ? cluster->offset1 : cluster->offset0));
+
+ for (i = 0; i < cluster->num_sets; i++) {
+ unsigned int start;
+ unsigned int end;
+
+ start = cluster->regs[2 * i];
+ end = cluster->regs[2 * i + 1];
+
+ if (remain < (end - start + 3) * 4) {
+ SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+ goto out;
+ }
+
+ remain -= (end - start + 3) * 4;
+ data_size += (end - start + 3) * 4;
+
+ *data++ = start | (1 << 31);
+ *data++ = end;
+ for (j = start; j <= end; j++)
+ *data++ = *src++;
+ }
+out:
+ return data_size + sizeof(*header);
+}
+
static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
size_t remain, void *priv)
{
@@ -883,12 +961,14 @@
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_snapshot_debugbus *header =
(struct kgsl_snapshot_debugbus *)buf;
struct adreno_debugbus_block *block = priv;
int i;
unsigned int *data = (unsigned int *)(buf + sizeof(*header));
unsigned int dwords;
+ unsigned int block_id;
size_t size;
dwords = block->dwords;
@@ -904,9 +984,14 @@
header->id = block->block_id;
header->count = dwords * 2;
+ block_id = block->block_id;
+ /* GMU_GX data is read using the GMU_CX block id on A630 */
+ if (adreno_is_a630(adreno_dev) &&
+ (block_id == A6XX_DBGBUS_GMU_GX))
+ block_id = A6XX_DBGBUS_GMU_CX;
+
for (i = 0; i < dwords; i++)
- a6xx_dbgc_debug_bus_read(device, block->block_id, i,
- &data[i*2]);
+ a6xx_dbgc_debug_bus_read(device, block_id, i, &data[i*2]);
return size;
}
@@ -1166,6 +1251,28 @@
snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
}
+/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
+static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf,
+ size_t remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+
+ if (remain < DEBUG_SECTION_SZ(1)) {
+ SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
+ return 0;
+ }
+
+ /* Dump the SQE firmware version */
+ header->type = SNAPSHOT_DEBUG_SQE_VERSION;
+ header->size = 1;
+ *data = fw->version;
+
+ return DEBUG_SECTION_SZ(1);
+}
+
static void _a6xx_do_crashdump(struct kgsl_device *device)
{
unsigned long wait_time;
@@ -1255,6 +1362,10 @@
snapshot, adreno_snapshot_cp_roq,
&snap_data->sect_sizes->roq);
+ /* SQE Firmware */
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
+ snapshot, a6xx_snapshot_sqe, NULL);
+
/* Mempool debug data */
a6xx_snapshot_mempool(device, snapshot);
@@ -1339,6 +1450,47 @@
return qwords;
}
+static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
+{
+ int qwords = 0;
+ unsigned int i, j, k;
+ unsigned int count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
+ struct a6xx_cluster_dbgahb_registers *cluster =
+ &a6xx_dbgahb_ctx_clusters[i];
+
+ cluster->offset0 = *offset;
+
+ for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+ if (j == 1)
+ cluster->offset1 = *offset;
+
+ /* Program the aperture */
+ ptr[qwords++] =
+ ((cluster->statetype + j * 2) & 0xff) << 8;
+ ptr[qwords++] =
+ (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
+ (1 << 21) | 1;
+
+ for (k = 0; k < cluster->num_sets; k++) {
+ unsigned int start = cluster->regs[2 * k];
+
+ count = REG_PAIR_COUNT(cluster->regs, k);
+ ptr[qwords++] =
+ a6xx_crashdump_registers.gpuaddr + *offset;
+ ptr[qwords++] =
+ (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ start - cluster->regbase / 4) << 44)) |
+ count;
+
+ *offset += count * sizeof(unsigned int);
+ }
+ }
+ }
+ return qwords;
+}
+
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1406,6 +1558,26 @@
}
}
+ /* Calculate the script and data size for debug AHB registers */
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
+ struct a6xx_cluster_dbgahb_registers *cluster =
+ &a6xx_dbgahb_ctx_clusters[i];
+
+ for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+
+ /* 16 bytes for programming the aperture */
+ script_size += 16;
+
+ /* Reading each pair of registers takes 16 bytes */
+ script_size += 16 * cluster->num_sets;
+
+ /* A dword per register read from the cluster list */
+ for (k = 0; k < cluster->num_sets; k++)
+ data_size += REG_PAIR_COUNT(cluster->regs, k) *
+ sizeof(unsigned int);
+ }
+ }
+
/* Now allocate the script and data buffers */
/* The script buffers needs 2 extra qwords on the end */
@@ -1445,6 +1617,8 @@
/* Program the capture script for the MVC registers */
ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
+ ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
+
*ptr++ = 0;
*ptr++ = 0;
}
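
The MVC payload written by a6xx_snapshot_cluster_dbgahb() above is self-describing: each register set is emitted as one dword of start | (1 << 31), one dword holding end, and then the end - start + 1 register values, which is where the (end - start + 3) * 4 size accounting comes from. A minimal userspace decoder sketch for that layout (illustrative only; it assumes the snapshot section headers have already been stripped and the buffer holds just the payload):

#include <stdint.h>
#include <stdio.h>

/* Walk MVC payload dwords: [start | (1u << 31)], [end], then values. */
static void decode_mvc_payload(const uint32_t *data, size_t dwords)
{
	size_t i = 0;

	while (i + 2 <= dwords) {
		uint32_t start = data[i] & ~(1u << 31);
		uint32_t end = data[i + 1];
		uint32_t count = end - start + 1;

		i += 2;
		if (i + count > dwords)
			break;	/* truncated section */

		for (uint32_t r = 0; r < count; r++)
			printf("0x%05x: 0x%08x\n", start + r, data[i + r]);
		i += count;
	}
}
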
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index b1f832f..2a1d352 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -137,11 +137,8 @@
break;
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- char fence_str[128];
-
- kgsl_dump_fence(sync_event->handle,
- fence_str, sizeof(fence_str));
- seq_printf(s, "sync: [%pK] %s", sync_event->handle, fence_str);
+ seq_printf(s, "sync: [%pK] %s", sync_event->handle,
+ sync_event->fence_name);
break;
}
default:
@@ -241,6 +238,9 @@
static void drawobj_print(struct seq_file *s,
struct kgsl_drawobj *drawobj)
{
+ if (!kref_get_unless_zero(&drawobj->refcount))
+ return;
+
if (drawobj->type == SYNCOBJ_TYPE)
syncobj_print(s, SYNCOBJ(drawobj));
else if ((drawobj->type == CMDOBJ_TYPE) ||
@@ -251,6 +251,7 @@
print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
drawobj->flags);
+ kgsl_drawobj_put(drawobj);
seq_puts(s, "\n");
}
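
The drawobj_print() change above is the standard guarded-access idiom for kref-counted objects: take a reference only if the count is still nonzero, and drop it when done, so the object cannot be destroyed while it is being inspected. A generic sketch of the pattern (a hypothetical obj type stands in for kgsl_drawobj):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical object, shown only to illustrate the idiom. */
struct obj {
	struct kref refcount;
};

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, refcount));
}

static void obj_inspect(struct obj *o)
{
	/*
	 * Take a reference only if the object is still live; a plain
	 * kref_get() here could resurrect an object already on its
	 * way to destruction.
	 */
	if (!kref_get_unless_zero(&o->refcount))
		return;

	/* ... o may be dereferenced safely here ... */

	kref_put(&o->refcount, obj_release);
}
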
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 4d1f1ad..1cb0259 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -208,6 +208,9 @@
if (!kgsl_state_is_awake(KGSL_DEVICE(adreno_dev)))
goto ret;
+ if (adreno_rb_empty(adreno_dev->cur_rb))
+ goto ret;
+
/* only check rbbm status to determine if GPU is idle */
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &reg_rbbm_status);
@@ -1042,6 +1045,13 @@
*/
if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+ /*
+	 * Set the fault tolerance policy to FT_REPLAY: the context wants
+	 * to be invalidated after a failed replay attempt, so there is
+	 * no need to run the default FT policy.
+ */
+ else if (drawctxt->base.flags & KGSL_CONTEXT_INVALIDATE_ON_FAULT)
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
else
cmdobj->fault_policy = adreno_dev->ft_policy;
}
@@ -2055,6 +2065,18 @@
return 0;
/*
+ * In the very unlikely case that the power is off, do nothing - the
+ * state will be reset on power up and everybody will be happy
+ */
+
+ if (!kgsl_state_is_awake(device) && (fault & ADRENO_SOFT_FAULT)) {
+ /* Clear the existing register values */
+ memset(adreno_ft_regs_val, 0,
+ adreno_ft_regs_num * sizeof(unsigned int));
+ return 0;
+ }
+
+ /*
* On A5xx and A6xx, read RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24)
* to tell if this function was entered after a pagefault. If so, only
* proceed if the fault handler has already run in the IRQ thread,
@@ -2509,7 +2531,7 @@
if (!fault_detect_read_compare(adreno_dev)) {
adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
- } else {
+ } else if (dispatcher->inflight > 0) {
mod_timer(&dispatcher->fault_timer,
jiffies + msecs_to_jiffies(_fault_timer_interval));
}
@@ -2554,6 +2576,20 @@
}
/**
+ * adreno_dispatcher_stop_fault_timer() - stop the dispatcher fault timer
+ * @device: pointer to the KGSL device structure
+ *
+ * Stop the dispatcher fault timer
+ */
+void adreno_dispatcher_stop_fault_timer(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ del_timer_sync(&dispatcher->fault_timer);
+}
+
+/**
* adreno_dispatcher_close() - close the dispatcher
* @adreno_dev: pointer to the adreno device structure
*
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index cb9106f..72545db 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -108,6 +108,7 @@
int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
+void adreno_dispatcher_stop_fault_timer(struct kgsl_device *device);
int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 9f4e185..f217822 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -95,6 +95,9 @@
goto stats;
}
+ if (!kref_get_unless_zero(&drawobj->refcount))
+ goto stats;
+
if (drawobj->type == SYNCOBJ_TYPE) {
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
@@ -106,6 +109,8 @@
kgsl_dump_syncpoints(device, syncobj);
}
}
+
+ kgsl_drawobj_put(drawobj);
}
stats:
@@ -337,13 +342,14 @@
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
- unsigned long local;
+ unsigned int local;
local = *flags & (KGSL_CONTEXT_PREAMBLE |
KGSL_CONTEXT_NO_GMEM_ALLOC |
KGSL_CONTEXT_PER_CONTEXT_TS |
KGSL_CONTEXT_USER_GENERATED_TS |
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+ KGSL_CONTEXT_INVALIDATE_ON_FAULT |
KGSL_CONTEXT_CTX_SWITCH |
KGSL_CONTEXT_PRIORITY_MASK |
KGSL_CONTEXT_TYPE_MASK |
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 32175f5..fbff535 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -54,10 +54,21 @@
/* Read always on registers */
if (!adreno_is_a3xx(adreno_dev)) {
- adreno_readreg64(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
- &time->ticks);
+ if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev))) {
+ uint32_t val_lo, val_hi;
+
+ adreno_read_gmureg(adreno_dev,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO, &val_lo);
+ adreno_read_gmureg(adreno_dev,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI, &val_hi);
+
+ time->ticks = (val_lo | ((uint64_t)val_hi << 32));
+ } else {
+ adreno_readreg64(adreno_dev,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+ &time->ticks);
+ }
/* Mask hi bits as they may be incorrect on some targets */
if (ADRENO_GPUREV(adreno_dev) >= 400 &&
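
When the GMU owns the always-on counter, the hunk above assembles the 64-bit value from two 32-bit reads. A sequence like that can tear if the low word rolls over between the two reads; the usual guard, shown here only as a sketch with a hypothetical read_reg() accessor, is to re-read the high word until it is stable:

#include <stdint.h>

/* Hypothetical accessor standing in for adreno_read_gmureg(). */
extern uint32_t read_reg(int reg);

#define REG_CTR_LO 0
#define REG_CTR_HI 1

/*
 * Combine lo/hi counter halves, re-reading the high word to detect a
 * rollover between the two reads (classic split-counter idiom).
 */
static uint64_t read_counter64(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi = read_reg(REG_CTR_HI);
		lo = read_reg(REG_CTR_LO);
		hi2 = read_reg(REG_CTR_HI);
	} while (hi != hi2);

	return ((uint64_t)hi << 32) | lo;
}
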
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index d1c84f1..8f49bc7 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -261,8 +261,11 @@
{
struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (entry != NULL)
+ if (entry != NULL) {
kref_init(&entry->refcount);
+		/* this extra ref is put in the userspace memory alloc and map ioctls */
+ kref_get(&entry->refcount);
+ }
return entry;
}
@@ -1624,7 +1627,8 @@
/* If no profiling buffer was specified, clear the flag */
if (cmdobj->profiling_buf_entry == NULL)
- DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ DRAWOBJ(cmdobj)->flags &=
+ ~(unsigned long)KGSL_DRAWOBJ_PROFILING;
}
result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
@@ -1713,7 +1717,8 @@
/* If no profiling buffer was specified, clear the flag */
if (cmdobj->profiling_buf_entry == NULL)
- DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ DRAWOBJ(cmdobj)->flags &=
+ ~(unsigned long)KGSL_DRAWOBJ_PROFILING;
}
result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
@@ -1775,9 +1780,9 @@
/* Commit the pointer to the context in context_idr */
write_lock(&device->context_lock);
idr_replace(&device->context_idr, context, context->id);
+ param->drawctxt_id = context->id;
write_unlock(&device->context_lock);
- param->drawctxt_id = context->id;
done:
return result;
}
@@ -1952,7 +1957,7 @@
}
handle = kgsl_sync_fence_async_wait(event.fd,
- gpuobj_free_fence_func, entry);
+ gpuobj_free_fence_func, entry, NULL, 0);
/* if handle is NULL the fence has already signaled */
if (handle == NULL)
@@ -2038,7 +2043,7 @@
unsigned long flags_requested = (VM_READ | VM_WRITE);
if (flags & KGSL_MEMFLAGS_GPUREADONLY)
- flags_requested &= ~VM_WRITE;
+ flags_requested &= ~(unsigned long)VM_WRITE;
if ((vma->vm_flags & flags_requested) == flags_requested)
return 0;
@@ -2132,7 +2137,7 @@
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = (uint64_t) size;
entry->memdesc.useraddr = hostptr;
- entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ADDR;
+ entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;
if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
int ret;
@@ -2163,7 +2168,7 @@
static void _setup_cache_mode(struct kgsl_mem_entry *entry,
struct vm_area_struct *vma)
{
- unsigned int mode;
+ uint64_t mode;
pgprot_t pgprot = vma->vm_page_prot;
if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(pgprot)))
@@ -2412,6 +2417,10 @@
trace_kgsl_mem_map(entry, fd);
kgsl_mem_entry_commit_process(entry);
+
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
unmap:
@@ -2518,7 +2527,7 @@
entry->memdesc.size = 0;
/* USE_CPU_MAP is not implemented for ION. */
entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
- entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ION;
+ entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ION;
sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
@@ -2718,6 +2727,10 @@
trace_kgsl_mem_map(entry, param->fd);
kgsl_mem_entry_commit_process(entry);
+
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return result;
error_attach:
@@ -3017,8 +3030,9 @@
if ((flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT ==
KGSL_CACHEMODE_WRITETHROUGH) {
flags &= ~((uint64_t) KGSL_CACHEMODE_MASK);
- flags |= (KGSL_CACHEMODE_WRITEBACK << KGSL_CACHEMODE_SHIFT) &
- KGSL_CACHEMODE_MASK;
+ flags |= (uint64_t)((KGSL_CACHEMODE_WRITEBACK <<
+ KGSL_CACHEMODE_SHIFT) &
+ KGSL_CACHEMODE_MASK);
}
return flags;
}
@@ -3072,8 +3086,9 @@
KGSL_MAX_ALIGN >> 10);
flags &= ~((uint64_t) KGSL_MEMALIGN_MASK);
- flags |= (ilog2(KGSL_MAX_ALIGN) << KGSL_MEMALIGN_SHIFT) &
- KGSL_MEMALIGN_MASK;
+ flags |= (uint64_t)((ilog2(KGSL_MAX_ALIGN) <<
+ KGSL_MEMALIGN_SHIFT) &
+ KGSL_MEMALIGN_MASK);
}
/* For now only allow allocations up to 4G */
@@ -3155,6 +3170,9 @@
param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc);
param->id = entry->id;
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3178,6 +3196,9 @@
param->size = (size_t) entry->memdesc.size;
param->flags = (unsigned int) entry->memdesc.flags;
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3201,6 +3222,9 @@
param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3318,6 +3342,9 @@
trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
kgsl_mem_entry_commit_process(entry);
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
err_invalid_pages:
@@ -3397,6 +3424,9 @@
trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
kgsl_mem_entry_commit_process(entry);
+ /* Put the extra ref from kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3949,7 +3979,8 @@
if (param->flags & KGSL_GPUOBJ_SET_INFO_TYPE) {
entry->memdesc.flags &= ~((uint64_t) KGSL_MEMTYPE_MASK);
- entry->memdesc.flags |= param->type << KGSL_MEMTYPE_SHIFT;
+ entry->memdesc.flags |= (uint64_t)(param->type <<
+ KGSL_MEMTYPE_SHIFT);
}
kgsl_mem_entry_put(entry);
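
The recurring "Put the extra ref from kgsl_mem_entry_create()" hunks above all follow one lifecycle change: kgsl_mem_entry_create() now returns with an extra reference held by the creator, so the entry cannot vanish between being published (where other threads may find and free it) and the final bookkeeping in the ioctl, which then drops the creator's reference. Schematically (hypothetical names):

#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct entry {
	struct kref refcount;
};

static void entry_release(struct kref *kref)
{
	kfree(container_of(kref, struct entry, refcount));
}

static struct entry *entry_create(void)
{
	struct entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (e) {
		kref_init(&e->refcount);	/* ref #1: the eventual owner */
		kref_get(&e->refcount);		/* ref #2: held by the creator */
	}
	return e;
}

static int example_ioctl(void)
{
	struct entry *e = entry_create();

	if (!e)
		return -ENOMEM;

	/* publish(e): other threads may now find e and drop ref #1 */

	/* ... trace points, bookkeeping: e is still safe to touch ... */

	kref_put(&e->refcount, entry_release);	/* drop the creator's ref */
	return 0;
}
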
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b4725c1..db105c5 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -80,6 +80,7 @@
{ KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
{ KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
{ KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
+ { KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
{ KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
{ KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }
@@ -178,6 +179,7 @@
const char *name, struct clk *clk);
void (*gpu_model)(struct kgsl_device *device, char *str,
size_t bufsz);
+ void (*stop_fault_timer)(struct kgsl_device *device);
};
struct kgsl_ioctl {
@@ -568,6 +570,17 @@
device->ftbl->regwrite(device, offsetwords, val | bits);
}
+static inline void kgsl_gmu_regrmw(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int mask, unsigned int bits)
+{
+ unsigned int val = 0;
+
+ kgsl_gmu_regread(device, offsetwords, &val);
+ val &= ~mask;
+ kgsl_gmu_regwrite(device, offsetwords, val | bits);
+}
+
static inline int kgsl_idle(struct kgsl_device *device)
{
return device->ftbl->idle(device);
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 3a87e6e..bca3d57 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -44,7 +44,7 @@
static struct kmem_cache *sparseobjs_cache;
-static void drawobj_destroy_object(struct kref *kref)
+void kgsl_drawobj_destroy_object(struct kref *kref)
{
struct kgsl_drawobj *drawobj = container_of(kref,
struct kgsl_drawobj, refcount);
@@ -68,12 +68,6 @@
}
}
-static inline void drawobj_put(struct kgsl_drawobj *drawobj)
-{
- if (drawobj)
- kref_put(&drawobj->refcount, drawobj_destroy_object);
-}
-
void kgsl_dump_syncpoints(struct kgsl_device *device,
struct kgsl_drawobj_sync *syncobj)
{
@@ -100,16 +94,11 @@
retired);
break;
}
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- char fence_str[128];
-
- kgsl_dump_fence(event->handle,
- fence_str, sizeof(fence_str));
- dev_err(device->dev,
- " fence: %s\n", fence_str);
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ dev_err(device->dev, " fence: %s\n",
+ event->fence_name);
break;
}
- }
}
}
@@ -117,13 +106,23 @@
{
struct kgsl_device *device;
struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
- struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj *drawobj;
struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (syncobj == NULL || drawobj->context == NULL)
+ if (syncobj == NULL)
return;
+ drawobj = DRAWOBJ(syncobj);
+
+ if (!kref_get_unless_zero(&drawobj->refcount))
+ return;
+
+ if (drawobj->context == NULL) {
+ kgsl_drawobj_put(drawobj);
+ return;
+ }
+
device = drawobj->context->device;
dev_err(device->dev,
@@ -147,18 +146,14 @@
dev_err(device->dev, " [%d] TIMESTAMP %d:%d\n",
i, event->context->id, event->timestamp);
break;
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- char fence_str[128];
-
- kgsl_dump_fence(event->handle,
- fence_str, sizeof(fence_str));
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
dev_err(device->dev, " [%d] FENCE %s\n",
- i, fence_str);
+ i, event->fence_name);
break;
}
- }
}
+ kgsl_drawobj_put(drawobj);
dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}
@@ -204,7 +199,7 @@
drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- drawobj_put(&event->syncobj->base);
+ kgsl_drawobj_put(&event->syncobj->base);
}
static inline void memobj_list_free(struct list_head *list)
@@ -265,7 +260,7 @@
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
break;
}
}
@@ -321,21 +316,19 @@
else
return;
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
}
EXPORT_SYMBOL(kgsl_drawobj_destroy);
static void drawobj_sync_fence_func(void *priv)
{
struct kgsl_drawobj_sync_event *event = priv;
- char fence_str[128];
- kgsl_dump_fence(event->handle, fence_str, sizeof(fence_str));
- trace_syncpoint_fence_expire(event->syncobj, fence_str);
+ trace_syncpoint_fence_expire(event->syncobj, event->fence_name);
drawobj_sync_expire(event->device, event);
- drawobj_put(&event->syncobj->base);
+ kgsl_drawobj_put(&event->syncobj->base);
}
/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
@@ -352,7 +345,6 @@
struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
struct kgsl_drawobj_sync_event *event;
unsigned int id;
- char fence_str[128];
kref_get(&drawobj->refcount);
@@ -369,7 +361,8 @@
set_bit(event->id, &syncobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- drawobj_sync_fence_func, event);
+ drawobj_sync_fence_func, event,
+ event->fence_name, sizeof(event->fence_name));
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
@@ -377,7 +370,7 @@
clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
@@ -389,8 +382,7 @@
return ret;
}
- kgsl_dump_fence(event->handle, fence_str, sizeof(fence_str));
- trace_syncpoint_fence(syncobj, fence_str);
+ trace_syncpoint_fence(syncobj, event->fence_name);
return 0;
}
@@ -457,7 +449,7 @@
if (ret) {
clear_bit(event->id, &syncobj->pending);
- drawobj_put(drawobj);
+ kgsl_drawobj_put(drawobj);
} else {
trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
}
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index 5ec98ed..06eef7f 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -105,6 +105,8 @@
unsigned long timeout_jiffies;
};
+#define KGSL_FENCE_NAME_LEN 74
+
/**
* struct kgsl_drawobj_sync_event
* @id: identifier (position within the pending bitmap)
@@ -114,6 +116,7 @@
* register this event
* @timestamp: Pending timestamp for the event
* @handle: Pointer to a sync fence handle
+ * @fence_name: A fence name string to describe the fence
* @device: Pointer to the KGSL device
*/
struct kgsl_drawobj_sync_event {
@@ -123,6 +126,7 @@
struct kgsl_context *context;
unsigned int timestamp;
struct kgsl_sync_fence_cb *handle;
+ char fence_name[KGSL_FENCE_NAME_LEN];
struct kgsl_device *device;
};
@@ -206,6 +210,8 @@
void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+void kgsl_drawobj_destroy_object(struct kref *kref);
+
static inline bool kgsl_drawobj_events_pending(
struct kgsl_drawobj_sync *syncobj)
{
@@ -220,4 +226,11 @@
return test_bit(bit, &syncobj->pending);
}
+
+static inline void kgsl_drawobj_put(struct kgsl_drawobj *drawobj)
+{
+ if (drawobj)
+ kref_put(&drawobj->refcount, kgsl_drawobj_destroy_object);
+}
+
#endif /* __KGSL_DRAWOBJ_H */
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 2e9f108..0c821cd 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -748,44 +748,49 @@
{
struct gmu_device *gmu = data;
struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
- struct kgsl_hfi *hfi = &gmu->hfi;
unsigned int status = 0;
- if (irq == gmu->gmu_interrupt_num) {
- adreno_read_gmureg(ADRENO_DEVICE(device),
- ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
- &status);
+ adreno_read_gmureg(ADRENO_DEVICE(device),
+ ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
+ adreno_write_gmureg(ADRENO_DEVICE(device),
+ ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
- /* Ignore GMU_INT_RSCC_COMP interrupts */
- if (status & GMU_INT_WDOG_BITE)
- dev_err_ratelimited(&gmu->pdev->dev,
- "GMU watchdog expired interrupt\n");
- if (status & GMU_INT_DBD_WAKEUP)
- dev_err_ratelimited(&gmu->pdev->dev,
- "GMU doorbell interrupt received\n");
- if (status & GMU_INT_HOST_AHB_BUS_ERR)
- dev_err_ratelimited(&gmu->pdev->dev,
- "AHB bus error interrupt received\n");
+	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD_WAKEUP interrupts */
+ if (status & GMU_INT_WDOG_BITE)
+ dev_err_ratelimited(&gmu->pdev->dev,
+ "GMU watchdog expired interrupt received\n");
+ if (status & GMU_INT_HOST_AHB_BUS_ERR)
+ dev_err_ratelimited(&gmu->pdev->dev,
+ "AHB bus error interrupt received\n");
+ if (status & ~GMU_AO_INT_MASK)
+ dev_err_ratelimited(&gmu->pdev->dev,
+ "Unhandled GMU interrupts 0x%lx\n",
+ status & ~GMU_AO_INT_MASK);
- adreno_write_gmureg(ADRENO_DEVICE(device),
- ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
- status);
- } else {
- adreno_read_gmureg(ADRENO_DEVICE(device),
- ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
- &status);
- adreno_write_gmureg(ADRENO_DEVICE(device),
- ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
- status);
+ return IRQ_HANDLED;
+}
- if (status & HFI_IRQ_MASK) {
- if (status & HFI_IRQ_MSGQ_MASK)
- tasklet_hi_schedule(&hfi->tasklet);
- } else
- dev_err_ratelimited(&gmu->pdev->dev,
- "Unhandled GMU interrupts %x\n",
- status);
- }
+static irqreturn_t hfi_irq_handler(int irq, void *data)
+{
+ struct kgsl_hfi *hfi = data;
+ struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
+ struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+ unsigned int status = 0;
+
+ adreno_read_gmureg(ADRENO_DEVICE(device),
+ ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
+ adreno_write_gmureg(ADRENO_DEVICE(device),
+ ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & HFI_IRQ_MSGQ_MASK)
+ tasklet_hi_schedule(&hfi->tasklet);
+ if (status & HFI_IRQ_CM3_FAULT_MASK)
+ dev_err_ratelimited(&gmu->pdev->dev,
+ "GMU CM3 fault interrupt received\n");
+ if (status & ~HFI_IRQ_MASK)
+ dev_err_ratelimited(&gmu->pdev->dev,
+ "Unhandled HFI interrupts 0x%lx\n",
+ status & ~HFI_IRQ_MASK);
return IRQ_HANDLED;
}
@@ -978,6 +983,82 @@
return 0;
}
+static int gmu_irq_probe(struct gmu_device *gmu)
+{
+ int ret;
+ struct kgsl_hfi *hfi = &gmu->hfi;
+
+ hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
+ "kgsl_hfi_irq");
+ ret = devm_request_irq(&gmu->pdev->dev,
+ hfi->hfi_interrupt_num,
+ hfi_irq_handler, IRQF_TRIGGER_HIGH,
+ "HFI", hfi);
+ if (ret) {
+ dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
+ hfi->hfi_interrupt_num, ret);
+ return ret;
+ }
+
+ gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
+ "kgsl_gmu_irq");
+ ret = devm_request_irq(&gmu->pdev->dev,
+ gmu->gmu_interrupt_num,
+ gmu_irq_handler, IRQF_TRIGGER_HIGH,
+ "GMU", gmu);
+ if (ret)
+ dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
+ gmu->gmu_interrupt_num, ret);
+
+ return ret;
+}
+
+static void gmu_irq_enable(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct gmu_device *gmu = &device->gmu;
+ struct kgsl_hfi *hfi = &gmu->hfi;
+
+ /* Clear any pending IRQs before unmasking on GMU */
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
+ 0xFFFFFFFF);
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+ 0xFFFFFFFF);
+
+ /* Unmask needed IRQs on GMU */
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+ (unsigned int) ~HFI_IRQ_MASK);
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+ (unsigned int) ~GMU_AO_INT_MASK);
+
+ /* Enable all IRQs on host */
+ enable_irq(hfi->hfi_interrupt_num);
+ enable_irq(gmu->gmu_interrupt_num);
+}
+
+static void gmu_irq_disable(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct gmu_device *gmu = &device->gmu;
+ struct kgsl_hfi *hfi = &gmu->hfi;
+
+ /* Disable all IRQs on host */
+ disable_irq(gmu->gmu_interrupt_num);
+ disable_irq(hfi->hfi_interrupt_num);
+
+ /* Mask all IRQs on GMU */
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+ 0xFFFFFFFF);
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+ 0xFFFFFFFF);
+
+ /* Clear any pending IRQs before disabling */
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+ 0xFFFFFFFF);
+ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
+ 0xFFFFFFFF);
+}
+
/* Do not access any GMU registers in GMU probe function */
int gmu_probe(struct kgsl_device *device)
{
@@ -986,6 +1067,7 @@
struct gmu_memdesc *mem_addr = NULL;
struct kgsl_hfi *hfi = &gmu->hfi;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i = 0, ret = -ENXIO;
node = of_find_compatible_node(device->pdev->dev.of_node,
@@ -1023,32 +1105,13 @@
gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;
- /* Initialize HFI GMU interrupts */
- hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
- "kgsl_hfi_irq");
- ret = devm_request_irq(&gmu->pdev->dev,
- hfi->hfi_interrupt_num,
- gmu_irq_handler, IRQF_TRIGGER_HIGH,
- "GMU", gmu);
- if (ret) {
- dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
- hfi->hfi_interrupt_num, ret);
+ /* Initialize HFI and GMU interrupts */
+ ret = gmu_irq_probe(gmu);
+ if (ret)
goto error;
- }
-
- gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
- "kgsl_gmu_irq");
- ret = devm_request_irq(&gmu->pdev->dev,
- gmu->gmu_interrupt_num,
- gmu_irq_handler, IRQF_TRIGGER_HIGH,
- "GMU", gmu);
- if (ret) {
- dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
- gmu->gmu_interrupt_num, ret);
- goto error;
- }
/* Don't enable GMU interrupts until GMU started */
+ /* We cannot use gmu_irq_disable because it writes registers */
disable_irq(gmu->gmu_interrupt_num);
disable_irq(hfi->hfi_interrupt_num);
@@ -1086,7 +1149,17 @@
hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
- gmu->idle_level = GPU_HW_ACTIVE;
+ /* Set up GMU idle states */
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
+ gmu->idle_level = GPU_HW_MIN_VOLT;
+ else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
+ gmu->idle_level = GPU_HW_NAP;
+ else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
+ gmu->idle_level = GPU_HW_IFPC;
+ else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
+ gmu->idle_level = GPU_HW_SPTP_PC;
+ else
+ gmu->idle_level = GPU_HW_ACTIVE;
return 0;
@@ -1142,7 +1215,6 @@
while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
clk_disable_unprepare(gmu->clks[j]);
- gmu->clks[j] = NULL;
j++;
}
@@ -1189,7 +1261,6 @@
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
- struct kgsl_hfi *hfi = &gmu->hfi;
int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
if (!kgsl_gmu_isenabled(device))
@@ -1224,8 +1295,7 @@
if (ret)
goto error_bus;
- enable_irq(hfi->hfi_interrupt_num);
- enable_irq(gmu->gmu_interrupt_num);
+ gmu_irq_enable(device);
ret = hfi_start(gmu, GMU_COLD_BOOT);
if (ret)
@@ -1243,8 +1313,7 @@
if (ret)
goto error_clks;
- enable_irq(hfi->hfi_interrupt_num);
- enable_irq(gmu->gmu_interrupt_num);
+ gmu_irq_enable(device);
ret = hfi_start(gmu, GMU_WARM_BOOT);
if (ret)
@@ -1282,8 +1351,7 @@
error_gpu:
hfi_stop(gmu);
- disable_irq(gmu->gmu_interrupt_num);
- disable_irq(hfi->hfi_interrupt_num);
+ gmu_irq_disable(device);
if (device->state == KGSL_STATE_INIT ||
device->state == KGSL_STATE_SUSPEND) {
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
@@ -1301,31 +1369,43 @@
return ret;
}
+#define GMU_IDLE_TIMEOUT 10 /* ms */
+
/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
struct gmu_device *gmu = &device->gmu;
- struct kgsl_hfi *hfi = &gmu->hfi;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ unsigned long t;
+ bool idle = false;
if (!test_bit(GMU_CLK_ON, &gmu->flags))
return;
- if (gpudev->wait_for_gmu_idle &&
- !gpudev->wait_for_gmu_idle(adreno_dev)) {
- dev_err(&gmu->pdev->dev, "Failure to stop gmu");
- return;
+ if (gpudev->hw_isidle) {
+ t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
+ while (!time_after(jiffies, t)) {
+ if (gpudev->hw_isidle(adreno_dev)) {
+ idle = true;
+ break;
+ }
+ cpu_relax();
+ }
}
gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
+ if (!idle || (gpudev->wait_for_gmu_idle &&
+ gpudev->wait_for_gmu_idle(adreno_dev))) {
+ dev_err(&gmu->pdev->dev, "Failure to stop GMU");
+ return;
+ }
+
/* Pending messages in all queues are abandoned */
hfi_stop(gmu);
clear_bit(GMU_HFI_ON, &gmu->flags);
-
- disable_irq(gmu->gmu_interrupt_num);
- disable_irq(hfi->hfi_interrupt_num);
+ gmu_irq_disable(device);
gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
gmu_disable_clks(gmu);
@@ -1340,7 +1420,7 @@
{
struct gmu_device *gmu = &device->gmu;
struct kgsl_hfi *hfi = &gmu->hfi;
- int i;
+ int i = 0;
if (!device->gmu.pdev)
return;
@@ -1348,16 +1428,20 @@
tasklet_kill(&hfi->tasklet);
gmu_stop(device);
+ gmu_irq_disable(device);
+
+ while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
+ gmu->clks[i] = NULL;
+ i++;
+ }
if (gmu->gmu_interrupt_num) {
- disable_irq(gmu->gmu_interrupt_num);
devm_free_irq(&gmu->pdev->dev,
gmu->gmu_interrupt_num, gmu);
gmu->gmu_interrupt_num = 0;
}
if (hfi->hfi_interrupt_num) {
- disable_irq(hfi->hfi_interrupt_num);
devm_free_irq(&gmu->pdev->dev,
hfi->hfi_interrupt_num, gmu);
hfi->hfi_interrupt_num = 0;
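
gmu_stop() above open-codes a bounded poll: spin on hw_isidle() until it reports idle or GMU_IDLE_TIMEOUT elapses. The general shape of that loop, extracted as a sketch; the extra check after the deadline guards against a false timeout when the polling thread was scheduled out past the deadline before the last test:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Poll cond(arg) until it holds or a millisecond deadline passes. */
static bool poll_until(bool (*cond)(void *), void *arg, unsigned int ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	while (!time_after(jiffies, deadline)) {
		if (cond(arg))
			return true;
		cpu_relax();
	}
	/* Final check: we may have been preempted past the deadline */
	return cond(arg);
}
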
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index ac2c151..7055eb7 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -26,7 +26,6 @@
#define GMU_INT_HOST_AHB_BUS_ERR BIT(5)
#define GMU_AO_INT_MASK \
(GMU_INT_WDOG_BITE | \
- GMU_INT_DBD_WAKEUP | \
GMU_INT_HOST_AHB_BUS_ERR)
#define MAX_GMUFW_SIZE 0x2000 /* in dwords */
@@ -147,7 +146,7 @@
GPU_HW_NAP = 0x4,
GPU_HW_MIN_VOLT = 0x5,
GPU_HW_MIN_DDR = 0x6,
- GPU_HW_SLUMBER = 0xF
+ GPU_HW_SLUMBER = 0x7
};
/**
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 39b513e..83abec4 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -38,16 +38,15 @@
#define GMU_QUEUE_START_ADDR(hfi_mem, i) \
((hfi_mem)->gmuaddr + HFI_QUEUE_OFFSET(i))
-#define HFI_IRQ_MSGQ_MASK 0x1
-#define HFI_IRQ_DBGQ_MASK 0x2
-#define HFI_IRQ_BLOCKED_MSG_MASK 0x4
-#define HFI_IRQ_GMU_ERR_MASK 0xFF0000
-#define HFI_IRQ_OOB_MASK 0xFF000000
-#define HFI_IRQ_MASK (HFI_IRQ_MSGQ_MASK |\
- HFI_IRQ_DBGQ_MASK |\
- HFI_IRQ_BLOCKED_MSG_MASK |\
- HFI_IRQ_GMU_ERR_MASK |\
- HFI_IRQ_OOB_MASK)
+#define HFI_IRQ_MSGQ_MASK BIT(0)
+#define HFI_IRQ_DBGQ_MASK BIT(1)
+#define HFI_IRQ_BLOCKED_MSG_MASK BIT(2)
+#define HFI_IRQ_CM3_FAULT_MASK BIT(23)
+#define HFI_IRQ_GMU_ERR_MASK GENMASK(22, 16)
+#define HFI_IRQ_OOB_MASK GENMASK(31, 24)
+#define HFI_IRQ_MASK (HFI_IRQ_MSGQ_MASK |\
+ HFI_IRQ_CM3_FAULT_MASK)
+
/**
* struct hfi_queue_table_header - HFI queue table structure
* @version: HFI protocol version
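
The rewritten HFI mask definitions change more than notation: GENMASK(22, 16) is 0x7F0000, not the old 0xFF0000, because bit 23 is carved out as the CM3 fault interrupt, and HFI_IRQ_MASK now covers only the message-queue and CM3-fault bits. The expansions can be checked at compile time; a userspace restatement of the macros (assuming 64-bit unsigned long, as on arm64):

#include <stdint.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

_Static_assert(GENMASK(22, 16) == 0x7F0000UL,
	       "GMU_ERR field is bits 22..16; bit 23 is now the CM3 fault");
_Static_assert(GENMASK(31, 24) == 0xFF000000UL,
	       "OOB field is bits 31..24");
_Static_assert((BIT(0) | BIT(23)) == 0x800001UL,
	       "HFI_IRQ_MASK covers only MSGQ and CM3 fault");
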
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 0325db8..938c96d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -797,6 +797,7 @@
int write;
struct kgsl_device *device;
struct adreno_device *adreno_dev;
+ struct adreno_gpudev *gpudev;
unsigned int no_page_fault_log = 0;
unsigned int curr_context_id = 0;
struct kgsl_context *context;
@@ -813,6 +814,7 @@
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
device = KGSL_MMU_DEVICE(mmu);
adreno_dev = ADRENO_DEVICE(device);
+ gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (pt->name == KGSL_MMU_SECURE_PT)
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
@@ -886,6 +888,16 @@
ctx->name, ptbase, contextidr,
write ? "write" : "read", fault_type);
+ if (gpudev->iommu_fault_block) {
+ unsigned int fsynr1;
+
+ fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
+ KGSL_MEM_CRIT(ctx->kgsldev,
+ "FAULTING BLOCK: %s\n",
+ gpudev->iommu_fault_block(adreno_dev,
+ fsynr1));
+ }
+
/* Don't print the debug if this is a permissions fault */
if (!(flags & IOMMU_FAULT_PERMISSION)) {
_check_if_freed(ctx, addr, ptname);
@@ -1596,6 +1608,8 @@
ret = PTR_ERR(mmu->defaultpagetable);
mmu->defaultpagetable = NULL;
return ret;
+ } else if (mmu->defaultpagetable == NULL) {
+ return -ENOMEM;
}
}
@@ -2586,7 +2600,7 @@
static const struct {
char *feature;
- int bit;
+ unsigned long bit;
} kgsl_iommu_features[] = {
{ "qcom,retention", KGSL_MMU_RETENTION },
{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 4d38794..a9a3c94 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -67,7 +67,9 @@
"isense_clk",
"rbcpr_clk",
"iref_clk",
- "gmu_clk"
+ "gmu_clk",
+ "ahb_clk",
+ "cxo_clk"
};
static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
@@ -2621,6 +2623,7 @@
return -EBUSY;
}
+ device->ftbl->stop_fault_timer(device);
kgsl_pwrscale_midframe_timer_cancel(device);
/*
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 62ee597..6b22fd4 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -25,7 +25,7 @@
#define KGSL_PWR_ON 0xFFFF
-#define KGSL_MAX_CLKS 15
+#define KGSL_MAX_CLKS 17
#define KGSL_MAX_REGULATORS 2
#define KGSL_MAX_PWRLEVELS 10
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 07a54d9..7636a42 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -927,8 +927,7 @@
&data->bin.ctxt_aware_target_pwrlevel))
data->bin.ctxt_aware_target_pwrlevel = 1;
- if ((data->bin.ctxt_aware_target_pwrlevel < 0) ||
- (data->bin.ctxt_aware_target_pwrlevel >
+ if ((data->bin.ctxt_aware_target_pwrlevel >
pwr->num_pwrlevels))
data->bin.ctxt_aware_target_pwrlevel = 1;
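
The dropped comparison above was dead code: ctxt_aware_target_pwrlevel is an unsigned field, so "< 0" can never hold, and GCC flags the test under -Wtype-limits. A minimal reproduction:

#include <stdio.h>

int main(void)
{
	unsigned int level = 0;

	/*
	 * Always false for an unsigned type; gcc -Wtype-limits warns:
	 * "comparison of unsigned expression < 0 is always false".
	 */
	if (level < 0)
		puts("unreachable");
	else
		puts("the < 0 branch can never run");
	return 0;
}
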
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 10b37ae..dd41e4e 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -131,8 +131,9 @@
if (align > 32)
align = 32;
- memdesc->flags &= ~KGSL_MEMALIGN_MASK;
- memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
+ memdesc->flags &= ~(uint64_t)KGSL_MEMALIGN_MASK;
+ memdesc->flags |= (uint64_t)((align << KGSL_MEMALIGN_SHIFT) &
+ KGSL_MEMALIGN_MASK);
return 0;
}
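
The (uint64_t) casts added here and in the kgsl.c hunks above guard against C's usual arithmetic conversions: when a mask expands to a 32-bit unsigned constant, ~mask is computed in 32 bits and then zero-extends, so flags &= ~mask silently clears bits 32-63 of a 64-bit flags word. A small demonstration with a hypothetical mask value:

#include <stdint.h>
#include <stdio.h>

#define MASK 0x00FF0000u	/* hypothetical 32-bit unsigned mask */

int main(void)
{
	uint64_t flags = 0xDEAD00000000ULL | 0x00AB0000u;

	uint64_t bad  = flags & ~MASK;		 /* ~MASK zero-extends to 0x00000000FF00FFFF */
	uint64_t good = flags & ~(uint64_t)MASK; /* keeps bits 32..63 intact */

	printf("bad:  0x%016llx\n", (unsigned long long)bad);	/* upper bits lost */
	printf("good: 0x%016llx\n", (unsigned long long)good);
	return 0;
}
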
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index d2ff8f1..340a7db 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -225,6 +225,7 @@
#define SNAPSHOT_DEBUG_CP_ROQ 10
#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
#define SNAPSHOT_DEBUG_CP_MERCIU 12
+#define SNAPSHOT_DEBUG_SQE_VERSION 14
struct kgsl_snapshot_debug {
int type; /* Type identifier for the attached data */
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 3b57b73..973a2ff 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -152,6 +152,17 @@
return ret;
}
+/* Only to be used if creating a related event failed */
+static void kgsl_sync_cancel(struct kgsl_sync_fence *kfence)
+{
+ spin_lock(&kfence->parent->lock);
+ if (!list_empty(&kfence->child_list)) {
+ list_del_init(&kfence->child_list);
+ fence_put(&kfence->fence);
+ }
+ spin_unlock(&kfence->parent->lock);
+}
+
/**
* kgsl_add_fence_event - Create a new fence event
* @device - KGSL device to create the event on
@@ -235,6 +246,7 @@
put_unused_fd(priv.fence_fd);
if (kfence) {
+ kgsl_sync_cancel(kfence);
/*
* Put the refcount of sync file. This will release
* kfence->fence as well.
@@ -366,7 +378,7 @@
list_for_each_entry_safe(kfence, next, &ktimeline->child_list_head,
child_list) {
if (fence_is_signaled_locked(&kfence->fence)) {
- list_del(&kfence->child_list);
+ list_del_init(&kfence->child_list);
fence_put(&kfence->fence);
}
}
@@ -419,8 +431,27 @@
kfree(kcb);
}
+static void kgsl_get_fence_name(struct fence *fence,
+ char *fence_name, int name_len)
+{
+ char *ptr = fence_name;
+ char *last = fence_name + name_len;
+
+ ptr += snprintf(ptr, last - ptr, "%s %s",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence));
+
+ if ((ptr + 2) >= last)
+ return;
+
+ if (fence->ops->fence_value_str) {
+ ptr += snprintf(ptr, last - ptr, ": ");
+ fence->ops->fence_value_str(fence, ptr, last - ptr);
+ }
+}
+
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv)
+ void (*func)(void *priv), void *priv, char *fence_name, int name_len)
{
struct kgsl_sync_fence_cb *kcb;
struct fence *fence;
@@ -441,13 +472,16 @@
kcb->priv = priv;
kcb->func = func;
+ if (fence_name)
+ kgsl_get_fence_name(fence, fence_name, name_len);
+
/* if status then error or signaled */
status = fence_add_callback(fence, &kcb->fence_cb,
kgsl_sync_fence_callback);
if (status) {
kfree(kcb);
- if (fence_is_signaled(fence))
+ if (!fence_is_signaled(fence))
kcb = ERR_PTR(status);
else
kcb = NULL;
@@ -777,43 +811,3 @@
.release = kgsl_syncsource_fence_release,
};
-void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
- char *fence_str, int len)
-{
- struct fence *fence;
- char *ptr = fence_str;
- char *last = fence_str + len;
-
- if (!handle || !handle->fence) {
- snprintf(fence_str, len, "NULL");
- return;
- }
-
- fence = handle->fence;
-
- ptr += snprintf(ptr, last - ptr, "%s %s",
- fence->ops->get_timeline_name(fence),
- fence->ops->get_driver_name(fence));
- if (ptr >= last)
- return;
-
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = !!strlen(value);
-
- if (success) {
- ptr += snprintf(ptr, last - ptr, ": %s", value);
- if (ptr >= last)
- return;
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
- ptr += snprintf(ptr, last - ptr, " / %s", value);
- }
- }
-}
-
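
kgsl_get_fence_name() above chains snprintf() calls by advancing a pointer, which carries the usual caveat: snprintf() returns the length the output would have had, so on truncation the pointer can land past the end of the buffer. That is exactly what the (ptr + 2) >= last check catches before the second write. A generic bounded-append sketch that clamps the position instead:

#include <stdarg.h>
#include <stdio.h>

/*
 * Append to buf[0..size) at *pos, clamping *pos so that later calls
 * never compute a negative remaining length after a truncation.
 */
static void bounded_append(char *buf, size_t size, size_t *pos,
			   const char *fmt, ...)
{
	va_list ap;
	int n;

	if (*pos >= size)
		return;

	va_start(ap, fmt);
	n = vsnprintf(buf + *pos, size - *pos, fmt, ap);
	va_end(ap);

	if (n < 0)
		return;
	*pos += (size_t)n;
	if (*pos > size)
		*pos = size;	/* clamp after truncation */
}
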
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index dc84c54..99fe0e1 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -91,7 +91,8 @@
void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline);
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv);
+ void (*func)(void *priv), void *priv,
+ char *fence_name, int name_len);
int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
@@ -109,8 +110,8 @@
void kgsl_syncsource_cleanup(struct kgsl_process_private *private,
struct kgsl_syncsource *syncsource);
-void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
- char *fence_str, int len);
+void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event,
+ char *fence_str, int len);
#else
static inline int kgsl_add_fence_event(struct kgsl_device *device,
@@ -134,8 +135,10 @@
{
}
-static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv)
+
+static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
+ void (*func)(void *priv), void *priv,
+ char *fence_name, int name_len)
{
return NULL;
}
@@ -185,8 +188,8 @@
}
-void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
- char *fence_str, int len)
+void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event,
+ char *fence_str, int len)
{
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c70..a5dd7e6 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -728,7 +728,6 @@
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1984,7 +1983,6 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9845189..da93077 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -318,8 +318,11 @@
#define USB_VENDOR_ID_DMI 0x0c0b
#define USB_DEVICE_ID_DMI_ENC 0x5fab
-#define USB_VENDOR_ID_DRAGONRISE 0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -365,6 +368,9 @@
#define USB_VENDOR_ID_FLATFROG 0x25b5
#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
+#define USB_VENDOR_ID_FUTABA 0x0547
+#define USB_DEVICE_ID_LED_DISPLAY 0x7000
+
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
@@ -722,7 +728,6 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -1037,6 +1042,10 @@
#define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH 0x0500
#define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET 0x0502
+#define USB_VENDOR_ID_WEIDA 0x2575
+#define USB_DEVICE_ID_WEIDA_8752 0xC300
+#define USB_DEVICE_ID_WEIDA_8755 0xC301
+
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
#define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392..ba02667 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -282,8 +282,6 @@
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
- .driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f4..89e9032 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
+ unsigned long initial_quirks; /* initial quirks state */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@
u8 *buf;
/*
- * Only fetch the feature report if initial reports are not already
- * been retrieved. Currently this is only done for Windows 8 touch
- * devices.
+ * Do not fetch the feature report if the device has been explicitly
+ * marked as non-capable.
*/
- if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
- return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -842,7 +840,9 @@
if (!td->mtclass.export_all_inputs &&
field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
- field->application != HID_DG_TOUCHPAD)
+ field->application != HID_DG_TOUCHPAD &&
+ field->application != HID_GD_KEYBOARD &&
+ field->application != HID_CP_CONSUMER_CONTROL)
return -1;
/*
@@ -1083,36 +1083,6 @@
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
- /*
- * This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
- * device.
- */
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
- hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
- /*
- * Handle special quirks for Windows 8 certified devices.
- */
- if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
- /*
- * Some multitouch screens do not like to be polled for input
- * reports. Fortunately, the Win8 spec says that all touches
- * should be sent during each report, making the initialization
- * of input reports unnecessary.
- *
- * In addition some touchpads do not behave well if we read
- * all feature reports from them. Instead we prevent
- * initial report fetching and then selectively fetch each
- * report we are interested in.
- */
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1106,39 @@
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+ /*
+ * Store the initial quirk state
+ */
+ td->initial_quirks = hdev->quirks;
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ /*
+ * This allows the driver to handle different input sensors
+	 * that emit events through different reports on the same HID
+ * device.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary. For Win7 devices, well, let's hope
+	 * they will still be happy (this is only a problem if a touch
+ * was already there while probing the device).
+ *
+ * In addition some touchpads do not behave well if we read
+ * all feature reports from them. Instead we prevent
+ * initial report fetching and then selectively fetch each
+ * report we are interested in.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
ret = hid_parse(hdev);
if (ret != 0)
return ret;
@@ -1204,8 +1207,11 @@
static void mt_remove(struct hid_device *hdev)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
+ hdev->quirks = td->initial_quirks;
}
/*
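
The hid-multitouch rework above boils down to a snapshot-and-restore discipline: record hdev->quirks before the driver ORs in its own bits, and restore the snapshot in mt_remove() so a later rebind does not inherit driver-imposed quirks. In miniature (types and values invented for illustration):

#include <stdio.h>

#define DRIVER_QUIRKS 0x6u	/* hypothetical driver-imposed quirk bits */

struct dev { unsigned long quirks; };
struct drv { unsigned long initial_quirks; };

static void probe(struct dev *d, struct drv *td)
{
	td->initial_quirks = d->quirks;	/* snapshot before modifying */
	d->quirks |= DRIVER_QUIRKS;
}

static void remove_drv(struct dev *d, struct drv *td)
{
	d->quirks = td->initial_quirks;	/* rebind starts from a clean state */
}

int main(void)
{
	struct dev d = { .quirks = 0x1 };
	struct drv td;

	probe(&d, &td);
	remove_drv(&d, &td);
	printf("quirks restored to 0x%lx\n", d.quirks);	/* prints 0x1 */
	return 0;
}
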
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 8f6c353..4ef7337 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -796,6 +796,12 @@
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+ 0x07bd), /* Microsoft Surface 3 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+ 0x0f01), /* MM7150 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b3ec4f2..b1bce80 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -41,6 +41,11 @@
#include <linux/i2c/i2c-hid.h>
+#include "../hid-ids.h"
+
+/* quirks to control the device */
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
@@ -143,6 +148,7 @@
char *argsbuf; /* Command arguments buffer */
unsigned long flags; /* device flags */
+ unsigned long quirks; /* Various quirks */
wait_queue_head_t wait; /* For waiting the interrupt */
struct gpio_desc *desc;
@@ -154,6 +160,39 @@
struct mutex reset_lock;
};
+static const struct i2c_hid_quirks {
+ __u16 idVendor;
+ __u16 idProduct;
+ __u32 quirks;
+} i2c_hid_quirks[] = {
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { 0, 0 }
+};
+
+/*
+ * i2c_hid_lookup_quirk: return any quirks associated with an I2C HID device
+ * @idVendor: the 16-bit vendor ID
+ * @idProduct: the 16-bit product ID
+ *
+ * Returns: a u32 quirks value.
+ */
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+{
+ u32 quirks = 0;
+ int n;
+
+ for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+ if (i2c_hid_quirks[n].idVendor == idVendor &&
+ (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+ i2c_hid_quirks[n].idProduct == idProduct))
+ quirks = i2c_hid_quirks[n].quirks;
+
+ return quirks;
+}
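For illustration, a minimal sketch of how this lookup resolves quirks (the second probe below uses hypothetical IDs with no table entry):

	u32 q;

	/* exact vendor/product match against the table above */
	q = i2c_hid_lookup_quirk(USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752);
	/* q == I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV */

	/* hypothetical device with no quirk entry */
	q = i2c_hid_lookup_quirk(0x1234, 0x5678);
	/* q == 0, so no special handling is applied */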
+
static int __i2c_hid_command(struct i2c_client *client,
const struct i2c_hid_cmd *command, u8 reportID,
u8 reportType, u8 *args, int args_len,
@@ -346,11 +385,27 @@
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+	 * Some devices require a command to wake up before power on.
+	 * The call will return an error (EREMOTEIO), but the device will be
+	 * triggered and activated. After that, it behaves like a normal device.
+ */
+ if (power_state == I2C_HID_PWR_ON &&
+ ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+ ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+
+ /* Device was already activated */
+ if (!ret)
+ goto set_pwr_exit;
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
+set_pwr_exit:
return ret;
}
@@ -1050,6 +1105,8 @@
client->name, hid->vendor, hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index cde060f..97dbb25 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -83,10 +83,14 @@
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
@@ -103,7 +107,6 @@
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a564..0c535d0 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2017,6 +2017,14 @@
wacom_update_name(wacom, wireless ? " (WL)" : "");
+	/* pen-only Bamboo models support neither touch nor pad */
+ if ((features->type == BAMBOO_PEN) &&
+ ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
+ (features->device_type & WACOM_DEVICETYPE_PAD))) {
+ error = -ENODEV;
+ goto fail;
+ }
+
error = wacom_add_shared_data(hdev);
if (error)
goto fail;
@@ -2064,14 +2072,6 @@
goto fail_quirks;
}
- /* pen only Bamboo neither support touch nor pad */
- if ((features->type == BAMBOO_PEN) &&
- ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
- (features->device_type & WACOM_DEVICETYPE_PAD))) {
- error = -ENODEV;
- goto fail_quirks;
- }
-
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
error = hid_hw_open(hdev);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 623be90..0e07a76 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2896,6 +2896,9 @@
{
struct wacom_features *features = &wacom_wac->features;
+ if ((features->type == HID_GENERIC) && features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
if (!(features->device_type & WACOM_DEVICETYPE_PAD))
return -ENODEV;
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index 33b760f..d883483 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -392,78 +392,78 @@
/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
- {1758, -40},
- {1742, -35},
- {1719, -30},
- {1691, -25},
- {1654, -20},
- {1608, -15},
- {1551, -10},
- {1483, -5},
+ {1758, -40000},
+ {1742, -35000},
+ {1719, -30000},
+ {1691, -25000},
+ {1654, -20000},
+ {1608, -15000},
+ {1551, -10000},
+ {1483, -5000},
{1404, 0},
- {1315, 5},
- {1218, 10},
- {1114, 15},
- {1007, 20},
- {900, 25},
- {795, 30},
- {696, 35},
- {605, 40},
- {522, 45},
- {448, 50},
- {383, 55},
- {327, 60},
- {278, 65},
- {237, 70},
- {202, 75},
- {172, 80},
- {146, 85},
- {125, 90},
- {107, 95},
- {92, 100},
- {79, 105},
- {68, 110},
- {59, 115},
- {51, 120},
- {44, 125}
+ {1315, 5000},
+ {1218, 10000},
+ {1114, 15000},
+ {1007, 20000},
+ {900, 25000},
+ {795, 30000},
+ {696, 35000},
+ {605, 40000},
+ {522, 45000},
+ {448, 50000},
+ {383, 55000},
+ {327, 60000},
+ {278, 65000},
+ {237, 70000},
+ {202, 75000},
+ {172, 80000},
+ {146, 85000},
+ {125, 90000},
+ {107, 95000},
+ {92, 100000},
+ {79, 105000},
+ {68, 110000},
+ {59, 115000},
+ {51, 120000},
+ {44, 125000}
};
/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
- {1738, -40},
- {1714, -35},
- {1682, -30},
- {1641, -25},
- {1589, -20},
- {1526, -15},
- {1451, -10},
- {1363, -5},
+ {1738, -40000},
+ {1714, -35000},
+ {1682, -30000},
+ {1641, -25000},
+ {1589, -20000},
+ {1526, -15000},
+ {1451, -10000},
+ {1363, -5000},
{1266, 0},
- {1159, 5},
- {1048, 10},
- {936, 15},
- {825, 20},
- {720, 25},
- {622, 30},
- {533, 35},
- {454, 40},
- {385, 45},
- {326, 50},
- {275, 55},
- {232, 60},
- {195, 65},
- {165, 70},
- {139, 75},
- {118, 80},
- {100, 85},
- {85, 90},
- {73, 95},
- {62, 100},
- {53, 105},
- {46, 110},
- {40, 115},
- {34, 120},
- {30, 125}
+ {1159, 5000},
+ {1048, 10000},
+ {936, 15000},
+ {825, 20000},
+ {720, 25000},
+ {622, 30000},
+ {533, 35000},
+ {454, 40000},
+ {385, 45000},
+ {326, 50000},
+ {275, 55000},
+ {232, 60000},
+ {195, 65000},
+ {165, 70000},
+ {139, 75000},
+ {118, 80000},
+ {100, 85000},
+ {85, 90000},
+ {73, 95000},
+ {62, 100000},
+ {53, 105000},
+ {46, 110000},
+ {40, 115000},
+ {34, 120000},
+ {30, 125000}
};
static const struct qpnp_vadc_map_pt adcmap_smb_batt_therm[] = {
@@ -595,40 +595,40 @@
* 1.875V reference.
*/
static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
- { 1831, -40 },
- { 1814, -35 },
- { 1791, -30 },
- { 1761, -25 },
- { 1723, -20 },
- { 1675, -15 },
- { 1616, -10 },
- { 1545, -5 },
+ { 1831, -40000 },
+ { 1814, -35000 },
+ { 1791, -30000 },
+ { 1761, -25000 },
+ { 1723, -20000 },
+ { 1675, -15000 },
+ { 1616, -10000 },
+ { 1545, -5000 },
{ 1463, 0 },
- { 1370, 5 },
- { 1268, 10 },
- { 1160, 15 },
- { 1049, 20 },
- { 937, 25 },
- { 828, 30 },
- { 726, 35 },
- { 630, 40 },
- { 544, 45 },
- { 467, 50 },
- { 399, 55 },
- { 340, 60 },
- { 290, 65 },
- { 247, 70 },
- { 209, 75 },
- { 179, 80 },
- { 153, 85 },
- { 130, 90 },
- { 112, 95 },
- { 96, 100 },
- { 82, 105 },
- { 71, 110 },
- { 62, 115 },
- { 53, 120 },
- { 46, 125 },
+ { 1370, 5000 },
+ { 1268, 10000 },
+ { 1160, 15000 },
+ { 1049, 20000 },
+ { 937, 25000 },
+ { 828, 30000 },
+ { 726, 35000 },
+ { 630, 40000 },
+ { 544, 45000 },
+ { 467, 50000 },
+ { 399, 55000 },
+ { 340, 60000 },
+ { 290, 65000 },
+ { 247, 70000 },
+ { 209, 75000 },
+ { 179, 80000 },
+ { 153, 85000 },
+ { 130, 90000 },
+ { 112, 95000 },
+ { 96, 100000 },
+ { 82, 105000 },
+ { 71, 110000 },
+ { 62, 115000 },
+ { 53, 120000 },
+ { 46, 125000 },
};
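With the map tables now in millidegrees Celsius, the lookup behaviour is unchanged: qpnp_adc_map_voltage_temp() walks the table and interpolates linearly between adjacent points. A minimal sketch of that interpolation, assuming the x (ADC code) and y (temperature) fields used by this driver:

	/* interpolate between pts[i] and pts[i + 1]; the table search and
	 * boundary clamping done by the real function are omitted */
	static int32_t map_interpolate(const struct qpnp_vadc_map_pt *pts,
				       int i, int32_t input)
	{
		return pts[i].y + ((pts[i + 1].y - pts[i].y) *
				   (input - pts[i].x)) /
				  (pts[i + 1].x - pts[i].x);
	}

For example, in the 100k table an input of 950 falls between {1007, 20000} and {900, 25000} and maps to about 22663, i.e. roughly 22.7 degC.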
static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 6cd63b2..4b5e206 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -2562,10 +2562,9 @@
return rc;
}
-static int qpnp_vadc_get_temp(struct thermal_zone_device *thermal,
- int *temp)
+static int qpnp_vadc_get_temp(void *data, int *temp)
{
- struct qpnp_vadc_thermal_data *vadc_therm = thermal->devdata;
+ struct qpnp_vadc_thermal_data *vadc_therm = data;
struct qpnp_vadc_chip *vadc = vadc_therm->vadc_dev;
struct qpnp_vadc_result result;
int rc = 0;
@@ -2583,7 +2582,7 @@
return rc;
}
-static struct thermal_zone_device_ops qpnp_vadc_thermal_ops = {
+static struct thermal_zone_of_device_ops qpnp_vadc_thermal_ops = {
.get_temp = qpnp_vadc_get_temp,
};
@@ -2612,9 +2611,11 @@
vadc->adc->adc_channels[i].name);
vadc->vadc_therm_chan[i].vadc_dev = vadc;
vadc->vadc_therm_chan[i].tz_dev =
- thermal_zone_device_register(name,
- 0, 0, &vadc->vadc_therm_chan[i],
- &qpnp_vadc_thermal_ops, NULL, 0, 0);
+ devm_thermal_zone_of_sensor_register(
+ vadc->dev,
+ vadc->vadc_therm_chan[i].vadc_channel,
+ &vadc->vadc_therm_chan[i],
+ &qpnp_vadc_thermal_ops);
if (IS_ERR(vadc->vadc_therm_chan[i].tz_dev)) {
pr_err("thermal device register failed.\n");
goto thermal_err_sens;
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 5d67089..af94ad7 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -22,5 +22,5 @@
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
-obj-$(CONFIG_CORESIGHT_SOURCE_DUMMY) += coresight-dummy.o
+obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
obj-$(CONFIG_CORESIGHT_REMOTE_ETM) += coresight-remote-etm.o
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index a2ce81a..8a57ed2 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/clk.h>
+#include <linux/amba/bus.h>
#include <linux/cpu_pm.h>
#include <linux/topology.h>
#include <linux/of.h>
@@ -379,7 +379,7 @@
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0) {
- ret = clk_prepare_enable(drvdata->clk);
+ ret = pm_runtime_get_sync(drvdata->dev);
-		if (ret)
+		if (ret < 0)
goto err1;
}
@@ -402,7 +402,7 @@
* adjusting its value.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
err1:
cti_trigin_gpio_disable(drvdata);
err0:
@@ -463,7 +463,7 @@
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0) {
- ret = clk_prepare_enable(drvdata->clk);
+ ret = pm_runtime_get_sync(drvdata->dev);
-		if (ret)
+		if (ret < 0)
goto err1;
}
@@ -485,7 +485,7 @@
* __cti_map_trigout so it is safe to check it against 0.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
err1:
cti_trigout_gpio_disable(drvdata);
err0:
@@ -563,7 +563,7 @@
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
if (drvdata->gpio_trigin->trig == trig)
cti_trigin_gpio_disable(drvdata);
@@ -632,7 +632,7 @@
* within the mutex lock region in addition to within the spinlock.
*/
if (drvdata->refcnt == 0)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
if (drvdata->gpio_trigout->trig == trig)
cti_trigout_gpio_disable(drvdata);
@@ -1388,34 +1388,29 @@
.notifier_call = cti_cpu_pm_callback,
};
-static int cti_probe(struct platform_device *pdev)
+static int cti_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
int trig;
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct cti_drvdata *drvdata;
- struct resource *res;
struct coresight_desc *desc;
struct device_node *cpu_node;
- pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Store the driver data pointer for use in exported functions */
- drvdata->dev = &pdev->dev;
- platform_set_drvdata(pdev, drvdata);
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cti-base");
- if (!res)
- return -ENODEV;
-
- drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
-	if (!drvdata->base)
-		return -ENOMEM;
+	if (IS_ERR(drvdata->base))
+		return PTR_ERR(drvdata->base);
@@ -1423,21 +1418,13 @@
mutex_init(&drvdata->mutex);
- drvdata->clk = devm_clk_get(dev, "core_clk");
- if (IS_ERR(drvdata->clk))
- return PTR_ERR(drvdata->clk);
-
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
drvdata->gpio_trigin = devm_kzalloc(dev, sizeof(struct cti_pctrl),
GFP_KERNEL);
if (!drvdata->gpio_trigin)
return -ENOMEM;
drvdata->gpio_trigin->trig = -1;
- ret = of_property_read_u32(pdev->dev.of_node,
+ ret = of_property_read_u32(adev->dev.of_node,
"qcom,cti-gpio-trigin", &trig);
if (!ret)
drvdata->gpio_trigin->trig = trig;
@@ -1450,7 +1437,7 @@
return -ENOMEM;
drvdata->gpio_trigout->trig = -1;
- ret = of_property_read_u32(pdev->dev.of_node,
+ ret = of_property_read_u32(adev->dev.of_node,
"qcom,cti-gpio-trigout", &trig);
if (!ret)
drvdata->gpio_trigout->trig = trig;
@@ -1458,7 +1445,7 @@
return ret;
drvdata->cpu = -1;
- cpu_node = of_parse_phandle(pdev->dev.of_node, "cpu", 0);
+ cpu_node = of_parse_phandle(adev->dev.of_node, "cpu", 0);
if (cpu_node) {
drvdata->cpu = pdata ? pdata->cpu : -1;
if (drvdata->cpu == -1) {
@@ -1468,7 +1455,7 @@
}
if (!cti_save_disable)
- drvdata->cti_save = of_property_read_bool(pdev->dev.of_node,
+ drvdata->cti_save = of_property_read_bool(adev->dev.of_node,
"qcom,cti-save");
if (drvdata->cti_save) {
drvdata->state = devm_kzalloc(dev, sizeof(struct cti_state),
@@ -1476,18 +1463,18 @@
if (!drvdata->state)
return -ENOMEM;
- drvdata->cti_hwclk = of_property_read_bool(pdev->dev.of_node,
+ drvdata->cti_hwclk = of_property_read_bool(adev->dev.of_node,
"qcom,cti-hwclk");
}
if (drvdata->cti_save && !drvdata->cti_hwclk) {
- ret = clk_prepare_enable(drvdata->clk);
+ ret = pm_runtime_get_sync(drvdata->dev);
-		if (ret)
+		if (ret < 0)
return ret;
}
mutex_lock(&cti_lock);
drvdata->cti.name = ((struct coresight_platform_data *)
- (pdev->dev.platform_data))->name;
+ (adev->dev.platform_data))->name;
list_add_tail(&drvdata->cti.link, &cti_list);
mutex_unlock(&cti_lock);
@@ -1497,8 +1484,8 @@
goto err;
}
desc->type = CORESIGHT_DEV_TYPE_NONE;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
desc->groups = cti_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
@@ -1511,56 +1498,35 @@
cpu_pm_register_notifier(&cti_cpu_pm_notifier);
registered++;
}
-
+ pm_runtime_put(&adev->dev);
dev_dbg(dev, "CTI initialized\n");
return 0;
err:
if (drvdata->cti_save && !drvdata->cti_hwclk)
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
return ret;
}
-static int cti_remove(struct platform_device *pdev)
-{
- struct cti_drvdata *drvdata = platform_get_drvdata(pdev);
-
- if (drvdata->cti_save) {
- registered--;
- if (!registered)
- cpu_pm_unregister_notifier(&cti_cpu_pm_notifier);
- }
- coresight_unregister(drvdata->csdev);
- if (drvdata->cti_save && !drvdata->cti_hwclk)
- clk_disable_unprepare(drvdata->clk);
- return 0;
-}
-
-static const struct of_device_id cti_match[] = {
- {.compatible = "arm,coresight-cti"},
- {}
+static struct amba_id cti_ids[] = {
+ {
+ .id = 0x0003b966,
+ .mask = 0x0003ffff,
+ .data = "CTI",
+ },
+ { 0, 0},
};
-static struct platform_driver cti_driver = {
- .probe = cti_probe,
- .remove = cti_remove,
- .driver = {
+static struct amba_driver cti_driver = {
+ .drv = {
.name = "coresight-cti",
.owner = THIS_MODULE,
- .of_match_table = cti_match,
+ .suppress_bind_attrs = true,
},
+ .probe = cti_probe,
+ .id_table = cti_ids,
};
-static int __init cti_init(void)
-{
- return platform_driver_register(&cti_driver);
-}
-module_init(cti_init);
-
-static void __exit cti_exit(void)
-{
- platform_driver_unregister(&cti_driver);
-}
-module_exit(cti_exit);
+builtin_amba_driver(cti_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight CTI driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 4fc5916..b04e8da 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,7 +62,7 @@
static bool etm4_arch_supported(u8 arch)
{
switch (arch) {
- case ETM_ARCH_V4:
+ case ETM_ARCH_MAJOR_V4:
break;
default:
return false;
@@ -482,7 +482,7 @@
 	 * TRCARCHMIN, bits[7:4] architecture minor version number
 	 * TRCARCHMAJ, bits[11:8] architecture major version number
*/
- drvdata->arch = BMVAL(etmidr1, 4, 11);
+ drvdata->arch = BMVAL(etmidr1, 8, 11);
/* maximum size of resources */
etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
@@ -528,8 +528,8 @@
else
drvdata->sysstall = false;
- /* NUMPROC, bits[30:28] the number of PEs available for tracing */
- drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
+ /* NUMPROC, bits[13:12, 30:28] the number of PEs available for trace */
+ drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
if (BMVAL(etmidr3, 31, 31))
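A worked example of the recombined NUMPROC field, assuming BMVAL(val, lsb, msb) extracts the inclusive bit range as elsewhere in this driver (the register value is hypothetical):

	u32 etmidr3 = 0x30001000;	/* hypothetical TRCIDR3 contents */
	u32 nr_pe;

	/* bits[13:12] give the two high result bits, bits[30:28] the three low */
	nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
	/* bits[13:12] = 0x1 and bits[30:28] = 0x3, so nr_pe = 0xb (11 PEs) */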
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index ba8d3f8..4e51ecdc 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,7 +143,7 @@
#define ETM_MAX_RES_SEL 16
#define ETM_MAX_SS_CMP 8
-#define ETM_ARCH_V4 0x40
+#define ETM_ARCH_MAJOR_V4 0x4
#define ETMv4_SYNC_MASK 0x1F
#define ETM_CYC_THRESHOLD_MASK 0xFFF
#define ETMv4_EVENT_MASK 0xFF
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 0bd8b78..98547a9 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -47,8 +47,6 @@
{
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
-
CS_UNLOCK(drvdata->base);
/*
@@ -85,7 +83,6 @@
CS_LOCK(drvdata->base);
- pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2d2abe3..833f10d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -447,7 +447,9 @@
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
- writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
+ writel_relaxed(((u64)drvdata->paddr >> 32) & 0xFF,
+ drvdata->base + TMC_DBAHI);
+
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
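A worked example of the new two-register address split (the buffer address is hypothetical); TMC_DBALO takes bits [31:0] and TMC_DBAHI the next eight bits of a 40-bit address:

	u64 paddr = 0x1234567890ULL;		/* hypothetical ETR buffer base */
	u32 dbalo = (u32)paddr;			/* 0x34567890 -> TMC_DBALO */
	u32 dbahi = (paddr >> 32) & 0xFF;	/* 0x12       -> TMC_DBAHI */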
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index f5018fc..10e8da4 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -436,8 +436,11 @@
if (ret)
drvdata->size = SZ_1M;
+ if (of_property_read_bool(np, "arm,sg-enable"))
+ drvdata->memtype = TMC_ETR_MEM_TYPE_SG;
+ else
+ drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
drvdata->mem_size = drvdata->size;
- drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
drvdata->mem_type = drvdata->memtype;
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
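On the device-tree side the new property would sit on the ETR node, roughly as below (node name and unit address are hypothetical):

	/*
	 *	tmc_etr: tmc@6048000 {
	 *		...
	 *		arm,sg-enable;	// selects TMC_ETR_MEM_TYPE_SG
	 *	};
	 */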
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index c96087d..5d2d087 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,10 +14,10 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
-#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -53,7 +53,6 @@
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
- struct clk *clk;
struct mutex lock;
bool enable;
uint32_t atid;
@@ -183,11 +182,6 @@
static int tpda_enable(struct coresight_device *csdev, int inport, int outport)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
mutex_lock(&drvdata->lock);
__tpda_enable(drvdata, inport);
@@ -221,8 +215,6 @@
drvdata->enable = false;
mutex_unlock(&drvdata->lock);
- clk_disable_unprepare(drvdata->clk);
-
dev_info(drvdata->dev, "TPDA inport %d disabled\n", inport);
}
@@ -653,31 +645,27 @@
drvdata->freq_ts = true;
}
-static int tpda_probe(struct platform_device *pdev)
+static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpda_drvdata *drvdata;
- struct resource *res;
struct coresight_desc *desc;
- pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &pdev->dev;
- platform_set_drvdata(pdev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpda-base");
- if (!res)
- return -ENODEV;
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
- drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (!drvdata->base)
return -ENOMEM;
@@ -687,22 +675,10 @@
if (ret)
return ret;
- drvdata->clk = devm_clk_get(dev, "core_clk");
- if (IS_ERR(drvdata->clk))
- return PTR_ERR(drvdata->clk);
-
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
if (!coresight_authstatus_enabled(drvdata->base))
goto err;
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
tpda_init_default_data(drvdata);
@@ -712,8 +688,8 @@
desc->type = CORESIGHT_DEV_TYPE_LINK;
desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc->ops = &tpda_cs_ops;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
desc->groups = tpda_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
@@ -722,44 +698,29 @@
dev_dbg(drvdata->dev, "TPDA initialized\n");
return 0;
err:
- clk_disable_unprepare(drvdata->clk);
return -EPERM;
}
-static int tpda_remove(struct platform_device *pdev)
-{
- struct tpda_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
-static const struct of_device_id tpda_match[] = {
- {.compatible = "qcom,coresight-tpda"},
- {}
+static struct amba_id tpda_ids[] = {
+ {
+ .id = 0x0003b969,
+ .mask = 0x0003ffff,
+ .data = "TPDA",
+ },
+ { 0, 0},
};
-static struct platform_driver tpda_driver = {
- .probe = tpda_probe,
- .remove = tpda_remove,
- .driver = {
+static struct amba_driver tpda_driver = {
+ .drv = {
.name = "coresight-tpda",
.owner = THIS_MODULE,
- .of_match_table = tpda_match,
+ .suppress_bind_attrs = true,
},
+ .probe = tpda_probe,
+ .id_table = tpda_ids,
};
-static int __init tpda_init(void)
-{
- return platform_driver_register(&tpda_driver);
-}
-module_init(tpda_init);
-
-static void __exit tpda_exit(void)
-{
- platform_driver_unregister(&tpda_driver);
-}
-module_exit(tpda_exit);
+builtin_amba_driver(tpda_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Aggregator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 673689c..36e3db2 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -13,11 +13,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
-#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -159,11 +158,6 @@
TPDM_SUPPORT_TYPE_NO,
};
-enum tpdm_cmb_mode {
- TPDM_CMB_MODE_CONTINUOUS,
- TPDM_CMB_MODE_TRACE_ON_CHANGE,
-};
-
enum tpdm_cmb_patt_bits {
TPDM_CMB_LSB,
TPDM_CMB_MSB,
@@ -234,7 +228,8 @@
};
struct cmb_dataset {
- enum tpdm_cmb_mode mode;
+ bool trace_mode;
+ uint32_t cycle_acc;
uint32_t patt_val[TPDM_CMB_PATT_CMP];
uint32_t patt_mask[TPDM_CMB_PATT_CMP];
bool patt_ts;
@@ -250,7 +245,6 @@
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
- struct clk *clk;
struct mutex lock;
bool enable;
bool clk_enable;
@@ -528,24 +522,18 @@
static void __tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
{
uint32_t val;
+ int i;
- tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
- TPDM_CMB_TPR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
- TPDM_CMB_TPMR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
- TPDM_CMB_TPR(TPDM_CMB_MSB));
- tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
- TPDM_CMB_TPMR(TPDM_CMB_MSB));
-
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
- TPDM_CMB_XPR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
- TPDM_CMB_XPMR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
- TPDM_CMB_XPR(TPDM_CMB_MSB));
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
- TPDM_CMB_XPMR(TPDM_CMB_MSB));
+ for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+ tpdm_writel(drvdata, drvdata->cmb->patt_val[i],
+ TPDM_CMB_TPR(i));
+ tpdm_writel(drvdata, drvdata->cmb->patt_mask[i],
+ TPDM_CMB_TPMR(i));
+ tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[i],
+ TPDM_CMB_XPR(i));
+ tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[i],
+ TPDM_CMB_XPMR(i));
+ }
val = tpdm_readl(drvdata, TPDM_CMB_TIER);
if (drvdata->cmb->patt_ts == true)
@@ -563,10 +551,13 @@
val = tpdm_readl(drvdata, TPDM_CMB_CR);
/* Set the flow control bit */
val = val & ~BIT(2);
- if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
- val = val & ~BIT(1);
- else
+ if (drvdata->cmb->trace_mode)
val = val | BIT(1);
+ else
+ val = val & ~BIT(1);
+
+ val = val & ~BM(8, 9);
+ val = val | BMVAL(drvdata->cmb->cycle_acc, 0, 1) << 8;
tpdm_writel(drvdata, val, TPDM_CMB_CR);
/* Set the enable bit */
val = val | BIT(0);
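A worked example of the cycle-accumulation update above, assuming BM(lsb, msb) builds an inclusive bitmask and BMVAL() extracts a bit range as used throughout this file (the starting register value is hypothetical):

	u32 val = 0x3ff;	/* hypothetical TPDM_CMB_CR contents */
	u32 cycle_acc = 2;

	val = val & ~BM(8, 9);				/* clear bits [9:8] */
	val = val | BMVAL(cycle_acc, 0, 1) << 8;	/* install the new value */
	/* val == 0x2ff: bits [9:8] now hold 0x2 */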
@@ -577,24 +568,18 @@
{
uint32_t val;
struct mcmb_dataset *mcmb = drvdata->cmb->mcmb;
+ int i;
- tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
- TPDM_CMB_TPR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
- TPDM_CMB_TPMR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
- TPDM_CMB_TPR(TPDM_CMB_MSB));
- tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
- TPDM_CMB_TPMR(TPDM_CMB_MSB));
-
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
- TPDM_CMB_XPR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
- TPDM_CMB_XPMR(TPDM_CMB_LSB));
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
- TPDM_CMB_XPR(TPDM_CMB_MSB));
- tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
- TPDM_CMB_XPMR(TPDM_CMB_MSB));
+ for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+ tpdm_writel(drvdata, drvdata->cmb->patt_val[i],
+ TPDM_CMB_TPR(i));
+ tpdm_writel(drvdata, drvdata->cmb->patt_mask[i],
+ TPDM_CMB_TPMR(i));
+ tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[i],
+ TPDM_CMB_XPR(i));
+ tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[i],
+ TPDM_CMB_XPMR(i));
+ }
val = tpdm_readl(drvdata, TPDM_CMB_TIER);
if (drvdata->cmb->patt_ts == true)
@@ -612,14 +597,17 @@
val = tpdm_readl(drvdata, TPDM_CMB_CR);
/* Set the flow control bit */
val = val & ~BIT(2);
- if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
- val = val & ~BIT(1);
- else
+ if (drvdata->cmb->trace_mode)
val = val | BIT(1);
+ else
+ val = val & ~BIT(1);
- val = val | (BMVAL(mcmb->mcmb_trig_lane, 0, 3) << 18);
-
- val = val | (mcmb->mcmb_lane_select << 10);
+ val = val & ~BM(8, 9);
+ val = val | BMVAL(drvdata->cmb->cycle_acc, 0, 1) << 8;
+ val = val & ~BM(18, 20);
+ val = val | (BMVAL(mcmb->mcmb_trig_lane, 0, 2) << 18);
+ val = val & ~BM(10, 17);
+ val = val | (BMVAL(mcmb->mcmb_lane_select, 0, 7) << 10);
tpdm_writel(drvdata, val, TPDM_CMB_CR);
/* Set the enable bit */
@@ -658,11 +646,6 @@
struct perf_event *event, u32 mode)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
mutex_lock(&drvdata->lock);
__tpdm_enable(drvdata);
@@ -722,7 +705,8 @@
if (test_bit(TPDM_DS_DSB, drvdata->enable_ds))
__tpdm_disable_dsb(drvdata);
- if (test_bit(TPDM_DS_CMB, drvdata->enable_ds))
+ if (test_bit(TPDM_DS_CMB, drvdata->enable_ds) ||
+ test_bit(TPDM_DS_MCMB, drvdata->enable_ds))
__tpdm_disable_cmb(drvdata);
if (drvdata->clk_enable)
@@ -741,8 +725,6 @@
drvdata->enable = false;
mutex_unlock(&drvdata->lock);
- clk_disable_unprepare(drvdata->clk);
-
dev_info(drvdata->dev, "TPDM tracing disabled\n");
}
@@ -3192,9 +3174,10 @@
test_bit(TPDM_DS_MCMB, drvdata->datasets)))
return -EPERM;
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS ?
- "continuous" : "trace_on_change");
+ return scnprintf(buf, PAGE_SIZE, "trace_mode: %s cycle_acc: %d\n",
+ drvdata->cmb->trace_mode ?
+ "trace_on_change" : "continuous",
+ drvdata->cmb->cycle_acc);
}
static ssize_t tpdm_store_cmb_mode(struct device *dev,
@@ -3203,180 +3186,118 @@
size_t size)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- char str[20] = "";
+ unsigned int trace_mode, cycle_acc;
+ int nval;
- if (strlen(buf) >= 20)
+ nval = sscanf(buf, "%u %u", &trace_mode, &cycle_acc);
+ if (nval != 2)
return -EINVAL;
- if (sscanf(buf, "%s", str) != 1)
- return -EINVAL;
+
if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
test_bit(TPDM_DS_MCMB, drvdata->datasets)))
return -EPERM;
mutex_lock(&drvdata->lock);
- if (!strcmp(str, "continuous")) {
- drvdata->cmb->mode = TPDM_CMB_MODE_CONTINUOUS;
- } else if (!strcmp(str, "trace_on_change")) {
- drvdata->cmb->mode = TPDM_CMB_MODE_TRACE_ON_CHANGE;
- } else {
- mutex_unlock(&drvdata->lock);
- return -EINVAL;
- }
+ drvdata->cmb->trace_mode = trace_mode;
+ drvdata->cmb->cycle_acc = cycle_acc;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR(cmb_mode, 0644,
tpdm_show_cmb_mode, tpdm_store_cmb_mode);
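For reference, the store side now parses two decimal integers instead of a keyword; from user space the interaction looks roughly like this (the sysfs path is hypothetical):

	/*
	 *	echo "1 2" > /sys/.../cmb_mode	trace_on_change, cycle_acc = 2
	 *	echo "0 0" > /sys/.../cmb_mode	continuous, no accumulation
	 */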
-static ssize_t tpdm_show_cmb_patt_val_lsb(struct device *dev,
+static ssize_t tpdm_show_cmb_patt_val(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
+ ssize_t size = 0;
+ int i;
if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
test_bit(TPDM_DS_MCMB, drvdata->datasets)))
return -EPERM;
- val = drvdata->cmb->patt_val[TPDM_CMB_LSB];
-
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+ mutex_lock(&drvdata->lock);
+ for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ "Index: 0x%x Value: 0x%x\n", i,
+ drvdata->cmb->patt_val[i]);
+ }
+ mutex_unlock(&drvdata->lock);
+ return size;
}
-static ssize_t tpdm_store_cmb_patt_val_lsb(struct device *dev,
+static ssize_t tpdm_store_cmb_patt_val(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
+ unsigned long index, val;
- if (kstrtoul(buf, 16, &val))
+ if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+ return -EINVAL;
+ if (index >= TPDM_CMB_PATT_CMP)
return -EINVAL;
if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
test_bit(TPDM_DS_MCMB, drvdata->datasets)))
return -EPERM;
mutex_lock(&drvdata->lock);
- drvdata->cmb->patt_val[TPDM_CMB_LSB] = val;
+ drvdata->cmb->patt_val[index] = val;
mutex_unlock(&drvdata->lock);
+
return size;
}
-static DEVICE_ATTR(cmb_patt_val_lsb, 0644,
- tpdm_show_cmb_patt_val_lsb,
- tpdm_store_cmb_patt_val_lsb);
+static DEVICE_ATTR(cmb_patt_val, 0644,
+ tpdm_show_cmb_patt_val,
+ tpdm_store_cmb_patt_val);
-static ssize_t tpdm_show_cmb_patt_mask_lsb(struct device *dev,
+static ssize_t tpdm_show_cmb_patt_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
+ ssize_t size = 0;
+ int i;
if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
test_bit(TPDM_DS_MCMB, drvdata->datasets)))
return -EPERM;
- val = drvdata->cmb->patt_mask[TPDM_CMB_LSB];
+ mutex_lock(&drvdata->lock);
+ for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ "Index: 0x%x Value: 0x%x\n", i,
+ drvdata->cmb->patt_mask[i]);
+ }
+ mutex_unlock(&drvdata->lock);
+ return size;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
-static ssize_t tpdm_store_cmb_patt_mask_lsb(struct device *dev,
+static ssize_t tpdm_store_cmb_patt_mask(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
+ unsigned long index, val;
- if (kstrtoul(buf, 16, &val))
+ if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+ return -EINVAL;
+ if (index >= TPDM_CMB_PATT_CMP)
return -EINVAL;
if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
test_bit(TPDM_DS_MCMB, drvdata->datasets)))
return -EPERM;
mutex_lock(&drvdata->lock);
- drvdata->cmb->patt_mask[TPDM_CMB_LSB] = val;
+ drvdata->cmb->patt_mask[index] = val;
mutex_unlock(&drvdata->lock);
return size;
}
-static DEVICE_ATTR(cmb_patt_mask_lsb, 0644,
- tpdm_show_cmb_patt_mask_lsb, tpdm_store_cmb_patt_mask_lsb);
-
-static ssize_t tpdm_show_cmb_patt_val_msb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
-
- if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
- test_bit(TPDM_DS_MCMB, drvdata->datasets)))
- return -EPERM;
-
- val = drvdata->cmb->patt_val[TPDM_CMB_MSB];
-
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t tpdm_store_cmb_patt_val_msb(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
- test_bit(TPDM_DS_MCMB, drvdata->datasets)))
- return -EPERM;
-
- mutex_lock(&drvdata->lock);
- drvdata->cmb->patt_val[TPDM_CMB_MSB] = val;
- mutex_unlock(&drvdata->lock);
- return size;
-}
-static DEVICE_ATTR(cmb_patt_val_msb, 0644,
- tpdm_show_cmb_patt_val_msb,
- tpdm_store_cmb_patt_val_msb);
-
-static ssize_t tpdm_show_cmb_patt_mask_msb(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
-
- if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
- test_bit(TPDM_DS_MCMB, drvdata->datasets)))
- return -EPERM;
-
- val = drvdata->cmb->patt_mask[TPDM_CMB_MSB];
-
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t tpdm_store_cmb_patt_mask_msb(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- unsigned long val;
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
- test_bit(TPDM_DS_MCMB, drvdata->datasets)))
- return -EPERM;
-
- mutex_lock(&drvdata->lock);
- drvdata->cmb->patt_mask[TPDM_CMB_MSB] = val;
- mutex_unlock(&drvdata->lock);
- return size;
-}
-static DEVICE_ATTR(cmb_patt_mask_msb, 0644,
- tpdm_show_cmb_patt_mask_msb, tpdm_store_cmb_patt_mask_msb);
+static DEVICE_ATTR(cmb_patt_mask, 0644,
+ tpdm_show_cmb_patt_mask, tpdm_store_cmb_patt_mask);
static ssize_t tpdm_show_cmb_patt_ts(struct device *dev,
struct device_attribute *attr,
@@ -3896,10 +3817,8 @@
static struct attribute *tpdm_cmb_attrs[] = {
&dev_attr_cmb_available_modes.attr,
&dev_attr_cmb_mode.attr,
- &dev_attr_cmb_patt_val_lsb.attr,
- &dev_attr_cmb_patt_mask_lsb.attr,
- &dev_attr_cmb_patt_val_msb.attr,
- &dev_attr_cmb_patt_mask_msb.attr,
+ &dev_attr_cmb_patt_val.attr,
+ &dev_attr_cmb_patt_mask.attr,
&dev_attr_cmb_patt_ts.attr,
&dev_attr_cmb_trig_patt_val_lsb.attr,
&dev_attr_cmb_trig_patt_mask_lsb.attr,
@@ -4011,57 +3930,40 @@
drvdata->cmb->trig_ts = true;
}
-static int tpdm_probe(struct platform_device *pdev)
+static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret, i;
uint32_t pidr, devid;
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpdm_drvdata *drvdata;
- struct resource *res;
struct coresight_desc *desc;
static int traceid = TPDM_TRACE_ID_START;
uint32_t version;
- pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &pdev->dev;
- platform_set_drvdata(pdev, drvdata);
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpdm-base");
- if (!res)
- return -ENODEV;
-
- drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
-	if (!drvdata->base)
-		return -ENOMEM;
+	if (IS_ERR(drvdata->base))
+		return PTR_ERR(drvdata->base);
- drvdata->clk_enable = of_property_read_bool(pdev->dev.of_node,
+ drvdata->clk_enable = of_property_read_bool(adev->dev.of_node,
"qcom,clk-enable");
- drvdata->msr_fix_req = of_property_read_bool(pdev->dev.of_node,
+ drvdata->msr_fix_req = of_property_read_bool(adev->dev.of_node,
"qcom,msr-fix-req");
mutex_init(&drvdata->lock);
- drvdata->clk = devm_clk_get(dev, "core_clk");
- if (IS_ERR(drvdata->clk))
- return PTR_ERR(drvdata->clk);
-
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
- return ret;
-
version = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR2);
drvdata->version = BMVAL(version, 4, 7);
@@ -4089,7 +3991,7 @@
drvdata->bc_counters_avail = BMVAL(devid, 6, 10) + 1;
drvdata->tc_counters_avail = BMVAL(devid, 4, 5) + 1;
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
drvdata->traceid = traceid++;
@@ -4099,8 +4001,8 @@
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc->ops = &tpdm_cs_ops;
- desc->pdata = pdev->dev.platform_data;
- desc->dev = &pdev->dev;
+ desc->pdata = adev->dev.platform_data;
+ desc->dev = &adev->dev;
desc->groups = tpdm_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
@@ -4114,40 +4016,26 @@
return 0;
}
-static int tpdm_remove(struct platform_device *pdev)
-{
- struct tpdm_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
-static const struct of_device_id tpdm_match[] = {
- {.compatible = "qcom,coresight-tpdm"},
- {}
+static struct amba_id tpdm_ids[] = {
+ {
+ .id = 0x0003b968,
+ .mask = 0x0003ffff,
+ .data = "TPDM",
+ },
+ { 0, 0},
};
-static struct platform_driver tpdm_driver = {
- .probe = tpdm_probe,
- .remove = tpdm_remove,
- .driver = {
+static struct amba_driver tpdm_driver = {
+ .drv = {
.name = "coresight-tpdm",
.owner = THIS_MODULE,
- .of_match_table = tpdm_match,
+ .suppress_bind_attrs = true,
},
+ .probe = tpdm_probe,
+ .id_table = tpdm_ids,
};
-static int __init tpdm_init(void)
-{
- return platform_driver_register(&tpdm_driver);
-}
-module_init(tpdm_init);
-
-static void __exit tpdm_exit(void)
-{
- platform_driver_unregister(&tpdm_driver);
-}
-module_exit(tpdm_exit);
+builtin_amba_driver(tpdm_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 3a4474d..e233e76 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/clk.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
#include <linux/coresight.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
@@ -578,27 +579,29 @@
}
EXPORT_SYMBOL_GPL(coresight_enable);
-void coresight_disable(struct coresight_device *csdev)
+static void __coresight_disable(struct coresight_device *csdev)
{
int ret;
- mutex_lock(&coresight_mutex);
-
ret = coresight_validate_source(csdev, __func__);
if (ret)
- goto out;
+ return;
if (!csdev->enable)
- goto out;
+ return;
if (csdev->node == NULL)
- goto out;
+ return;
coresight_disable_source(csdev);
coresight_disable_path(csdev->node->path);
coresight_release_path(csdev, csdev->node->path);
+}
-out:
+void coresight_disable(struct coresight_device *csdev)
+{
+ mutex_lock(&coresight_mutex);
+ __coresight_disable(csdev);
mutex_unlock(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_disable);
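This split follows the usual __-prefix locking convention, sketched below: the public entry point takes the mutex exactly once, and callers that already hold it (such as the loop in the next hunk) call the unlocked worker directly, avoiding a recursive grab of coresight_mutex:

	static DEFINE_MUTEX(example_mutex);

	static void __do_disable(void)
	{
		/* caller must hold example_mutex */
	}

	void do_disable(void)
	{
		mutex_lock(&example_mutex);
		__do_disable();
		mutex_unlock(&example_mutex);
	}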
@@ -904,7 +907,7 @@
csdev = coresight_get_source(cspath->path);
if (!csdev)
continue;
- coresight_disable(csdev);
+ __coresight_disable(csdev);
}
mutex_unlock(&coresight_mutex);
@@ -938,6 +941,14 @@
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
struct coresight_connection *conns = NULL;
+ struct clk *pclk;
+
+ pclk = clk_get(desc->dev, "apb_pclk");
+ if (!IS_ERR(pclk)) {
+ ret = clk_set_rate(pclk, QDSS_CLK_LEVEL_DYNAMIC);
+ if (ret)
+ dev_err(desc->dev, "clk set rate failed\n");
+ }
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d4f3239..f283b71 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -64,6 +64,7 @@
int irq;
struct i2c_adapter adapter;
struct completion completion;
+ struct i2c_msg *curr_msg;
u32 msg_err;
u8 *msg_buf;
size_t msg_buf_remaining;
@@ -126,14 +127,13 @@
return IRQ_HANDLED;
}
- if (val & BCM2835_I2C_S_RXD) {
- bcm2835_drain_rxfifo(i2c_dev);
- if (!(val & BCM2835_I2C_S_DONE))
- return IRQ_HANDLED;
- }
-
if (val & BCM2835_I2C_S_DONE) {
- if (i2c_dev->msg_buf_remaining)
+ if (i2c_dev->curr_msg->flags & I2C_M_RD) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+ }
+
+ if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
i2c_dev->msg_err = BCM2835_I2C_S_LEN;
else
i2c_dev->msg_err = 0;
@@ -141,11 +141,16 @@
return IRQ_HANDLED;
}
- if (val & BCM2835_I2C_S_TXD) {
+ if (val & BCM2835_I2C_S_TXW) {
bcm2835_fill_txfifo(i2c_dev);
return IRQ_HANDLED;
}
+ if (val & BCM2835_I2C_S_RXR) {
+ bcm2835_drain_rxfifo(i2c_dev);
+ return IRQ_HANDLED;
+ }
+
return IRQ_NONE;
}
@@ -155,6 +160,7 @@
u32 c;
unsigned long time_left;
+ i2c_dev->curr_msg = msg;
i2c_dev->msg_buf = msg->buf;
i2c_dev->msg_buf_remaining = msg->len;
reinit_completion(&i2c_dev->completion);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 4466a2f..5ded9b2 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -724,6 +724,50 @@
{
.enter = NULL }
};
+static struct cpuidle_state tangier_cstates[] = {
+ {
+ .name = "C1-TNG",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C4-TNG",
+ .desc = "MWAIT 0x30",
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 100,
+ .target_residency = 400,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-TNG",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 560,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7-TNG",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+ .target_residency = 4000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C9-TNG",
+ .desc = "MWAIT 0x64",
+ .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 20000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
static struct cpuidle_state avn_cstates[] = {
{
.name = "C1-AVN",
@@ -978,6 +1022,10 @@
.state_table = atom_cstates,
};
+static const struct idle_cpu idle_cpu_tangier = {
+ .state_table = tangier_cstates,
+};
+
static const struct idle_cpu idle_cpu_lincroft = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
@@ -1066,6 +1114,7 @@
ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index e412230..b521df6 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -38,6 +38,7 @@
#define FG_ADC_RR_FAKE_BATT_HIGH_MSB 0x5B
#define FG_ADC_RR_BATT_ID_CTRL 0x60
+#define FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV BIT(0)
#define FG_ADC_RR_BATT_ID_TRIGGER 0x61
#define FG_ADC_RR_BATT_ID_TRIGGER_CTL BIT(0)
#define FG_ADC_RR_BATT_ID_STS 0x62
@@ -748,6 +749,75 @@
return rc;
}
+static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
+{
+ int rc = 0;
+
+ if (enable) {
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+ FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV,
+ FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV);
+ if (rc < 0) {
+ pr_err("Enabling BATT ID channel failed:%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+ FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV, 0);
+ if (rc < 0) {
+ pr_err("Disabling BATT ID channel failed:%d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int rradc_do_batt_id_conversion(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u16 *data, u8 *buf)
+{
+ int rc = 0, ret = 0;
+
+ rc = rradc_enable_batt_id_channel(chip, true);
+ if (rc < 0) {
+ pr_err("Enabling BATT ID channel failed:%d\n", rc);
+ return rc;
+ }
+
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+ if (rc < 0) {
+ pr_err("BATT_ID trigger set failed:%d\n", rc);
+ ret = rc;
+ rc = rradc_enable_batt_id_channel(chip, false);
+ if (rc < 0)
+ pr_err("Disabling BATT ID channel failed:%d\n", rc);
+ return ret;
+ }
+
+ rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+ if (rc < 0) {
+ pr_err("Error reading in continuous mode:%d\n", rc);
+ ret = rc;
+ }
+
+ rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+ FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+ if (rc < 0) {
+ pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+ ret = rc;
+ }
+
+ rc = rradc_enable_batt_id_channel(chip, false);
+ if (rc < 0) {
+ pr_err("Disabling BATT ID channel failed:%d\n", rc);
+ ret = rc;
+ }
+
+ return ret;
+}
+
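The helper above uses a record-and-continue error pattern: a failing step is noted in ret, but the unwind steps (clearing the trigger, disabling the channel) still run, and the last recorded failure is returned. In miniature (the step helpers are hypothetical):

	int rc, ret = 0;

	rc = do_step(chip);
	if (rc < 0)
		ret = rc;		/* note the failure */

	rc = undo_step(chip);		/* unwind runs regardless */
	if (rc < 0)
		ret = rc;

	return ret;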
static int rradc_do_conversion(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 *data)
{
@@ -760,24 +830,9 @@
switch (prop->channel) {
case RR_ADC_BATT_ID:
- rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
- FG_ADC_RR_BATT_ID_TRIGGER_CTL,
- FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+ rc = rradc_do_batt_id_conversion(chip, prop, data, buf);
if (rc < 0) {
- pr_err("BATT_ID trigger set failed:%d\n", rc);
- goto fail;
- }
-
- rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
- if (rc < 0) {
- pr_err("Error reading in continuous mode:%d\n", rc);
- goto fail;
- }
-
- rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
- FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
- if (rc < 0) {
- pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+ pr_err("Battery ID conversion failed:%d\n", rc);
goto fail;
}
break;
diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c
index 9241288..05b1985 100644
--- a/drivers/iio/adc/qcom-tadc.c
+++ b/drivers/iio/adc/qcom-tadc.c
@@ -18,7 +18,12 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/pmic-voter.h>
+#define USB_PRESENT_VOTER "USB_PRESENT_VOTER"
+#define SLEEP_VOTER "SLEEP_VOTER"
+#define SHUTDOWN_VOTER "SHUTDOWN_VOTER"
#define TADC_REVISION1_REG 0x00
#define TADC_REVISION2_REG 0x01
#define TADC_REVISION3_REG 0x02
@@ -54,6 +59,7 @@
#define TADC_CH7_ADC_HI_REG(chip) (chip->tadc_base + 0x73)
#define TADC_CH8_ADC_LO_REG(chip) (chip->tadc_base + 0x74)
#define TADC_CH8_ADC_HI_REG(chip) (chip->tadc_base + 0x75)
+#define TADC_ADC_DIRECT_TST(chip) (chip->tadc_base + 0xE7)
/* TADC_CMP register definitions */
#define TADC_CMP_THR1_CMP_REG(chip) (chip->tadc_cmp_base + 0x51)
@@ -217,6 +223,12 @@
struct tadc_chan_data chans[TADC_NUM_CH];
struct completion eoc_complete;
struct mutex write_lock;
+ struct mutex conv_lock;
+ struct power_supply *usb_psy;
+ struct votable *tadc_disable_votable;
+ struct work_struct status_change_work;
+ struct notifier_block nb;
+ u8 hwtrig_conv;
};
struct tadc_pt {
@@ -274,7 +286,7 @@
if ((reg & 0xFF00) == chip->tadc_cmp_base)
return true;
- if (reg == TADC_HWTRIG_CONV_CH_EN_REG(chip))
+ if (reg >= TADC_HWTRIG_CONV_CH_EN_REG(chip))
return true;
return false;
@@ -345,6 +357,26 @@
return rc;
}
+static int tadc_masked_write(struct tadc_chip *chip, u16 reg, u8 mask, u8 data)
+{
+ int rc = 0;
+
+ mutex_lock(&chip->write_lock);
+ if (tadc_is_reg_locked(chip, reg)) {
+ rc = regmap_write(chip->regmap, (reg & 0xFF00) | 0xD0, 0xA5);
+ if (rc < 0) {
+ pr_err("Couldn't unlock secure register rc=%d\n", rc);
+ goto unlock;
+ }
+ }
+
+ rc = regmap_update_bits(chip->regmap, reg, mask, data);
+
+unlock:
+ mutex_unlock(&chip->write_lock);
+ return rc;
+}
+
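This helper is used further down in the patch to flip individual trigger bits without disturbing the rest of the register, for example:

	/* enable only the connector and die-temp hardware triggers,
	 * leaving the other channel-enable bits untouched */
	rc = tadc_masked_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
			       BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP),
			       BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP));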
static int tadc_lerp(const struct tadc_pt *pts, size_t size, bool inv,
s32 input, s32 *output)
{
@@ -480,12 +512,22 @@
{
unsigned long timeout, timeleft;
u8 val[TADC_NUM_CH * 2];
- int rc, i;
+ int rc = 0, i;
+ mutex_lock(&chip->conv_lock);
rc = tadc_read(chip, TADC_MBG_ERR_REG(chip), val, 1);
if (rc < 0) {
pr_err("Couldn't read mbg error status rc=%d\n", rc);
- return rc;
+ goto unlock;
+ }
+
+ reinit_completion(&chip->eoc_complete);
+
+ if (get_effective_result(chip->tadc_disable_votable)) {
+		/* leave it in the completed state */
+ complete_all(&chip->eoc_complete);
+ rc = -ENODATA;
+ goto unlock;
}
if (val[0] != 0) {
@@ -496,7 +538,7 @@
rc = tadc_write(chip, TADC_CONV_REQ_REG(chip), channels);
if (rc < 0) {
pr_err("Couldn't write conversion request rc=%d\n", rc);
- return rc;
+ goto unlock;
}
timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
@@ -506,25 +548,34 @@
rc = tadc_read(chip, TADC_SW_CH_CONV_REG(chip), val, 1);
if (rc < 0) {
pr_err("Couldn't read conversion status rc=%d\n", rc);
- return rc;
+ goto unlock;
}
+ /*
+	 * check one last time whether the requested channel has
+	 * completed its conversion
+ */
if (val[0] != channels) {
- pr_err("Conversion timed out\n");
- return -ETIMEDOUT;
+ rc = -ETIMEDOUT;
+ goto unlock;
}
}
rc = tadc_read(chip, TADC_CH1_ADC_LO_REG(chip), val, ARRAY_SIZE(val));
if (rc < 0) {
pr_err("Couldn't read adc channels rc=%d\n", rc);
- return rc;
+ goto unlock;
}
for (i = 0; i < TADC_NUM_CH; i++)
adc[i] = (s16)(val[i * 2] | (u16)val[i * 2 + 1] << 8);
- return jiffies_to_msecs(timeout - timeleft);
+ pr_debug("Conversion time for channels 0x%x = %dms\n", channels,
+ jiffies_to_msecs(timeout - timeleft));
+
+unlock:
+ mutex_unlock(&chip->conv_lock);
+ return rc;
}
static int tadc_read_raw(struct iio_dev *indio_dev,
@@ -593,12 +644,17 @@
break;
default:
rc = tadc_do_conversion(chip, BIT(chan->channel), adc);
- if (rc >= 0)
- *val = adc[chan->channel];
+ if (rc < 0) {
+ if (rc != -ENODATA)
+				pr_err("Couldn't read channel %d rc=%d\n",
+					chan->channel, rc);
+ return rc;
+ }
+ *val = adc[chan->channel];
break;
}
- if (rc < 0) {
+ if (rc < 0 && rc != -ENODATA) {
pr_err("Couldn't read channel %d\n", chan->channel);
return rc;
}
@@ -630,7 +686,7 @@
case TADC_BATT_P:
rc = tadc_do_conversion(chip,
BIT(TADC_BATT_I) | BIT(TADC_BATT_V), adc);
- if (rc < 0) {
+ if (rc < 0 && rc != -ENODATA) {
pr_err("Couldn't read battery current and voltage channels rc=%d\n",
rc);
return rc;
@@ -641,7 +697,7 @@
case TADC_INPUT_P:
rc = tadc_do_conversion(chip,
BIT(TADC_INPUT_I) | BIT(TADC_INPUT_V), adc);
- if (rc < 0) {
+ if (rc < 0 && rc != -ENODATA) {
pr_err("Couldn't read input current and voltage channels rc=%d\n",
rc);
return rc;
@@ -683,6 +739,7 @@
case TADC_DIE_TEMP:
case TADC_DIE_TEMP_THR1:
case TADC_DIE_TEMP_THR2:
+ case TADC_DIE_TEMP_THR3:
*val = chan_data->scale;
return IIO_VAL_INT;
case TADC_BATT_I:
@@ -821,15 +878,137 @@
return 0;
}
-
static irqreturn_t handle_eoc(int irq, void *dev_id)
{
struct tadc_chip *chip = dev_id;
- complete(&chip->eoc_complete);
+ complete_all(&chip->eoc_complete);
return IRQ_HANDLED;
}
+static int tadc_disable_vote_callback(struct votable *votable,
+ void *data, int disable, const char *client)
+{
+ struct tadc_chip *chip = data;
+ int rc;
+ int timeout;
+ unsigned long timeleft;
+
+ if (disable) {
+ timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+ timeleft = wait_for_completion_timeout(&chip->eoc_complete,
+ timeout);
+ if (timeleft == 0)
+ pr_err("Timed out waiting for eoc, disabling hw conversions regardless\n");
+
+ rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+ &chip->hwtrig_conv, 1);
+ if (rc < 0) {
+ pr_err("Couldn't save hw conversions rc=%d\n", rc);
+ return rc;
+ }
+ rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), 0x00);
+ if (rc < 0) {
+ pr_err("Couldn't disable hw conversions rc=%d\n", rc);
+ return rc;
+ }
+ rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x80);
+ if (rc < 0) {
+ pr_err("Couldn't enable direct test mode rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x00);
+ if (rc < 0) {
+ pr_err("Couldn't disable direct test mode rc=%d\n", rc);
+ return rc;
+ }
+ rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+ chip->hwtrig_conv);
+ if (rc < 0) {
+ pr_err("Couldn't restore hw conversions rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ pr_debug("client: %s disable: %d\n", client, disable);
+ return 0;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+ struct tadc_chip *chip = container_of(work,
+ struct tadc_chip, status_change_work);
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (!chip->usb_psy) {
+ /* treat usb as not present */
+ vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+ return;
+ }
+
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get present status rc=%d\n", rc);
+ /* treat usb as not present */
+ vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+ return;
+ }
+
+ /* disable if usb is not present */
+ vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, !pval.intval, 0);
+}
+
+static int tadc_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct tadc_chip *chip = container_of(nb, struct tadc_chip, nb);
+
+ if (ev != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if ((strcmp(psy->desc->name, "usb") == 0))
+ schedule_work(&chip->status_change_work);
+
+ return NOTIFY_OK;
+}
+
+static int tadc_register_notifier(struct tadc_chip *chip)
+{
+ int rc;
+
+ chip->nb.notifier_call = tadc_notifier_call;
+ rc = power_supply_reg_notifier(&chip->nb);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int tadc_suspend(struct device *dev)
+{
+ struct tadc_chip *chip = dev_get_drvdata(dev);
+
+ vote(chip->tadc_disable_votable, SLEEP_VOTER, true, 0);
+ return 0;
+}
+
+static int tadc_resume(struct device *dev)
+{
+ struct tadc_chip *chip = dev_get_drvdata(dev);
+
+ vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+ return 0;
+}
+
static int tadc_set_therm_table(struct tadc_chan_data *chan_data, u32 beta,
u32 rtherm)
{
@@ -975,16 +1154,23 @@
return rc;
}
- /* enable all temperature hardware triggers */
- rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
- BIT(TADC_THERM1) |
- BIT(TADC_THERM2) |
- BIT(TADC_DIE_TEMP));
+ /* enable connector and die temp hardware triggers */
+ rc = tadc_masked_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+ BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP),
+ BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP));
if (rc < 0) {
pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
return rc;
}
+ /* save hw triggered conversion configuration */
+ rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+ &chip->hwtrig_conv, 1);
+ if (rc < 0) {
+ pr_err("Couldn't save hw conversions rc=%d\n", rc);
+ return rc;
+ }
+
return 0;
}
@@ -1009,6 +1195,12 @@
chip->dev = &pdev->dev;
init_completion(&chip->eoc_complete);
+ /*
+ * Set the completion to the "completed" state so that disabling
+ * the tadc can make progress.
+ */
+ complete_all(&chip->eoc_complete);
+
rc = of_property_read_u32(node, "reg", &chip->tadc_base);
if (rc < 0) {
pr_err("Couldn't read base address rc=%d\n", rc);
@@ -1017,6 +1209,8 @@
chip->tadc_cmp_base = chip->tadc_base + 0x100;
mutex_init(&chip->write_lock);
+ mutex_init(&chip->conv_lock);
+ INIT_WORK(&chip->status_change_work, status_change_work);
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
pr_err("Couldn't get regmap\n");
@@ -1035,17 +1229,36 @@
return rc;
}
+ chip->tadc_disable_votable = create_votable("SMB_TADC_DISABLE",
+ VOTE_SET_ANY,
+ tadc_disable_vote_callback,
+ chip);
+ if (IS_ERR(chip->tadc_disable_votable)) {
+ rc = PTR_ERR(chip->tadc_disable_votable);
+ return rc;
+ }
+ /* assume usb is not present */
+ vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+ vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, false, 0);
+ vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+
+ rc = tadc_register_notifier(chip);
+ if (rc < 0) {
+ pr_err("Couldn't register notifier=%d\n", rc);
+ goto destroy_votable;
+ }
+
irq = of_irq_get_byname(node, "eoc");
if (irq < 0) {
pr_err("Couldn't get eoc irq rc=%d\n", irq);
- return irq;
+ goto destroy_votable;
}
rc = devm_request_threaded_irq(chip->dev, irq, NULL, handle_eoc,
IRQF_ONESHOT, "eoc", chip);
if (rc < 0) {
pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
- return rc;
+ goto destroy_votable;
}
indio_dev->dev.parent = chip->dev;
@@ -1058,17 +1271,37 @@
rc = devm_iio_device_register(chip->dev, indio_dev);
if (rc < 0) {
pr_err("Couldn't register IIO device rc=%d\n", rc);
- return rc;
+ goto destroy_votable;
}
+ platform_set_drvdata(pdev, chip);
return 0;
+
+destroy_votable:
+ destroy_votable(chip->tadc_disable_votable);
+ return rc;
}
static int tadc_remove(struct platform_device *pdev)
{
+ struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+ destroy_votable(chip->tadc_disable_votable);
return 0;
}
+static void tadc_shutdown(struct platform_device *pdev)
+{
+ struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+ vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, true, 0);
+}
+
+static const struct dev_pm_ops tadc_pm_ops = {
+ .resume = tadc_resume,
+ .suspend = tadc_suspend,
+};
+
static const struct of_device_id tadc_match_table[] = {
{ .compatible = "qcom,tadc" },
{ }
@@ -1076,12 +1309,14 @@
MODULE_DEVICE_TABLE(of, tadc_match_table);
static struct platform_driver tadc_driver = {
- .driver = {
+ .driver = {
.name = "qcom-tadc",
.of_match_table = tadc_match_table,
+ .pm = &tadc_pm_ops,
},
- .probe = tadc_probe,
- .remove = tadc_remove,
+ .probe = tadc_probe,
+ .remove = tadc_remove,
+ .shutdown = tadc_shutdown,
};
module_platform_driver(tadc_driver);
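
[Editor's note] The switch to a votable above means any client can force the TADC into direct-test (disabled) mode, and hardware conversions resume only once every vote is withdrawn. A minimal sketch of a hypothetical additional client, reusing vote() exactly as the probe path does; the THERMAL_VOTER name is illustrative and not part of this patch:

/* Hypothetical client name -- not defined by this patch. */
#define THERMAL_VOTER	"THERMAL_VOTER"

static int example_pause_tadc(struct tadc_chip *chip, bool pause)
{
	/*
	 * VOTE_SET_ANY: the disable callback runs with disable=true as
	 * soon as any client votes true, and with disable=false only
	 * after all clients have withdrawn their votes.
	 */
	return vote(chip->tadc_disable_votable, THERMAL_VOTER, pause, 0);
}
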
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index f7fcfa8..821919d 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,6 +27,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
@@ -52,6 +53,9 @@
#define BMG160_DEF_BW 100
#define BMG160_REG_PMU_BW_RES BIT(7)
+#define BMG160_GYRO_REG_RESET 0x14
+#define BMG160_GYRO_RESET_VAL 0xb6
+
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
@@ -236,6 +240,14 @@
int ret;
unsigned int val;
+ /*
+ * Reset chip to get it in a known good state. A delay of 30ms after
+ * reset is required according to the datasheet.
+ */
+ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+ BMG160_GYRO_RESET_VAL);
+ usleep_range(30000, 30700);
+
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(dev, "Error reading reg_chip_id\n");
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index bbe1524..f397a5b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -201,6 +201,7 @@
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -329,6 +330,7 @@
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
+ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
{ }
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2909365..9b8079c 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,6 +26,7 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
@@ -35,6 +36,7 @@
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
+ struct gpio_desc *gpiod;
struct timer_list release_timer;
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
@@ -140,7 +142,7 @@
*/
disable_irq(bdata->irq);
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -358,19 +360,20 @@
const struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
- int state = gpio_get_value_cansleep(button->gpio);
+ int state;
+ state = gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
- dev_err(input->dev.parent, "failed to get gpio state\n");
+ dev_err(input->dev.parent,
+ "failed to get gpio state: %d\n", state);
return;
}
- state = (state ? 1 : 0) ^ button->active_low;
if (type == EV_ABS) {
if (state)
input_event(input, type, button->code, button->value);
} else {
- input_event(input, type, button->code, !!state);
+ input_event(input, type, button->code, state);
}
input_sync(input);
}
@@ -456,7 +459,7 @@
{
struct gpio_button_data *bdata = data;
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -478,18 +481,30 @@
bdata->button = button;
spin_lock_init(&bdata->lock);
+ /*
+ * Legacy GPIO number, so request the GPIO here and
+ * convert it to descriptor.
+ */
if (gpio_is_valid(button->gpio)) {
+ unsigned flags = GPIOF_IN;
- error = devm_gpio_request_one(&pdev->dev, button->gpio,
- GPIOF_IN, desc);
+ if (button->active_low)
+ flags |= GPIOF_ACTIVE_LOW;
+
+ error = devm_gpio_request_one(&pdev->dev, button->gpio, flags,
+ desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod)
+ return -EINVAL;
+
if (button->debounce_interval) {
- error = gpio_set_debounce(button->gpio,
+ error = gpiod_set_debounce(bdata->gpiod,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
@@ -500,7 +515,7 @@
if (button->irq) {
bdata->irq = button->irq;
} else {
- irq = gpio_to_irq(button->gpio);
+ irq = gpiod_to_irq(bdata->gpiod);
if (irq < 0) {
error = irq;
dev_err(dev,
@@ -575,7 +590,7 @@
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
gpio_keys_gpio_report_event(bdata);
}
input_sync(input);
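
[Editor's note] The gpio_keys changes above convert legacy GPIO numbers to descriptors so gpiolib applies the active-low inversion, removing the open-coded `^ button->active_low`. A self-contained sketch of the same request-and-convert pattern (needs <linux/gpio.h> and <linux/gpio/consumer.h>); the label string is illustrative:

static int example_read_button(struct device *dev, unsigned int gpio,
			       bool active_low, struct gpio_desc **out)
{
	unsigned long flags = GPIOF_IN;
	int error;

	if (active_low)
		flags |= GPIOF_ACTIVE_LOW;

	error = devm_gpio_request_one(dev, gpio, flags, "example-button");
	if (error < 0)
		return error;

	*out = gpio_to_desc(gpio);
	if (!*out)
		return -EINVAL;

	/* Logical state: gpiolib already applied the polarity. */
	return gpiod_get_value_cansleep(*out);
}
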
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index db7d1d6..7826994 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,6 +1118,7 @@
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
@@ -1524,6 +1525,13 @@
},
},
{
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 25eab45..e7b96f1 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -685,6 +685,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
},
+ {
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ },
+ },
{ }
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 34df44c..dd96670 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -828,26 +828,26 @@
return 0;
}
- ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
+ ret = arm_smmu_request_bus(pwr);
if (ret)
goto out_unlock;
- ret = arm_smmu_request_bus(pwr);
+ ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
if (ret)
- goto out_disable_regulators;
+ goto out_disable_bus;
ret = arm_smmu_prepare_clocks(pwr);
if (ret)
- goto out_disable_bus;
+ goto out_disable_regulators;
pwr->power_count = 1;
mutex_unlock(&pwr->power_lock);
return 0;
-out_disable_bus:
- arm_smmu_unrequest_bus(pwr);
out_disable_regulators:
regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
+out_disable_bus:
+ arm_smmu_unrequest_bus(pwr);
out_unlock:
mutex_unlock(&pwr->power_lock);
return ret;
@@ -868,9 +868,9 @@
}
arm_smmu_unprepare_clocks(pwr);
- arm_smmu_unrequest_bus(pwr);
regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
-
+ arm_smmu_unrequest_bus(pwr);
+ pwr->power_count = 0;
mutex_unlock(&pwr->power_lock);
}
@@ -3341,7 +3341,7 @@
i = 0;
of_property_for_each_string(dev->of_node, "qcom,regulator-names",
prop, cname)
- pwr->gdscs[i].supply = cname;
+ pwr->gdscs[i++].supply = cname;
ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
return ret;
@@ -3366,7 +3366,6 @@
pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
if (!pwr->bus_client) {
dev_err(dev, "Bus client registration failed\n");
- msm_bus_cl_clear_pdata(pwr->bus_dt_data);
return -EINVAL;
}
@@ -3407,12 +3406,11 @@
}
/*
- * Bus APIs are not devm-safe.
+ * Bus APIs are devm-safe.
*/
static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
{
msm_bus_scale_unregister_client(pwr->bus_client);
- msm_bus_cl_clear_pdata(pwr->bus_dt_data);
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
@@ -3977,6 +3975,8 @@
void __iomem *tcu_base;
u32 version;
};
+#define get_qsmmuv500_archdata(smmu) \
+ ((struct qsmmuv500_archdata *)(smmu->archdata))
struct qsmmuv500_tbu_device {
struct list_head list;
@@ -3997,7 +3997,7 @@
static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
{
struct qsmmuv500_tbu_device *tbu;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
int ret = 0;
list_for_each_entry(tbu, &data->tbus, list) {
@@ -4017,7 +4017,7 @@
static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
{
struct qsmmuv500_tbu_device *tbu;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
list_for_each_entry_reverse(tbu, &data->tbus, list) {
arm_smmu_power_off(tbu->pwr);
@@ -4085,7 +4085,7 @@
static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
{
struct qsmmuv500_tbu_device *tbu;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
int ret = 0;
list_for_each_entry(tbu, &data->tbus, list) {
@@ -4106,7 +4106,7 @@
static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
{
struct qsmmuv500_tbu_device *tbu;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
list_for_each_entry(tbu, &data->tbus, list) {
qsmmuv500_tbu_resume(tbu);
@@ -4117,14 +4117,14 @@
struct arm_smmu_device *smmu, u32 sid)
{
struct qsmmuv500_tbu_device *tbu = NULL;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
list_for_each_entry(tbu, &data->tbus, list) {
if (tbu->sid_start <= sid &&
sid < tbu->sid_start + tbu->num_sids)
- break;
+ return tbu;
}
- return tbu;
+ return NULL;
}
static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
@@ -4150,7 +4150,7 @@
unsigned long *flags)
{
struct arm_smmu_device *smmu = tbu->smmu;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
u32 val;
spin_lock_irqsave(&smmu->atos_lock, *flags);
@@ -4174,7 +4174,7 @@
unsigned long *flags)
{
struct arm_smmu_device *smmu = tbu->smmu;
- struct qsmmuv500_archdata *data = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
/* The status register is not accessible on version 1.0 */
if (data->version != 0x01000000)
@@ -4326,11 +4326,11 @@
return qsmmuv500_iova_to_phys(domain, iova, sid);
}
-static int qsmmuv500_tbu_register(struct device *dev, void *data)
+static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
{
- struct arm_smmu_device *smmu = data;
+ struct arm_smmu_device *smmu = cookie;
struct qsmmuv500_tbu_device *tbu;
- struct list_head *list = smmu->archdata;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
if (!dev->driver) {
dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
@@ -4341,7 +4341,7 @@
INIT_LIST_HEAD(&tbu->list);
tbu->smmu = smmu;
- list_add(&tbu->list, list);
+ list_add(&tbu->list, &data->tbus);
return 0;
}
@@ -4375,7 +4375,7 @@
/* Attempt to register child devices */
ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
if (ret)
- return -EINVAL;
+ return -EPROBE_DEFER;
return 0;
}
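
[Editor's note] The reordering above makes power-up acquire resources as bus, then regulators, then clocks, with the error path and power-off releasing them in exact reverse. The resulting structure, shown in isolation (the function names are the driver's own):

static int example_power_on(struct arm_smmu_power_resources *pwr)
{
	int ret;

	ret = arm_smmu_request_bus(pwr);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
	if (ret)
		goto out_unrequest_bus;

	ret = arm_smmu_prepare_clocks(pwr);
	if (ret)
		goto out_disable_regulators;

	return 0;

out_disable_regulators:
	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_unrequest_bus:
	arm_smmu_unrequest_bus(pwr);
	return ret;
}
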
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 34c7381..aded314 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,8 @@
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
-
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
/* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
#define FAST_PAGE_SHIFT 12
@@ -633,7 +634,7 @@
dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
- fast->pgtbl_pmds, ptep - fast->pgtbl_pmds);
+ fast->pgtbl_pmds, bitmap_idx);
print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
32, 8, fast->bitmap, fast->bitmap_size, false);
}
@@ -683,7 +684,7 @@
* fast_smmu_attach_device function.
*/
static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
- dma_addr_t base, size_t size)
+ dma_addr_t base, u64 size)
{
struct dma_fast_smmu_mapping *fast;
@@ -696,7 +697,11 @@
fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);
- fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL);
+ fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
+ if (!fast->bitmap)
+ fast->bitmap = vzalloc(fast->bitmap_size);
+
if (!fast->bitmap)
goto err2;
@@ -726,7 +731,7 @@
int atomic_domain = 1;
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
- size_t size = mapping->bits << PAGE_SHIFT;
+ u64 size = (u64)mapping->bits << PAGE_SHIFT;
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
@@ -780,7 +785,7 @@
dev->archdata.mapping = NULL;
set_dma_ops(dev, NULL);
- kfree(mapping->fast->bitmap);
+ kvfree(mapping->fast->bitmap);
kfree(mapping->fast);
}
EXPORT_SYMBOL(fast_smmu_detach_device);
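
[Editor's note] The bitmap for a 4 GB space is 128 KB (one bit per 4 K page), an order-5 slab allocation that can fail under fragmentation, hence the quiet slab attempt with a vmalloc fallback above. The pattern in isolation; on 4.9 this is effectively an open-coded kvzalloc():

static void *example_alloc_bitmap(size_t size)
{
	/* Quiet, cheap slab attempt first... */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

	/* ...then fall back to vmalloc space if memory is fragmented. */
	if (!p)
		p = vzalloc(size);

	return p;	/* release with kvfree() */
}
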
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 393e20c4..f7739ae 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -78,7 +78,7 @@
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
/* Page table bits */
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 85fe317..9b13fce 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/io-pgtable-fast.h>
#include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
#include "io-pgtable.h"
@@ -268,11 +269,18 @@
return size;
}
+#if defined(CONFIG_ARM64)
+#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27)
+#elif defined(CONFIG_ARM)
+#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27)
+#endif
+
static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+ unsigned long pgd;
phys_addr_t phys;
const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
@@ -282,8 +290,9 @@
/* TODO: clean up some of these magic numbers... */
- pgdp = (av8l_fast_iopte *)
- (((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
+ pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
+ pgdp = (av8l_fast_iopte *)pgd;
+
pte = *pgdp;
if (((pte >> pts) & ptm) != ptt)
return 0;
@@ -345,7 +354,12 @@
int i, j, pg = 0;
struct page **pages, *page;
- pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL);
+ pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL |
+ __GFP_NOWARN | __GFP_NORETRY);
+
+ if (!pages)
+ pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);
+
if (!pages)
return -ENOMEM;
@@ -414,7 +428,7 @@
for (i = 0; i < pg; ++i)
__free_page(pages[i]);
err_free_pages_arr:
- kfree(pages);
+ kvfree(pages);
return -ENOMEM;
}
@@ -473,6 +487,9 @@
reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+#if defined(CONFIG_ARM)
+ reg |= ARM_32_LPAE_TCR_EAE;
+#endif
cfg->av8l_fast_cfg.tcr = reg;
/* MAIRs */
@@ -512,7 +529,7 @@
vunmap(data->pmds);
for (i = 0; i < NUM_PGTBL_PAGES; ++i)
__free_page(data->pages[i]);
- kfree(data->pages);
+ kvfree(data->pages);
kfree(data);
}
@@ -560,7 +577,7 @@
const phys_addr_t phys_start,
const size_t size)
{
- unsigned long iova = iova_start;
+ u64 iova = iova_start;
phys_addr_t phys = phys_start;
while (iova < (iova_start + size)) {
@@ -576,11 +593,12 @@
static int __init av8l_fast_positive_testing(void)
{
int failed = 0;
- unsigned long iova;
+ u64 iova;
struct io_pgtable_ops *ops;
struct io_pgtable_cfg cfg;
struct av8l_fast_io_pgtable *data;
av8l_fast_iopte *pmds;
+ u64 max = SZ_1G * 4ULL - 1;
cfg = (struct io_pgtable_cfg) {
.quirks = 0,
@@ -600,19 +618,18 @@
pmds = data->pmds;
/* map the entire 4GB VA space with 4K map calls */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+ for (iova = 0; iova < max; iova += SZ_4K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
failed++;
continue;
}
}
-
if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- SZ_1G * 4UL)))
+ max)))
failed++;
/* unmap it all */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+ for (iova = 0; iova < max; iova += SZ_4K) {
if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
failed++;
}
@@ -621,7 +638,7 @@
av8l_fast_clear_stale_ptes(pmds, false);
/* map the entire 4GB VA space with 8K map calls */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+ for (iova = 0; iova < max; iova += SZ_8K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
failed++;
continue;
@@ -629,11 +646,11 @@
}
if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- SZ_1G * 4UL)))
+ max)))
failed++;
/* unmap it all with 8K unmap calls */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+ for (iova = 0; iova < max; iova += SZ_8K) {
if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
failed++;
}
@@ -642,7 +659,7 @@
av8l_fast_clear_stale_ptes(pmds, false);
/* map the entire 4GB VA space with 16K map calls */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+ for (iova = 0; iova < max; iova += SZ_16K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
failed++;
continue;
@@ -650,11 +667,11 @@
}
if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- SZ_1G * 4UL)))
+ max)))
failed++;
/* unmap it all */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+ for (iova = 0; iova < max; iova += SZ_16K) {
if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
failed++;
}
@@ -663,7 +680,7 @@
av8l_fast_clear_stale_ptes(pmds, false);
/* map the entire 4GB VA space with 64K map calls */
- for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
+ for (iova = 0; iova < max; iova += SZ_64K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
failed++;
continue;
@@ -671,11 +688,11 @@
}
if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- SZ_1G * 4UL)))
+ max)))
failed++;
/* unmap it all at once */
- if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
+ if (WARN_ON(ops->unmap(ops, 0, max) != max))
failed++;
free_io_pgtable_ops(ops);
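
[Editor's note] The widening from unsigned long to u64 above matters on 32-bit ARM, where SZ_1G * 4UL wraps to 0 and every `iova < SZ_1G * 4UL` loop degenerates to zero iterations. A sketch of the corrected loop shape with the bound the patch uses:

static void example_sweep(struct io_pgtable_ops *ops)
{
	/* 4 GB - 1 still fits the comparison on both 32- and 64-bit. */
	const u64 max = SZ_1G * 4ULL - 1;
	u64 iova;

	for (iova = 0; iova < max; iova += SZ_4K)
		ops->unmap(ops, iova, SZ_4K);
}
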
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 45ffb40..181e889 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -124,29 +124,6 @@
.release = single_release,
};
-static ssize_t iommu_debug_attachment_trigger_fault_write(
- struct file *file, const char __user *ubuf, size_t count,
- loff_t *offset)
-{
- struct iommu_debug_attachment *attach = file->private_data;
- unsigned long flags;
-
- if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
- pr_err("Invalid flags format\n");
- return -EFAULT;
- }
-
- iommu_trigger_fault(attach->domain, flags);
-
- return count;
-}
-
-static const struct file_operations
-iommu_debug_attachment_trigger_fault_fops = {
- .open = simple_open,
- .write = iommu_debug_attachment_trigger_fault_write,
-};
-
static ssize_t iommu_debug_attachment_reg_offset_write(
struct file *file, const char __user *ubuf, size_t count,
loff_t *offset)
@@ -271,14 +248,6 @@
}
if (!debugfs_create_file(
- "trigger_fault", S_IRUSR, attach->dentry, attach,
- &iommu_debug_attachment_trigger_fault_fops)) {
- pr_err("Couldn't create iommu/attachments/%s/trigger_fault debugfs file for domain 0x%p\n",
- dev_name(dev), domain);
- goto err_rmdir;
- }
-
- if (!debugfs_create_file(
"reg_offset", S_IRUSR, attach->dentry, attach,
&iommu_debug_attachment_reg_offset_fops)) {
pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
@@ -822,7 +791,7 @@
if (!virt)
goto out;
- mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
if (!mapping) {
seq_puts(s, "fast_smmu_create_mapping failed\n");
goto out_kfree;
@@ -922,8 +891,8 @@
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
int i, ret = 0;
- unsigned long iova;
- const unsigned long max = SZ_1G * 4UL;
+ u64 iova;
+ const u64 max = SZ_1G * 4ULL - 1;
void *virt;
phys_addr_t phys;
dma_addr_t dma_addr;
@@ -995,8 +964,8 @@
}
/* we're all full again. unmap everything. */
- for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
- dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
+ for (iova = 0; iova < max; iova += SZ_8K)
+ dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
out:
free_pages((unsigned long)virt, get_order(SZ_8K));
@@ -1029,7 +998,7 @@
const size_t size)
{
u64 iova;
- const unsigned long max = SZ_1G * 4UL;
+ const u64 max = SZ_1G * 4ULL - 1;
int i, remapped, unmapped, ret = 0;
void *virt;
dma_addr_t dma_addr, dma_addr2;
@@ -1061,9 +1030,9 @@
fib_init(&fib);
for (iova = get_next_fib(&fib) * size;
iova < max - size;
- iova = get_next_fib(&fib) * size) {
- dma_addr = iova;
- dma_addr2 = max - size - iova;
+ iova = (u64)get_next_fib(&fib) * size) {
+ dma_addr = (dma_addr_t)(iova);
+ dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
if (dma_addr == dma_addr2) {
WARN(1,
"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
@@ -1089,8 +1058,8 @@
ret = -EINVAL;
}
- for (dma_addr = 0; dma_addr < max; dma_addr += size)
- dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+ for (iova = 0; iova < max; iova += size)
+ dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
out:
free_pages((unsigned long)virt, get_order(size));
@@ -1118,10 +1087,11 @@
static int __full_va_sweep(struct device *dev, struct seq_file *s,
const size_t size, struct iommu_domain *domain)
{
- unsigned long iova;
+ u64 iova;
dma_addr_t dma_addr;
void *virt;
phys_addr_t phys;
+ const u64 max = SZ_1G * 4ULL - 1;
int ret = 0, i;
virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
@@ -1136,7 +1106,7 @@
}
phys = virt_to_phys(virt);
- for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
+ for (iova = 0, i = 0; iova < max; iova += size, ++i) {
unsigned long expected = iova;
dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
@@ -1184,8 +1154,8 @@
}
out:
- for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
- dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+ for (iova = 0; iova < max; iova += size)
+ dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
free_pages((unsigned long)virt, get_order(size));
return ret;
@@ -1374,7 +1344,8 @@
int ret = -EINVAL, fast = 1;
phys_addr_t pt_phys;
- mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ (SZ_1G * 4ULL));
if (!mapping)
goto out;
@@ -1443,7 +1414,9 @@
size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
int ret = -EINVAL;
- mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+ /* Make the size equal to 4 GB - 1, i.e. ULONG_MAX on 32-bit */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+ (SZ_1G * 4ULL - 1));
if (!mapping)
goto out;
@@ -1855,6 +1828,33 @@
.write = iommu_debug_config_clocks_write,
};
+static ssize_t iommu_debug_trigger_fault_write(
+ struct file *file, const char __user *ubuf, size_t count,
+ loff_t *offset)
+{
+ struct iommu_debug_device *ddev = file->private_data;
+ unsigned long flags;
+
+ if (!ddev->domain) {
+ pr_err("No domain. Did you already attach?\n");
+ return -EINVAL;
+ }
+
+ if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
+ pr_err("Invalid flags format\n");
+ return -EFAULT;
+ }
+
+ iommu_trigger_fault(ddev->domain, flags);
+
+ return count;
+}
+
+static const struct file_operations iommu_debug_trigger_fault_fops = {
+ .open = simple_open,
+ .write = iommu_debug_trigger_fault_write,
+};
+
/*
* The following will only work for drivers that implement the generic
* device tree bindings described in
@@ -1970,6 +1970,13 @@
goto err_rmdir;
}
+ if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
+ &iommu_debug_trigger_fault_fops)) {
+ pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
+ dev_name(dev));
+ goto err_rmdir;
+ }
+
list_add(&ddev->list, &iommu_debug_devices);
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c90fbf0..7f9d9e1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1750,3 +1750,36 @@
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Return the ID associated with a PCI device.
+ */
+int iommu_fwspec_get_id(struct device *dev, u32 *id)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (!fwspec)
+ return -EINVAL;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ if (fwspec->num_ids != 1)
+ return -EINVAL;
+
+ *id = fwspec->ids[0];
+ return 0;
+}
+
+/*
+ * Until a formal solution for probe deferral becomes part
+ * of the iommu framework...
+ */
+int iommu_is_available(struct device *dev)
+{
+ if (!dev->bus->iommu_ops ||
+ !dev->iommu_fwspec ||
+ !dev->iommu_group)
+ return -EPROBE_DEFER;
+ return 0;
+}
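
[Editor's note] iommu_is_available() above gives consumer drivers a stop-gap probe-deferral check until the framework grows a formal one. A hypothetical consumer probe using it; everything except the helper itself is illustrative:

static int example_consumer_probe(struct platform_device *pdev)
{
	int ret;

	/* Defer until the IOMMU driver has bound and grouped this device. */
	ret = iommu_is_available(&pdev->dev);
	if (ret)
		return ret;	/* -EPROBE_DEFER until then */

	/* ... normal probe work ... */
	return 0;
}
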
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9..2d203b4 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@
return -ENOMEM;
}
+ raw_spin_lock_init(&cd->rlock);
+
cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) {
pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index b045e3b..fdc4b30 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -175,9 +175,7 @@
struct flash_node_data {
struct platform_device *pdev;
struct led_classdev cdev;
- struct pinctrl *pinctrl;
- struct pinctrl_state *gpio_state_active;
- struct pinctrl_state *gpio_state_suspend;
+ struct pinctrl *strobe_pinctrl;
struct pinctrl_state *hw_strobe_state_active;
struct pinctrl_state *hw_strobe_state_suspend;
int hw_strobe_gpio;
@@ -198,6 +196,9 @@
struct flash_switch_data {
struct platform_device *pdev;
struct regulator *vreg;
+ struct pinctrl *led_en_pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
struct led_classdev cdev;
int led_mask;
bool regulator_on;
@@ -509,7 +510,7 @@
if (led->pdata->led1n2_iclamp_low_ma) {
val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_low_ma,
- led->fnode[0].ires_ua);
+ led->fnode[LED1].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base),
FLASH_LED_CURRENT_MASK, val);
@@ -519,7 +520,7 @@
if (led->pdata->led1n2_iclamp_mid_ma) {
val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_mid_ma,
- led->fnode[0].ires_ua);
+ led->fnode[LED1].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base),
FLASH_LED_CURRENT_MASK, val);
@@ -529,7 +530,7 @@
if (led->pdata->led3_iclamp_low_ma) {
val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_low_ma,
- led->fnode[3].ires_ua);
+ led->fnode[LED3].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED3_ICLAMP_LOW(led->base),
FLASH_LED_CURRENT_MASK, val);
@@ -539,7 +540,7 @@
if (led->pdata->led3_iclamp_mid_ma) {
val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_mid_ma,
- led->fnode[3].ires_ua);
+ led->fnode[LED3].ires_ua);
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LED3_ICLAMP_MID(led->base),
FLASH_LED_CURRENT_MASK, val);
@@ -570,9 +571,9 @@
if (gpio_is_valid(fnode->hw_strobe_gpio)) {
gpio_set_value(fnode->hw_strobe_gpio, on ? 1 : 0);
- } else if (fnode->hw_strobe_state_active &&
+ } else if (fnode->strobe_pinctrl && fnode->hw_strobe_state_active &&
fnode->hw_strobe_state_suspend) {
- rc = pinctrl_select_state(fnode->pinctrl,
+ rc = pinctrl_select_state(fnode->strobe_pinctrl,
on ? fnode->hw_strobe_state_active :
fnode->hw_strobe_state_suspend);
if (rc < 0) {
@@ -949,15 +950,6 @@
led->fnode[i].led_on = false;
- if (led->fnode[i].pinctrl) {
- rc = pinctrl_select_state(led->fnode[i].pinctrl,
- led->fnode[i].gpio_state_suspend);
- if (rc < 0) {
- pr_err("failed to disable GPIO, rc=%d\n", rc);
- return rc;
- }
- }
-
if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
led->pdata->hw_strobe_option, false);
@@ -969,6 +961,17 @@
}
}
+ if (snode->led_en_pinctrl) {
+ pr_debug("Selecting suspend state for %s\n", snode->cdev.name);
+ rc = pinctrl_select_state(snode->led_en_pinctrl,
+ snode->gpio_state_suspend);
+ if (rc < 0) {
+ pr_err("failed to select pinctrl suspend state rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
snode->enabled = false;
return 0;
}
@@ -1039,15 +1042,6 @@
val |= FLASH_LED_ENABLE << led->fnode[i].id;
- if (led->fnode[i].pinctrl) {
- rc = pinctrl_select_state(led->fnode[i].pinctrl,
- led->fnode[i].gpio_state_active);
- if (rc < 0) {
- pr_err("failed to enable GPIO rc=%d\n", rc);
- return rc;
- }
- }
-
if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
led->pdata->hw_strobe_option, true);
@@ -1059,6 +1053,17 @@
}
}
+ if (snode->led_en_pinctrl) {
+ pr_debug("Selecting active state for %s\n", snode->cdev.name);
+ rc = pinctrl_select_state(snode->led_en_pinctrl,
+ snode->gpio_state_active);
+ if (rc < 0) {
+ pr_err("failed to select pinctrl active state rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
if (led->enable == 0) {
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_MOD_CTRL(led->base),
@@ -1461,6 +1466,20 @@
}
fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high;
+ rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
+ if (rc < 0) {
+ pr_err("Unable to register led node %d\n", fnode->id);
+ return rc;
+ }
+
+ fnode->cdev.dev->of_node = node;
+ fnode->strobe_pinctrl = devm_pinctrl_get(fnode->cdev.dev);
+ if (IS_ERR_OR_NULL(fnode->strobe_pinctrl)) {
+ pr_debug("No pinctrl defined for %s, err=%ld\n",
+ fnode->cdev.name, PTR_ERR(fnode->strobe_pinctrl));
+ fnode->strobe_pinctrl = NULL;
+ }
+
if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) {
fnode->hw_strobe_gpio = of_get_named_gpio(node,
@@ -1470,11 +1489,11 @@
return fnode->hw_strobe_gpio;
}
gpio_direction_output(fnode->hw_strobe_gpio, 0);
- } else {
+ } else if (fnode->strobe_pinctrl) {
fnode->hw_strobe_gpio = -1;
fnode->hw_strobe_state_active =
- pinctrl_lookup_state(fnode->pinctrl,
- "strobe_enable");
+ pinctrl_lookup_state(fnode->strobe_pinctrl,
+ "strobe_enable");
if (IS_ERR_OR_NULL(fnode->hw_strobe_state_active)) {
pr_err("No active pin for hardware strobe, rc=%ld\n",
PTR_ERR(fnode->hw_strobe_state_active));
@@ -1482,8 +1501,8 @@
}
fnode->hw_strobe_state_suspend =
- pinctrl_lookup_state(fnode->pinctrl,
- "strobe_disable");
+ pinctrl_lookup_state(fnode->strobe_pinctrl,
+ "strobe_disable");
if (IS_ERR_OR_NULL(fnode->hw_strobe_state_suspend)) {
pr_err("No suspend pin for hardware strobe, rc=%ld\n",
PTR_ERR(fnode->hw_strobe_state_suspend)
@@ -1493,38 +1512,6 @@
}
}
- rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
- if (rc < 0) {
- pr_err("Unable to register led node %d\n", fnode->id);
- return rc;
- }
-
- fnode->cdev.dev->of_node = node;
-
- fnode->pinctrl = devm_pinctrl_get(fnode->cdev.dev);
- if (IS_ERR_OR_NULL(fnode->pinctrl)) {
- pr_debug("No pinctrl defined\n");
- fnode->pinctrl = NULL;
- } else {
- fnode->gpio_state_active =
- pinctrl_lookup_state(fnode->pinctrl, "led_enable");
- if (IS_ERR_OR_NULL(fnode->gpio_state_active)) {
- pr_err("Cannot lookup LED active state\n");
- devm_pinctrl_put(fnode->pinctrl);
- fnode->pinctrl = NULL;
- return PTR_ERR(fnode->gpio_state_active);
- }
-
- fnode->gpio_state_suspend =
- pinctrl_lookup_state(fnode->pinctrl, "led_disable");
- if (IS_ERR_OR_NULL(fnode->gpio_state_suspend)) {
- pr_err("Cannot lookup LED disable state\n");
- devm_pinctrl_put(fnode->pinctrl);
- fnode->pinctrl = NULL;
- return PTR_ERR(fnode->gpio_state_suspend);
- }
- }
-
return 0;
}
@@ -1589,6 +1576,36 @@
}
snode->cdev.dev->of_node = node;
+
+ snode->led_en_pinctrl = devm_pinctrl_get(snode->cdev.dev);
+ if (IS_ERR_OR_NULL(snode->led_en_pinctrl)) {
+ pr_debug("No pinctrl defined for %s, err=%ld\n",
+ snode->cdev.name, PTR_ERR(snode->led_en_pinctrl));
+ snode->led_en_pinctrl = NULL;
+ }
+
+ if (snode->led_en_pinctrl) {
+ snode->gpio_state_active =
+ pinctrl_lookup_state(snode->led_en_pinctrl,
+ "led_enable");
+ if (IS_ERR_OR_NULL(snode->gpio_state_active)) {
+ pr_err("Cannot lookup LED active state\n");
+ devm_pinctrl_put(snode->led_en_pinctrl);
+ snode->led_en_pinctrl = NULL;
+ return PTR_ERR(snode->gpio_state_active);
+ }
+
+ snode->gpio_state_suspend =
+ pinctrl_lookup_state(snode->led_en_pinctrl,
+ "led_disable");
+ if (IS_ERR_OR_NULL(snode->gpio_state_suspend)) {
+ pr_err("Cannot lookup LED disable state\n");
+ devm_pinctrl_put(snode->led_en_pinctrl);
+ snode->led_en_pinctrl = NULL;
+ return PTR_ERR(snode->gpio_state_suspend);
+ }
+ }
+
return 0;
}
@@ -2095,22 +2112,24 @@
if (!strcmp("flash", temp_string) ||
!strcmp("torch", temp_string)) {
rc = qpnp_flash_led_parse_each_led_dt(led,
- &led->fnode[i++], temp);
+ &led->fnode[i], temp);
if (rc < 0) {
pr_err("Unable to parse flash node %d rc=%d\n",
i, rc);
goto error_led_register;
}
+ i++;
}
if (!strcmp("switch", temp_string)) {
rc = qpnp_flash_led_parse_and_register_switch(led,
- &led->snode[j++], temp);
+ &led->snode[j], temp);
if (rc < 0) {
pr_err("Unable to parse and register switch node, rc=%d\n",
rc);
goto error_switch_register;
}
+ j++;
}
}
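
[Editor's note] The pinctrl handling above treats a missing pinctrl node as optional, but a half-defined one (pinctrl present, states missing) as an error. The lookup logic in isolation, following the switch-node code; a sketch rather than the full registration path:

static int example_get_switch_pinctrl(struct flash_switch_data *snode)
{
	snode->led_en_pinctrl = devm_pinctrl_get(snode->cdev.dev);
	if (IS_ERR_OR_NULL(snode->led_en_pinctrl)) {
		/* No pinctrl is fine; just skip state selection later. */
		snode->led_en_pinctrl = NULL;
		return 0;
	}

	snode->gpio_state_active =
		pinctrl_lookup_state(snode->led_en_pinctrl, "led_enable");
	if (IS_ERR_OR_NULL(snode->gpio_state_active))
		return -EINVAL;

	snode->gpio_state_suspend =
		pinctrl_lookup_state(snode->led_en_pinctrl, "led_disable");
	if (IS_ERR_OR_NULL(snode->gpio_state_suspend))
		return -EINVAL;

	return 0;
}
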
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 3060cfa..cb19cef 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -2264,7 +2264,7 @@
{
return platform_driver_register(&qpnp_wled_driver);
}
-module_init(qpnp_wled_init);
+subsys_initcall(qpnp_wled_init);
static void __exit qpnp_wled_exit(void)
{
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 1c73c5a2..6d0e913 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -13,7 +13,9 @@
#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -83,8 +85,8 @@
#define TCS_HIDDEN_CMD_SHIFT 0x08
#define TCS_TYPE_NR 4
-#define TCS_MBOX_TOUT_MS 2000
#define MAX_POOL_SIZE (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define TCS_M_INIT 0xFFFF
struct tcs_drv;
@@ -94,12 +96,13 @@
struct tcs_mbox_msg *msg;
u32 m; /* m-th TCS */
struct tasklet_struct tasklet;
- struct delayed_work dwork;
int err;
+ int idx;
+ bool in_use;
};
struct tcs_response_pool {
- struct tcs_response *resp;
+ struct tcs_response resp[MAX_POOL_SIZE];
spinlock_t lock;
DECLARE_BITMAP(avail, MAX_POOL_SIZE);
};
@@ -115,8 +118,6 @@
int ncpt; /* num cmds per tcs */
DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
spinlock_t tcs_lock; /* TCS type lock */
- spinlock_t tcs_m_lock[MAX_TCS_PER_TYPE];
- struct tcs_response *resp[MAX_TCS_PER_TYPE];
};
/* One per MBOX controller */
@@ -132,10 +133,12 @@
int num_tcs;
struct workqueue_struct *wq;
struct tcs_response_pool *resp_pool;
+ atomic_t tcs_in_use[MAX_POOL_SIZE];
+ atomic_t tcs_send_count[MAX_POOL_SIZE];
+ atomic_t tcs_irq_count[MAX_POOL_SIZE];
};
static void tcs_notify_tx_done(unsigned long data);
-static void tcs_notify_timeout(struct work_struct *work);
static int tcs_response_pool_init(struct tcs_drv *drv)
{
@@ -146,16 +149,12 @@
if (!pool)
return -ENOMEM;
- pool->resp = devm_kzalloc(&drv->pdev->dev, sizeof(*pool->resp) *
- MAX_POOL_SIZE, GFP_KERNEL);
- if (!pool->resp)
- return -ENOMEM;
-
for (i = 0; i < MAX_POOL_SIZE; i++) {
tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
(unsigned long) &pool->resp[i]);
- INIT_DELAYED_WORK(&pool->resp[i].dwork,
- tcs_notify_timeout);
+ pool->resp[i].drv = drv;
+ pool->resp[i].idx = i;
+ pool->resp[i].m = TCS_M_INIT;
}
spin_lock_init(&pool->lock);
@@ -164,39 +163,59 @@
return 0;
}
-static struct tcs_response *get_response_from_pool(struct tcs_drv *drv)
+static struct tcs_response *setup_response(struct tcs_drv *drv,
+ struct tcs_mbox_msg *msg, struct mbox_chan *chan,
+ u32 m, int err)
{
struct tcs_response_pool *pool = drv->resp_pool;
struct tcs_response *resp = ERR_PTR(-ENOMEM);
- unsigned long flags;
int pos;
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock(&pool->lock);
pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
if (pos != MAX_POOL_SIZE) {
bitmap_set(pool->avail, pos, 1);
resp = &pool->resp[pos];
- memset(resp, 0, sizeof(*resp));
- tasklet_init(&resp->tasklet, tcs_notify_tx_done,
- (unsigned long) resp);
- INIT_DELAYED_WORK(&resp->dwork, tcs_notify_timeout);
- resp->drv = drv;
+ resp->chan = chan;
+ resp->msg = msg;
+ resp->m = m;
+ resp->err = err;
+ resp->in_use = false;
}
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock(&pool->lock);
return resp;
}
-static void free_response_to_pool(struct tcs_response *resp)
+static void free_response(struct tcs_response *resp)
{
struct tcs_response_pool *pool = resp->drv->resp_pool;
- unsigned long flags;
- int i;
- spin_lock_irqsave(&pool->lock, flags);
- i = resp - pool->resp;
- bitmap_clear(pool->avail, i, 1);
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_lock(&pool->lock);
+ resp->err = -EINVAL;
+ bitmap_clear(pool->avail, resp->idx, 1);
+ spin_unlock(&pool->lock);
+}
+
+static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m)
+{
+ struct tcs_response_pool *pool = drv->resp_pool;
+ struct tcs_response *resp = NULL;
+ int pos = 0;
+
+ do {
+ pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
+ if (pos == MAX_POOL_SIZE)
+ break;
+ resp = &pool->resp[pos];
+ if (resp->m == m && !resp->in_use) {
+ resp->in_use = true;
+ break;
+ }
+ pos++;
+ } while (1);
+
+ return resp;
}
static inline u32 read_drv_config(void __iomem *base)
@@ -224,13 +243,16 @@
write_tcs_reg(base, reg, m, n, data);
if (data == read_tcs_reg(base, reg, m, n))
break;
- cpu_relax();
+ udelay(1);
} while (1);
}
-static inline bool tcs_is_free(void __iomem *base, int m)
+static inline bool tcs_is_free(struct tcs_drv *drv, int m)
{
- return read_tcs_reg(base, TCS_DRV_STATUS, m, 0);
+ void __iomem *base = drv->reg_base;
+
+ return read_tcs_reg(base, TCS_DRV_STATUS, m, 0) &&
+ !atomic_read(&drv->tcs_in_use[m]);
}
static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
@@ -306,21 +328,23 @@
return get_tcs_of_type(drv, type);
}
-static inline struct tcs_response *get_tcs_response(struct tcs_drv *drv, int m)
-{
- struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
-
- return tcs ? tcs->resp[m - tcs->tcs_offset] : NULL;
-}
-
static inline void send_tcs_response(struct tcs_response *resp)
{
tasklet_schedule(&resp->tasklet);
}
-static inline void schedule_tcs_err_response(struct tcs_response *resp)
+static inline void enable_tcs_irq(struct tcs_drv *drv, int m, bool enable)
{
- schedule_delayed_work(&resp->dwork, msecs_to_jiffies(TCS_MBOX_TOUT_MS));
+ void __iomem *base = drv->reg_base;
+ u32 data;
+
+ /* Enable interrupts for non-ACTIVE TCS */
+ data = read_tcs_reg(base, TCS_DRV_IRQ_ENABLE, 0, 0);
+ if (enable)
+ data |= BIT(m);
+ else
+ data &= ~BIT(m);
+ write_tcs_reg(base, TCS_DRV_IRQ_ENABLE, 0, 0, data);
}
/**
@@ -335,25 +359,22 @@
struct tcs_mbox *tcs;
struct tcs_response *resp;
struct tcs_cmd *cmd;
- u32 irq_clear = 0;
u32 data;
/* Know which TCSes were triggered */
irq_status = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);
- for (m = 0; irq_status >= BIT(m); m++) {
- if (!(irq_status & BIT(m)))
+ for (m = 0; m < drv->num_tcs; m++) {
+ if (!(irq_status & (u32)BIT(m)))
continue;
+ atomic_inc(&drv->tcs_irq_count[m]);
- /* Find the TCS that triggered */
- resp = get_tcs_response(drv, m);
+ resp = get_response(drv, m);
if (!resp) {
pr_err("No resp request for TCS-%d\n", m);
continue;
}
- cancel_delayed_work(&resp->dwork);
-
tcs = get_tcs_from_index(drv, m);
if (!tcs) {
pr_err("TCS-%d doesn't exist in DRV\n", m);
@@ -387,18 +408,26 @@
data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
data &= ~TCS_AMC_MODE_ENABLE;
write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
+ /*
+ * Disable the interrupt for this TCS to avoid being
+ * spammed with interrupts when the solver sends its
+ * wake votes.
+ */
+ enable_tcs_irq(drv, m, false);
} else {
/* Clear the enable bit for the commands */
write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
}
- /* Notify the client that this request is completed. */
- send_tcs_response(resp);
- irq_clear |= BIT(m);
- }
+ /* Clear the TCS IRQ status */
+ write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
- /* Clear the TCS IRQ status */
- write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, irq_clear);
+ /* Clean up response object and notify mbox in tasklet */
+ send_tcs_response(resp);
+
+ /* Mark the TCS free; the tasklet notifies the client. */
+ atomic_set(&drv->tcs_in_use[m], 0);
+ }
return IRQ_HANDLED;
}
@@ -423,55 +452,8 @@
int err = resp->err;
int m = resp->m;
- free_response_to_pool(resp);
mbox_notify_tx_done(chan, msg, m, err);
-}
-
-/**
- * tcs_notify_timeout: TX Done for requests that do trigger TCS, but
- * we do not get a response IRQ back.
- */
-static void tcs_notify_timeout(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct tcs_response *resp = container_of(dwork,
- struct tcs_response, dwork);
- struct mbox_chan *chan = resp->chan;
- struct tcs_mbox_msg *msg = resp->msg;
- struct tcs_drv *drv = resp->drv;
- int m = resp->m;
- int err = -EIO;
-
- /*
- * In case the RPMH resource fails to respond to the completion
- * request, the TCS would be blocked forever waiting on the response.
- * There is no way to recover from this case.
- */
- if (!tcs_is_free(drv->reg_base, m)) {
- bool pending = false;
- struct tcs_cmd *cmd;
- int i;
- u32 addr;
-
- for (i = 0; i < msg->num_payload; i++) {
- cmd = &msg->payload[i];
- addr = read_tcs_reg(drv->reg_base, TCS_DRV_CMD_ADDR,
- m, i);
- pending = (cmd->addr == addr);
- }
- if (pending) {
- pr_err("TCS-%d blocked waiting for RPMH to respond.\n",
- m);
- for (i = 0; i < msg->num_payload; i++)
- pr_err("Addr: 0x%x Data: 0x%x\n",
- msg->payload[i].addr,
- msg->payload[i].data);
- BUG();
- }
- }
-
- free_response_to_pool(resp);
- mbox_notify_tx_done(chan, msg, -1, err);
+ free_response(resp);
}
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
@@ -514,8 +496,6 @@
write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);
if (trigger) {
- /* Clear pending interrupt bits for this TCS, OK to not lock */
- write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
/* HW req: Clear the DRV_CONTROL and enable TCS again */
write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
@@ -543,60 +523,53 @@
tcs = get_tcs_of_type(drv, WAKE_TCS);
for (m = tcs->tcs_offset; m < tcs->tcs_offset + tcs->num_tcs; m++)
- if (!tcs_is_free(drv->reg_base, m))
+ if (!tcs_is_free(drv, m))
return false;
return true;
}
-static void wait_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
+static int check_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
struct tcs_mbox_msg *msg)
{
- u32 curr_enabled;
+ u32 curr_enabled, addr;
int i, j, k;
- bool is_free;
+ void __iomem *base = drv->reg_base;
+ int m = tcs->tcs_offset;
- do {
- is_free = true;
- for (i = 1; i > tcs->tcs_mask; i = i << 1) {
- if (!(tcs->tcs_mask & i))
+ for (i = 0; i < tcs->num_tcs; i++, m++) {
+ if (tcs_is_free(drv, m))
+ continue;
+
+ curr_enabled = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+
+ for (j = 0; j < MAX_CMDS_PER_TCS; j++) {
+ if (!(curr_enabled & (u32)BIT(j)))
continue;
- if (tcs_is_free(drv->reg_base, i))
- continue;
- curr_enabled = read_tcs_reg(drv->reg_base,
- TCS_DRV_CMD_ENABLE, i, 0);
- for (j = 0; j < msg->num_payload; j++) {
- for (k = 0; k < curr_enabled; k++) {
- if (!(curr_enabled & BIT(k)))
- continue;
- if (tcs->cmd_addr[k] ==
- msg->payload[j].addr) {
- is_free = false;
- goto retry;
- }
- }
+
+ addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, j);
+ for (k = 0; k < msg->num_payload; k++) {
+ if (addr == msg->payload[k].addr)
+ return -EBUSY;
}
}
-retry:
- if (!is_free)
- cpu_relax();
- } while (!is_free);
+ }
+
+ return 0;
}
static int find_free_tcs(struct tcs_mbox *tcs)
{
- int slot, m = 0;
+ int slot = -EBUSY;
+ int m = 0;
/* Loop until we find a free AMC */
- do {
- if (tcs_is_free(tcs->drv->reg_base, tcs->tcs_offset + m)) {
+ for (m = 0; m < tcs->num_tcs; m++) {
+ if (tcs_is_free(tcs->drv, tcs->tcs_offset + m)) {
slot = m * tcs->ncpt;
break;
}
- if (++m > tcs->num_tcs)
- m = 0;
- cpu_relax();
- } while (1);
+ }
return slot;
}
@@ -652,26 +625,6 @@
return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
}
-static struct tcs_response *setup_response(struct tcs_mbox *tcs,
- struct mbox_chan *chan, struct tcs_mbox_msg *msg, int m)
-{
- struct tcs_response *resp = get_response_from_pool(tcs->drv);
-
- if (IS_ERR(resp))
- return resp;
-
- if (m < tcs->tcs_offset)
- return ERR_PTR(-EINVAL);
-
- tcs->resp[m - tcs->tcs_offset] = resp;
- resp->msg = msg;
- resp->chan = chan;
- resp->m = m;
- resp->err = 0;
-
- return resp;
-}
-
static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
bool trigger)
{
@@ -679,21 +632,36 @@
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
int d = drv->drv_id;
struct tcs_mbox *tcs;
- int i, slot, offset, m, n;
+ int i, slot, offset, m, n, ret;
struct tcs_response *resp = NULL;
+ unsigned long flags;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
+ if (trigger)
+ resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
+
/* Identify the sequential slots that we can write to */
- spin_lock(&tcs->tcs_lock);
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
slot = find_slots(tcs, msg);
if (slot < 0) {
dev_err(dev, "No TCS slot found.\n");
- spin_unlock(&tcs->tcs_lock);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ if (resp)
+ free_response(resp);
return slot;
}
+
+ if (trigger) {
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ return ret;
+ }
+ }
+
/* Mark the slots as in-use, before we unlock */
if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
bitmap_set(tcs->slots, slot, msg->num_payload);
@@ -702,37 +670,25 @@
for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
tcs->cmd_addr[slot + i] = msg->payload[i].addr;
- if (trigger)
- resp = setup_response(tcs, chan, msg,
- slot / tcs->ncpt + tcs->tcs_offset);
-
- spin_unlock(&tcs->tcs_lock);
-
- /*
- * Find the TCS corresponding to the slot and start writing.
- * Break down 'slot' into a 'n' position in the 'm'th TCS.
- */
offset = slot / tcs->ncpt;
m = offset + tcs->tcs_offset;
n = slot % tcs->ncpt;
- spin_lock(&tcs->tcs_m_lock[offset]);
+ /* In-flight check was done above; mark this TCS as busy */
if (trigger) {
- /* Block, if we have an address from the msg in flight */
- wait_for_req_inflight(drv, tcs, msg);
- /* If the TCS is busy there is nothing to do but spin wait */
- while (!tcs_is_free(drv->reg_base, m))
- cpu_relax();
+ resp->m = m;
+ /* Mark the TCS as busy */
+ atomic_set(&drv->tcs_in_use[m], 1);
+ atomic_inc(&drv->tcs_send_count[m]);
+ /* Enable interrupt for active votes through wake TCS */
+ if (tcs->type != ACTIVE_TCS)
+ enable_tcs_irq(drv, m, true);
}
/* Write to the TCS or AMC */
__tcs_buffer_write(drv, d, m, n, msg, trigger);
- /* Schedule a timeout response, incase there is no actual response */
- if (trigger)
- schedule_tcs_err_response(resp);
-
- spin_unlock(&tcs->tcs_m_lock[offset]);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
return 0;
}
@@ -749,24 +705,21 @@
int m, i;
int inv_types[] = { WAKE_TCS, SLEEP_TCS };
int type = 0;
+ unsigned long flags;
do {
tcs = get_tcs_of_type(drv, inv_types[type]);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock(&tcs->tcs_lock);
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
for (i = 0; i < tcs->num_tcs; i++) {
m = i + tcs->tcs_offset;
- spin_lock(&tcs->tcs_m_lock[i]);
- while (!tcs_is_free(drv->reg_base, m))
- cpu_relax();
__tcs_buffer_invalidate(drv->reg_base, m);
- spin_unlock(&tcs->tcs_m_lock[i]);
}
/* Mark the TCS as free */
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
- spin_unlock(&tcs->tcs_lock);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
} while (++type < ARRAY_SIZE(inv_types));
return 0;
@@ -788,6 +741,7 @@
struct tcs_mbox_msg *msg = data;
const struct device *dev = chan->cl->dev;
int ret = -EINVAL;
+ int count = 0;
if (!msg) {
dev_err(dev, "Payload error.\n");
@@ -824,17 +778,21 @@
tcs_mbox_invalidate(chan);
/* Post the message to the TCS and trigger */
- ret = tcs_mbox_write(chan, msg, true);
+ do {
+ ret = tcs_mbox_write(chan, msg, true);
+ if (ret == -EBUSY) {
+ ret = -EIO;
+ udelay(10);
+ } else
+ break;
+ } while (++count < 10);
tx_fail:
if (ret) {
struct tcs_drv *drv = container_of(chan->mbox,
- struct tcs_drv, mbox);
- struct tcs_response *resp = get_response_from_pool(drv);
-
- resp->chan = chan;
- resp->msg = msg;
- resp->err = ret;
+ struct tcs_drv, mbox);
+ struct tcs_response *resp = setup_response(
+ drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
send_tcs_response(resp);
@@ -862,6 +820,7 @@
const struct device *dev = chan->cl->dev;
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
struct tcs_mbox *tcs;
+ unsigned long flags;
tcs = get_tcs_of_type(drv, CONTROL_TCS);
if (IS_ERR(tcs))
@@ -872,9 +831,9 @@
return -EINVAL;
}
- spin_lock(&tcs->tcs_lock);
+ spin_lock_irqsave(&tcs->tcs_lock, flags);
__tcs_write_hidden(tcs->drv, drv->drv_id, msg);
- spin_unlock(&tcs->tcs_lock);
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
return 0;
}
@@ -967,7 +926,6 @@
u32 config, max_tcs, ncpt;
int tcs_type_count[TCS_TYPE_NR] = { 0 };
struct resource *res;
- u32 irq_mask;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -1048,8 +1006,6 @@
if (!tcs->cmd_addr)
return -ENOMEM;
- for (j = 0; j < tcs->num_tcs; j++)
- spin_lock_init(&tcs->tcs_m_lock[j]);
}
/* Allocate only that many channels specified in DT for our MBOX */
@@ -1104,21 +1060,18 @@
if (irq < 0)
return irq;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- tcs_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
"tcs_irq", drv);
if (ret)
return ret;
- /*
- * Enable interrupts for AMC TCS,
- * if there are no AMC TCS, use wake TCS.
- */
- irq_mask = (drv->tcs[ACTIVE_TCS].num_tcs) ?
- drv->tcs[ACTIVE_TCS].tcs_mask :
- drv->tcs[WAKE_TCS].tcs_mask;
- write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0, irq_mask);
+ /* Enable interrupts for AMC TCS */
+ write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0,
+ drv->tcs[ACTIVE_TCS].tcs_mask);
+
+ for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
+ atomic_set(&drv->tcs_in_use[i], 0);
ret = mbox_controller_register(&drv->mbox);
if (ret)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 15daa36..ee75e35 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3589,7 +3589,7 @@
return r;
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
to_bytes(rs->requested_bitmap_chunk_sectors), 0);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index a8d4d2f..3b62315 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -147,8 +147,6 @@
block = fec_buffer_rs_block(v, fio, n, i);
res = fec_decode_rs8(v, fio, block, &par[offset], neras);
if (res < 0) {
- dm_bufio_release(buf);
-
r = res;
goto error;
}
@@ -173,6 +171,8 @@
done:
r = corrected;
error:
+ dm_bufio_release(buf);
+
if (r < 0 && neras)
DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
v->data_dev->name, (unsigned long long)rsb, r);
@@ -272,7 +272,7 @@
&is_zero) == 0) {
/* skip known zero blocks entirely */
if (is_zero)
- continue;
+ goto done;
/*
* skip if we have already found the theoretical
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 628ba00..aac7161 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -69,6 +69,13 @@
struct dm_stats_aux stats_aux;
};
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+ if (rq && rq->end_io_data)
+ return &((struct dm_rq_target_io *)rq->end_io_data)->info;
+ return NULL;
+}
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -986,26 +993,29 @@
struct dm_offload *o = container_of(cb, struct dm_offload, cb);
struct bio_list list;
struct bio *bio;
+ int i;
INIT_LIST_HEAD(&o->cb.list);
if (unlikely(!current->bio_list))
return;
- list = *current->bio_list;
- bio_list_init(current->bio_list);
+ for (i = 0; i < 2; i++) {
+ list = current->bio_list[i];
+ bio_list_init(&current->bio_list[i]);
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set) {
- bio_list_add(current->bio_list, bio);
- continue;
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+ if (unlikely(!bs) || bs == fs_bio_set) {
+ bio_list_add(&current->bio_list[i], bio);
+ continue;
+ }
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ spin_unlock(&bs->rescue_lock);
}
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
}
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 55b5e0e..4c4aab0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -941,7 +941,8 @@
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1]))),
conf->resync_lock);
conf->nr_waiting--;
if (!conf->nr_waiting)
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index c897669..19de267 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index 03b18cf..429474b 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -75,7 +75,26 @@
static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, unsigned long arg)
{
- return cam_subdev_ioctl(sd, cmd, compat_ptr(arg));
+ struct cam_control cmd_data;
+ int rc;
+
+ if (copy_from_user(&cmd_data, (void __user *)arg,
+ sizeof(cmd_data))) {
+ pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ (void __user *)arg, sizeof(cmd_data));
+ return -EFAULT;
+ }
+ rc = cam_subdev_ioctl(sd, cmd, &cmd_data);
+ if (!rc) {
+ if (copy_to_user((void __user *)arg, &cmd_data,
+ sizeof(cmd_data))) {
+ pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ (void __user *)arg, sizeof(cmd_data));
+ rc = -EFAULT;
+ }
+ }
+
+ return rc;
}
#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index f8c864f..87707b1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o cam_req_mgr_workq.o
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o cam_req_mgr_workq.o cam_mem_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
new file mode 100644
index 0000000..f3ef0e9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -0,0 +1,968 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-MEM-MGR %s:%d " fmt, __func__, __LINE__
+
+#ifdef CONFIG_MEM_MGR_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+#include <asm/cacheflush.h>
+
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
+static struct cam_mem_table tbl;
+
+static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
+ uint64_t *vaddr,
+ size_t *len)
+{
+ *vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("kernel map fail");
+ return -ENOSPC;
+ }
+
+ if (ion_handle_get_size(tbl.client, hdl, len)) {
+ pr_err("kernel get len failed");
+ ion_unmap_kernel(tbl.client, hdl);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int cam_mem_util_get_dma_dir(uint32_t flags)
+{
+ int rc = -EINVAL;
+
+ if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
+ rc = DMA_TO_DEVICE;
+ else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
+ rc = DMA_FROM_DEVICE;
+ else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ rc = DMA_BIDIRECTIONAL;
+
+ return rc;
+}
+
+static int cam_mem_util_client_create(void)
+{
+ int rc = 0;
+
+ tbl.client = msm_ion_client_create("camera_global_pool");
+ if (IS_ERR_OR_NULL(tbl.client)) {
+ pr_err("fail to create client\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void cam_mem_util_client_destroy(void)
+{
+ ion_client_destroy(tbl.client);
+ tbl.client = NULL;
+}
+
+int cam_mem_mgr_init(void)
+{
+ int rc;
+ int i;
+ int bitmap_size;
+
+ memset(tbl.bufq, 0, sizeof(tbl.bufq));
+
+ rc = cam_mem_util_client_create();
+ if (rc < 0) {
+ pr_err("fail to create ion client\n");
+ goto client_fail;
+ }
+
+ bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
+ tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!tbl.bitmap) {
+ rc = -ENOMEM;
+ goto bitmap_fail;
+ }
+ tbl.bits = bitmap_size * BITS_PER_BYTE;
+ bitmap_zero(tbl.bitmap, tbl.bits);
+ /* We need to reserve slot 0 because 0 is invalid */
+ set_bit(0, tbl.bitmap);
+
+ for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+ tbl.bufq[i].fd = -1;
+ tbl.bufq[i].buf_handle = -1;
+ }
+ mutex_init(&tbl.m_lock);
+ return rc;
+
+bitmap_fail:
+ cam_mem_util_client_destroy();
+client_fail:
+ return rc;
+}
+
+static int cam_mem_mgr_cleanup_table(void)
+{
+ int i;
+
+ mutex_lock(&tbl.m_lock);
+ for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+ if (!tbl.bufq[i].active) {
+ CDBG("Buffer inactive at idx=%d, continuing\n", i);
+ continue;
+ } else {
+ pr_err("Active buffer at idx=%d, possible leak\n", i);
+ }
+
+ mutex_lock(&tbl.bufq[i].q_lock);
+ ion_free(tbl.client, tbl.bufq[i].i_hdl);
+ tbl.bufq[i].fd = -1;
+ tbl.bufq[i].flags = 0;
+ tbl.bufq[i].buf_handle = -1;
+ tbl.bufq[i].vaddr = 0;
+ tbl.bufq[i].len = 0;
+ memset(tbl.bufq[i].hdls, 0,
+ sizeof(int32_t) * tbl.bufq[i].num_hdl);
+ tbl.bufq[i].num_hdl = 0;
+ tbl.bufq[i].i_hdl = NULL;
+ tbl.bufq[i].active = false;
+ mutex_unlock(&tbl.bufq[i].q_lock);
+ mutex_destroy(&tbl.bufq[i].q_lock);
+ }
+ bitmap_zero(tbl.bitmap, tbl.bits);
+ /* We need to reserve slot 0 because 0 is invalid */
+ set_bit(0, tbl.bitmap);
+ mutex_unlock(&tbl.m_lock);
+
+ return 0;
+}
+
+void cam_mem_mgr_deinit(void)
+{
+ cam_mem_mgr_cleanup_table();
+ mutex_lock(&tbl.m_lock);
+ bitmap_zero(tbl.bitmap, tbl.bits);
+ kfree(tbl.bitmap);
+ tbl.bitmap = NULL;
+ cam_mem_util_client_destroy();
+ mutex_unlock(&tbl.m_lock);
+ mutex_destroy(&tbl.m_lock);
+}
+
+static int32_t cam_mem_get_slot(void)
+{
+ int32_t idx;
+
+ mutex_lock(&tbl.m_lock);
+ idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+ mutex_unlock(&tbl.m_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(idx, tbl.bitmap);
+ tbl.bufq[idx].active = true;
+ mutex_init(&tbl.bufq[idx].q_lock);
+ mutex_unlock(&tbl.m_lock);
+
+ return idx;
+}
+
+static void cam_mem_put_slot(int32_t idx)
+{
+ mutex_lock(&tbl.m_lock);
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ tbl.bufq[idx].active = false;
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+ mutex_destroy(&tbl.bufq[idx].q_lock);
+ clear_bit(idx, tbl.bitmap);
+ mutex_unlock(&tbl.m_lock);
+}
+
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+ uint64_t *iova_ptr, size_t *len_ptr)
+{
+ int rc = 0, idx;
+
+ idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+ return -EINVAL;
+
+ if (!tbl.bufq[idx].active)
+ return -EINVAL;
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ if (buf_handle != tbl.bufq[idx].buf_handle) {
+ rc = -EINVAL;
+ goto handle_mismatch;
+ }
+
+ rc = cam_smmu_get_iova(mmu_handle,
+ tbl.bufq[idx].fd,
+ iova_ptr,
+ len_ptr);
+ if (rc < 0)
+ pr_err("fail to get buf hdl :%d", buf_handle);
+
+handle_mismatch:
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_io_buf);
+
+int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
+{
+ int rc = 0;
+ int idx;
+ struct ion_handle *ion_hdl = NULL;
+ uint64_t kvaddr = 0;
+ size_t klen = 0;
+
+ if (!buf_handle || !vaddr_ptr || !len)
+ return -EINVAL;
+
+ idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+ return -EINVAL;
+
+ if (!tbl.bufq[idx].active)
+ return -EPERM;
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ if (buf_handle != tbl.bufq[idx].buf_handle) {
+ rc = -EINVAL;
+ goto exit_func;
+ }
+
+ ion_hdl = tbl.bufq[idx].i_hdl;
+ if (!ion_hdl) {
+ pr_err("Invalid ION handle\n");
+ rc = -EINVAL;
+ goto exit_func;
+ }
+
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
+ if (!tbl.bufq[idx].kmdvaddr) {
+ rc = cam_mem_util_map_cpu_va(ion_hdl,
+ &kvaddr, &klen);
+ if (rc)
+ goto exit_func;
+ tbl.bufq[idx].kmdvaddr = kvaddr;
+ }
+ } else {
+ rc = -EINVAL;
+ goto exit_func;
+ }
+
+ *vaddr_ptr = tbl.bufq[idx].kmdvaddr;
+ *len = tbl.bufq[idx].len;
+
+exit_func:
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_cpu_buf);
+
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
+{
+ int rc = 0, idx;
+ uint32_t ion_cache_ops;
+ unsigned long ion_flag = 0;
+
+ if (!cmd)
+ return -EINVAL;
+
+ idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+ return -EINVAL;
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+
+ if (!tbl.bufq[idx].active) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
+ &ion_flag);
+ if (rc) {
+ pr_err("cache get flags failed %d\n", rc);
+ goto fail;
+ }
+
+ if (ION_IS_CACHED(ion_flag)) {
+ switch (cmd->mem_cache_ops) {
+ case CAM_MEM_CLEAN_CACHE:
+ ion_cache_ops = ION_IOC_CLEAN_CACHES;
+ break;
+ case CAM_MEM_INV_CACHE:
+ ion_cache_ops = ION_IOC_INV_CACHES;
+ break;
+ case CAM_MEM_CLEAN_INV_CACHE:
+ ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+ break;
+ default:
+ pr_err("invalid cache ops :%d", cmd->mem_cache_ops);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rc = msm_ion_do_cache_op(tbl.client,
+ tbl.bufq[idx].i_hdl,
+ (void *)tbl.bufq[idx].vaddr,
+ tbl.bufq[idx].len,
+ ion_cache_ops);
+ if (rc)
+ pr_err("cache operation failed %d\n", rc);
+ }
+fail:
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
+
+static int cam_mem_util_get_ion_buffer(size_t len,
+ size_t align,
+ unsigned int heap_id_mask,
+ unsigned int flags,
+ struct ion_handle **hdl,
+ int *fd)
+{
+ int rc = 0;
+
+ *hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+ if (IS_ERR_OR_NULL(*hdl))
+ return -ENOMEM;
+
+ *fd = ion_share_dma_buf_fd(tbl.client, *hdl);
+ if (*fd < 0) {
+ pr_err("dma buf get fd fail");
+ rc = -EINVAL;
+ goto get_fd_fail;
+ }
+
+ return rc;
+
+get_fd_fail:
+ ion_free(tbl.client, *hdl);
+ return rc;
+}
+
+static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
+ struct ion_handle **hdl,
+ int *fd)
+{
+ uint32_t heap_id;
+ uint32_t ion_flag = 0;
+ int rc;
+
+ if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+ heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
+ else
+ heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+ if (cmd->flags & CAM_MEM_FLAG_CACHE)
+ ion_flag |= ION_FLAG_CACHED;
+ else
+ ion_flag &= ~ION_FLAG_CACHED;
+
+ rc = cam_mem_util_get_ion_buffer(cmd->len,
+ cmd->align,
+ heap_id,
+ ion_flag,
+ hdl,
+ fd);
+
+ return rc;
+}
+
+static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+ if (!cmd->flags) {
+ pr_err("Invalid flags\n");
+ return -EINVAL;
+ }
+
+ if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+ pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+ CAM_MEM_MMU_MAX_HANDLE);
+ return -EINVAL;
+ }
+
+ if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+ cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+ pr_err("Kernel mapping in secure mode not allowed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
+{
+ if (!cmd->flags) {
+ pr_err("Invalid flags\n");
+ return -EINVAL;
+ }
+
+ if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+ pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+ CAM_MEM_MMU_MAX_HANDLE);
+ return -EINVAL;
+ }
+
+ if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+ cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+ pr_err("Kernel mapping in secure mode not allowed");
+ return -EINVAL;
+ }
+
+ if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+ pr_err("Shared memory buffers are not allowed to be mapped\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cam_mem_util_map_hw_va(uint32_t flags,
+ int32_t *mmu_hdls,
+ int32_t num_hdls,
+ int fd,
+ dma_addr_t *hw_vaddr,
+ size_t *len,
+ enum cam_smmu_region_id region)
+{
+ int i;
+ int rc = -1;
+ int dir = cam_mem_util_get_dma_dir(flags);
+
+ if (dir < 0) {
+ pr_err("fail to map DMA direction\n");
+ return dir;
+ }
+
+ if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+ for (i = 0; i < num_hdls; i++) {
+ rc = cam_smmu_map_sec_iova(mmu_hdls[i],
+ fd,
+ dir,
+ (dma_addr_t *)hw_vaddr,
+ len);
+
+ if (rc < 0) {
+ pr_err("Failed to securely map to smmu");
+ goto multi_map_fail;
+ }
+ }
+ } else {
+ for (i = 0; i < num_hdls; i++) {
+ rc = cam_smmu_map_iova(mmu_hdls[i],
+ fd,
+ dir,
+ (dma_addr_t *)hw_vaddr,
+ len,
+ region);
+
+ if (rc < 0) {
+ pr_err("Failed to map to smmu");
+ goto multi_map_fail;
+ }
+ }
+ }
+
+ return rc;
+multi_map_fail:
+ if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+ for (--i; i > 0; i--)
+ cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+ else
+ for (--i; i > 0; i--)
+ cam_smmu_unmap_iova(mmu_hdls[i],
+ fd,
+ CAM_SMMU_REGION_IO);
+ return rc;
+
+}
+
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+ int rc;
+ int32_t idx;
+ struct ion_handle *ion_hdl;
+ int ion_fd;
+ dma_addr_t hw_vaddr = 0;
+ size_t len;
+
+ if (!cmd) {
+ pr_err(" Invalid argument\n");
+ return -EINVAL;
+ }
+ len = cmd->len;
+
+ rc = cam_mem_util_check_flags(cmd);
+ if (rc) {
+ pr_err("Invalid flags: flags = %X\n", cmd->flags);
+ return rc;
+ }
+
+ rc = cam_mem_util_ion_alloc(cmd,
+ &ion_hdl,
+ &ion_fd);
+ if (rc) {
+ pr_err("Ion allocation failed\n");
+ return rc;
+ }
+
+ idx = cam_mem_get_slot();
+ if (idx < 0) {
+ rc = -ENOMEM;
+ goto slot_fail;
+ }
+
+ if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+ cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+
+ enum cam_smmu_region_id region;
+
+ if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ region = CAM_SMMU_REGION_IO;
+
+ if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+ region = CAM_SMMU_REGION_SHARED;
+
+ rc = cam_mem_util_map_hw_va(cmd->flags,
+ cmd->mmu_hdls,
+ cmd->num_hdl,
+ ion_fd,
+ &hw_vaddr,
+ &len,
+ region);
+ if (rc)
+ goto map_hw_fail;
+ }
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ tbl.bufq[idx].fd = ion_fd;
+ tbl.bufq[idx].flags = cmd->flags;
+ tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
+ tbl.bufq[idx].kmdvaddr = 0;
+
+ if (cmd->num_hdl > 0)
+ tbl.bufq[idx].vaddr = hw_vaddr;
+ else
+ tbl.bufq[idx].vaddr = 0;
+
+ tbl.bufq[idx].i_hdl = ion_hdl;
+ tbl.bufq[idx].len = cmd->len;
+ tbl.bufq[idx].num_hdl = cmd->num_hdl;
+ memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+ sizeof(int32_t) * cmd->num_hdl);
+ tbl.bufq[idx].is_imported = false;
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+
+ cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+ cmd->out.fd = tbl.bufq[idx].fd;
+ cmd->out.vaddr = 0;
+
+ CDBG("buf handle: %x, fd: %d, len: %zu\n",
+ cmd->out.buf_handle, cmd->out.fd,
+ tbl.bufq[idx].len);
+
+ return rc;
+
+map_hw_fail:
+ cam_mem_put_slot(idx);
+slot_fail:
+ ion_free(tbl.client, ion_hdl);
+ return rc;
+}
+
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
+{
+ int32_t idx;
+ int rc;
+ struct ion_handle *ion_hdl;
+ dma_addr_t hw_vaddr = 0;
+ size_t len = 0;
+
+ if (!cmd || (cmd->fd < 0)) {
+ pr_err("Invalid argument\n");
+ return -EINVAL;
+ }
+
+ if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE)
+ return -EINVAL;
+
+ rc = cam_mem_util_check_map_flags(cmd);
+ if (rc) {
+ pr_err("Invalid flags: flags = %X\n", cmd->flags);
+ return rc;
+ }
+
+ ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
+ if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
+ pr_err("Failed to import ion fd\n");
+ return -EINVAL;
+ }
+
+ if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) {
+ rc = cam_mem_util_map_hw_va(cmd->flags,
+ cmd->mmu_hdls,
+ cmd->num_hdl,
+ cmd->fd,
+ &hw_vaddr,
+ &len,
+ CAM_SMMU_REGION_IO);
+ if (rc)
+ goto map_fail;
+ }
+
+ idx = cam_mem_get_slot();
+ if (idx < 0) {
+ rc = -ENOMEM;
+ goto map_fail;
+ }
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ tbl.bufq[idx].fd = cmd->fd;
+ tbl.bufq[idx].flags = cmd->flags;
+ tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+ tbl.bufq[idx].kmdvaddr = 0;
+
+ if (cmd->num_hdl > 0)
+ tbl.bufq[idx].vaddr = hw_vaddr;
+ else
+ tbl.bufq[idx].vaddr = 0;
+
+ tbl.bufq[idx].i_hdl = ion_hdl;
+ tbl.bufq[idx].len = len;
+ tbl.bufq[idx].num_hdl = cmd->num_hdl;
+ memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+ sizeof(int32_t) * cmd->num_hdl);
+ tbl.bufq[idx].is_imported = true;
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+
+ cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+ cmd->out.vaddr = 0;
+
+ return rc;
+
+map_fail:
+ ion_free(tbl.client, ion_hdl);
+ return rc;
+}
+
+static int cam_mem_util_unmap_hw_va(int32_t idx,
+ enum cam_smmu_region_id region)
+{
+ int i;
+ uint32_t flags;
+ int32_t *mmu_hdls;
+ int num_hdls;
+ int fd;
+ int rc = -EINVAL;
+
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+ pr_err("Incorrect index\n");
+ return rc;
+ }
+
+ flags = tbl.bufq[idx].flags;
+ mmu_hdls = tbl.bufq[idx].hdls;
+ num_hdls = tbl.bufq[idx].num_hdl;
+ fd = tbl.bufq[idx].fd;
+
+ if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+ for (i = 0; i < num_hdls; i++) {
+ rc = cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+ if (rc < 0)
+ goto unmap_end;
+ }
+ } else {
+ for (i = 0; i < num_hdls; i++) {
+ rc = cam_smmu_unmap_iova(mmu_hdls[i],
+ fd,
+ region);
+ if (rc < 0)
+ goto unmap_end;
+ }
+ }
+
+unmap_end:
+ return rc;
+}
+
+static int cam_mem_util_unmap(int32_t idx)
+{
+ int rc = 0;
+ enum cam_smmu_region_id region;
+
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+ pr_err("Incorrect index\n");
+ return -EINVAL;
+ }
+
+ CDBG("Flags = %X\n", tbl.bufq[idx].flags);
+
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
+ if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
+ ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
+
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ region = CAM_SMMU_REGION_IO;
+
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+ region = CAM_SMMU_REGION_SHARED;
+
+ rc = cam_mem_util_unmap_hw_va(idx,
+ region);
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ tbl.bufq[idx].flags = 0;
+ tbl.bufq[idx].buf_handle = -1;
+ tbl.bufq[idx].vaddr = 0;
+ memset(tbl.bufq[idx].hdls, 0,
+ sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
+
+ CDBG("Ion handle at idx = %d freeing = %pK, fd = %d\n",
+ idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd);
+
+ if (tbl.bufq[idx].i_hdl && !tbl.bufq[idx].is_imported) {
+ CDBG("Freeing up non-imported buffer at fd = %d, hdl = %pK",
+ tbl.bufq[idx].fd,
+ tbl.bufq[idx].i_hdl);
+ ion_free(tbl.client, tbl.bufq[idx].i_hdl);
+ tbl.bufq[idx].i_hdl = NULL;
+ } else {
+ CDBG("Not freeing up imported buffer at fd = %d",
+ tbl.bufq[idx].fd);
+ }
+
+ tbl.bufq[idx].fd = -1;
+ tbl.bufq[idx].is_imported = false;
+ tbl.bufq[idx].i_hdl = NULL;
+ tbl.bufq[idx].len = 0;
+ tbl.bufq[idx].num_hdl = 0;
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+ cam_mem_put_slot(idx);
+
+ return rc;
+}
+
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
+{
+ int idx;
+ int rc;
+
+ if (!cmd) {
+ pr_err("Invalid argument\n");
+ return -EINVAL;
+ }
+
+ idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+ pr_err("Incorrect index extracted from mem handle\n");
+ return -EINVAL;
+ }
+
+ if (!tbl.bufq[idx].active) {
+ pr_err("Released buffer state should be active\n");
+ return -EINVAL;
+ }
+
+ if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
+ pr_err("Released buf handle not matching within table\n");
+ return -EINVAL;
+ }
+
+ CDBG("Releasing hdl = %u\n", cmd->buf_handle);
+ rc = cam_mem_util_unmap(idx);
+
+ return rc;
+}
+
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+ struct cam_mem_mgr_memory_desc *out)
+{
+ struct ion_handle *hdl;
+ int ion_fd;
+ int rc = 0;
+ uint32_t heap_id;
+ int32_t ion_flag = 0;
+ uint64_t kvaddr;
+ dma_addr_t iova = 0;
+ size_t request_len = 0;
+ int32_t idx;
+ uint32_t mem_handle;
+ int32_t smmu_hdl = 0;
+ int32_t num_hdl = 0;
+ enum cam_smmu_region_id region;
+
+ if (!inp || !out) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (inp->region != CAM_MEM_MGR_REGION_SHARED &&
+ inp->region != CAM_MEM_MGR_REGION_NON_SECURE_IO) {
+ pr_err("Invalid flags for request mem\n");
+ return -EINVAL;
+ }
+
+ if (inp->flags & CAM_MEM_FLAG_CACHE)
+ ion_flag |= ION_FLAG_CACHED;
+ else
+ ion_flag &= ~ION_FLAG_CACHED;
+
+ heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+ rc = cam_mem_util_get_ion_buffer(inp->size,
+ inp->align,
+ heap_id,
+ ion_flag,
+ &hdl,
+ &ion_fd);
+
+ if (rc) {
+ pr_err("ION alloc failed for shared buffer\n");
+ goto ion_fail;
+ } else {
+ CDBG("Got ION fd = %d, hdl = %pK\n", ion_fd, hdl);
+ }
+
+ rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
+ if (rc) {
+ pr_err("Failed to get kernel vaddr\n");
+ goto map_fail;
+ }
+
+ if (!inp->smmu_hdl) {
+ pr_err("Invalid SMMU handle\n");
+ rc = -EINVAL;
+ goto smmu_fail;
+ }
+
+ if (inp->region == CAM_MEM_MGR_REGION_SHARED)
+ region = CAM_SMMU_REGION_SHARED;
+
+ if (inp->region == CAM_MEM_MGR_REGION_NON_SECURE_IO)
+ region = CAM_SMMU_REGION_IO;
+
+ rc = cam_smmu_map_iova(inp->smmu_hdl,
+ ion_fd,
+ CAM_SMMU_MAP_RW,
+ &iova,
+ &request_len,
+ region);
+
+ if (rc < 0) {
+ pr_err("SMMU mapping failed\n");
+ goto smmu_fail;
+ }
+
+ smmu_hdl = inp->smmu_hdl;
+ num_hdl = 1;
+
+ idx = cam_mem_get_slot();
+ if (idx < 0) {
+ rc = -ENOMEM;
+ goto slot_fail;
+ }
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+ tbl.bufq[idx].fd = ion_fd;
+ tbl.bufq[idx].flags = inp->flags;
+ tbl.bufq[idx].buf_handle = mem_handle;
+ tbl.bufq[idx].kmdvaddr = kvaddr;
+
+ tbl.bufq[idx].vaddr = iova;
+
+ tbl.bufq[idx].i_hdl = hdl;
+ tbl.bufq[idx].len = inp->size;
+ tbl.bufq[idx].num_hdl = num_hdl;
+ memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+ sizeof(int32_t));
+ tbl.bufq[idx].is_imported = false;
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+
+ out->kva = kvaddr;
+ out->iova = (uint32_t)iova;
+ out->smmu_hdl = smmu_hdl;
+ out->mem_handle = mem_handle;
+ out->len = inp->size;
+ out->region = inp->region;
+
+ return rc;
+slot_fail:
+ cam_smmu_unmap_iova(inp->smmu_hdl,
+ ion_fd,
+ inp->region);
+smmu_fail:
+ ion_unmap_kernel(tbl.client, hdl);
+map_fail:
+ ion_free(tbl.client, hdl);
+ion_fail:
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_request_mem);
+
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
+{
+ int32_t idx;
+ int rc;
+
+ if (!inp) {
+ pr_err("Invalid argument\n");
+ return -EINVAL;
+ }
+
+ idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+ pr_err("Incorrect index extracted from mem handle\n");
+ return -EINVAL;
+ }
+
+ if (!tbl.bufq[idx].active) {
+ pr_err("Released buffer state should be active\n");
+ return -EINVAL;
+ }
+
+ if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+ pr_err("Released buf handle not matching within table\n");
+ return -EINVAL;
+ }
+
+ CDBG("Releasing hdl = %X\n", inp->mem_handle);
+ rc = cam_mem_util_unmap(idx);
+
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_release_mem);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
new file mode 100644
index 0000000..c5f839b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_H_
+#define _CAM_MEM_MGR_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_mem_mgr_api.h"
+
+#define CAM_MEM_BUFQ_MAX 1024
+
+/**
+ * struct cam_mem_buf_queue
+ *
+ * @i_hdl: ion handle for the buffer
+ * @q_lock: mutex lock for buffer
+ * @hdls: list of mapped handles
+ * @num_hdl: number of handles
+ * @fd: file descriptor of buffer
+ * @buf_handle: unique handle for buffer
+ * @align: alignment for allocation
+ * @len: size of buffer
+ * @flags: attributes of buffer
+ * @vaddr: IOVA of buffer
+ * @kmdvaddr: Kernel virtual address
+ * @active: state of the buffer
+ * @is_imported: Flag indicating if buffer is imported from an FD in user space
+ */
+struct cam_mem_buf_queue {
+ struct ion_handle *i_hdl;
+ struct mutex q_lock;
+ int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
+ int32_t num_hdl;
+ int32_t fd;
+ int32_t buf_handle;
+ int32_t align;
+ size_t len;
+ uint32_t flags;
+ uint64_t vaddr;
+ uint64_t kmdvaddr;
+ bool active;
+ bool is_imported;
+};
+
+/**
+ * struct cam_mem_table
+ *
+ * @m_lock: mutex lock for table
+ * @bitmap: bitmap of the mem mgr utility
+ * @bits: max bits of the utility
+ * @client: ion client pointer
+ * @bufq: array of buffers
+ */
+struct cam_mem_table {
+ struct mutex m_lock;
+ void *bitmap;
+ size_t bits;
+ struct ion_client *client;
+ struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX];
+};
+
+/**
+ * @brief: Allocates and maps buffer
+ *
+ * @cmd: Allocation information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd);
+
+/**
+ * @brief: Releases a buffer reference
+ *
+ * @cmd: Buffer release information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd);
+
+/**
+ * @brief Maps a buffer
+ *
+ * @cmd: Buffer mapping information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd);
+
+/**
+ * @brief: Perform cache ops on the buffer
+ *
+ * @cmd: Cache ops information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd);
+
+/**
+ * @brief: Initializes the memory manager
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_init(void);
+
+/**
+ * @brief: Tears down the memory manager
+ *
+ * @return None
+ */
+void cam_mem_mgr_deinit(void);
+
+#endif /* _CAM_MEM_MGR_H_ */
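A hedged sketch of how an in-kernel caller is expected to drive the ops declared above (the ioctl layer in cam_req_mgr_dev.c follows the same sequence after copying the command from user space). Field names match their use in cam_mem_mgr.c; the flag values, sizes, and the smmu handle are illustrative assumptions, as is the exact uapi layout of mmu_hdls:

#include "cam_mem_mgr.h"

/* illustrative: smmu_hdl would come from the camera SMMU driver */
static int example_alloc_release(int32_t smmu_hdl)
{
	struct cam_mem_mgr_alloc_cmd alloc = {0};
	struct cam_mem_mgr_release_cmd release = {0};
	int rc;

	alloc.len = 4096;
	alloc.align = 4096;
	alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE | CAM_MEM_FLAG_CACHE;
	alloc.num_hdl = 1;
	alloc.mmu_hdls[0] = smmu_hdl;	/* assumed array layout in the uapi */

	rc = cam_mem_mgr_alloc_and_map(&alloc);
	if (rc)
		return rc;

	/* out.buf_handle identifies the buffer for all later calls */
	release.buf_handle = alloc.out.buf_handle;
	return cam_mem_mgr_release(&release);
}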
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
new file mode 100644
index 0000000..32a754e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_API_H_
+#define _CAM_MEM_MGR_API_H_
+
+#include <media/cam_req_mgr.h>
+
+/* Region IDs for memory manager */
+#define CAM_MEM_MGR_REGION_FIRMWARE 0
+#define CAM_MEM_MGR_REGION_SHARED 1
+#define CAM_MEM_MGR_REGION_NON_SECURE_IO 2
+#define CAM_MEM_MGR_REGION_SECURE_IO 3
+#define CAM_MEM_MGR_REGION_SCRATCH 4
+
+/**
+ * struct cam_mem_mgr_request_desc
+ *
+ * @size : Size of memory requested for allocation
+ * @align : Alignment of requested memory
+ * @smmu_hdl: SMMU handle to identify context bank where memory will be mapped
+ * @flags : Flags to indicate cached/uncached property
+ * @region : Region where memory should be allocated
+ */
+struct cam_mem_mgr_request_desc {
+ uint64_t size;
+ uint64_t align;
+ int32_t smmu_hdl;
+ uint32_t flags;
+ uint32_t region;
+};
+
+/**
+ * struct cam_mem_mgr_memory_desc
+ *
+ * @kva : Kernel virtual address of allocated memory
+ * @iova : IOVA of allocated memory
+ * @smmu_hdl : SMMU handle of allocated memory
+ * @mem_handle : Mem handle identifying allocated memory
+ * @len : Length of allocated memory
+ * @region : Region to which allocated memory belongs
+ */
+struct cam_mem_mgr_memory_desc {
+ uint64_t kva;
+ uint32_t iova;
+ int32_t smmu_hdl;
+ uint32_t mem_handle;
+ uint64_t len;
+ uint32_t region;
+};
+
+/**
+ * @brief: Requests a memory buffer
+ *
+ * @inp: Information specifying requested buffer properties
+ * @out: Information about allocated buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+ struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Releases a memory buffer
+ *
+ * @inp: Information specifying buffer to be released
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
+
+/**
+ * @brief: Returns IOVA information about buffer
+ *
+ * @buf_handle: Handle of the buffer
+ * @mmu_handle: SMMU handle where buffer is mapped
+ * @iova_ptr : Pointer to mmu's iova
+ * @len_ptr : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+ uint64_t *iova_ptr, size_t *len_ptr);
+/**
+ * @brief: Returns CPU address information about buffer
+ *
+ * @buf_handle: Handle for the buffer
+ * @vaddr_ptr : pointer to kernel virtual address
+ * @len_ptr : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
+ size_t *len);
+
+#endif /* _CAM_MEM_MGR_API_H_ */
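A hedged usage sketch for the kernel-internal interface documented above: request a shared buffer, look up its IOVA through the mapped context bank, then release it. The smmu handle is assumed to come from the SMMU driver elsewhere; sizes and flags are illustrative:

#include "cam_mem_mgr_api.h"

static int example_request_release(int32_t smmu_hdl)
{
	struct cam_mem_mgr_request_desc req = {0};
	struct cam_mem_mgr_memory_desc mem = {0};
	uint64_t iova;
	size_t len;
	int rc;

	req.size = 8192;
	req.align = 4096;
	req.smmu_hdl = smmu_hdl;
	req.flags = CAM_MEM_FLAG_CACHE;
	req.region = CAM_MEM_MGR_REGION_SHARED;

	rc = cam_mem_mgr_request_mem(&req, &mem);
	if (rc)
		return rc;

	/* IOVA of the same buffer, as seen by the context bank it maps */
	rc = cam_mem_get_io_buf(mem.mem_handle, mem.smmu_hdl, &iova, &len);
	if (!rc)
		rc = cam_mem_mgr_release_mem(&mem);
	return rc;
}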
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index f3af1bd..43b020c6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#include "cam_req_mgr_util.h"
#include "cam_req_mgr_core.h"
#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
#define CAM_REQ_MGR_EVENT_MAX 30
@@ -115,7 +116,18 @@
spin_unlock_bh(&g_dev.cam_eventq_lock);
g_dev.open_cnt++;
+ rc = cam_mem_mgr_init();
+ if (rc) {
+ g_dev.open_cnt--;
+ pr_err("mem mgr init failed\n");
+ goto mem_mgr_init_fail;
+ }
+ mutex_unlock(&g_dev.cam_lock);
+ return rc;
+
+mem_mgr_init_fail:
+ v4l2_fh_release(filep);
end:
mutex_unlock(&g_dev.cam_lock);
return rc;
@@ -154,6 +166,7 @@
spin_unlock_bh(&g_dev.cam_eventq_lock);
cam_req_mgr_util_free_hdls();
+ cam_mem_mgr_deinit();
mutex_unlock(&g_dev.cam_lock);
return 0;
@@ -316,6 +329,84 @@
rc = cam_req_mgr_sync_mode(&sync_mode);
}
break;
+ case CAM_REQ_MGR_ALLOC_BUF: {
+ struct cam_mem_mgr_alloc_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ (void *)k_ioctl->handle,
+ k_ioctl->size)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_alloc_and_map(&cmd);
+ if (!rc)
+ if (copy_to_user((void *)k_ioctl->handle,
+ &cmd, k_ioctl->size)) {
+ rc = -EFAULT;
+ break;
+ }
+ }
+ break;
+ case CAM_REQ_MGR_MAP_BUF: {
+ struct cam_mem_mgr_map_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ (void *)k_ioctl->handle,
+ k_ioctl->size)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_map(&cmd);
+ if (!rc)
+ if (copy_to_user((void *)k_ioctl->handle,
+ &cmd, k_ioctl->size)) {
+ rc = -EFAULT;
+ break;
+ }
+ }
+ break;
+ case CAM_REQ_MGR_RELEASE_BUF: {
+ struct cam_mem_mgr_release_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ (void *)k_ioctl->handle,
+ k_ioctl->size)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_release(&cmd);
+ }
+ break;
+ case CAM_REQ_MGR_CACHE_OPS: {
+ struct cam_mem_cache_ops_cmd cmd;
+
+ if (k_ioctl->size != sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd,
+ (void *)k_ioctl->handle,
+ k_ioctl->size)) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = cam_mem_mgr_cache_ops(&cmd);
+ if (rc)
+ rc = -EINVAL;
+ }
+ break;
default:
return -ENOIOCTLCMD;
}
@@ -444,6 +535,7 @@
static int cam_req_mgr_remove(struct platform_device *pdev)
{
cam_req_mgr_core_device_deinit();
+ cam_mem_mgr_deinit();
cam_req_mgr_util_deinit();
cam_media_device_cleanup();
cam_video_device_cleanup();
@@ -482,6 +574,12 @@
goto req_mgr_util_fail;
}
+ rc = cam_mem_mgr_init();
+ if (rc) {
+ pr_err("mem mgr init failed\n");
+ goto mem_mgr_init_fail;
+ }
+
rc = cam_req_mgr_core_device_init();
if (rc) {
pr_err("core device setup failed\n");
@@ -493,8 +591,12 @@
return rc;
req_mgr_core_fail:
+ cam_mem_mgr_deinit();
+mem_mgr_init_fail:
cam_req_mgr_util_deinit();
req_mgr_util_fail:
+ mutex_destroy(&g_dev.dev_lock);
+ mutex_destroy(&g_dev.cam_lock);
cam_video_device_cleanup();
video_setup_fail:
cam_media_device_cleanup();
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 4f75a19..019a775 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -28,25 +28,34 @@
#endif
static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
-static struct mutex hdl_tbl_mutex = __MUTEX_INITIALIZER(hdl_tbl_mutex);
+static DEFINE_SPINLOCK(hdl_tbl_lock);
int cam_req_mgr_util_init(void)
{
int rc = 0;
int bitmap_size;
+ static struct cam_req_mgr_util_hdl_tbl *hdl_tbl_local;
- mutex_lock(&hdl_tbl_mutex);
if (hdl_tbl) {
rc = -EINVAL;
pr_err("Hdl_tbl is already present\n");
goto hdl_tbl_check_failed;
}
- hdl_tbl = kzalloc(sizeof(*hdl_tbl), GFP_KERNEL);
- if (!hdl_tbl) {
+ hdl_tbl_local = kzalloc(sizeof(*hdl_tbl), GFP_KERNEL);
+ if (!hdl_tbl_local) {
rc = -ENOMEM;
goto hdl_tbl_alloc_failed;
}
+ spin_lock_bh(&hdl_tbl_lock);
+ if (hdl_tbl) {
+ spin_unlock_bh(&hdl_tbl_lock);
+ rc = -EEXIST;
+ kfree(hdl_tbl_local);
+ goto hdl_tbl_check_failed;
+ }
+ hdl_tbl = hdl_tbl_local;
+ spin_unlock_bh(&hdl_tbl_lock);
bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
hdl_tbl->bitmap = kzalloc(sizeof(bitmap_size), GFP_KERNEL);
@@ -55,7 +64,6 @@
goto bitmap_alloc_fail;
}
hdl_tbl->bits = bitmap_size * BITS_PER_BYTE;
- mutex_unlock(&hdl_tbl_mutex);
return rc;
@@ -64,16 +72,15 @@
hdl_tbl = NULL;
hdl_tbl_alloc_failed:
hdl_tbl_check_failed:
- mutex_unlock(&hdl_tbl_mutex);
return rc;
}
int cam_req_mgr_util_deinit(void)
{
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
@@ -81,7 +88,7 @@
hdl_tbl->bitmap = NULL;
kfree(hdl_tbl);
hdl_tbl = NULL;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return 0;
}
@@ -90,10 +97,10 @@
{
int i = 0;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
@@ -107,7 +114,7 @@
}
}
bitmap_zero(hdl_tbl->bitmap, CAM_REQ_MGR_MAX_HANDLES);
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return 0;
}
@@ -132,17 +139,17 @@
int rand = 0;
int32_t handle = 0;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
pr_err("Unable to create session handle\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -154,7 +161,7 @@
hdl_tbl->hdl[idx].state = HDL_ACTIVE;
hdl_tbl->hdl[idx].priv = priv;
hdl_tbl->hdl[idx].ops = NULL;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return handle;
}
@@ -165,17 +172,17 @@
int rand = 0;
int32_t handle;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
pr_err("Unable to create device handle\n");
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -187,7 +194,7 @@
hdl_tbl->hdl[idx].state = HDL_ACTIVE;
hdl_tbl->hdl[idx].priv = hdl_data->priv;
hdl_tbl->hdl[idx].ops = hdl_data->ops;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return handle;
}
@@ -198,7 +205,7 @@
int type;
void *priv;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
goto device_priv_fail;
@@ -227,12 +234,12 @@
}
priv = hdl_tbl->hdl[idx].priv;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return priv;
device_priv_fail:
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return NULL;
}
@@ -242,7 +249,7 @@
int type;
void *ops;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
goto device_ops_fail;
@@ -271,12 +278,12 @@
}
ops = hdl_tbl->hdl[idx].ops;
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return ops;
device_ops_fail:
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return NULL;
}
@@ -285,7 +292,7 @@
int idx;
int type;
- mutex_lock(&hdl_tbl_mutex);
+ spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
pr_err("Hdl tbl is NULL\n");
goto destroy_hdl_fail;
@@ -315,12 +322,12 @@
hdl_tbl->hdl[idx].state = HDL_FREE;
clear_bit(idx, hdl_tbl->bitmap);
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return 0;
destroy_hdl_fail:
- mutex_unlock(&hdl_tbl_mutex);
+ spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
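The conversion above swaps the handle-table mutex for a spinlock, so the GFP_KERNEL allocation can no longer happen with the lock held. The init path therefore allocates first, then re-checks and publishes under the lock, freeing the local copy if a racing initializer won. A distilled sketch of that pattern; struct foo and the names are illustrative:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int dummy;		/* illustrative payload */
};

static struct foo *tbl_ptr;
static DEFINE_SPINLOCK(tbl_lock);

static int tbl_init(void)
{
	/* GFP_KERNEL may sleep, so allocate before taking the spinlock */
	struct foo *local = kzalloc(sizeof(*local), GFP_KERNEL);

	if (!local)
		return -ENOMEM;

	spin_lock_bh(&tbl_lock);
	if (tbl_ptr) {			/* lost the race: another init won */
		spin_unlock_bh(&tbl_lock);
		kfree(local);
		return -EEXIST;
	}
	tbl_ptr = local;		/* publish only under the lock */
	spin_unlock_bh(&tbl_lock);
	return 0;
}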
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index e327723..1f6a97a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -22,12 +22,16 @@
return NULL;
spin_lock(&workq->task.lock);
+ if (list_empty(&workq->task.empty_head))
+ goto end;
+
task = list_first_entry(&workq->task.empty_head,
struct crm_workq_task, entry);
if (task) {
atomic_sub(1, &workq->task.free_cnt);
list_del_init(&task->entry);
}
+end:
spin_unlock(&workq->task.lock);
return task;
@@ -104,14 +108,14 @@
workq = (struct cam_req_mgr_core_workq *)
container_of(w, struct cam_req_mgr_core_workq, work);
- spin_lock(&workq->task.lock);
list_for_each_entry_safe(task, task_save,
&workq->task.process_head, entry) {
atomic_sub(1, &workq->task.pending_cnt);
+ spin_lock(&workq->task.lock);
list_del_init(&task->entry);
+ spin_unlock(&workq->task.lock);
cam_req_mgr_process_task(task);
}
- spin_unlock(&workq->task.lock);
CRM_DBG("processed task %p free_cnt %d",
task, atomic_read(&workq->task.free_cnt));
}
@@ -138,7 +142,6 @@
goto end;
}
- spin_lock(&workq->task.lock);
if (task->cancel == 1) {
cam_req_mgr_workq_put_task(task);
CRM_WARN("task aborted and queued back to pool");
@@ -146,12 +149,14 @@
spin_unlock(&workq->task.lock);
goto end;
}
+ spin_lock(&workq->task.lock);
list_add_tail(&task->entry,
&workq->task.process_head);
+ spin_unlock(&workq->task.lock);
atomic_add(1, &workq->task.pending_cnt);
CRM_DBG("enq task %p pending_cnt %d",
task, atomic_read(&workq->task.pending_cnt));
- spin_unlock(&workq->task.lock);
+
queue_work(workq->job, &workq->work);
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
new file mode 100644
index 0000000..3619da7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
new file mode 100644
index 0000000..f4215b5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -0,0 +1,2284 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/workqueue.h>
+#include <linux/genalloc.h>
+
+#include "cam_smmu_api.h"
+
+#define SHARED_MEM_POOL_GRANULARITY 12
+
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 2
+
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
+
+#ifdef CONFIG_CAM_SMMU_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+struct firmware_alloc_info {
+ struct device *fw_dev;
+ void *fw_kva;
+ dma_addr_t fw_dma_hdl;
+};
+
+struct firmware_alloc_info icp_fw;
+
+struct cam_smmu_work_payload {
+ int idx;
+ struct iommu_domain *domain;
+ struct device *dev;
+ unsigned long iova;
+ int flags;
+ void *token;
+ struct list_head list;
+};
+
+enum cam_protection_type {
+ CAM_PROT_INVALID,
+ CAM_NON_SECURE,
+ CAM_SECURE,
+ CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+ CAM_SMMU_INVALID,
+ CAM_QSMMU,
+ CAM_ARM_SMMU,
+ CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+ CAM_SMMU_BUFF_EXIST,
+ CAM_SMMU_BUFF_NOT_EXIST
+};
+
+enum cam_smmu_init_dir {
+ CAM_SMMU_TABLE_INIT,
+ CAM_SMMU_TABLE_DEINIT,
+};
+
+struct scratch_mapping {
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+};
+
+struct cam_smmu_region_info {
+ dma_addr_t iova_start;
+ size_t iova_len;
+};
+
+struct cam_context_bank_info {
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ dma_addr_t va_start;
+ size_t va_len;
+ const char *name;
+ bool is_secure;
+ uint8_t scratch_buf_support;
+ uint8_t firmware_support;
+ uint8_t shared_support;
+ uint8_t io_support;
+ bool is_fw_allocated;
+
+ struct scratch_mapping scratch_map;
+ struct gen_pool *shared_mem_pool;
+
+ struct cam_smmu_region_info scratch_info;
+ struct cam_smmu_region_info firmware_info;
+ struct cam_smmu_region_info shared_info;
+ struct cam_smmu_region_info io_info;
+
+ struct list_head smmu_buf_list;
+ struct mutex lock;
+ int handle;
+ enum cam_smmu_ops_param state;
+
+ void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*);
+ void *token[CAM_SMMU_CB_MAX];
+ int cb_count;
+};
+
+struct cam_iommu_cb_set {
+ struct cam_context_bank_info *cb_info;
+ u32 cb_num;
+ u32 cb_init_count;
+ struct work_struct smmu_work;
+ struct mutex payload_list_lock;
+ struct list_head payload_list;
+};
+
+static const struct of_device_id msm_cam_smmu_dt_match[] = {
+ { .compatible = "qcom,msm-cam-smmu", },
+ { .compatible = "qcom,msm-cam-smmu-cb", },
+ { .compatible = "qcom,msm-cam-smmu-fw-dev", },
+ {}
+};
+
+struct cam_dma_buff_info {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ enum dma_data_direction dir;
+ enum cam_smmu_region_id region_id;
+ int iommu_dir;
+ int ref_count;
+ dma_addr_t paddr;
+ struct list_head list;
+ int ion_fd;
+ size_t len;
+ size_t phys_len;
+};
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr,
+ enum cam_smmu_region_id region_id);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr);
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx);
+
+static void cam_smmu_clean_buffer_list(int idx);
+
+static void cam_smmu_print_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);
+
+static void cam_smmu_page_fault_work(struct work_struct *work)
+{
+ int j;
+ int idx;
+ struct cam_smmu_work_payload *payload;
+
+ mutex_lock(&iommu_cb_set.payload_list_lock);
+ if (list_empty(&iommu_cb_set.payload_list)) {
+ pr_err("Payload list empty\n");
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+ return;
+ }
+
+ payload = list_first_entry(&iommu_cb_set.payload_list,
+ struct cam_smmu_work_payload,
+ list);
+ list_del(&payload->list);
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+ /* Dereference the payload to call the handler */
+ idx = payload->idx;
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ if ((iommu_cb_set.cb_info[idx].handler[j])) {
+ iommu_cb_set.cb_info[idx].handler[j](
+ payload->domain,
+ payload->dev,
+ payload->iova,
+ payload->flags,
+ iommu_cb_set.cb_info[idx].token[j]);
+ }
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ kfree(payload);
+}
+
+static void cam_smmu_print_list(int idx)
+{
+ struct cam_dma_buff_info *mapping;
+
+ pr_err("index = %d\n", idx);
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ pr_err("ion_fd = %d, paddr= 0x%pK, len = %u, region = %d\n",
+ mapping->ion_fd, (void *)mapping->paddr,
+ (unsigned int)mapping->len,
+ mapping->region_id);
+ }
+}
+
+static void cam_smmu_print_table(void)
+{
+ int i;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+ (int)iommu_cb_set.cb_info[i].handle,
+ (void *)iommu_cb_set.cb_info[i].name);
+ pr_err("dev = %pK\n", iommu_cb_set.cb_info[i].dev);
+ }
+}
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+{
+ struct cam_dma_buff_info *mapping;
+ unsigned long start_addr, end_addr, current_addr;
+
+ current_addr = (unsigned long)vaddr;
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ start_addr = (unsigned long)mapping->paddr;
+ end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+ if (start_addr <= current_addr && current_addr < end_addr) {
+ pr_err("va %pK valid: range:%pK-%pK, fd = %d cb: %s\n",
+ vaddr, (void *)start_addr, (void *)end_addr,
+ mapping->ion_fd,
+ iommu_cb_set.cb_info[idx].name);
+ goto end;
+ } else {
+ CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+ vaddr, (void *)start_addr, (void *)end_addr,
+ mapping->ion_fd);
+ }
+ }
+ pr_err("Cannot find vaddr:%pK in SMMU %s uses invalid virt address\n",
+ vaddr, iommu_cb_set.cb_info[idx].name);
+end:
+ return;
+}
+
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ void (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token)
+{
+ int idx, i = 0;
+
+ if (!token || (handle == HANDLE_INIT)) {
+ pr_err("Error: token is NULL or invalid handle\n");
+ return;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ if (client_page_fault_handler) {
+ if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+ pr_err("%s Should not regiester more handlers\n",
+ iommu_cb_set.cb_info[idx].name);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+ iommu_cb_set.cb_info[idx].cb_count++;
+ for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+ iommu_cb_set.cb_info[idx].token[i] = token;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ client_page_fault_handler;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == token) {
+ iommu_cb_set.cb_info[idx].token[i] = NULL;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ NULL;
+ iommu_cb_set.cb_info[idx].cb_count--;
+ break;
+ }
+ }
+ if (i == CAM_SMMU_CB_MAX)
+ pr_err("Error: hdl %x no matching tokens: %s\n",
+ handle, iommu_cb_set.cb_info[idx].name);
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova,
+ int flags, void *token)
+{
+ char *cb_name;
+ int idx;
+ struct cam_smmu_work_payload *payload;
+
+ if (!token) {
+ pr_err("Error: token is NULL\n");
+ pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
+ pr_err("iova = %lX, flags = %d\n", iova, flags);
+ return 0;
+ }
+
+ cb_name = (char *)token;
+ /* Check whether it is in the table */
+ for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+ if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+ break;
+ }
+
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: index is not valid, index = %d, token = %s\n",
+ idx, cb_name);
+ return 0;
+ }
+
+ payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
+ if (!payload)
+ return 0;
+
+ payload->domain = domain;
+ payload->dev = dev;
+ payload->iova = iova;
+ payload->flags = flags;
+ payload->token = token;
+ payload->idx = idx;
+
+ mutex_lock(&iommu_cb_set.payload_list_lock);
+ list_add_tail(&payload->list, &iommu_cb_set.payload_list);
+ mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+ schedule_work(&iommu_cb_set.smmu_work);
+
+ return 0;
+}
+
+static int cam_smmu_translate_dir_to_iommu_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return IOMMU_READ;
+ case CAM_SMMU_MAP_WRITE:
+ return IOMMU_WRITE;
+ case CAM_SMMU_MAP_RW:
+ return IOMMU_READ|IOMMU_WRITE;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ pr_err("Error: Direction is invalid. dir = %d\n", dir);
+ break;
+ };
+ return IOMMU_INVALID_DIR;
+}
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return DMA_FROM_DEVICE;
+ case CAM_SMMU_MAP_WRITE:
+ return DMA_TO_DEVICE;
+ case CAM_SMMU_MAP_RW:
+ return DMA_BIDIRECTIONAL;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+ break;
+ }
+ return DMA_NONE;
+}
+
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+ unsigned int i;
+ int j = 0;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+ INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+ iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+ iommu_cb_set.cb_info[i].dev = NULL;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ iommu_cb_set.cb_info[i].token[j] = NULL;
+ iommu_cb_set.cb_info[i].handler[j] = NULL;
+ }
+ if (ops == CAM_SMMU_TABLE_INIT)
+ mutex_init(&iommu_cb_set.cb_info[i].lock);
+ else
+ mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+ }
+}
+
+static int cam_smmu_check_handle_unique(int hdl)
+{
+ int i;
+
+ if (hdl == HANDLE_INIT) {
+ CDBG("iommu handle is init number. Need to try again\n");
+ return 1;
+ }
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+ continue;
+
+ if (iommu_cb_set.cb_info[i].handle == hdl) {
+ CDBG("iommu handle %d conflicts\n", (int)hdl);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * cam_smmu_create_iommu_handle() - create a handle for table index @idx.
+ * The low 2 bytes (COOKIE_NUM_BYTE) hold a random cookie; GET_SMMU_HDL()
+ * packs the table index alongside it.
+ */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+ int rand, hdl = 0;
+
+ get_random_bytes(&rand, COOKIE_NUM_BYTE);
+ hdl = GET_SMMU_HDL(idx, rand);
+ CDBG("create handle value = %x\n", (int)hdl);
+ return hdl;
+}
+
+static int cam_smmu_attach_device(int idx)
+{
+ int rc;
+ struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+ /* attach the mapping to device */
+ rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (rc < 0) {
+ pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl)
+{
+ int i;
+ int handle;
+
+ /* create handle and add in the iommu hardware table */
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+ mutex_lock(&iommu_cb_set.cb_info[i].lock);
+ if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+ pr_err("Error: %s already got handle 0x%x\n",
+ name,
+ iommu_cb_set.cb_info[i].handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return -EINVAL;
+ }
+
+ /* make sure handle is unique */
+ do {
+ handle = cam_smmu_create_iommu_handle(i);
+ } while (cam_smmu_check_handle_unique(handle));
+
+ /* put handle in the table */
+ iommu_cb_set.cb_info[i].handle = handle;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ *hdl = handle;
+ CDBG("%s creates handle 0x%x\n", name, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return 0;
+ }
+ }
+
+ pr_err("Error: Cannot find name %s or all handle exist!\n",
+ name);
+ cam_smmu_print_table();
+ return -EINVAL;
+}
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ int err = 0;
+
+ if (!count) {
+ err = -EINVAL;
+ pr_err("Page count is zero, size passed = %zu\n", size);
+ goto bail;
+ }
+
+ scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!scratch_map->bitmap) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ scratch_map->base = base;
+ scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+ scratch_map->order = order;
+
+bail:
+ return err;
+}
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+	/*
+	 * Transparently add a guard page to the total count of pages
+	 * to be allocated
+	 */
+ count++;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+
+ if (start > mapping->bits)
+ return -ENOMEM;
+
+ bitmap_set(mapping->bitmap, start, count);
+ *iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+
+ return 0;
+}
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (!addr) {
+ pr_err("Error: Invalid address\n");
+ return -EINVAL;
+ }
+
+ if (start + count > mapping->bits) {
+ pr_err("Error: Invalid page bits in scratch map\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Transparently add a guard page to the total count of pages
+	 * to be freed
+	 */
+ count++;
+ bitmap_clear(mapping->bitmap, start, count);
+
+ return 0;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->paddr == virt_addr) {
+ CDBG("Found virtual address %lx\n",
+ (unsigned long)virt_addr);
+ return mapping;
+ }
+ }
+
+ pr_err("Error: Cannot find virtual address %lx by index %d\n",
+ (unsigned long)virt_addr, idx);
+ return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+ CDBG(" find ion_fd %d\n", ion_fd);
+ return mapping;
+ }
+ }
+
+ pr_err("Error: Cannot find fd %d by index %d\n",
+ ion_fd, idx);
+ return NULL;
+}
+
+static void cam_smmu_clean_buffer_list(int idx)
+{
+ int ret;
+ struct cam_dma_buff_info *mapping_info, *temp;
+
+ list_for_each_entry_safe(mapping_info, temp,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+ (void *)mapping_info->paddr, idx,
+ mapping_info->ion_fd);
+
+ if (mapping_info->ion_fd == 0xDEADBEEF)
+ /* Clean up scratch buffers */
+ ret = cam_smmu_free_scratch_buffer_remove_from_list(
+ mapping_info, idx);
+ else
+ /* Clean up regular mapped buffers */
+ ret = cam_smmu_unmap_buf_and_remove_from_list(
+ mapping_info,
+ idx);
+
+ if (ret < 0) {
+ pr_err("Buffer delete failed: idx = %d\n", idx);
+ pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->ion_fd);
+ /*
+ * Ignore this error and continue to delete other
+ * buffers in the list
+ */
+ continue;
+ }
+ }
+}
+
+static int cam_smmu_attach(int idx)
+{
+ int ret;
+
+ if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+ ret = -EALREADY;
+ } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+ ret = cam_smmu_attach_device(idx);
+ if (ret < 0) {
+ pr_err("Error: ATTACH fail\n");
+ return -ENODEV;
+ }
+ iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+ ret = 0;
+ } else {
+ pr_err("Error: Not detach/attach: %d\n",
+ iommu_cb_set.cb_info[idx].state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cam_smmu_detach_device(int idx)
+{
+ int rc = 0;
+ struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+ /* detach the mapping to device if not already detached */
+ if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+ rc = -EALREADY;
+ } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+ arm_iommu_detach_device(cb->dev);
+ iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
+ }
+
+ return rc;
+}
+
+static int cam_smmu_alloc_iova(size_t size,
+ int32_t smmu_hdl, uint32_t *iova)
+{
+ int rc = 0;
+ int idx;
+ uint32_t vaddr = 0;
+
+ if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
+ pr_err("Error: Input args are invalid\n");
+ return -EINVAL;
+ }
+
+ CDBG("Allocating iova size = %zu for smmu hdl=%X\n", size, smmu_hdl);
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].shared_support) {
+ pr_err("Error: Shared memory not supported for hdl = %X\n",
+ smmu_hdl);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ vaddr = gen_pool_alloc(iommu_cb_set.cb_info[idx].shared_mem_pool, size);
+ if (!vaddr)
+ return -ENOMEM;
+
+ *iova = vaddr;
+
+get_addr_end:
+ return rc;
+}
+
+static int cam_smmu_free_iova(uint32_t addr, size_t size,
+ int32_t smmu_hdl)
+{
+ int rc = 0;
+ int idx;
+
+ if (!size || (smmu_hdl == HANDLE_INIT)) {
+ pr_err("Error: Input args are invalid\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ gen_pool_free(iommu_cb_set.cb_info[idx].shared_mem_pool, addr, size);
+
+get_addr_end:
+ return rc;
+}
+
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+ dma_addr_t *iova,
+ uint64_t *cpuva,
+ size_t *len)
+{
+ int rc;
+ int32_t idx;
+ size_t firmware_len = 0;
+ size_t firmware_start = 0;
+ struct iommu_domain *domain;
+
+ if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
+ pr_err("Error: Input args are invalid\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, smmu_hdl);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].firmware_support) {
+ pr_err("Firmware memory not supported for this SMMU handle\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
+ pr_err("Trying to allocate twice\n");
+ rc = -ENOMEM;
+ goto unlock_and_end;
+ }
+
+ firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+ firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+ CDBG("Firmware area len from DT = %zu\n", firmware_len);
+
+ icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
+ firmware_len,
+ &icp_fw.fw_dma_hdl,
+ GFP_KERNEL);
+ if (!icp_fw.fw_kva) {
+ pr_err("FW memory alloc failed\n");
+ rc = -ENOMEM;
+ goto unlock_and_end;
+ } else {
+ CDBG("DMA alloc returned fw = %pK, hdl = %pK\n",
+ icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
+ }
+
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+ rc = iommu_map(domain,
+ firmware_start,
+ icp_fw.fw_dma_hdl,
+ firmware_len,
+ IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
+
+ if (rc) {
+ pr_err("Failed to map FW into IOMMU\n");
+ rc = -ENOMEM;
+ goto alloc_fail;
+ }
+ iommu_cb_set.cb_info[idx].is_fw_allocated = true;
+
+ *iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+ *cpuva = (uint64_t)icp_fw.fw_kva;
+ *len = firmware_len;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+ return rc;
+
+alloc_fail:
+ dma_free_coherent(icp_fw.fw_dev,
+ firmware_len,
+ icp_fw.fw_kva,
+ icp_fw.fw_dma_hdl);
+unlock_and_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_firmware);
+
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl)
+{
+ int rc = 0;
+ int32_t idx;
+ size_t firmware_len = 0;
+ size_t firmware_start = 0;
+ struct iommu_domain *domain;
+ size_t unmapped = 0;
+
+ if (smmu_hdl == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, smmu_hdl);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].firmware_support) {
+ pr_err("Firmware memory not supported for this SMMU handle\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
+ pr_err("Trying to deallocate firmware that is not allocated\n");
+ rc = -ENOMEM;
+ goto unlock_and_end;
+ }
+
+ firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+ firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+ unmapped = iommu_unmap(domain,
+ firmware_start,
+ firmware_len);
+
+ if (unmapped != firmware_len) {
+ pr_err("Only %zu unmapped out of total %zu\n",
+ unmapped,
+ firmware_len);
+ rc = -EINVAL;
+ }
+
+ dma_free_coherent(icp_fw.fw_dev,
+ firmware_len,
+ icp_fw.fw_kva,
+ icp_fw.fw_dma_hdl);
+
+ icp_fw.fw_kva = 0;
+ icp_fw.fw_dma_hdl = 0;
+
+ iommu_cb_set.cb_info[idx].is_fw_allocated = false;
+
+unlock_and_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr,
+ enum cam_smmu_region_id region_id)
+{
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info;
+ struct dma_buf *buf = NULL;
+ struct dma_buf_attachment *attach = NULL;
+ struct sg_table *table = NULL;
+ struct iommu_domain *domain;
+ size_t size = 0;
+ uint32_t iova = 0;
+
+ /* allocate memory for each buffer information */
+ buf = dma_buf_get(ion_fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ rc = PTR_ERR(buf);
+ pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+ goto err_out;
+ }
+
+ attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ rc = PTR_ERR(attach);
+ pr_err("Error: dma buf attach failed\n");
+ goto err_put;
+ }
+
+ table = dma_buf_map_attachment(attach, dma_dir);
+ if (IS_ERR_OR_NULL(table)) {
+ rc = PTR_ERR(table);
+ pr_err("Error: dma buf map attachment failed\n");
+ goto err_detach;
+ }
+
+ if (region_id == CAM_SMMU_REGION_SHARED) {
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+		if (!domain) {
+			pr_err("CB has no domain set\n");
+			rc = -EINVAL;
+			goto err_unmap_sg;
+		}
+
+ rc = cam_smmu_alloc_iova(*len_ptr,
+ iommu_cb_set.cb_info[idx].handle,
+ &iova);
+
+ if (rc < 0) {
+ pr_err("IOVA alloc failed for shared memory\n");
+ goto err_unmap_sg;
+ }
+
+ size = iommu_map_sg(domain,
+ iova,
+ table->sgl,
+ table->nents,
+ IOMMU_READ | IOMMU_WRITE);
+
+ if (size < 0) {
+ pr_err("IOMMU mapping failed\n");
+ rc = cam_smmu_free_iova(iova,
+ size,
+ iommu_cb_set.cb_info[idx].handle);
+
+ if (rc)
+ pr_err("IOVA free failed\n");
+ rc = -ENOMEM;
+ goto err_unmap_sg;
+ } else {
+ CDBG("iommu_map_sg returned %zu\n", size);
+ *paddr_ptr = iova;
+ *len_ptr = size;
+ }
+ } else if (region_id == CAM_SMMU_REGION_IO) {
+ rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev,
+ table->sgl, table->nents, dma_dir, buf);
+
+ if (rc != table->nents) {
+ pr_err("Error: msm_dma_map_sg_lazy failed\n");
+ rc = -ENOMEM;
+ goto err_unmap_sg;
+ } else {
+ *paddr_ptr = sg_dma_address(table->sgl);
+ *len_ptr = (size_t)sg_dma_len(table->sgl);
+ }
+ } else {
+ pr_err("Error: Wrong region id passed for %s\n", __func__);
+ rc = -EINVAL;
+ goto err_unmap_sg;
+ }
+
+ if (table->sgl) {
+ CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
+ (void *)buf,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)attach, (void *)table);
+ CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
+ (void *)table->sgl, rc,
+ (unsigned int)table->sgl->dma_address);
+ } else {
+ rc = -EINVAL;
+ pr_err("Error: table sgl is null\n");
+ goto err_unmap_sg;
+ }
+
+ /* fill up mapping_info */
+ mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!mapping_info) {
+		rc = -ENOMEM;
+ goto err_alloc;
+ }
+ mapping_info->ion_fd = ion_fd;
+ mapping_info->buf = buf;
+ mapping_info->attach = attach;
+ mapping_info->table = table;
+ mapping_info->paddr = *paddr_ptr;
+ mapping_info->len = *len_ptr;
+ mapping_info->dir = dma_dir;
+ mapping_info->ref_count = 1;
+ mapping_info->region_id = region_id;
+
+ if (!*paddr_ptr || !*len_ptr) {
+ pr_err("Error: Space Allocation failed!\n");
+ kfree(mapping_info);
+ rc = -ENOSPC;
+ goto err_alloc;
+ }
+ CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+ /* add to the list */
+ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+ return 0;
+
+err_alloc:
+ if (region_id == CAM_SMMU_REGION_SHARED) {
+ cam_smmu_free_iova(iova,
+ size,
+ iommu_cb_set.cb_info[idx].handle);
+
+ iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+ *paddr_ptr,
+ *len_ptr);
+ } else if (region_id == CAM_SMMU_REGION_IO) {
+ msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+ table->sgl,
+ table->nents,
+ dma_dir,
+ buf);
+ }
+err_unmap_sg:
+ dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+ dma_buf_detach(buf, attach);
+err_put:
+ dma_buf_put(buf);
+err_out:
+ return rc;
+}
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx)
+{
+ int rc;
+ size_t size;
+ struct iommu_domain *domain;
+
+ if ((!mapping_info->buf) || (!mapping_info->table) ||
+ (!mapping_info->attach)) {
+ pr_err("Error: Invalid params dev = %pK, table = %pK\n",
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)mapping_info->table);
+ pr_err("Error:dma_buf = %pK, attach = %pK\n",
+ (void *)mapping_info->buf,
+ (void *)mapping_info->attach);
+ return -EINVAL;
+ }
+
+ if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
+ CDBG("Removing SHARED buffer paddr = %pK, len = %zu\n",
+ (void *)mapping_info->paddr, mapping_info->len);
+
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+ size = iommu_unmap(domain,
+ mapping_info->paddr,
+ mapping_info->len);
+
+ if (size != mapping_info->len) {
+ pr_err("IOMMU unmap failed\n");
+ pr_err("Unmapped = %zu, requested = %zu\n",
+ size,
+ mapping_info->len);
+ }
+
+ rc = cam_smmu_free_iova(mapping_info->paddr,
+ mapping_info->len,
+ iommu_cb_set.cb_info[idx].handle);
+
+ if (rc)
+ pr_err("IOVA free failed\n");
+
+ } else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
+ msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+ mapping_info->table->sgl, mapping_info->table->nents,
+ mapping_info->dir, mapping_info->buf);
+ }
+
+ dma_buf_unmap_attachment(mapping_info->attach,
+ mapping_info->table, mapping_info->dir);
+ dma_buf_detach(mapping_info->buf, mapping_info->attach);
+ dma_buf_put(mapping_info->buf);
+
+ mapping_info->buf = NULL;
+
+ list_del_init(&mapping_info->list);
+
+ /* free one buffer */
+ kfree(mapping_info);
+ return 0;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+ int ion_fd, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ if (mapping->ion_fd == ion_fd) {
+ mapping->ref_count++;
+ *paddr_ptr = mapping->paddr;
+ *len_ptr = mapping->len;
+ return CAM_SMMU_BUFF_EXIST;
+ }
+ }
+
+ return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+ int ret = 0;
+
+ if (!identifier) {
+ pr_err("Error: iommu hardware name is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!handle_ptr) {
+ pr_err("Error: handle pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ /* create and put handle in the table */
+ ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+ if (ret < 0)
+ pr_err("Error: %s get handle fail\n", identifier);
+
+ return ret;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+ int ret = 0, idx;
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: Index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ switch (ops) {
+ case CAM_SMMU_ATTACH: {
+ ret = cam_smmu_attach(idx);
+ break;
+ }
+ case CAM_SMMU_DETACH: {
+ ret = cam_smmu_detach_device(idx);
+ break;
+ }
+ case CAM_SMMU_VOTE:
+ case CAM_SMMU_DEVOTE:
+ default:
+ pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+ ret = -EINVAL;
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return ret;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr)
+{
+ unsigned long nents = virt_len / phys_len;
+ struct cam_dma_buff_info *mapping_info = NULL;
+ size_t unmapped;
+ dma_addr_t iova = 0;
+ struct scatterlist *sg;
+ int i = 0;
+ int rc;
+ struct iommu_domain *domain = NULL;
+ struct page *page;
+ struct sg_table *table = NULL;
+
+ CDBG("%s: nents = %lu, idx = %d, virt_len = %zx\n",
+ __func__, nents, idx, virt_len);
+ CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
+ __func__, phys_len, iommu_dir, virt_addr);
+
+ /*
+ * This table will go inside the 'mapping' structure
+ * where it will be held until put_scratch_buffer is called
+ */
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ rc = -ENOMEM;
+ goto err_table_alloc;
+ }
+
+ rc = sg_alloc_table(table, nents, GFP_KERNEL);
+ if (rc < 0) {
+ rc = -EINVAL;
+ goto err_sg_alloc;
+ }
+
+ page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+ if (!page) {
+ rc = -ENOMEM;
+ goto err_page_alloc;
+ }
+
+ /* Now we create the sg list */
+ for_each_sg(table->sgl, sg, table->nents, i)
+ sg_set_page(sg, page, phys_len, 0);
+
+
+ /* Get the domain from within our cb_set struct and map it*/
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+ rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+ virt_len, &iova);
+
+ if (rc < 0) {
+ pr_err("Could not find valid iova for scratch buffer");
+ goto err_iommu_map;
+ }
+
+ if (iommu_map_sg(domain,
+ iova,
+ table->sgl,
+ table->nents,
+ iommu_dir) != virt_len) {
+ pr_err("iommu_map_sg() failed");
+ goto err_iommu_map;
+ }
+
+ /* Now update our mapping information within the cb_set struct */
+ mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!mapping_info) {
+ rc = -ENOMEM;
+ goto err_mapping_info;
+ }
+
+ mapping_info->ion_fd = 0xDEADBEEF;
+ mapping_info->buf = NULL;
+ mapping_info->attach = NULL;
+ mapping_info->table = table;
+ mapping_info->paddr = iova;
+ mapping_info->len = virt_len;
+ mapping_info->iommu_dir = iommu_dir;
+ mapping_info->ref_count = 1;
+ mapping_info->phys_len = phys_len;
+ mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
+
+ CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
+ __func__, (void *)mapping_info->paddr,
+ mapping_info->len, mapping_info->phys_len);
+
+ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+ *virt_addr = (dma_addr_t)iova;
+
+ CDBG("%s: mapped virtual address = %lx\n", __func__,
+ (unsigned long)*virt_addr);
+ return 0;
+
+err_mapping_info:
+ unmapped = iommu_unmap(domain, iova, virt_len);
+ if (unmapped != virt_len)
+ pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+err_iommu_map:
+ __free_pages(page, get_order(phys_len));
+err_page_alloc:
+ sg_free_table(table);
+err_sg_alloc:
+ kfree(table);
+err_table_alloc:
+ return rc;
+}
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx)
+{
+ int rc = 0;
+ size_t unmapped;
+ struct iommu_domain *domain =
+ iommu_cb_set.cb_info[idx].mapping->domain;
+ struct scratch_mapping *scratch_map =
+ &iommu_cb_set.cb_info[idx].scratch_map;
+
+ if (!mapping_info->table) {
+ pr_err("Error: Invalid params: dev = %pK, table = %pK",
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)mapping_info->table);
+ return -EINVAL;
+ }
+
+ /* Clean up the mapping_info struct from the list */
+ unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+ if (unmapped != mapping_info->len)
+ pr_err("Unmapped only %zx instead of %zx",
+ unmapped, mapping_info->len);
+
+ rc = cam_smmu_free_scratch_va(scratch_map,
+ mapping_info->paddr,
+ mapping_info->len);
+ if (rc < 0) {
+ pr_err("Error: Invalid iova while freeing scratch buffer\n");
+ rc = -EINVAL;
+ }
+
+ __free_pages(sg_page(mapping_info->table->sgl),
+ get_order(mapping_info->phys_len));
+ sg_free_table(mapping_info->table);
+ kfree(mapping_info->table);
+ list_del_init(&mapping_info->list);
+
+ kfree(mapping_info);
+ mapping_info = NULL;
+
+ return rc;
+}
+
+int cam_smmu_get_scratch_iova(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len)
+{
+ int idx, rc;
+ unsigned int iommu_dir;
+
+ if (!paddr_ptr || !virt_len || !phys_len) {
+ pr_err("Error: Input pointer or lengths invalid\n");
+ return -EINVAL;
+ }
+
+ if (virt_len < phys_len) {
+ pr_err("Error: virt_len > phys_len\n");
+ return -EINVAL;
+ }
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+ if (iommu_dir == IOMMU_INVALID_DIR) {
+ pr_err("Error: translate direction failed. dir = %d\n", dir);
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+ pr_err("Error: Context bank does not support scratch bufs\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
+ __func__, handle, idx, dir);
+ CDBG("%s: virt_len = %zx, phys_len = %zx\n",
+ __func__, phys_len, virt_len);
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+ pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+ pr_err("Requested scratch buffer length not page aligned\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!IS_ALIGNED(virt_len, phys_len)) {
+ pr_err("Requested virt length not aligned with phys length\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+ virt_len,
+ phys_len,
+ iommu_dir,
+ paddr_ptr);
+ if (rc < 0)
+ pr_err("Error: mapping or add list fail\n");
+
+error:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
+int cam_smmu_put_scratch_iova(int handle,
+ dma_addr_t paddr)
+{
+ int idx;
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info;
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+ pr_err("Error: Context bank does not support scratch buffers\n");
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ /* Based on virtual address and index, we can find mapping info
+ * of the scratch buffer
+ */
+ mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params\n");
+ rc = -ENODEV;
+ goto handle_err;
+ }
+
+ /* unmapping one buffer from device */
+ rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+ if (rc < 0) {
+ pr_err("Error: unmap or remove list fail\n");
+ goto handle_err;
+ }
+
+handle_err:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
+int cam_smmu_map_sec_iova(int handle, int ion_fd,
+ enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ /* not implemented yet */
+ return -EPERM;
+}
+EXPORT_SYMBOL(cam_smmu_map_sec_iova);
+
+int cam_smmu_map_iova(int handle, int ion_fd,
+ enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+ int idx, rc;
+ enum dma_data_direction dma_dir;
+ enum cam_smmu_buf_state buf_state;
+
+ if (!paddr_ptr || !len_ptr) {
+ pr_err("Input pointers are invalid\n");
+ return -EINVAL;
+ }
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* clean the content from clients */
+ *paddr_ptr = (dma_addr_t)NULL;
+ if (region_id != CAM_SMMU_REGION_SHARED)
+ *len_ptr = (size_t)0;
+
+ dma_dir = cam_smmu_translate_dir(dir);
+ if (dma_dir == DMA_NONE) {
+ pr_err("translate direction failed. dir = %d\n", dir);
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+ pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
+ len_ptr);
+ if (buf_state == CAM_SMMU_BUFF_EXIST) {
+ CDBG("ion_fd:%d already in the list, give same addr back",
+ ion_fd);
+ rc = 0;
+ goto get_addr_end;
+ }
+ rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+ paddr_ptr, len_ptr, region_id);
+ if (rc < 0)
+ pr_err("mapping or add list fail\n");
+
+get_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_iova);
+
+
+int cam_smmu_get_iova(int handle, int ion_fd,
+ dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+ int idx, rc = 0;
+ enum cam_smmu_buf_state buf_state;
+
+ if (!paddr_ptr || !len_ptr) {
+ pr_err("Error: Input pointers are invalid\n");
+ return -EINVAL;
+ }
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* clean the content from clients */
+ *paddr_ptr = (dma_addr_t)NULL;
+ *len_ptr = (size_t)0;
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+ if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+ CDBG("ion_fd:%d not in the mapped list", ion_fd);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+get_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_iova);
+
+int cam_smmu_unmap_sec_iova(int handle, int ion_fd)
+{
+ /* not implemented yet */
+ return -EPERM;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_sec_iova);
+
+int cam_smmu_unmap_iova(int handle,
+ int ion_fd,
+ enum cam_smmu_region_id region_id)
+{
+ int idx, rc;
+ struct cam_dma_buff_info *mapping_info;
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto unmap_end;
+ }
+
+ /* Based on ion fd and index, we can find mapping info of buffer */
+ mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+ idx, ion_fd);
+ rc = -EINVAL;
+ goto unmap_end;
+ }
+
+ mapping_info->ref_count--;
+ if (mapping_info->ref_count > 0) {
+ CDBG("There are still %u buffer(s) with same fd %d",
+ mapping_info->ref_count, mapping_info->ion_fd);
+ rc = 0;
+ goto unmap_end;
+ }
+
+ /* Unmapping one buffer from device */
+ CDBG("SMMU: removing buffer idx = %d\n", idx);
+ rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+ if (rc < 0)
+ pr_err("Error: unmap or remove list fail\n");
+
+unmap_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_iova);
+
+int cam_smmu_put_iova(int handle, int ion_fd)
+{
+ int idx;
+ int rc = 0;
+ struct cam_dma_buff_info *mapping_info;
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto put_addr_end;
+ }
+
+ /* based on ion fd and index, we can find mapping info of buffer */
+ mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+ idx, ion_fd);
+ rc = -EINVAL;
+ goto put_addr_end;
+ }
+
+ mapping_info->ref_count--;
+
+put_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_iova);
+
+int cam_smmu_destroy_handle(int handle)
+{
+ int idx;
+
+ if (handle == HANDLE_INIT) {
+ pr_err("Error: Invalid handle\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+ pr_err("Client %s buffer list is not clean!\n",
+ iommu_cb_set.cb_info[idx].name);
+ cam_smmu_print_list(idx);
+ cam_smmu_clean_buffer_list(idx);
+ }
+
+ iommu_cb_set.cb_info[idx].cb_count = 0;
+ iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
+
+static void cam_smmu_deinit_cb(struct cam_context_bank_info *cb)
+{
+ arm_iommu_detach_device(cb->dev);
+
+ if (cb->io_support && cb->mapping) {
+ arm_iommu_release_mapping(cb->mapping);
+ cb->mapping = NULL;
+ }
+
+ if (cb->shared_support) {
+ gen_pool_destroy(cb->shared_mem_pool);
+ cb->shared_mem_pool = NULL;
+ }
+
+ if (cb->scratch_buf_support) {
+ kfree(cb->scratch_map.bitmap);
+ cb->scratch_map.bitmap = NULL;
+ }
+}
+
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+ int i = 0;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++)
+ cam_smmu_deinit_cb(&iommu_cb_set.cb_info[i]);
+
+ devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+ iommu_cb_set.cb_num = 0;
+}
+
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+ struct device *dev)
+{
+ int rc = 0;
+
+ if (!cb || !dev) {
+ pr_err("Error: invalid input params\n");
+ return -EINVAL;
+ }
+
+ cb->dev = dev;
+ cb->is_fw_allocated = false;
+
+ /* Create a pool with 4K granularity for supporting shared memory */
+ if (cb->shared_support) {
+ cb->shared_mem_pool = gen_pool_create(
+ SHARED_MEM_POOL_GRANULARITY, -1);
+
+ if (!cb->shared_mem_pool)
+ return -ENOMEM;
+
+ rc = gen_pool_add(cb->shared_mem_pool,
+ cb->shared_info.iova_start,
+ cb->shared_info.iova_len,
+ -1);
+
+ CDBG("Shared mem start->%lX\n",
+ (unsigned long)cb->shared_info.iova_start);
+ CDBG("Shared mem len->%zu\n", cb->shared_info.iova_len);
+
+ if (rc) {
+ pr_err("Genpool chunk creation failed\n");
+ gen_pool_destroy(cb->shared_mem_pool);
+ cb->shared_mem_pool = NULL;
+ return rc;
+ }
+ }
+
+ if (cb->scratch_buf_support) {
+ rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+ cb->scratch_info.iova_start,
+ cb->scratch_info.iova_len,
+ 0);
+ if (rc < 0) {
+ pr_err("Error: failed to create scratch map\n");
+ rc = -ENODEV;
+ goto end;
+ }
+ }
+
+ /* create a virtual mapping */
+ if (cb->io_support) {
+ cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
+ cb->io_info.iova_start, cb->io_info.iova_len);
+ if (IS_ERR(cb->mapping)) {
+ pr_err("Error: create mapping Failed\n");
+ rc = -ENODEV;
+ goto end;
+ }
+ } else {
+ pr_err("Context bank does not have IO region\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ return rc;
+end:
+ if (cb->shared_support) {
+ gen_pool_destroy(cb->shared_mem_pool);
+ cb->shared_mem_pool = NULL;
+ }
+
+ if (cb->scratch_buf_support) {
+ kfree(cb->scratch_map.bitmap);
+ cb->scratch_map.bitmap = NULL;
+ }
+
+ return rc;
+}
+
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+ struct device_node *domains_child_node = NULL;
+
+ if (!dev) {
+ pr_err("Error: Invalid device\n");
+ return -ENODEV;
+ }
+
+ iommu_cb_set.cb_num = 0;
+
+	/* traverse through all the child nodes and increment the cb count */
+ for_each_available_child_of_node(dev->of_node, domains_child_node) {
+ if (of_device_is_compatible(domains_child_node,
+ "qcom,msm-cam-smmu-cb"))
+ iommu_cb_set.cb_num++;
+
+ if (of_device_is_compatible(domains_child_node,
+ "qcom,qsmmu-cam-cb"))
+ iommu_cb_set.cb_num++;
+ }
+
+ if (iommu_cb_set.cb_num == 0) {
+ pr_err("Error: no context banks present\n");
+ return -ENOENT;
+ }
+
+ /* allocate memory for the context banks */
+ iommu_cb_set.cb_info = devm_kzalloc(dev,
+ iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info),
+ GFP_KERNEL);
+
+ if (!iommu_cb_set.cb_info) {
+ pr_err("Error: cannot allocate context banks\n");
+ return -ENOMEM;
+ }
+
+ cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+ iommu_cb_set.cb_init_count = 0;
+
+ CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+ return 0;
+}
+
+static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
+ struct cam_context_bank_info *cb)
+{
+ int rc = 0;
+ struct device_node *mem_map_node = NULL;
+ struct device_node *child_node = NULL;
+ const char *region_name;
+ int num_regions = 0;
+
+ if (!of_node || !cb) {
+ pr_err("Invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+ if (!mem_map_node) {
+ pr_err("iova-mem-map not present\n");
+ return -EINVAL;
+ }
+
+ for_each_available_child_of_node(mem_map_node, child_node) {
+ uint32_t region_start;
+ uint32_t region_len;
+ uint32_t region_id;
+
+ num_regions++;
+ rc = of_property_read_string(child_node,
+ "iova-region-name", ®ion_name);
+ if (rc < 0) {
+ of_node_put(mem_map_node);
+ pr_err("IOVA region not found\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child_node,
+ "iova-region-start", ®ion_start);
+ if (rc < 0) {
+ of_node_put(mem_map_node);
+ pr_err("Failed to read iova-region-start\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child_node,
+ "iova-region-len", ®ion_len);
+ if (rc < 0) {
+ of_node_put(mem_map_node);
+ pr_err("Failed to read iova-region-len\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child_node,
+ "iova-region-id", ®ion_id);
+ if (rc < 0) {
+ of_node_put(mem_map_node);
+ pr_err("Failed to read iova-region-id\n");
+ return -EINVAL;
+ }
+
+ switch (region_id) {
+ case CAM_SMMU_REGION_FIRMWARE:
+ cb->firmware_support = 1;
+ cb->firmware_info.iova_start = region_start;
+ cb->firmware_info.iova_len = region_len;
+ break;
+ case CAM_SMMU_REGION_SHARED:
+ cb->shared_support = 1;
+ cb->shared_info.iova_start = region_start;
+ cb->shared_info.iova_len = region_len;
+ break;
+ case CAM_SMMU_REGION_SCRATCH:
+ cb->scratch_buf_support = 1;
+ cb->scratch_info.iova_start = region_start;
+ cb->scratch_info.iova_len = region_len;
+ break;
+ case CAM_SMMU_REGION_IO:
+ cb->io_support = 1;
+ cb->io_info.iova_start = region_start;
+ cb->io_info.iova_len = region_len;
+ break;
+ default:
+ pr_err("Incorrect region id present in DT file: %d\n",
+ region_id);
+ }
+
+ CDBG("Found label -> %s\n", cb->name);
+ CDBG("Found region -> %s\n", region_name);
+ CDBG("region_start -> %X\n", region_start);
+ CDBG("region_len -> %X\n", region_len);
+ CDBG("region_id -> %X\n", region_id);
+ }
+ of_node_put(mem_map_node);
+
+ if (!num_regions) {
+ pr_err("No memory regions found, at least one needed\n");
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static int cam_populate_smmu_context_banks(struct device *dev,
+ enum cam_iommu_type type)
+{
+ int rc = 0;
+ struct cam_context_bank_info *cb;
+ struct device *ctx = NULL;
+
+ if (!dev) {
+ pr_err("Error: Invalid device\n");
+ return -ENODEV;
+ }
+
+ /* check the bounds */
+ if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+ pr_err("Error: populate more than allocated cb\n");
+ rc = -EBADHANDLE;
+ goto cb_init_fail;
+ }
+
+ /* read the context bank from cb set */
+ cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+ /* set the name of the context bank */
+ rc = of_property_read_string(dev->of_node, "label", &cb->name);
+ if (rc < 0) {
+ pr_err("Error: failed to read label from sub device\n");
+ goto cb_init_fail;
+ }
+
+ rc = cam_smmu_get_memory_regions_info(dev->of_node,
+ cb);
+ if (rc < 0) {
+ pr_err("Error: Getting region info\n");
+ return rc;
+ }
+
+ /* set up the iommu mapping for the context bank */
+ if (type == CAM_QSMMU) {
+ pr_err("Error: QSMMU ctx not supported for : %s\n", cb->name);
+ return -ENODEV;
+ }
+
+ ctx = dev;
+ CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+
+ rc = cam_smmu_setup_cb(cb, ctx);
+ if (rc < 0) {
+ pr_err("Error: failed to setup cb : %s\n", cb->name);
+ goto cb_init_fail;
+ }
+
+ if (cb->io_support && cb->mapping)
+ iommu_set_fault_handler(cb->mapping->domain,
+ cam_smmu_iommu_fault_handler,
+ (void *)cb->name);
+
+ /* increment count to next bank */
+ iommu_cb_set.cb_init_count++;
+
+ CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+
+cb_init_fail:
+ return rc;
+}
+
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct device *dev = &pdev->dev;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+ rc = cam_alloc_smmu_context_banks(dev);
+ if (rc < 0) {
+ pr_err("Error: allocating context banks\n");
+ return -ENOMEM;
+ }
+ }
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+ rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+ if (rc < 0) {
+ pr_err("Error: populating context banks\n");
+ return -ENOMEM;
+ }
+ return rc;
+ }
+ if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+ rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+ if (rc < 0) {
+ pr_err("Error: populating context banks\n");
+ return -ENOMEM;
+ }
+ return rc;
+ }
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-fw-dev")) {
+ icp_fw.fw_dev = &pdev->dev;
+ icp_fw.fw_kva = NULL;
+ icp_fw.fw_dma_hdl = 0;
+ return rc;
+ }
+
+ /* probe through all the subdevices */
+ rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+ NULL, &pdev->dev);
+ if (rc < 0) {
+ pr_err("Error: populating devices\n");
+ } else {
+ INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
+ mutex_init(&iommu_cb_set.payload_list_lock);
+ INIT_LIST_HEAD(&iommu_cb_set.payload_list);
+ }
+
+ return rc;
+}
+
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+ /* release all the context banks and memory allocated */
+ cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+ cam_smmu_release_cb(pdev);
+ return 0;
+}
+
+static struct platform_driver cam_smmu_driver = {
+ .probe = cam_smmu_probe,
+ .remove = cam_smmu_remove,
+ .driver = {
+ .name = "msm_cam_smmu",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cam_smmu_dt_match,
+ },
+};
+
+static int __init cam_smmu_init_module(void)
+{
+ return platform_driver_register(&cam_smmu_driver);
+}
+
+static void __exit cam_smmu_exit_module(void)
+{
+ platform_driver_unregister(&cam_smmu_driver);
+}
+
+module_init(cam_smmu_init_module);
+module_exit(cam_smmu_exit_module);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
new file mode 100644
index 0000000..76e9135
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -0,0 +1,255 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+
+/* Enum for possible CAM SMMU operations */
+enum cam_smmu_ops_param {
+ CAM_SMMU_ATTACH,
+ CAM_SMMU_DETACH,
+ CAM_SMMU_VOTE,
+ CAM_SMMU_DEVOTE,
+ CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+ CAM_SMMU_MAP_READ,
+ CAM_SMMU_MAP_WRITE,
+ CAM_SMMU_MAP_RW,
+ CAM_SMMU_MAP_INVALID
+};
+
+enum cam_smmu_region_id {
+ CAM_SMMU_REGION_FIRMWARE,
+ CAM_SMMU_REGION_SHARED,
+ CAM_SMMU_REGION_SCRATCH,
+ CAM_SMMU_REGION_IO
+};
+
+/**
+ * @brief : Gets an smmu handle
+ *
+ * @param identifier: Unique identifier to be used by clients which they
+ * should get from device tree. CAM SMMU driver will
+ * not enforce how this string is obtained and will
+ * only validate this against the list of permitted
+ * identifiers
+ * @param handle_ptr: Based on the identifier, CAM SMMU driver will
+ *                    fill the handle pointed by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
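+
+/*
+ * Example (illustrative sketch, not part of the driver): a client would
+ * typically fetch its handle once using the label from its device tree
+ * node. The identifier "vfe" below is hypothetical.
+ *
+ *	int hdl, rc;
+ *
+ *	rc = cam_smmu_get_handle("vfe", &hdl);
+ *	if (rc < 0)
+ *		return rc;
+ */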
+
+/**
+ * @brief : Performs IOMMU operations
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ * or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
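+
+/*
+ * Example (sketch): the context bank must be attached before any buffer
+ * is mapped, and detached when the client is done; hdl is assumed to
+ * come from cam_smmu_get_handle() above.
+ *
+ *	rc = cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
+ *	...
+ *	rc = cam_smmu_ops(hdl, CAM_SMMU_DETACH);
+ */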
+
+/**
+ * @brief : Maps IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @dir : Mapping direction which will translate to DMA_BIDIRECTIONAL,
+ *        DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr : Pointer to physical address where mapped address will be
+ * returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ * CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter
+ * which specifies the cpu virtual address to map.
+ * @len : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_iova(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id);
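+
+/*
+ * Example (sketch): mapping an ION buffer into the IO region; ion_fd is
+ * assumed to be a valid fd exported by ION, error handling trimmed.
+ *
+ *	dma_addr_t iova;
+ *	size_t len;
+ *
+ *	rc = cam_smmu_map_iova(hdl, ion_fd, CAM_SMMU_MAP_RW,
+ *		&iova, &len, CAM_SMMU_REGION_IO);
+ */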
+
+/**
+ * @brief : Unmaps IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_iova(int handle,
+ int ion_fd,
+ enum cam_smmu_region_id region_id);
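+
+/*
+ * Example (sketch): the unmap must name the same region the buffer was
+ * mapped to, here CAM_SMMU_REGION_IO.
+ *
+ *	rc = cam_smmu_unmap_iova(hdl, ion_fd, CAM_SMMU_REGION_IO);
+ */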
+
+/**
+ * @brief : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
+ * other, otherwise -EINVAL is returned.
+ *
+ * -EINVAL will be returned if virt_len is less than phys_len.
+ *
+ * Passing a too large phys_len might also cause failure if that much size is
+ * not available for allocation in a physically contiguous way.
+ *
+ * @param handle : Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param dir : Direction of mapping which will translate to IOMMU_READ
+ * IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ * able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_get_scratch_iova(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len);
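+
+/*
+ * Example (sketch): a 64K scratch region backed by 16K physically
+ * contiguous chunks; both lengths are page aligned and virt_len is a
+ * multiple of phys_len, as required above.
+ *
+ *	dma_addr_t scratch_iova;
+ *
+ *	rc = cam_smmu_get_scratch_iova(hdl, CAM_SMMU_MAP_RW,
+ *		&scratch_iova, SZ_64K, SZ_16K);
+ */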
+
+/**
+ * @brief : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle : Handle to identify the CAMSMMU client (IFE, ICP, etc.)
+ * @param paddr : Device virtual address of client's scratch buffer that
+ * will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_put_scratch_iova(int handle,
+ dma_addr_t paddr);
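+
+/*
+ * Example (sketch): release using the device virtual address returned
+ * by cam_smmu_get_scratch_iova().
+ *
+ *	rc = cam_smmu_put_scratch_iova(hdl, scratch_iova);
+ */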
+
+/**
+ *@brief : Destroys an smmu handle
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @brief : Finds index by handle in the smmu client table
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @brief : Registers smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param client_page_fault_handler: Callback invoked on an IOMMU page
+ *                                   fault; pass NULL to unregister
+ * @param token: Client token passed back to the handler on a page fault
+ */
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ void (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token);
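+
+/*
+ * Example (sketch): my_fault_handler and my_ctx are hypothetical client
+ * symbols; registering a NULL handler with the same token removes the
+ * registration again.
+ *
+ *	cam_smmu_reg_client_page_fault_handler(hdl, my_fault_handler,
+ *		my_ctx);
+ *	...
+ *	cam_smmu_reg_client_page_fault_handler(hdl, NULL, my_ctx);
+ */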
+
+/**
+ * @brief Maps memory from an ION fd into IOVA space
+ *
+ * @param handle: SMMU handle identifying the context bank to map to
+ * @param ion_fd: ION fd of memory to map to
+ * @param paddr_ptr: Pointer IOVA address that will be returned
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_iova(int handle, int ion_fd,
+ dma_addr_t *paddr_ptr, size_t *len_ptr);
+/**
+ * @brief Unmaps memory from context bank
+ *
+ * @param handle: SMMU handle identifying the context bank
+ * @param ion_fd: ION fd of memory to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_iova(int handle, int ion_fd);
+
+/**
+ * @brief Maps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying context bank
+ * @param ion_fd: ION fd to map securely
+ * @param dir: DMA Direction for the mapping
+ * @param dma_addr: Returned IOVA address after mapping
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_sec_iova(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying context bank
+ * @param ion_fd: ION fd to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_sec_iova(int handle, int ion_fd);
+
+
+/**
+ * @brief Allocates firmware for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated firmware
+ * @param kvaddr: CPU mapped address of allocated firmware
+ * @param len: Length of allocated firmware memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+ dma_addr_t *iova,
+ uint64_t *kvaddr,
+ size_t *len);
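+
+/*
+ * Example (sketch): the firmware region's start and length come from the
+ * context bank's device tree node, so the caller only supplies the
+ * handle and receives the IOVA, CPU address and length.
+ *
+ *	dma_addr_t fw_iova;
+ *	uint64_t fw_kva;
+ *	size_t fw_len;
+ *
+ *	rc = cam_smmu_alloc_firmware(hdl, &fw_iova, &fw_kva, &fw_len);
+ *	...
+ *	rc = cam_smmu_dealloc_firmware(hdl);
+ */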
+
+/**
+ * @brief Deallocates firmware memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 4f5bf87..ecc62c8 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -57,6 +57,48 @@
return 0;
}
+uint32_t cam_sync_util_get_group_object_state(struct sync_table_row *table,
+ uint32_t *sync_objs,
+ uint32_t num_objs)
+{
+ int i;
+ struct sync_table_row *child_row = NULL;
+ int success_count = 0;
+ int active_count = 0;
+
+ if (!table || !sync_objs)
+ return CAM_SYNC_STATE_SIGNALED_ERROR;
+
+	/*
+	 * We need to arrive at the state of the merged object based on
+	 * the error, active and success states of all child objects:
+	 * any child in error makes the group an error, any child still
+	 * active keeps the group active, and the group is a success only
+	 * when every child has signaled success.
+	 */
+ for (i = 0; i < num_objs; i++) {
+ child_row = table + sync_objs[i];
+ switch (child_row->state) {
+ case CAM_SYNC_STATE_SIGNALED_ERROR:
+ return CAM_SYNC_STATE_SIGNALED_ERROR;
+ case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+ success_count++;
+ break;
+ case CAM_SYNC_STATE_ACTIVE:
+ active_count++;
+ break;
+ default:
+ pr_err("Invalid state of child object during merge\n");
+ return CAM_SYNC_STATE_SIGNALED_ERROR;
+ }
+ }
+
+ if (active_count)
+ return CAM_SYNC_STATE_ACTIVE;
+
+ if (success_count == num_objs)
+ return CAM_SYNC_STATE_SIGNALED_SUCCESS;
+
+ return CAM_SYNC_STATE_SIGNALED_ERROR;
+}
+
int cam_sync_init_group_object(struct sync_table_row *table,
uint32_t idx,
uint32_t *sync_objs,
@@ -113,12 +155,16 @@
row->type = CAM_SYNC_TYPE_GROUP;
row->sync_id = idx;
- row->state = CAM_SYNC_STATE_ACTIVE;
+ row->state = cam_sync_util_get_group_object_state(table,
+ sync_objs, num_objs);
row->remaining = num_objs;
init_completion(&row->signaled);
INIT_LIST_HEAD(&row->callback_list);
INIT_LIST_HEAD(&row->user_payload_list);
+ if (row->state != CAM_SYNC_STATE_ACTIVE)
+ complete_all(&row->signaled);
+
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return 0;
}
@@ -208,6 +254,11 @@
int i;
struct sync_table_row *row = NULL;
+ if (num_objs <= 1) {
+ pr_err("Single object merge is not allowed\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < num_objs; i++) {
row = sync_dev->sync_table + sync_obj[i];
spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 683386c..b16e37e 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -412,6 +412,7 @@
goto put_regulator;
}
disable_irq(soc_info->irq_line->start);
+ soc_info->irq_data = irq_data;
}
/* Get Clock */
@@ -439,7 +440,8 @@
if (soc_info->irq_line) {
disable_irq(soc_info->irq_line->start);
- free_irq(soc_info->irq_line->start, soc_info);
+ devm_free_irq(&soc_info->pdev->dev,
+ soc_info->irq_line->start, irq_data);
}
put_regulator:
@@ -495,7 +497,8 @@
if (soc_info->irq_line) {
disable_irq(soc_info->irq_line->start);
- free_irq(soc_info->irq_line->start, soc_info);
+ devm_free_irq(&soc_info->pdev->dev,
+ soc_info->irq_line->start, soc_info->irq_data);
}
return 0;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 0baa9e6..3e8226f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -58,6 +58,7 @@
* @index: Instance id for the camera device
* @irq_name: Name of the irq associated with the device
* @irq_line: Irq resource
+ * @irq_data: Private data that is passed when IRQ is requested
* @num_mem_block: Number of entry in the "reg-names"
* @mem_block_name: Array of the reg block name
* @mem_block_cam_base: Array of offset of this register space compared
@@ -85,6 +86,7 @@
const char *irq_name;
struct resource *irq_line;
+ void *irq_data;
uint32_t num_mem_block;
const char *mem_block_name[CAM_SOC_MAX_BLOCK];
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index a0b53bb..9194b44 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -96,7 +96,6 @@
* @SDE_CAPS_R1_WB: MDSS V1.x WB block
* @SDE_CAPS_R3_WB: MDSS V3.x WB block
* @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
- * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
* @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
* @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
*/
@@ -105,7 +104,6 @@
SDE_CAPS_R3_WB,
SDE_CAPS_R3_1P5_DOWNSCALE,
SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
- SDE_CAPS_MIN_BUS_VOTE,
SDE_CAPS_SBUF_1,
SDE_CAPS_UBWC_2,
SDE_CAPS_MAX,
@@ -140,6 +138,18 @@
int domain;
};
+/*
+ * struct sde_rot_debug_bus: rotator debugbus header structure
+ * @wr_addr: write address for debugbus controller
+ * @block_id: rotator debugbus block id
+ * @test_id: rotator debugbus test id
+ */
+struct sde_rot_debug_bus {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+};
+
struct sde_rot_vbif_debug_bus {
u32 disable_bus_addr;
u32 block_bus_addr;
@@ -191,6 +201,8 @@
struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus;
u32 nrt_vbif_dbg_bus_size;
+ struct sde_rot_debug_bus *rot_dbg_bus;
+ u32 rot_dbg_bus_size;
struct sde_rot_regdump *regdump;
u32 regdump_size;
@@ -199,6 +211,8 @@
int sec_cam_en;
struct ion_client *iclient;
+
+ bool clk_always_on;
};
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 9a28700..30fda07 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -25,6 +25,7 @@
#include <linux/msm-bus-board.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-direction.h>
+#include <linux/sde_rsc.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <asm/cacheflush.h>
@@ -293,14 +294,13 @@
SDEROT_DBG("core_clk %lu\n", total_clk_rate);
ATRACE_INT("core_clk", total_clk_rate);
- sde_rotator_set_clk_rate(mgr, total_clk_rate, SDE_ROTATOR_CLK_ROT_CORE);
+ sde_rotator_set_clk_rate(mgr, total_clk_rate, SDE_ROTATOR_CLK_MDSS_ROT);
return 0;
}
static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
{
- struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int ret;
if (WARN_ON(mgr->regulator_enable == on)) {
@@ -311,7 +311,7 @@
SDEROT_EVTLOG(on);
SDEROT_DBG("%s: rotator regulators\n", on ? "Enable" : "Disable");
- if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && on) {
+ if (on) {
mgr->minimum_bw_vote = mgr->enable_bw_vote;
sde_rotator_update_perf(mgr);
}
@@ -319,8 +319,13 @@
if (mgr->ops_hw_pre_pmevent)
mgr->ops_hw_pre_pmevent(mgr, on);
- ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
- mgr->module_power.num_vreg, on);
+ if (mgr->rsc_client)
+ ret = sde_rsc_client_state_update(mgr->rsc_client,
+ on ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE,
+ NULL, -1);
+ else
+ ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
+ mgr->module_power.num_vreg, on);
if (ret) {
SDEROT_WARN("Rotator regulator failed to %s\n",
on ? "enable" : "disable");
@@ -330,7 +335,7 @@
if (mgr->ops_hw_post_pmevent)
mgr->ops_hw_post_pmevent(mgr, on);
- if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && !on) {
+ if (!on) {
mgr->minimum_bw_vote = 0;
sde_rotator_update_perf(mgr);
}
@@ -407,13 +412,13 @@
if (ret)
goto error_mdss_axi;
ret = sde_rotator_enable_clk(mgr,
- SDE_ROTATOR_CLK_ROT_CORE);
- if (ret)
- goto error_rot_core;
- ret = sde_rotator_enable_clk(mgr,
SDE_ROTATOR_CLK_MDSS_ROT);
if (ret)
goto error_mdss_rot;
+ ret = sde_rotator_enable_clk(mgr,
+ SDE_ROTATOR_CLK_MDSS_ROT_SUB);
+ if (ret)
+ goto error_rot_sub;
/* Active+Sleep */
msm_bus_scale_client_update_context(
@@ -421,8 +426,9 @@
mgr->data_bus.curr_bw_uc_idx);
trace_rot_bw_ao_as_context(0);
} else {
+ sde_rotator_disable_clk(mgr,
+ SDE_ROTATOR_CLK_MDSS_ROT_SUB);
sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
- sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_ROT_CORE);
sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_GCC_AXI);
@@ -438,9 +444,9 @@
}
return ret;
+error_rot_sub:
+ sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT);
error_mdss_rot:
- sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_ROT_CORE);
-error_rot_core:
sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AXI);
error_mdss_axi:
sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MDSS_AHB);
@@ -551,6 +557,12 @@
if (!input)
dir = DMA_FROM_DEVICE;
+ if (buffer->plane_count > SDE_ROT_MAX_PLANES) {
+ SDEROT_ERR("buffer plane_count exceeds MAX_PLANE limit:%d\n",
+ buffer->plane_count);
+ return -EINVAL;
+ }
+
data->sbuf = buffer->sbuf;
data->scid = buffer->scid;
data->writeback = buffer->writeback;
@@ -2731,11 +2743,19 @@
sde_rotator_search_dt_clk(pdev, mgr, "axi_clk",
SDE_ROTATOR_CLK_MDSS_AXI, true) ||
sde_rotator_search_dt_clk(pdev, mgr, "rot_core_clk",
- SDE_ROTATOR_CLK_ROT_CORE, true) ||
- sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
- SDE_ROTATOR_CLK_MDSS_ROT, true))
+ SDE_ROTATOR_CLK_MDSS_ROT, false))
rc = -EINVAL;
+ /*
+ * If 'MDSS_ROT' is already present, place 'rot_clk' under
+ * MDSS_ROT_SUB. Otherwise, place it directly into MDSS_ROT.
+ */
+ if (sde_rotator_get_clk(mgr, SDE_ROTATOR_CLK_MDSS_ROT))
+ rc = sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
+ SDE_ROTATOR_CLK_MDSS_ROT_SUB, true);
+ else
+ rc = sde_rotator_search_dt_clk(pdev, mgr, "rot_clk",
+ SDE_ROTATOR_CLK_MDSS_ROT, true);
clk_err:
return rc;
}
@@ -2766,9 +2786,21 @@
{
int ret;
- ret = sde_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
- if (ret)
+ mgr->rsc_client = sde_rsc_client_create(
+ SDE_RSC_INDEX, "sde_rotator_core", false);
+ if (IS_ERR(mgr->rsc_client)) {
+ ret = PTR_ERR(mgr->rsc_client);
+ pr_err("rsc client create returned %d\n", ret);
+ mgr->rsc_client = NULL;
return ret;
+ }
+
+ if (!mgr->rsc_client) {
+ ret = sde_rotator_get_dt_vreg_data(
+ &pdev->dev, &mgr->module_power);
+ if (ret)
+ return ret;
+ }
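+
+ /*
+ * Power-control note (inferred from this patch): when an RSC client
+ * is present, the footswitch is driven via
+ * sde_rsc_client_state_update() instead of toggling the DT-declared
+ * regulators directly.
+ */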
ret = sde_rotator_register_clk(pdev, mgr);
if (ret)
@@ -2788,9 +2820,15 @@
{
struct platform_device *pdev = mgr->pdev;
- sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
sde_rotator_unregister_clk(mgr);
sde_rotator_bus_scale_unregister(mgr);
+
+ if (mgr->rsc_client) {
+ sde_rsc_client_destroy(mgr->rsc_client);
+ mgr->rsc_client = NULL;
+ } else {
+ sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
+ }
}
int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 980e4af..0051e96 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -125,7 +125,7 @@
enum sde_rotator_clk_type {
SDE_ROTATOR_CLK_MDSS_AHB,
SDE_ROTATOR_CLK_MDSS_AXI,
- SDE_ROTATOR_CLK_ROT_CORE,
+ SDE_ROTATOR_CLK_MDSS_ROT_SUB,
SDE_ROTATOR_CLK_MDSS_ROT,
SDE_ROTATOR_CLK_MNOC_AHB,
SDE_ROTATOR_CLK_GCC_AHB,
@@ -373,6 +373,7 @@
* @reg_bus: register bus configuration state
* @module_power: power/clock configuration state
* @regulator_enable: true if foot switch is enabled; false otherwise
+ * @rsc_client: pointer to rsc client handle
* @res_ref_cnt: reference count of how many times resource is requested
* @rot_enable_clk_cnt: reference count of how many times clock is requested
* @rot_clk: array of rotator and periphery clocks
@@ -417,6 +418,8 @@
struct sde_module_power module_power;
bool regulator_enable;
+ struct sde_rsc_client *rsc_client;
+
int res_ref_cnt;
int rot_enable_clk_cnt;
struct sde_rot_clk *rot_clk;
@@ -459,6 +462,7 @@
bool input);
int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
int len);
+ int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
void *hw_data;
};
@@ -490,6 +494,14 @@
return 0;
}
+static inline int sde_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
+{
+ if (mgr && mgr->ops_hw_get_maxlinewidth)
+ return mgr->ops_hw_get_maxlinewidth(mgr);
+
+ return 2048;
+}
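+
+/*
+ * Note (illustrative): 2048 is a conservative fallback used only when no
+ * hw op is registered; r3 hardware reports its own limit, 4096 by default,
+ * optionally overridden through the qcom,mdss-rot-linewidth DT property.
+ */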
+
static inline int __compare_session_item_rect(
struct sde_rotation_buf_info *s_rect,
struct sde_rect *i_rect, uint32_t i_fmt, bool src)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index e56c70a..e9ff67c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -30,6 +30,7 @@
#define SDE_EVTLOG_DEFAULT_PANIC 1
#define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
#define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
+#define SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
/*
* evtlog will print this number of entries when it is called through
@@ -53,6 +54,8 @@
#define GROUP_BYTES 4
#define ROW_BYTES 16
+#define SDE_ROT_TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
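+/*
+ * Illustrative decoding of the mask: bit 0 enables the test point,
+ * bits [3:1] carry the test id, and the bits above that carry the
+ * block id, e.g. SDE_ROT_TEST_MASK(1, 2) == 0x15.
+ */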
+
static DEFINE_SPINLOCK(sde_rot_xlock);
/*
@@ -86,11 +89,14 @@
* @panic_on_err - boolean indicates issue panic after EVTLOG dump
* @enable_reg_dump - control in-log/memory dump for rotator registers
* @enable_vbif_dbgbus_dump - control in-log/memory dump for VBIF debug bus
+ * @enable_rot_dbgbus_dump - control in-log/memory dump for rotator debug bus
* @evtlog_dump_work - schedule work structure for timeout handler
* @work_dump_reg - storage for register dump control in schedule work
* @work_panic - storage for panic control in schedule work
* @work_vbif_dbgbus - storage for VBIF debug bus control in schedule work
+ * @work_rot_dbgbus - storage for rotator debug bus control in schedule work
* @nrt_vbif_dbgbus_dump - memory buffer for VBIF debug bus dumping
+ * @rot_dbgbus_dump - memory buffer for rotator debug bus dumping
* @reg_dump_array - memory buffer for rotator registers dumping
*/
struct sde_rot_dbg_evtlog {
@@ -103,14 +109,88 @@
u32 panic_on_err;
u32 enable_reg_dump;
u32 enable_vbif_dbgbus_dump;
+ u32 enable_rot_dbgbus_dump;
struct work_struct evtlog_dump_work;
bool work_dump_reg;
bool work_panic;
bool work_vbif_dbgbus;
+ bool work_rot_dbgbus;
u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
+ u32 *rot_dbgbus_dump;
u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
} sde_rot_dbg_evtlog;
+static void sde_rot_dump_debug_bus(u32 bus_dump_flag, u32 **dump_mem)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ bool in_log, in_mem;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct sde_rot_debug_bus *head;
+ phys_addr_t phys = 0;
+ int i;
+ u32 offset;
+ void __iomem *base;
+
+ in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
+ in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
+ base = mdata->sde_io.base;
+
+ if (!base || !mdata->rot_dbg_bus || !mdata->rot_dbg_bus_size)
+ return;
+
+ pr_info("======== SDE Rotator Debug bus DUMP =========\n");
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+ mdata->rot_dbg_bus_size * 4 * sizeof(u32),
+ &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+ __func__, dump_addr,
+ dump_addr + (u32)mdata->rot_dbg_bus_size * 16);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
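+
+ /*
+ * Each bus point is recorded as four words: wr_addr, block_id,
+ * test_id and the sampled status, hence the "* 4" sizing above.
+ */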
+
+ sde_smmu_ctrl(1);
+
+ for (i = 0; i < mdata->rot_dbg_bus_size; i++) {
+ head = mdata->rot_dbg_bus + i;
+ writel_relaxed(SDE_ROT_TEST_MASK(head->block_id, head->test_id),
+ base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ offset = head->wr_addr + 0x4;
+
+ status = readl_relaxed(base + offset);
+
+ if (in_log)
+ pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id, head->test_id,
+ status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, base + head->wr_addr);
+ }
+
+ sde_smmu_ctrl(0);
+
+ pr_info("========End Debug bus=========\n");
+}
+
/*
* sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
* enable/disable
@@ -518,18 +598,26 @@
* @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
*/
static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
- bool dump_rot, bool dump_vbif_debug_bus)
+ bool dump_rot, bool dump_vbif_debug_bus, bool dump_rot_debug_bus)
{
sde_rot_evtlog_dump_all();
- if (dump_rot)
- sde_rot_dump_reg_all();
+ if (dump_rot_debug_bus)
+ sde_rot_dump_debug_bus(
+ sde_rot_dbg_evtlog.enable_rot_dbgbus_dump,
+ &sde_rot_dbg_evtlog.rot_dbgbus_dump);
if (dump_vbif_debug_bus)
sde_rot_dump_vbif_debug_bus(
sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
&sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);
+ /*
+ * Rotator registers always dump last
+ */
+ if (dump_rot)
+ sde_rot_dump_reg_all();
+
if (dead)
panic(panic_name);
}
@@ -544,7 +632,8 @@
sde_rot_dbg_evtlog.work_panic,
"evtlog_workitem",
sde_rot_dbg_evtlog.work_dump_reg,
- sde_rot_dbg_evtlog.work_vbif_dbgbus);
+ sde_rot_dbg_evtlog.work_vbif_dbgbus,
+ sde_rot_dbg_evtlog.work_rot_dbgbus);
}
/*
@@ -569,6 +658,7 @@
bool dead = false;
bool dump_rot = false;
bool dump_vbif_dbgbus = false;
+ bool dump_rot_dbgbus = false;
char *blk_name = NULL;
va_list args;
@@ -590,6 +680,9 @@
if (!strcmp(blk_name, "vbif_dbg_bus"))
dump_vbif_dbgbus = true;
+ if (!strcmp(blk_name, "rot_dbg_bus"))
+ dump_rot_dbgbus = true;
+
if (!strcmp(blk_name, "panic"))
dead = true;
}
@@ -600,10 +693,11 @@
sde_rot_dbg_evtlog.work_panic = dead;
sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
+ sde_rot_dbg_evtlog.work_rot_dbgbus = dump_rot_dbgbus;
schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
} else {
sde_rot_evtlog_dump_helper(dead, name, dump_rot,
- dump_vbif_dbgbus);
+ dump_vbif_dbgbus, dump_rot_dbgbus);
}
}
@@ -836,6 +930,13 @@
return -EINVAL;
}
+ mdata->clk_always_on = false;
+ if (!debugfs_create_bool("clk_always_on", 0644,
+ debugfs_root, &mdata->clk_always_on)) {
+ SDEROT_WARN("failed to create debugfs clk_always_on\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -919,12 +1020,16 @@
&sde_rot_dbg_evtlog.enable_reg_dump);
debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
&sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
+ debugfs_create_u32("rot_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
+ &sde_rot_dbg_evtlog.enable_rot_dbgbus_dump);
sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
+ sde_rot_dbg_evtlog.enable_rot_dbgbus_dump =
+ SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP;
pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
sde_rot_dbg_evtlog.evtlog_enable,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index c061446..90b7194 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -438,6 +438,8 @@
{
struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct sde_rotator_request *request;
+ struct list_head *curr, *next;
int i;
int ret;
@@ -458,6 +460,21 @@
sde_rot_mgr_lock(rot_dev->mgr);
sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
sde_rot_mgr_unlock(rot_dev->mgr);
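+ /*
+ * Flush work items for requests still on the pending list; the
+ * queue lock is dropped around cancel_work_sync() because the
+ * submit/retire handlers may take it (deadlock avoidance, inferred).
+ */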
+ list_for_each_safe(curr, next, &ctx->pending_list) {
+ request = container_of(curr, struct sde_rotator_request,
+ list);
+
+ SDEDEV_DBG(rot_dev->dev, "cancel request s:%d\n",
+ ctx->session_id);
+ mutex_unlock(q->lock);
+ cancel_work_sync(&request->submit_work);
+ cancel_work_sync(&request->retire_work);
+ mutex_lock(q->lock);
+ spin_lock(&ctx->list_lock);
+ list_del_init(&request->list);
+ list_add_tail(&request->list, &ctx->retired_list);
+ spin_unlock(&ctx->list_lock);
+ }
}
sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
@@ -1315,6 +1332,35 @@
EXPORT_SYMBOL(sde_rotator_inline_get_downscale_caps);
/*
+ * sde_rotator_inline_get_maxlinewidth - get maximum line width of rotator
+ * @pdev: Pointer to platform device
+ * return: maximum line width
+ */
+int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev)
+{
+ struct sde_rotator_device *rot_dev;
+ int maxlinewidth;
+
+ if (!pdev) {
+ SDEROT_ERR("invalid platform device\n");
+ return -EINVAL;
+ }
+
+ rot_dev = (struct sde_rotator_device *)platform_get_drvdata(pdev);
+ if (!rot_dev || !rot_dev->mgr) {
+ SDEROT_ERR("invalid rotator device\n");
+ return -EINVAL;
+ }
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+ maxlinewidth = sde_rotator_get_maxlinewidth(rot_dev->mgr);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+
+ return maxlinewidth;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_maxlinewidth);
+
+/*
* sde_rotator_inline_get_pixfmt_caps - get pixel format capability
* @pdev: Pointer to platform device
* @pixfmt: array of pixel format buffer
@@ -2565,8 +2611,13 @@
static long sde_rotator_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct video_device *vdev = video_devdata(file);
+ struct sde_rotator_ctx *ctx =
+ sde_rotator_ctx_from_fh(file->private_data);
long ret;
+ mutex_lock(vdev->lock);
+
switch (cmd) {
case VIDIOC_S_SDE_ROTATOR_FENCE:
case VIDIOC_G_SDE_ROTATOR_FENCE:
@@ -2575,14 +2626,14 @@
if (copy_from_user(&fence, (void __user *)arg,
sizeof(struct msm_sde_rotator_fence)))
- return -EFAULT;
+ goto ioctl32_error;
ret = sde_rotator_private_ioctl(file, file->private_data,
0, cmd, (void *)&fence);
if (copy_to_user((void __user *)arg, &fence,
sizeof(struct msm_sde_rotator_fence)))
- return -EFAULT;
+ goto ioctl32_error;
break;
}
@@ -2593,24 +2644,31 @@
if (copy_from_user(&comp_ratio, (void __user *)arg,
sizeof(struct msm_sde_rotator_comp_ratio)))
- return -EFAULT;
+ goto ioctl32_error;
ret = sde_rotator_private_ioctl(file, file->private_data,
0, cmd, (void *)&comp_ratio);
if (copy_to_user((void __user *)arg, &comp_ratio,
sizeof(struct msm_sde_rotator_comp_ratio)))
- return -EFAULT;
+ goto ioctl32_error;
break;
}
default:
+ SDEDEV_ERR(ctx->rot_dev->dev, "invalid ioctl32 type:%x\n", cmd);
ret = -ENOIOCTLCMD;
break;
}
+ mutex_unlock(vdev->lock);
return ret;
+
+ioctl32_error:
+ mutex_unlock(vdev->lock);
+ SDEDEV_ERR(ctx->rot_dev->dev, "error handling ioctl32 cmd:%x\n", cmd);
+ return -EFAULT;
}
#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index ec89785..27fd0c3 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -104,6 +104,7 @@
u32 src_pixfmt, u32 *dst_pixfmt);
int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
char *downscale_caps, int len);
+int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev);
int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
bool input, u32 *pixfmt, int len);
int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 9071361..1bab010 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -55,6 +55,8 @@
#define DEFAULT_UBWC_MALSIZE 1
#define DEFAULT_UBWC_SWIZZLE 1
+#define DEFAULT_MAXLINEWIDTH 4096
+
/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
do { \
@@ -383,11 +385,95 @@
};
static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
- {0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
+ {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};
+static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
+ /*
+ * rottop - 0xA8850
+ */
+ /* REGDMA */
+ { 0XA8850, 0, 0 },
+ { 0XA8850, 0, 1 },
+ { 0XA8850, 0, 2 },
+ { 0XA8850, 0, 3 },
+ { 0XA8850, 0, 4 },
+
+ /* ROT_WB */
+ { 0XA8850, 1, 0 },
+ { 0XA8850, 1, 1 },
+ { 0XA8850, 1, 2 },
+ { 0XA8850, 1, 3 },
+ { 0XA8850, 1, 4 },
+ { 0XA8850, 1, 5 },
+ { 0XA8850, 1, 6 },
+ { 0XA8850, 1, 7 },
+
+ /* UBWC_DEC */
+ { 0XA8850, 2, 0 },
+
+ /* UBWC_ENC */
+ { 0XA8850, 3, 0 },
+
+ /* ROT_FETCH_0 */
+ { 0XA8850, 4, 0 },
+ { 0XA8850, 4, 1 },
+ { 0XA8850, 4, 2 },
+ { 0XA8850, 4, 3 },
+ { 0XA8850, 4, 4 },
+ { 0XA8850, 4, 5 },
+ { 0XA8850, 4, 6 },
+ { 0XA8850, 4, 7 },
+
+ /* ROT_FETCH_1 */
+ { 0XA8850, 5, 0 },
+ { 0XA8850, 5, 1 },
+ { 0XA8850, 5, 2 },
+ { 0XA8850, 5, 3 },
+ { 0XA8850, 5, 4 },
+ { 0XA8850, 5, 5 },
+ { 0XA8850, 5, 6 },
+ { 0XA8850, 5, 7 },
+
+ /* ROT_FETCH_2 */
+ { 0XA8850, 6, 0 },
+ { 0XA8850, 6, 1 },
+ { 0XA8850, 6, 2 },
+ { 0XA8850, 6, 3 },
+ { 0XA8850, 6, 4 },
+ { 0XA8850, 6, 5 },
+ { 0XA8850, 6, 6 },
+ { 0XA8850, 6, 7 },
+
+ /* ROT_FETCH_3 */
+ { 0XA8850, 7, 0 },
+ { 0XA8850, 7, 1 },
+ { 0XA8850, 7, 2 },
+ { 0XA8850, 7, 3 },
+ { 0XA8850, 7, 4 },
+ { 0XA8850, 7, 5 },
+ { 0XA8850, 7, 6 },
+ { 0XA8850, 7, 7 },
+
+ /* ROT_FETCH_4 */
+ { 0XA8850, 8, 0 },
+ { 0XA8850, 8, 1 },
+ { 0XA8850, 8, 2 },
+ { 0XA8850, 8, 3 },
+ { 0XA8850, 8, 4 },
+ { 0XA8850, 8, 5 },
+ { 0XA8850, 8, 6 },
+ { 0XA8850, 8, 7 },
+
+ /* ROT_UNPACK_0*/
+ { 0XA8850, 9, 0 },
+ { 0XA8850, 9, 1 },
+ { 0XA8850, 9, 2 },
+ { 0XA8850, 9, 3 },
+};
+
static struct sde_rot_regdump sde_rot_r3_regdump[] = {
{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
@@ -1428,7 +1514,8 @@
sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
if (status & ROT_ERROR_BIT)
- SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
+ "vbif_dbg_bus", "panic");
return sts;
}
@@ -1612,8 +1699,8 @@
SDEROT_ERR(
"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
swts, hwts, regdmasts, rotsts);
- SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus",
- "panic");
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
+ "vbif_dbg_bus", "panic");
}
/* Turn off rotator clock after checking rotator registers */
@@ -2132,6 +2219,17 @@
SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
BIT(XIN_WRITEBACK));
+ /*
+ * For debug purposes, disable clock gating, i.e. keep clocks always on
+ */
+ if (mdata->clk_always_on) {
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
+ 0xFFFF);
+ SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
+ }
+
return 0;
error:
@@ -2258,6 +2356,9 @@
mdata->nrt_vbif_dbg_bus_size =
ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
+ mdata->rot_dbg_bus = rot_dbgbus_r3;
+ mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
+
mdata->regdump = sde_rot_r3_regdump;
mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
@@ -2265,7 +2366,6 @@
/* features exposed via mdss h/w version */
if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
SDEROT_DBG("Supporting sys cache inline rotation\n");
- set_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map);
set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
@@ -2457,11 +2557,24 @@
struct sde_rot_entry *entry)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ struct sde_hw_rotator *hw_data;
int ret = 0;
u16 src_w, src_h, dst_w, dst_h;
struct sde_rotation_item *item = &entry->item;
struct sde_mdp_format_params *fmt;
+ if (!mgr || !entry || !mgr->hw_data) {
+ SDEROT_ERR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ hw_data = mgr->hw_data;
+
+ if (hw_data->maxlinewidth < item->src_rect.w) {
+ SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
+ return -EINVAL;
+ }
+
src_w = item->src_rect.w;
src_h = item->src_rect.h;
@@ -2483,6 +2596,11 @@
}
if ((src_w != dst_w) || (src_h != dst_h)) {
+ if (!dst_w || !dst_h) {
+ SDEROT_DBG("zero output width/height not support\n");
+ ret = -EINVAL;
+ goto dnsc_err;
+ }
if ((src_w % dst_w) || (src_h % dst_h)) {
SDEROT_DBG("non integral scale not support\n");
ret = -EINVAL;
@@ -2756,6 +2874,25 @@
}
/*
+ * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
+ * @mgr: Pointer to rotator manager
+ * return: maximum line width supported by hardware
+ */
+static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
+{
+ struct sde_hw_rotator *rot;
+
+ if (!mgr || !mgr->hw_data) {
+ SDEROT_ERR("null parameters\n");
+ return -EINVAL;
+ }
+
+ rot = mgr->hw_data;
+
+ return rot->maxlinewidth;
+}
+
+/*
* sde_hw_rotator_parse_dt - parse r3 specific device tree settings
* @hw_data: Pointer to rotator hw
* @dev: Pointer to platform device
@@ -2824,6 +2961,16 @@
hw_data->sbuf_headroom = data;
}
+ ret = of_property_read_u32(dev->dev.of_node,
+ "qcom,mdss-rot-linewidth", &data);
+ if (ret) {
+ ret = 0;
+ hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
+ } else {
+ SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
+ hw_data->maxlinewidth = data;
+ }
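+ /*
+ * Usage sketch (value illustrative): boards may cap the rotator
+ * line width in DT, e.g. qcom,mdss-rot-linewidth = <2560>; the
+ * parsed value feeds the src-width validation added earlier in
+ * this patch.
+ */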
+
return ret;
}
@@ -2871,6 +3018,7 @@
mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
+ mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
if (ret)
@@ -2934,9 +3082,9 @@
goto error_hw_rev_init;
/* set rotator CBCR to shutoff memory/periphery on clock off.*/
- clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
+ clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
CLKFLAG_NORETAIN_MEM);
- clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_ROT_CORE].clk,
+ clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
CLKFLAG_NORETAIN_PERIPH);
mdata->sde_rot_hw = rot;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index d1607d9..22eaa3f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -262,6 +262,7 @@
* @outpixfmts: array of supported output pixel formats in fourcc
* @num_outpixfmt: size of the supported output pixel formats array
* @downscale_caps: capability string of scaling
+ * @maxlinewidth: maximum line width supported
*/
struct sde_hw_rotator {
/* base */
@@ -322,6 +323,7 @@
u32 *outpixfmts;
u32 num_outpixfmt;
const char *downscale_caps;
+ u32 maxlinewidth;
};
/**
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 87a4ac8..7215fdf 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -920,6 +920,8 @@
pkt->session_id = hash32_ptr(session);
pkt->num_properties = 1;
+ dprintk(VIDC_DBG, "Setting HAL Property = 0x%x\n", ptype);
+
switch (ptype) {
case HAL_CONFIG_FRAME_RATE:
{
@@ -1115,6 +1117,14 @@
pkt->size += sizeof(u32) * 2;
break;
}
+ case HAL_PARAM_SECURE:
+ {
+ create_pkt_enable(pkt->rg_property_data,
+ HFI_PROPERTY_PARAM_SECURE_SESSION,
+ ((struct hal_enable *)pdata)->enable);
+ pkt->size += sizeof(u32) * 2;
+ break;
+ }
case HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER:
{
create_pkt_enable(pkt->rg_property_data,
@@ -1265,6 +1275,7 @@
hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
hal_quant->qpb << 16;
hfi->layer_id = hal_quant->layer_id;
+ hfi->enable = hal_quant->enable;
pkt->size += sizeof(u32) + sizeof(struct hfi_quantization);
break;
}
@@ -1297,18 +1308,6 @@
sizeof(struct hfi_quantization_range);
break;
}
- case HAL_PARAM_VENC_MAX_NUM_B_FRAMES:
- {
- struct hfi_max_num_b_frames *hfi;
-
- pkt->rg_property_data[0] =
- HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
- hfi = (struct hfi_max_num_b_frames *) &pkt->rg_property_data[1];
- memcpy(hfi, (struct hfi_max_num_b_frames *) pdata,
- sizeof(struct hfi_max_num_b_frames));
- pkt->size += sizeof(u32) + sizeof(struct hfi_max_num_b_frames);
- break;
- }
case HAL_CONFIG_VENC_INTRA_PERIOD:
{
struct hfi_intra_period *hfi;
@@ -1811,6 +1810,34 @@
pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
break;
}
+ case HAL_PARAM_VIDEO_CORES_USAGE:
+ {
+ struct hal_videocores_usage_info *hal = pdata;
+ struct hfi_videocores_usage_type *core_info =
+ (struct hfi_videocores_usage_type *)
+ &pkt->rg_property_data[1];
+
+ core_info->video_core_enable_mask = hal->video_core_enable_mask;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ pkt->size += sizeof(u32) + sizeof(*core_info);
+ break;
+ }
+ case HAL_PARAM_VIDEO_WORK_MODE:
+ {
+ struct hal_video_work_mode *hal = pdata;
+ struct hfi_video_work_mode *work_mode =
+ (struct hfi_video_work_mode *)
+ &pkt->rg_property_data[1];
+
+ work_mode->video_work_mode = hal->video_work_mode;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_WORK_MODE;
+ pkt->size += sizeof(u32) + sizeof(*work_mode);
+ break;
+ }
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.h b/drivers/media/platform/msm/vidc/hfi_packetization.h
index e0def0f..06c0574 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.h
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.h
@@ -18,9 +18,9 @@
#include "vidc_hfi.h"
#include "vidc_hfi_api.h"
-#define call_hfi_pkt_op(q, op, args...) \
+#define call_hfi_pkt_op(q, op, ...) \
(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ? \
- ((q)->pkt_ops->op(args)) : 0)
+ ((q)->pkt_ops->op(__VA_ARGS__)) : 0)
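+
+/*
+ * Usage sketch (op name illustrative): call_hfi_pkt_op(dev, sys_init, pkt)
+ * expands to dev->pkt_ops->sys_init(pkt) when the op is wired up, and
+ * evaluates to 0 otherwise.
+ */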
enum hfi_packetization_type {
HFI_PACKETIZATION_4XX,
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 00830cc..b424fbb 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -241,10 +241,8 @@
} while (num_properties_changed > 0);
}
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_EVENT_CHANGE,
- .response.event = event_notify,
- };
+ info->response_type = HAL_SESSION_EVENT_CHANGE;
+ info->response.event = event_notify;
return 0;
}
@@ -275,10 +273,8 @@
event_notify.packet_buffer = data->packet_buffer;
event_notify.extra_data_buffer = data->extra_data_buffer;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_EVENT_CHANGE,
- .response.event = event_notify,
- };
+ info->response_type = HAL_SESSION_EVENT_CHANGE;
+ info->response.event = event_notify;
return 0;
}
@@ -289,10 +285,8 @@
cmd_done.device_id = device_id;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SYS_ERROR,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SYS_ERROR;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -306,8 +300,8 @@
cmd_done.device_id = device_id;
cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
cmd_done.status = hfi_map_err_status(pkt->event_data1);
- dprintk(VIDC_INFO, "Received: SESSION_ERROR with event id : %d\n",
- pkt->event_data1);
+ dprintk(VIDC_INFO, "Received: SESSION_ERROR with event id : %#x %#x\n",
+ pkt->event_data1, pkt->event_data2);
switch (pkt->event_data1) {
case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
@@ -315,17 +309,15 @@
case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
cmd_done.status = VIDC_ERR_NONE;
dprintk(VIDC_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_RESPONSE_UNUSED,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_RESPONSE_UNUSED;
+ info->response.cmd = cmd_done;
return 0;
default:
- dprintk(VIDC_ERR, "HFI_EVENT_SESSION_ERROR\n");
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_ERROR,
- .response.cmd = cmd_done,
- };
+ dprintk(VIDC_ERR,
+ "HFI_EVENT_SESSION_ERROR: data1 %#x, data2 %#x\n",
+ pkt->event_data1, pkt->event_data2);
+ info->response_type = HAL_SESSION_ERROR;
+ info->response.cmd = cmd_done;
return 0;
}
}
@@ -403,10 +395,10 @@
cmd_done.session_id = NULL;
cmd_done.status = (u32)status;
cmd_done.size = sizeof(struct vidc_hal_sys_init_done);
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SYS_INIT_DONE,
- .response.cmd = cmd_done,
- };
+
+ info->response_type = HAL_SYS_INIT_DONE;
+ info->response.cmd = cmd_done;
+
return 0;
}
@@ -433,10 +425,8 @@
cmd_done.status = (u32) status;
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SYS_RELEASE_RESOURCE_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SYS_RELEASE_RESOURCE_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1162,10 +1152,8 @@
cmd_done.data.property.buf_req = buff_req;
cmd_done.size = sizeof(buff_req);
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_PROPERTY_INFO,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_PROPERTY_INFO;
+ info->response.cmd = cmd_done;
return 0;
default:
@@ -1197,10 +1185,8 @@
cmd_done.data.session_init_done = session_init_done;
cmd_done.size = sizeof(struct vidc_hal_session_init_done);
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_INIT_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_INIT_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1227,10 +1213,8 @@
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_LOAD_RESOURCE_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_LOAD_RESOURCE_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1272,10 +1256,8 @@
return -EINVAL;
}
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_FLUSH_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_FLUSH_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1323,10 +1305,8 @@
(u32)pkt->packet_buffer, -1, -1,
pkt->filled_len, pkt->offset);
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_ETB_DONE,
- .response.data = data_done,
- };
+ info->response_type = HAL_SESSION_ETB_DONE;
+ info->response.data = data_done;
return 0;
}
@@ -1450,10 +1430,8 @@
data_done.output_done.filled_len1,
data_done.output_done.offset1);
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_FTB_DONE,
- .response.data = data_done,
- };
+ info->response_type = HAL_SESSION_FTB_DONE;
+ info->response.data = data_done;
return 0;
}
@@ -1479,10 +1457,8 @@
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_START_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_START_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1507,10 +1483,8 @@
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_STOP_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_STOP_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1536,10 +1510,8 @@
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_RELEASE_RESOURCE_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_RELEASE_RESOURCE_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1571,10 +1543,8 @@
dprintk(VIDC_ERR, "invalid payload in rel_buff_done\n");
}
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_RELEASE_BUFFER_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1598,10 +1568,8 @@
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_END_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_END_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
@@ -1626,10 +1594,8 @@
cmd_done.status = hfi_map_err_status(pkt->error_type);
cmd_done.size = 0;
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_ABORT_DONE,
- .response.cmd = cmd_done,
- };
+ info->response_type = HAL_SESSION_ABORT_DONE;
+ info->response.cmd = cmd_done;
return 0;
}
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index a949c55..19a1e3f 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -202,8 +202,8 @@
unsigned long align = SZ_4K;
unsigned long ion_flags = 0;
-#ifndef CONFIG_ION
- hndl = ion_import_dma_buf(client->clnt, fd);
+#ifdef CONFIG_ION
+ hndl = ion_import_dma_buf_fd(client->clnt, fd);
#endif
dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
if (IS_ERR_OR_NULL(hndl)) {
@@ -476,8 +476,8 @@
clt, priv);
return false;
}
-#ifndef CONFIG_ION
- handle = ion_import_dma_buf(client->clnt, fd);
+#ifdef CONFIG_ION
+ handle = ion_import_dma_buf_fd(client->clnt, fd);
#endif
ret = handle == priv;
(!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0;
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index c82db74..c5c4269 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -135,6 +135,14 @@
return msm_vidc_s_ext_ctrl((void *)vidc_inst, a);
}
+int msm_v4l2_g_ext_ctrl(struct file *file, void *fh,
+ struct v4l2_ext_controls *a)
+{
+ struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+ return msm_vidc_g_ext_ctrl((void *)vidc_inst, a);
+}
+
int msm_v4l2_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *b)
{
@@ -250,6 +258,7 @@
.vidioc_g_ctrl = msm_v4l2_g_ctrl,
.vidioc_queryctrl = msm_v4l2_queryctrl,
.vidioc_s_ext_ctrls = msm_v4l2_s_ext_ctrl,
+ .vidioc_g_ext_ctrls = msm_v4l2_g_ext_ctrl,
.vidioc_subscribe_event = msm_v4l2_subscribe_event,
.vidioc_unsubscribe_event = msm_v4l2_unsubscribe_event,
.vidioc_decoder_cmd = msm_v4l2_decoder_cmd,
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 7c99e90..a86c677 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -23,7 +23,6 @@
#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
#define MB_SIZE_IN_PIXEL (16 * 16)
-#define MAX_OPERATING_FRAME_RATE (300 << 16)
#define OPERATING_FRAME_RATE_STEP (1 << 16)
static const char *const mpeg_video_stream_format[] = {
@@ -360,7 +359,7 @@
.name = "Set Decoder Operating rate",
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
- .maximum = MAX_OPERATING_FRAME_RATE,
+ .maximum = INT_MAX,
.default_value = 0,
.step = OPERATING_FRAME_RATE_STEP,
},
@@ -368,17 +367,6 @@
#define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
-static u32 get_frame_size_nv12(int plane,
- u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
static u32 get_frame_size_compressed_full_yuv(int plane,
u32 max_mbs_per_frame, u32 size_per_mb)
{
@@ -391,11 +379,6 @@
return (max_mbs_per_frame * size_per_mb * 3/2)/2;
}
-static u32 get_frame_size_nv12_ubwc_10bit(int plane, u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
-}
-
static u32 get_frame_size(struct msm_vidc_inst *inst,
const struct msm_vidc_format *fmt,
int fmt_type, int plane)
@@ -446,7 +429,7 @@
.name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
.description = "UBWC Y/CbCr 4:2:0 10bit",
.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
- .get_frame_size = get_frame_size_nv12_ubwc_10bit,
+ .get_frame_size = get_frame_size_tp10_ubwc,
.type = CAPTURE_PORT,
},
{
@@ -522,7 +505,7 @@
inst->prop.width[CAPTURE_PORT] == f->fmt.pix_mp.width &&
inst->prop.height[CAPTURE_PORT] ==
f->fmt.pix_mp.height) {
- dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+ dprintk(VIDC_DBG, "No change in CAPTURE port params\n");
return 0;
}
memcpy(&inst->fmts[fmt->type], fmt,
@@ -564,6 +547,8 @@
inst->bufq[CAPTURE_PORT].plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
+
+ rc = msm_comm_try_get_bufreqs(inst);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
@@ -590,7 +575,7 @@
inst->prop.width[OUTPUT_PORT] == f->fmt.pix_mp.width &&
inst->prop.height[OUTPUT_PORT] ==
f->fmt.pix_mp.height) {
- dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+ dprintk(VIDC_DBG, "No change in OUTPUT port params\n");
return 0;
}
inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
@@ -679,7 +664,7 @@
inst->bufq[OUTPUT_PORT].num_planes = 1;
inst->bufq[CAPTURE_PORT].num_planes = 1;
inst->prop.fps = DEFAULT_FPS;
- inst->operating_rate = 0;
+ inst->clk_data.operating_rate = 0;
memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
sizeof(struct msm_vidc_format));
memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
@@ -776,7 +761,10 @@
msm_dcvs_try_enable(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+ property_id = HAL_PARAM_SECURE;
inst->flags |= VIDC_SECURE;
+ property_val = !!(inst->flags & VIDC_SECURE);
+ pdata = &property_val;
dprintk(VIDC_DBG, "Setting secure mode to: %d\n",
!!(inst->flags & VIDC_SECURE));
break;
@@ -890,6 +878,7 @@
"Failed setting OUTPUT2 size : %d\n",
rc);
+ rc = msm_comm_try_get_bufreqs(inst);
break;
default:
dprintk(VIDC_ERR,
@@ -962,8 +951,12 @@
case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
dprintk(VIDC_DBG,
"inst(%pK) operating rate changed from %d to %d\n",
- inst, inst->operating_rate >> 16, ctrl->val >> 16);
- inst->operating_rate = ctrl->val;
+ inst, inst->clk_data.operating_rate >> 16,
+ ctrl->val >> 16);
+ inst->clk_data.operating_rate = ctrl->val;
+
+ msm_vidc_update_operating_rate(inst);
+
break;
default:
break;
@@ -974,8 +967,8 @@
if (!rc && property_id) {
dprintk(VIDC_DBG,
- "Control: HAL property=%#x,ctrl: id=%#x,value=%#x\n",
- property_id, ctrl->id, ctrl->val);
+ "Control: Name = %s, ID = 0x%x Value = %d\n",
+ ctrl->name, ctrl->id, ctrl->val);
rc = call_hfi_op(hdev, session_set_property, (void *)
inst->session, property_id, pdata);
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 13cc1b2..c0b6683 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -23,7 +23,6 @@
#define DEFAULT_BIT_RATE 64000
#define BIT_RATE_STEP 100
#define DEFAULT_FRAME_RATE 15
-#define MAX_OPERATING_FRAME_RATE (300 << 16)
#define OPERATING_FRAME_RATE_STEP (1 << 16)
#define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
#define MIN_SLICE_BYTE_SIZE 512
@@ -286,6 +285,17 @@
.qmenu = NULL,
},
{
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK,
+ .name = "QP mask for diff frame types",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = 7,
+ .default_value = 7,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
.id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
.name = "Intra Period for B frames",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -352,20 +362,6 @@
.qmenu = mpeg_video_rate_control,
},
{
- .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
- .name = "Bitrate Control",
- .type = V4L2_CTRL_TYPE_MENU,
- .minimum = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
- .maximum = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
- .default_value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
- .step = 0,
- .menu_skip_mask = ~(
- (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
- (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
- ),
- .qmenu = NULL,
- },
- {
.id = V4L2_CID_MPEG_VIDEO_BITRATE,
.name = "Bit Rate",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -822,7 +818,7 @@
.name = "Layer ID for different settings",
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
- .maximum = 6,
+ .maximum = MSM_VIDC_ALL_LAYER_ID,
.default_value = 0,
.step = 1,
.qmenu = NULL,
@@ -881,7 +877,7 @@
.name = "Set Encoder Operating rate",
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
- .maximum = MAX_OPERATING_FRAME_RATE,
+ .maximum = INT_MAX,
.default_value = 0,
.step = OPERATING_FRAME_RATE_STEP,
},
@@ -997,26 +993,6 @@
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
-static u32 get_frame_size_nv12(int plane, u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
-static u32 get_frame_size_rgba(int plane, u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
-}
-
-static u32 get_frame_size_nv21(int plane, u32 height, u32 width)
-{
- return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
-}
-
static u32 get_frame_size_compressed(int plane, u32 height, u32 width)
{
int sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2;
@@ -1074,6 +1050,13 @@
.get_frame_size = get_frame_size_nv21,
.type = OUTPUT_PORT,
},
+ {
+ .name = "TP10 UBWC 4:2:0",
+ .description = "TP10 UBWC 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
+ .get_frame_size = get_frame_size_tp10_ubwc,
+ .type = OUTPUT_PORT,
+ },
};
static int msm_venc_set_csc(struct msm_vidc_inst *inst);
@@ -1093,7 +1076,7 @@
if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8)
return 0;
- num_enh_layers = layers ? : 0;
+ num_enh_layers = layers ? layers : 0;
dprintk(VIDC_DBG, "%s Hier-P in firmware\n",
num_enh_layers ? "Enable" : "Disable");
@@ -1109,49 +1092,6 @@
return rc;
}
-static inline int msm_venc_power_save_mode_enable(struct msm_vidc_inst *inst)
-{
- u32 rc = 0;
- u32 prop_id = 0, power_save_min = 0, power_save_max = 0, inst_load = 0;
- void *pdata = NULL;
- struct hfi_device *hdev = NULL;
- enum hal_perf_mode venc_mode;
- enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
- LOAD_CALC_IGNORE_THUMBNAIL_LOAD;
-
- if (!inst || !inst->core || !inst->core->device) {
- dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
- return -EINVAL;
- }
-
- inst_load = msm_comm_get_inst_load(inst, quirks);
- power_save_min = inst->capability.mbs_per_sec_power_save.min;
- power_save_max = inst->capability.mbs_per_sec_power_save.max;
-
- if (!power_save_min || !power_save_max)
- return rc;
-
- hdev = inst->core->device;
- if (inst_load >= power_save_min && inst_load <= power_save_max) {
- prop_id = HAL_CONFIG_VENC_PERF_MODE;
- venc_mode = HAL_PERF_MODE_POWER_SAVE;
- pdata = &venc_mode;
- rc = call_hfi_op(hdev, session_set_property,
- (void *)inst->session, prop_id, pdata);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed to set power save mode for inst: %pK\n",
- __func__, inst);
- goto fail_power_mode_set;
- }
- inst->flags |= VIDC_LOW_POWER;
- dprintk(VIDC_INFO, "Power Save Mode set for inst: %pK\n", inst);
- }
-
-fail_power_mode_set:
- return rc;
-}
-
static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
struct v4l2_ctrl **cluster, int ncontrols)
{
@@ -1244,7 +1184,6 @@
case V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES:
{
int num_p, num_b;
- u32 max_num_b_frames;
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES);
num_b = temp_ctrl->val;
@@ -1257,34 +1196,10 @@
else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
num_b = ctrl->val;
- max_num_b_frames = num_b ? MAX_NUM_B_FRAMES : 0;
- property_id = HAL_PARAM_VENC_MAX_NUM_B_FRAMES;
- pdata = &max_num_b_frames;
- rc = call_hfi_op(hdev, session_set_property,
- (void *)inst->session, property_id, pdata);
- if (rc) {
- dprintk(VIDC_ERR,
- "Failed : Setprop MAX_NUM_B_FRAMES %d\n",
- rc);
- break;
- }
-
property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
intra_period.pframes = num_p;
intra_period.bframes = num_b;
- /*
- *Incase firmware does not have B-Frame support,
- *offload the b-frame count to p-frame to make up
- *for the requested Intraperiod
- */
- if (!inst->capability.bframe.max) {
- intra_period.pframes = num_p + num_b;
- intra_period.bframes = 0;
- dprintk(VIDC_DBG,
- "No bframe support, changing pframe from %d to %d\n",
- num_p, intra_period.pframes);
- }
pdata = &intra_period;
break;
}
@@ -1294,59 +1209,10 @@
pdata = &request_iframe;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL:
- case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
{
- int final_mode = 0;
- struct v4l2_ctrl update_ctrl = {.id = 0};
-
- /* V4L2_CID_MPEG_VIDEO_BITRATE_MODE and _RATE_CONTROL
- * manipulate the same thing. If one control's state
- * changes, try to mirror the state in the other control's
- * value
- */
- if (ctrl->id == V4L2_CID_MPEG_VIDEO_BITRATE_MODE) {
- if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
- final_mode = HAL_RATE_CONTROL_VBR_CFR;
- update_ctrl.val =
- V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR;
- } else {/* ...if (ctrl->val == _BITRATE_MODE_CBR) */
- final_mode = HAL_RATE_CONTROL_CBR_CFR;
- update_ctrl.val =
- V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR;
- }
-
- update_ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL;
-
- } else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL) {
- switch (ctrl->val) {
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF:
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR:
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
- update_ctrl.val =
- V4L2_MPEG_VIDEO_BITRATE_MODE_VBR;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR:
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR:
- case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR:
- update_ctrl.val =
- V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
- break;
- }
-
- final_mode = ctrl->val;
- update_ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
- }
-
- if (update_ctrl.id) {
- temp_ctrl = TRY_GET_CTRL(update_ctrl.id);
- temp_ctrl->val = update_ctrl.val;
- }
-
property_id = HAL_PARAM_VENC_RATE_CONTROL;
- property_val = final_mode;
+ property_val = ctrl->val;
pdata = &property_val;
-
break;
}
case V4L2_CID_MPEG_VIDEO_BITRATE:
@@ -1355,7 +1221,7 @@
bitrate.bit_rate = ctrl->val;
bitrate.layer_id = 0;
pdata = &bitrate;
- inst->bitrate = ctrl->val;
+ inst->clk_data.bitrate = ctrl->val;
break;
}
case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -1632,6 +1498,9 @@
break;
case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
inst->flags |= VIDC_SECURE;
+ property_id = HAL_PARAM_SECURE;
+ property_val = !!(inst->flags & VIDC_SECURE);
+ pdata = &property_val;
dprintk(VIDC_INFO, "Setting secure mode to: %d\n",
!!(inst->flags & VIDC_SECURE));
break;
@@ -1772,43 +1641,65 @@
pdata = &baselayerid;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP: {
- struct v4l2_ctrl *qpp, *qpb;
+ struct v4l2_ctrl *qpp, *qpb, *mask;
property_id = HAL_CONFIG_VENC_FRAME_QP;
qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+ mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
quant.qpi = ctrl->val;
quant.qpp = qpp->val;
quant.qpb = qpb->val;
+ quant.enable = mask->val;
quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
pdata = &quant;
break;
}
case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP: {
- struct v4l2_ctrl *qpi, *qpb;
+ struct v4l2_ctrl *qpi, *qpb, *mask;
property_id = HAL_CONFIG_VENC_FRAME_QP;
qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+ mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
quant.qpp = ctrl->val;
quant.qpi = qpi->val;
quant.qpb = qpb->val;
+ quant.enable = mask->val;
quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
pdata = &quant;
break;
}
case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP: {
- struct v4l2_ctrl *qpp, *qpi;
+ struct v4l2_ctrl *qpp, *qpi, *mask;
property_id = HAL_CONFIG_VENC_FRAME_QP;
qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
+ mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
quant.qpb = ctrl->val;
quant.qpp = qpp->val;
quant.qpi = qpi->val;
+ quant.enable = mask->val;
+ quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
+ pdata = &quant;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK: {
+ struct v4l2_ctrl *qpi, *qpp, *qpb;
+
+ property_id = HAL_CONFIG_VENC_FRAME_QP;
+ qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
+ qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
+ qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+
+ quant.qpi = qpi->val;
+ quant.qpp = qpp->val;
+ quant.qpb = qpb->val;
+ quant.enable = ctrl->val;
quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
pdata = &quant;
break;
@@ -1842,8 +1733,12 @@
case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
dprintk(VIDC_DBG,
"inst(%pK) operating rate changed from %d to %d\n",
- inst, inst->operating_rate >> 16, ctrl->val >> 16);
- inst->operating_rate = ctrl->val;
+ inst, inst->clk_data.operating_rate >> 16,
+ ctrl->val >> 16);
+ inst->clk_data.operating_rate = ctrl->val;
+
+ msm_vidc_update_operating_rate(inst);
+
break;
case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
{
@@ -1926,6 +1821,7 @@
else
enable.enable = 0;
pdata = &enable;
+ inst->clk_data.low_latency_mode = (bool) enable.enable;
break;
}
case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
@@ -1963,9 +1859,9 @@
#undef TRY_GET_CTRL
if (!rc && property_id) {
- dprintk(VIDC_DBG, "Control: HAL property=%x,ctrl_value=%d\n",
- property_id,
- ctrl->val);
+ dprintk(VIDC_DBG,
+ "Control: Name = %s, ID = 0x%x Value = %d\n",
+ ctrl->name, ctrl->id, ctrl->val);
rc = call_hfi_op(hdev, session_set_property,
(void *)inst->session, property_id, pdata);
}
@@ -1976,11 +1872,11 @@
int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
struct v4l2_ext_controls *ctrl)
{
- int rc = 0, i, j = 0;
+ int rc = 0, i;
struct v4l2_ext_control *control;
struct hfi_device *hdev;
struct hal_ltr_mode ltr_mode;
- u32 property_id = 0, layer_id = MSM_VIDC_ALL_LAYER_ID;
+ u32 property_id = 0;
void *pdata = NULL;
struct msm_vidc_capability *cap = NULL;
struct hal_aspect_ratio sar;
@@ -2044,32 +1940,6 @@
property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
pdata = &sar;
break;
- case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
- {
- if (control[i].value) {
- bitrate.layer_id = i;
- bitrate.bit_rate = control[i].value;
- property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
- pdata = &bitrate;
- dprintk(VIDC_DBG, "bitrate for layer(%d)=%d\n",
- i, bitrate.bit_rate);
- rc = call_hfi_op(hdev, session_set_property,
- (void *)inst->session, property_id,
- pdata);
- if (rc) {
- dprintk(VIDC_DBG, "prop %x failed\n",
- property_id);
- return rc;
- }
- if (i == MAX_HYBRID_HIER_P_LAYERS - 1) {
- dprintk(VIDC_DBG, "HAL property=%x\n",
- property_id);
- property_id = 0;
- rc = 0;
- }
- }
- break;
- }
case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH:
property_id = HAL_CONFIG_VENC_BLUR_RESOLUTION;
blur_res.width = control[i].value;
@@ -2084,92 +1954,83 @@
pdata = &blur_res;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID:
- j = i;
- layer_id = control[j].value;
- do {
- switch (control[j].id) {
- case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
- qp.qpi = control[j].value;
- qp.layer_id = layer_id;
- property_id =
- HAL_CONFIG_VENC_FRAME_QP;
- pdata = &qp;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
- qp.qpp = control[j].value;
- qp.layer_id = layer_id;
- property_id =
- HAL_CONFIG_VENC_FRAME_QP;
- pdata = &qp;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
- qp.qpb = control[j].value;
- qp.layer_id = layer_id;
- property_id =
- HAL_CONFIG_VENC_FRAME_QP;
- pdata = &qp;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
- qp_range.qpi_min = control[j].value;
- qp_range.layer_id = layer_id;
- property_id =
- HAL_PARAM_VENC_SESSION_QP_RANGE;
- pdata = &qp_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
- qp_range.qpp_min = control[j].value;
- qp_range.layer_id = layer_id;
- property_id =
- HAL_PARAM_VENC_SESSION_QP_RANGE;
- pdata = &qp_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
- qp_range.qpb_min = control[j].value;
- qp_range.layer_id = layer_id;
- property_id =
- HAL_PARAM_VENC_SESSION_QP_RANGE;
- pdata = &qp_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
- qp_range.qpi_max = control[j].value;
- qp_range.layer_id = layer_id;
- property_id =
- HAL_PARAM_VENC_SESSION_QP_RANGE;
- pdata = &qp_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
- qp_range.qpp_max = control[j].value;
- qp_range.layer_id = layer_id;
- property_id =
- HAL_PARAM_VENC_SESSION_QP_RANGE;
- pdata = &qp_range;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
- qp_range.qpb_max = control[j].value;
- qp_range.layer_id = layer_id;
- property_id =
- HAL_PARAM_VENC_SESSION_QP_RANGE;
- pdata = &qp_range;
- break;
- }
- j++;
- } while ((j < ctrl->count) &&
- control[j].id !=
- V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID);
- if (!rc && property_id) {
- dprintk(VIDC_DBG, "Control: HAL property=%x\n",
- property_id);
- rc = call_hfi_op(hdev, session_set_property,
- (void *)inst->session,
- property_id, pdata);
- if (rc) {
- dprintk(VIDC_ERR, "prop %x failed\n",
- property_id);
- return rc;
- }
- property_id = 0;
+ qp.layer_id = control[i].value;
+ /* Enable QP for all frame types by default */
+ qp.enable = 7;
+ qp_range.layer_id = control[i].value;
+ bitrate.layer_id = control[i].value;
+ i++;
+ while (i < ctrl->count) {
+ switch (control[i].id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
+ qp.qpi = control[i].value;
+ property_id =
+ HAL_CONFIG_VENC_FRAME_QP;
+ pdata = &qp;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
+ qp.qpp = control[i].value;
+ property_id =
+ HAL_CONFIG_VENC_FRAME_QP;
+ pdata = &qp;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
+ qp.qpb = control[i].value;
+ property_id =
+ HAL_CONFIG_VENC_FRAME_QP;
+ pdata = &qp;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK:
+ qp.enable = control[i].value;
+ property_id =
+ HAL_CONFIG_VENC_FRAME_QP;
+ pdata = &qp;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
+ qp_range.qpi_min = control[i].value;
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP_RANGE;
+ pdata = &qp_range;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
+ qp_range.qpp_min = control[i].value;
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP_RANGE;
+ pdata = &qp_range;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
+ qp_range.qpb_min = control[i].value;
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP_RANGE;
+ pdata = &qp_range;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
+ qp_range.qpi_max = control[i].value;
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP_RANGE;
+ pdata = &qp_range;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
+ qp_range.qpp_max = control[i].value;
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP_RANGE;
+ pdata = &qp_range;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
+ qp_range.qpb_max = control[i].value;
+ property_id =
+ HAL_PARAM_VENC_SESSION_QP_RANGE;
+ pdata = &qp_range;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
+ bitrate.bit_rate = control[i].value;
+ property_id =
+ HAL_CONFIG_VENC_TARGET_BITRATE;
+ pdata = &bitrate;
+ break;
}
- i = j - 1;
+ i++;
+ }
break;
default:
dprintk(VIDC_ERR, "Invalid id set: %d\n",
@@ -2214,7 +2075,7 @@
/* To start with, both ports are 1 plane each */
inst->bufq[OUTPUT_PORT].num_planes = 1;
inst->bufq[CAPTURE_PORT].num_planes = 1;
- inst->operating_rate = 0;
+ inst->clk_data.operating_rate = 0;
memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
sizeof(struct msm_vidc_format));
@@ -2286,6 +2147,7 @@
struct hfi_device *hdev;
int extra_idx = 0, i = 0;
struct hal_buffer_requirements *buff_req_buffer;
+ struct hal_frame_size frame_sz;
if (!inst || !f) {
dprintk(VIDC_ERR,
@@ -2324,6 +2186,19 @@
inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
+ frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+ frame_sz.width = inst->prop.width[CAPTURE_PORT];
+ frame_sz.height = inst->prop.height[CAPTURE_PORT];
+ dprintk(VIDC_DBG, "CAPTURE port width = %d, height = %d\n",
+ frame_sz.width, frame_sz.height);
+ rc = call_hfi_op(hdev, session_set_property, (void *)
+ inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to set framesize for CAPTURE port\n");
+ goto exit;
+ }
+
rc = msm_comm_try_get_bufreqs(inst);
if (rc) {
dprintk(VIDC_ERR,
@@ -2372,7 +2247,7 @@
frame_sz.buffer_type = HAL_BUFFER_INPUT;
frame_sz.width = inst->prop.width[OUTPUT_PORT];
frame_sz.height = inst->prop.height[OUTPUT_PORT];
- dprintk(VIDC_DBG, "width = %d, height = %d\n",
+ dprintk(VIDC_DBG, "OUTPUT port width = %d, height = %d\n",
frame_sz.width, frame_sz.height);
rc = call_hfi_op(hdev, session_set_property, (void *)
inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
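
Note: the reworked LAYER_ID handling in msm_venc_s_ext_ctrl above seeds qp.enable = 7 before parsing the per-layer QP controls. A minimal sketch of that bitmask convention, assuming bit 0 maps to I frames, bit 1 to P frames and bit 2 to B frames (the macro and helper names below are illustrative, not driver symbols):

    /*
     * Illustrative sketch only: the driver carries the mask in qp.enable
     * and defaults to 7, i.e. I, P and B frame QPs all enabled. Assumed
     * bit layout: bit 0 = I, bit 1 = P, bit 2 = B.
     */
    #define QP_ENABLE_I	(1 << 0)
    #define QP_ENABLE_P	(1 << 1)
    #define QP_ENABLE_B	(1 << 2)
    #define QP_ENABLE_ALL	(QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B) /* == 7 */

    static inline bool qp_enabled_for_bframes(u32 enable)
    {
    	return enable & QP_ENABLE_B;
    }
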
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 114a702..576809b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -26,6 +26,11 @@
#define MAX_EVENTS 30
+static int try_get_ctrl(struct msm_vidc_inst *inst,
+ struct v4l2_ctrl *ctrl);
+static int msm_vidc_get_count(struct msm_vidc_inst *inst,
+ struct v4l2_ctrl *ctrl);
+
static int get_poll_flags(void *instance)
{
struct msm_vidc_inst *inst = instance;
@@ -150,6 +155,7 @@
case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS:
msm_vidc_ctrl_get_range(ctrl, &inst->capability.hier_p);
break;
+ case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
case V4L2_CID_MPEG_VIDEO_BITRATE:
msm_vidc_ctrl_get_range(ctrl, &inst->capability.bitrate);
break;
@@ -253,14 +259,57 @@
int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
{
struct msm_vidc_inst *inst = instance;
+ struct v4l2_ctrl *ctrl = NULL;
+ int rc = 0;
if (!inst || !control)
return -EINVAL;
- return msm_comm_g_ctrl(instance, control);
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler, control->id);
+ if (ctrl) {
+ rc = try_get_ctrl(inst, ctrl);
+ if (!rc)
+ control->value = ctrl->val;
+ }
+
+ return rc;
}
EXPORT_SYMBOL(msm_vidc_g_ctrl);
+int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
+{
+ struct msm_vidc_inst *inst = instance;
+ struct v4l2_ext_control *ext_control;
+ struct v4l2_ctrl ctrl;
+ int i = 0, rc = 0;
+
+ if (!inst || !control)
+ return -EINVAL;
+
+ ext_control = control->controls;
+
+ for (i = 0; i < control->count; i++) {
+ switch (ext_control[i].id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ ctrl.id = ext_control[i].id;
+ ctrl.val = ext_control[i].value;
+
+ msm_vidc_get_count(inst, &ctrl);
+ ext_control[i].value = ctrl.val;
+ break;
+ default:
+ dprintk(VIDC_ERR,
+ "This control %x is not supported yet\n",
+ ext_control[i].id);
+ rc = -EINVAL;
+ break;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_vidc_g_ext_ctrl);
+
int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
{
struct msm_vidc_inst *inst = instance;
@@ -1005,12 +1054,6 @@
b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
b->m.planes[i].reserved[0] = buffer_info->fd[i];
b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
- if (!b->m.planes[i].m.userptr) {
- dprintk(VIDC_ERR,
- "%s: Failed to find user virtual address, %#lx, %d, %d\n",
- __func__, b->m.planes[i].m.userptr, b->type, i);
- return -EINVAL;
- }
}
if (!buffer_info) {
@@ -1180,6 +1223,8 @@
buf_count.buffer_type = type;
buf_count.buffer_count_actual = act_count;
buf_count.buffer_count_min_host = host_count;
+ dprintk(VIDC_DBG, "%s : Act count = %d Host count = %d\n",
+ __func__, act_count, host_count);
rc = call_hfi_op(hdev, session_set_property,
inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
if (rc)
@@ -1221,21 +1266,22 @@
HAL_BUFFER_INPUT);
return -EINVAL;
}
- if (*num_buffers < bufreq->buffer_count_actual) {
+ if (*num_buffers < bufreq->buffer_count_min_host) {
dprintk(VIDC_ERR,
"Invalid parameters : Req = %d Act = %d\n",
- *num_buffers, bufreq->buffer_count_actual);
+ *num_buffers, bufreq->buffer_count_min_host);
return -EINVAL;
}
*num_planes = inst->bufq[OUTPUT_PORT].num_planes;
if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
- *num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+ bufreq->buffer_count_actual = *num_buffers =
+ MIN_NUM_OUTPUT_BUFFERS;
for (i = 0; i < *num_planes; i++)
sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i];
bufreq->buffer_count_actual = *num_buffers;
- rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+ rc = set_buffer_count(inst, bufreq->buffer_count_actual,
*num_buffers, HAL_BUFFER_INPUT);
}
@@ -1250,22 +1296,27 @@
buffer_type);
return -EINVAL;
}
- if (*num_buffers < bufreq->buffer_count_actual) {
- dprintk(VIDC_ERR,
- "Invalid parameters : Req = %d Act = %d\n",
- *num_buffers, bufreq->buffer_count_actual);
- return -EINVAL;
+ if (inst->session_type != MSM_VIDC_DECODER &&
+ inst->state > MSM_VIDC_LOAD_RESOURCES_DONE) {
+ if (*num_buffers < bufreq->buffer_count_min_host) {
+ dprintk(VIDC_ERR,
+ "Invalid parameters : Req = %d Act = %d\n",
+ *num_buffers,
+ bufreq->buffer_count_min_host);
+ return -EINVAL;
+ }
}
*num_planes = inst->bufq[CAPTURE_PORT].num_planes;
if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
*num_buffers > MAX_NUM_CAPTURE_BUFFERS)
- *num_buffers = MIN_NUM_CAPTURE_BUFFERS;
+ bufreq->buffer_count_actual = *num_buffers =
+ MIN_NUM_CAPTURE_BUFFERS;
for (i = 0; i < *num_planes; i++)
sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i];
bufreq->buffer_count_actual = *num_buffers;
- rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+ rc = set_buffer_count(inst, bufreq->buffer_count_actual,
*num_buffers, buffer_type);
}
break;
@@ -1277,36 +1328,44 @@
return rc;
}
-static inline int msm_vidc_decide_core_and_power_mode(
- struct msm_vidc_inst *inst)
-{
- dprintk(VIDC_DBG,
- "Core selection is not yet implemented for inst = %pK\n",
- inst);
- return 0;
-}
static inline int msm_vidc_verify_buffer_counts(struct msm_vidc_inst *inst)
{
int rc = 0, i = 0;
+ /* For decoder, no need to sanity-check until LOAD_RESOURCES */
+ if (inst->session_type == MSM_VIDC_DECODER &&
+ inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
+ dprintk(VIDC_DBG,
+ "No need to verify buffer counts : %pK\n", inst);
+ return 0;
+ }
+
for (i = 0; i < HAL_BUFFER_MAX; i++) {
struct hal_buffer_requirements *req = &inst->buff_req.buffer[i];
- dprintk(VIDC_DBG, "Verifying Buffer : %d\n", req->buffer_type);
- if (!req ||
- req->buffer_count_actual < req->buffer_count_min_host ||
- req->buffer_count_min_host < req->buffer_count_min) {
- dprintk(VIDC_ERR, "Invalid data : Counts mismatch\n");
- dprintk(VIDC_ERR,
- "Min Count = %d ", req->buffer_count_min);
- dprintk(VIDC_ERR,
- "Min Host Count = %d ",
- req->buffer_count_min_host);
- dprintk(VIDC_ERR,
- "Min Actual Count = %d\n",
- req->buffer_count_actual);
- rc = -EINVAL;
- break;
+ if (req && (msm_comm_get_hal_output_buffer(inst) ==
+ req->buffer_type)) {
+ dprintk(VIDC_DBG, "Verifying Buffer : %d\n",
+ req->buffer_type);
+ if (req->buffer_count_actual <
+ req->buffer_count_min_host ||
+ req->buffer_count_min_host <
+ req->buffer_count_min) {
+
+ dprintk(VIDC_ERR,
+ "Invalid data : Counts mismatch\n");
+ dprintk(VIDC_ERR,
+ "Min Count = %d ",
+ req->buffer_count_min);
+ dprintk(VIDC_ERR,
+ "Min Host Count = %d ",
+ req->buffer_count_min_host);
+ dprintk(VIDC_ERR,
+ "Min Actual Count = %d\n",
+ req->buffer_count_actual);
+ rc = -EINVAL;
+ break;
+ }
}
}
return rc;
@@ -1329,15 +1388,22 @@
goto fail_start;
}
+ /* Decide work mode for current session */
+ rc = msm_vidc_decide_work_mode(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to decide work mode for session %pK\n", inst);
+ goto fail_start;
+ }
+
/* Assign Core and LP mode for current session */
rc = msm_vidc_decide_core_and_power_mode(inst);
if (rc) {
dprintk(VIDC_ERR,
- "This session can't be submitted to HW%pK\n", inst);
+ "This session can't be submitted to HW %pK\n", inst);
goto fail_start;
}
-
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
b.buffer_type = HAL_BUFFER_OUTPUT2;
@@ -1352,7 +1418,7 @@
rc = msm_comm_try_get_bufreqs(inst);
- /* Check if current session is under HW capability */
+ /* Verify if buffer counts are correct */
rc = msm_vidc_verify_buffer_counts(inst);
if (rc) {
dprintk(VIDC_ERR,
@@ -1393,18 +1459,9 @@
* - v4l2 client issues CONTINUE to firmware to resume decoding of
* submitted ETBs.
*/
- if (inst->in_reconfig) {
- dprintk(VIDC_DBG, "send session_continue after reconfig\n");
- rc = call_hfi_op(hdev, session_continue,
- (void *) inst->session);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s - failed to send session_continue\n",
- __func__);
- goto fail_start;
- }
- }
- inst->in_reconfig = false;
+ rc = msm_comm_session_continue(inst);
+ if (rc)
+ goto fail_start;
msm_comm_scale_clocks_and_bus(inst);
@@ -1414,7 +1471,9 @@
"Failed to move inst: %pK to start done state\n", inst);
goto fail_start;
}
- msm_dcvs_init(inst);
+
+ msm_clock_data_reset(inst);
+
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
rc = msm_comm_queue_output_buffers(inst);
@@ -1440,7 +1499,6 @@
return rc;
}
-
static int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct msm_vidc_inst *inst;
@@ -1501,6 +1559,9 @@
dprintk(VIDC_ERR,
"Failed to move inst: %pK to state %d\n",
inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+
+ msm_clock_data_reset(inst);
+
return rc;
}
@@ -1728,7 +1789,7 @@
}
-static int msm_vdec_get_count(struct msm_vidc_inst *inst,
+static int msm_vidc_get_count(struct msm_vidc_inst *inst,
struct v4l2_ctrl *ctrl)
{
int rc = 0;
@@ -1749,15 +1810,19 @@
}
if (ctrl->val > bufreq->buffer_count_min_host) {
dprintk(VIDC_DBG,
- "Interesting : Usually shouldn't happen\n");
+ "Buffer count Host changed from %d to %d\n",
+ bufreq->buffer_count_min_host,
+ ctrl->val);
bufreq->buffer_count_min_host = ctrl->val;
+ } else {
+ ctrl->val = bufreq->buffer_count_min_host;
}
- rc = set_actual_buffer_count(inst, ctrl->val,
+ rc = set_actual_buffer_count(inst,
+ bufreq->buffer_count_min_host,
HAL_BUFFER_INPUT);
return rc;
} else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) {
- int count = 0;
buffer_type = msm_comm_get_hal_output_buffer(inst);
bufreq = get_buff_req_buffer(inst,
@@ -1774,7 +1839,7 @@
else
return 0;
}
- count = bufreq->buffer_count_min_host;
+
if (inst->in_reconfig) {
rc = msm_comm_try_get_bufreqs(inst);
@@ -1786,21 +1851,28 @@
buffer_type);
return 0;
}
- newreq->buffer_count_min_host = count =
- newreq->buffer_count_min +
- msm_dcvs_get_extra_buff_count(inst);
+ ctrl->val = newreq->buffer_count_min;
}
- if (!inst->in_reconfig &&
+ if (inst->session_type == MSM_VIDC_DECODER &&
+ !inst->in_reconfig &&
inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
- dprintk(VIDC_DBG, "Clients will correct this\n");
- rc = set_actual_buffer_count(inst, ctrl->val,
- buffer_type);
+ dprintk(VIDC_DBG,
+ "Clients updates Buffer count from %d to %d\n",
+ bufreq->buffer_count_min_host, ctrl->val);
bufreq->buffer_count_min_host = ctrl->val;
- return 0;
}
- bufreq->buffer_count_min_host = ctrl->val = count;
- rc = set_actual_buffer_count(inst, ctrl->val,
- buffer_type);
+ if (ctrl->val > bufreq->buffer_count_min_host) {
+ dprintk(VIDC_DBG,
+ "Buffer count Host changed from %d to %d\n",
+ bufreq->buffer_count_min_host,
+ ctrl->val);
+ bufreq->buffer_count_min_host = ctrl->val;
+ } else {
+ ctrl->val = bufreq->buffer_count_min_host;
+ }
+ rc = set_actual_buffer_count(inst,
+ bufreq->buffer_count_min_host,
+ HAL_BUFFER_OUTPUT);
return rc;
}
@@ -1810,34 +1882,52 @@
static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
{
int rc = 0;
-
- /*
- * HACK: unlock the control prior to querying the hardware. Otherwise
- * lower level code that attempts to do g_ctrl() will end up deadlocking
- * us.
- */
- v4l2_ctrl_unlock(ctrl);
+ struct hal_buffer_requirements *bufreq = NULL;
+ enum hal_buffer buffer_type;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
ctrl->val = inst->profile;
- break;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+ case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
ctrl->val = inst->level;
- break;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
ctrl->val = inst->entropy_mode;
- break;
+ break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (inst->in_reconfig)
+ msm_comm_try_get_bufreqs(inst);
+
+ buffer_type = msm_comm_get_hal_output_buffer(inst);
+ bufreq = get_buff_req_buffer(inst,
+ buffer_type);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed to find bufreqs for buffer type = %d\n",
+ buffer_type);
+ return -EINVAL;
+ }
+ ctrl->val = bufreq->buffer_count_min_host;
+ break;
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
- rc = msm_vdec_get_count(inst, ctrl);
+ bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed to find bufreqs for buffer type = %d\n",
+ HAL_BUFFER_INPUT);
+ return -EINVAL;
+ }
+ ctrl->val = bufreq->buffer_count_min_host;
break;
default:
/*
@@ -1846,7 +1936,6 @@
*/
break;
}
- v4l2_ctrl_lock(ctrl);
return rc;
}
@@ -1944,9 +2033,11 @@
inst->session_type = session_type;
inst->state = MSM_VIDC_CORE_UNINIT_DONE;
inst->core = core;
- inst->freq = 0;
+ inst->clk_data.min_freq = 0;
+ inst->clk_data.curr_freq = 0;
+ inst->clk_data.bitrate = 0;
+ inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
- inst->bitrate = 0;
inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
inst->colour_space = MSM_VIDC_BT601_6_525;
inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -2003,12 +2094,15 @@
goto fail_init;
}
+ msm_dcvs_try_enable(inst);
if (msm_vidc_check_for_inst_overload(core)) {
dprintk(VIDC_ERR,
"Instance count reached Max limit, rejecting session");
goto fail_init;
}
+ msm_comm_scale_clocks_and_bus(inst);
+
inst->debugfs_root =
msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 70427d3..cd518fb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -23,17 +23,12 @@
struct msm_vidc_inst *inst = NULL;
struct vidc_bus_vote_data *vote_data = NULL;
- if (!core) {
+ if (!core || !core->device) {
dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
return -EINVAL;
}
hdev = core->device;
- if (!hdev) {
- dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
- __func__, hdev);
- return -EINVAL;
- }
mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list)
@@ -65,12 +60,17 @@
vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
inst->prop.height[OUTPUT_PORT]);
- if (inst->operating_rate)
- vote_data[i].fps = (inst->operating_rate >> 16) ?
- inst->operating_rate >> 16 : 1;
+ if (inst->clk_data.operating_rate)
+ vote_data[i].fps =
+ (inst->clk_data.operating_rate >> 16) ?
+ inst->clk_data.operating_rate >> 16 : 1;
else
vote_data[i].fps = inst->prop.fps;
+ if (!msm_vidc_clock_scaling ||
+ inst->clk_data.buffer_counter < DCVS_FTB_WINDOW)
+ vote_data[i].power_mode = VIDC_POWER_TURBO;
+
/*
* TODO: support for OBP-DBP split mode hasn't been yet
* implemented, once it is, this part of code needs to be
@@ -126,18 +126,19 @@
int buffers_outside_fw = 0;
struct msm_vidc_core *core;
struct hal_buffer_requirements *output_buf_req;
- struct dcvs_stats *dcvs;
+ struct clock_data *dcvs;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
return -EINVAL;
}
- if (!inst->dcvs_mode) {
+
+ if (!inst->clk_data.dcvs_mode) {
dprintk(VIDC_DBG, "DCVS is not enabled\n");
return 0;
}
- dcvs = &inst->dcvs;
+ dcvs = &inst->clk_data;
core = inst->core;
mutex_lock(&inst->lock);
@@ -210,7 +211,7 @@
}
mutex_unlock(&inst->freqs.lock);
- inst->dcvs.buffer_counter++;
+ inst->clk_data.buffer_counter++;
}
@@ -227,12 +228,13 @@
/* If current requirement is within DCVS limits, try DCVS. */
- if (freq < inst->dcvs.load_high) {
+ if (freq < inst->clk_data.load_high) {
dprintk(VIDC_DBG, "Calling DCVS now\n");
/* TODO: calling DCVS here may reduce the residency. Re-visit. */
msm_dcvs_scale_clocks(inst);
- freq = inst->dcvs.load;
+ freq = inst->clk_data.load;
}
+ dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
return freq;
}
@@ -274,9 +276,10 @@
unsigned long freq = 0;
unsigned long vpp_cycles = 0, vsp_cycles = 0;
u32 vpp_cycles_per_mb;
- u32 mbs_per_frame;
+ u32 mbs_per_second;
- mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+ mbs_per_second = msm_comm_get_inst_load(inst,
+ LOAD_CALC_NO_QUIRKS);
/*
* Calculate vpp, vsp cycles separately for encoder and decoder.
@@ -286,17 +289,17 @@
if (inst->session_type == MSM_VIDC_ENCODER) {
vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ?
- inst->entry->low_power_cycles :
- inst->entry->vpp_cycles;
+ inst->clk_data.entry->low_power_cycles :
+ inst->clk_data.entry->vpp_cycles;
- vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+ vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
/* 10 / 7 is overhead factor */
- vsp_cycles += (inst->bitrate * 10) / 7;
+ vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
} else if (inst->session_type == MSM_VIDC_DECODER) {
- vpp_cycles = mbs_per_frame * inst->entry->vpp_cycles;
+ vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
- vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+ vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
/* 10 / 7 is overhead factor */
vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
@@ -306,6 +309,8 @@
return freq;
}
+ dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
+
freq = max(vpp_cycles, vsp_cycles);
return freq;
@@ -321,7 +326,7 @@
hdev = core->device;
allowed_clks_tbl = core->resources.allowed_clks_tbl;
- if (!hdev || !allowed_clks_tbl) {
+ if (!allowed_clks_tbl) {
dprintk(VIDC_ERR,
"%s Invalid parameters\n", __func__);
return -EINVAL;
@@ -329,35 +334,104 @@
mutex_lock(&core->lock);
list_for_each_entry(temp, &core->instances, list) {
- freq += temp->freq;
+ freq += temp->clk_data.curr_freq;
}
for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
rate = allowed_clks_tbl[i].clock_rate;
if (rate >= freq)
break;
}
+ core->min_freq = freq;
+ core->curr_freq = rate;
mutex_unlock(&core->lock);
- core->freq = rate;
- dprintk(VIDC_PROF, "Voting for freq = %lu", freq);
+ dprintk(VIDC_PROF, "Min freq = %lu Current Freq = %lu\n",
+ core->min_freq, core->curr_freq);
rc = call_hfi_op(hdev, scale_clocks,
- hdev->hfi_device_data, rate);
+ hdev->hfi_device_data, core->curr_freq);
return rc;
}
-static unsigned long msm_vidc_max_freq(struct msm_vidc_inst *inst)
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
{
struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
unsigned long freq = 0;
- allowed_clks_tbl = inst->core->resources.allowed_clks_tbl;
+ allowed_clks_tbl = core->resources.allowed_clks_tbl;
freq = allowed_clks_tbl[0].clock_rate;
dprintk(VIDC_PROF, "Max rate = %lu", freq);
return freq;
}
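
Note: msm_vidc_set_clocks above walks allowed_clks_tbl from the last entry upward, which only works because the table is sorted from the highest rate at index 0 (msm_vidc_max_freq relies on the same ordering) down to the lowest. A standalone sketch of that selection under the same sorted-descending assumption:

    /*
     * Sketch of the rate selection in msm_vidc_set_clocks(): pick the
     * lowest allowed rate that still covers the aggregate minimum
     * frequency, falling back to the maximum (index 0) when no entry
     * is large enough. Assumes tbl[] is sorted high-to-low.
     */
    static unsigned long pick_clock_rate(
    		const struct allowed_clock_rates_table *tbl,
    		int size, unsigned long freq)
    {
    	int i;
    	unsigned long rate = tbl[0].clock_rate; /* max rate fallback */

    	for (i = size - 1; i >= 0; i--) {
    		rate = tbl[i].clock_rate;
    		if (rate >= freq)
    			break;
    	}
    	return rate;
    }
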
+int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
+{
+ struct v4l2_ctrl *ctrl = NULL;
+ struct msm_vidc_inst *temp;
+ struct msm_vidc_core *core;
+ unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
+ unsigned long mbs_per_frame;
+
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
+ return -EINVAL;
+ }
+ core = inst->core;
+
+ mutex_lock(&core->lock);
+ max_freq = msm_vidc_max_freq(core);
+ list_for_each_entry(temp, &core->instances, list) {
+ if (temp == inst ||
+ temp->state < MSM_VIDC_START_DONE ||
+ temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
+ continue;
+
+ freq += temp->clk_data.min_freq;
+ }
+
+ freq_left = max_freq > freq ? max_freq - freq : 0;
+
+ list_for_each_entry(temp, &core->instances, list) {
+
+ mbs_per_frame = msm_dcvs_get_mbs_per_frame(temp);
+ cycles = temp->clk_data.entry->vpp_cycles;
+ if (temp->session_type == MSM_VIDC_ENCODER)
+ cycles = temp->flags & VIDC_LOW_POWER ?
+ temp->clk_data.entry->low_power_cycles :
+ cycles;
+
+ load = cycles * mbs_per_frame;
+
+ ops_left = load ? (freq_left / load) : 0;
+ /* Convert remaining operating rate to Q16 format */
+ ops_left = ops_left << 16;
+
+ ctrl = v4l2_ctrl_find(&temp->ctrl_handler,
+ V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE);
+ if (ctrl) {
+ dprintk(VIDC_DBG,
+ "%s: Before Range = %lld --> %lld\n",
+ ctrl->name, ctrl->minimum, ctrl->maximum);
+ dprintk(VIDC_DBG,
+ "%s: Before Def value = %lld Cur val = %d\n",
+ ctrl->name, ctrl->default_value, ctrl->val);
+ v4l2_ctrl_modify_range(ctrl, ctrl->minimum,
+ ctrl->val + ops_left, ctrl->step,
+ ctrl->minimum);
+ dprintk(VIDC_DBG,
+ "%s: Updated Range = %lld --> %lld\n",
+ ctrl->name, ctrl->minimum, ctrl->maximum);
+ dprintk(VIDC_DBG,
+ "%s: Updated Def value = %lld Cur val = %d\n",
+ ctrl->name, ctrl->default_value, ctrl->val);
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ return 0;
+}
+
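+
Note: operating_rate is carried in Q16 fixed point throughout this patch, hence the >> 16 on every read and the << 16 when returning the remaining head-room to the control range above. A sketch of that convention; the helper names are illustrative:

    /* Q16 convention: integer frame rate in the high 16 bits. */
    static inline u32 fps_to_q16(u32 fps)
    {
    	return fps << 16;	/* 30 -> 0x001e0000 */
    }

    static inline u32 q16_to_fps(u32 rate)
    {
    	return rate >> 16;	/* 0x001e0000 -> 30 */
    }
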
int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
{
struct vb2_buf_entry *temp, *next;
@@ -365,9 +439,10 @@
u32 filled_len = 0;
ion_phys_addr_t device_addr = 0;
- if (inst->dcvs.buffer_counter < DCVS_FTB_WINDOW) {
- freq = msm_vidc_max_freq(inst);
- goto decision_done;
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+ __func__, inst);
+ return -EINVAL;
}
mutex_lock(&inst->pendingq.lock);
@@ -381,7 +456,7 @@
mutex_unlock(&inst->pendingq.lock);
if (!filled_len || !device_addr) {
- freq = inst->freq;
+ dprintk(VIDC_PROF, "No Change in frequency\n");
goto decision_done;
}
@@ -391,8 +466,15 @@
freq = msm_vidc_adjust_freq(inst);
+ inst->clk_data.min_freq = freq;
+
+ if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW ||
+ !msm_vidc_clock_scaling)
+ inst->clk_data.curr_freq = msm_vidc_max_freq(inst->core);
+ else
+ inst->clk_data.curr_freq = freq;
+
decision_done:
- inst->freq = freq;
msm_vidc_set_clocks(inst->core);
return 0;
}
@@ -426,15 +508,25 @@
dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
return -EINVAL;
}
- if (inst->flags & VIDC_THUMBNAIL) {
- dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
- inst);
+
+ if (!msm_vidc_clock_scaling ||
+ inst->flags & VIDC_THUMBNAIL ||
+ inst->clk_data.low_latency_mode) {
+ dprintk(VIDC_PROF,
+ "This session doesn't need DCVS : %pK\n",
+ inst);
+ inst->clk_data.extra_capture_buffer_count = 0;
+ inst->clk_data.extra_output_buffer_count = 0;
+ inst->clk_data.dcvs_mode = false;
return false;
}
- inst->dcvs_mode = true;
+ inst->clk_data.dcvs_mode = true;
/* TODO: Update with proper number based on on-target tuning. */
- inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+ inst->clk_data.extra_capture_buffer_count =
+ DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+ inst->clk_data.extra_output_buffer_count =
+ DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
return true;
}
@@ -472,6 +564,12 @@
struct clock_profile_entry *entry = NULL;
int fourcc;
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+ __func__, inst);
+ return -EINVAL;
+ }
+
clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
fourcc = inst->session_type == MSM_VIDC_DECODER ?
inst->fmts[OUTPUT_PORT].fourcc :
@@ -488,7 +586,7 @@
inst->session_type);
if (matched) {
- inst->entry = entry;
+ inst->clk_data.entry = entry;
break;
}
}
@@ -502,7 +600,7 @@
return rc;
}
-static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
+static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
{
dprintk(VIDC_DBG,
"DCVS: Load_Low %d, Load High %d\n",
@@ -514,31 +612,31 @@
dcvs->min_threshold, dcvs->max_threshold);
}
-void msm_dcvs_init(struct msm_vidc_inst *inst)
+void msm_clock_data_reset(struct msm_vidc_inst *inst)
{
struct msm_vidc_core *core;
- int i = 0;
+ int i = 0, rc = 0;
struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
u64 total_freq = 0, rate = 0, load;
int cycles;
- struct dcvs_stats *dcvs;
+ struct clock_data *dcvs;
dprintk(VIDC_DBG, "Init DCVS Load\n");
if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+ dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+ __func__, inst);
return;
}
core = inst->core;
- dcvs = &inst->dcvs;
- inst->dcvs = (struct dcvs_stats){0};
+ dcvs = &inst->clk_data;
load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
- cycles = inst->entry->vpp_cycles;
+ cycles = inst->clk_data.entry->vpp_cycles;
allowed_clks_tbl = core->resources.allowed_clks_tbl;
if (inst->session_type == MSM_VIDC_ENCODER) {
cycles = inst->flags & VIDC_LOW_POWER ?
- inst->entry->low_power_cycles :
+ inst->clk_data.entry->low_power_cycles :
cycles;
dcvs->buffer_type = HAL_BUFFER_INPUT;
@@ -563,17 +661,324 @@
dcvs->load = dcvs->load_high = rate;
dcvs->load_low = allowed_clks_tbl[i+1].clock_rate;
+ inst->clk_data.buffer_counter = 0;
+
msm_dcvs_print_dcvs_stats(dcvs);
+
+ msm_vidc_update_operating_rate(inst);
+
+ rc = msm_comm_scale_clocks_and_bus(inst);
+
+ if (rc)
+ dprintk(VIDC_ERR, "%s Failed to scale Clocks and Bus\n",
+ __func__);
}
-int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
+int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
+ enum hal_buffer buffer_type)
{
if (!inst) {
dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
return 0;
}
- return inst->dcvs.extra_buffer_count;
+ return buffer_type == HAL_BUFFER_INPUT ?
+ inst->clk_data.extra_output_buffer_count :
+ inst->clk_data.extra_capture_buffer_count;
+}
+
+int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst)
+{
+ int rc = 0;
+ struct hfi_device *hdev;
+ struct hal_video_work_mode pdata;
+ struct hal_enable latency;
+
+ if (!inst || !inst->core || !inst->core->device) {
+ dprintk(VIDC_ERR,
+ "%s Invalid args: Inst = %pK\n",
+ __func__, inst);
+ return -EINVAL;
+ }
+
+ hdev = inst->core->device;
+ if (inst->clk_data.low_latency_mode) {
+ pdata.video_work_mode = VIDC_WORK_MODE_1;
+ goto decision_done;
+ }
+
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ pdata.video_work_mode = VIDC_WORK_MODE_2;
+ switch (inst->fmts[OUTPUT_PORT].fourcc) {
+ case V4L2_PIX_FMT_MPEG2:
+ pdata.video_work_mode = VIDC_WORK_MODE_1;
+ break;
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_HEVC:
+ if (inst->prop.height[OUTPUT_PORT] *
+ inst->prop.width[OUTPUT_PORT] <=
+ 1280 * 720)
+ pdata.video_work_mode = VIDC_WORK_MODE_1;
+ break;
+ }
+ } else if (inst->session_type == MSM_VIDC_ENCODER) {
+ u32 rc_mode = 0;
+
+ pdata.video_work_mode = VIDC_WORK_MODE_1;
+ rc_mode = msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL);
+ if (rc_mode == V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR ||
+ rc_mode ==
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR)
+ pdata.video_work_mode = VIDC_WORK_MODE_2;
+ } else {
+ return -EINVAL;
+ }
+
+decision_done:
+
+ inst->clk_data.work_mode = pdata.video_work_mode;
+ rc = call_hfi_op(hdev, session_set_property,
+ (void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE,
+ (void *)&pdata);
+ if (rc)
+ dprintk(VIDC_WARN,
+ " Failed to configure Work Mode %pK\n", inst);
+
+ /* For WORK_MODE_1, set Low Latency mode by default to HW. */
+
+ if (inst->session_type == MSM_VIDC_ENCODER &&
+ inst->clk_data.work_mode == VIDC_WORK_MODE_1) {
+ latency.enable = 1;
+ rc = call_hfi_op(hdev, session_set_property,
+ (void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY,
+ (void *)&latency);
+ }
+
+ rc = msm_comm_scale_clocks_and_bus(inst);
+
+ return rc;
+}
+
+static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst,
+ bool enable)
+{
+ int rc = 0;
+ u32 mbs_per_frame;
+ u32 prop_id = 0;
+ void *pdata = NULL;
+ struct hfi_device *hdev = NULL;
+ enum hal_perf_mode venc_mode;
+
+ hdev = inst->core->device;
+ if (inst->session_type != MSM_VIDC_ENCODER) {
+ dprintk(VIDC_DBG,
+ "%s : Not an encoder session. Nothing to do\n",
+ __func__);
+ return 0;
+ }
+ mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+ if (mbs_per_frame >= inst->core->resources.max_hq_mbs_per_frame ||
+ inst->prop.fps >= inst->core->resources.max_hq_fps) {
+ enable = true;
+ }
+
+ prop_id = HAL_CONFIG_VENC_PERF_MODE;
+ venc_mode = enable ? HAL_PERF_MODE_POWER_SAVE :
+ HAL_PERF_MODE_POWER_MAX_QUALITY;
+ pdata = &venc_mode;
+ rc = call_hfi_op(hdev, session_set_property,
+ (void *)inst->session, prop_id, pdata);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: Failed to set power save mode for inst: %pK\n",
+ __func__, inst);
+ goto fail_power_mode_set;
+ }
+ inst->flags = enable ?
+ inst->flags | VIDC_LOW_POWER :
+ inst->flags & ~VIDC_LOW_POWER;
+
+ dprintk(VIDC_PROF,
+ "Power Save Mode for inst: %pK Enable = %d\n", inst, enable);
+fail_power_mode_set:
+ return rc;
+}
+
+static int msm_vidc_move_core_to_power_save_mode(struct msm_vidc_core *core,
+ u32 core_id)
+{
+ struct msm_vidc_inst *inst = NULL;
+
+ dprintk(VIDC_PROF, "Core %d : Moving all inst to LP mode\n", core_id);
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == core_id &&
+ inst->session_type == MSM_VIDC_ENCODER)
+ msm_vidc_power_save_mode_enable(inst, true);
+ }
+ mutex_unlock(&core->lock);
+
+ return 0;
+}
+
+static u32 get_core_load(struct msm_vidc_core *core,
+ u32 core_id, bool lp_mode)
+{
+ struct msm_vidc_inst *inst = NULL;
+ u32 current_inst_mbs_per_sec = 0, load = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ u32 cycles, lp_cycles;
+
+ if (!(inst->clk_data.core_id & core_id))
+ continue;
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
+ } else if (inst->session_type == MSM_VIDC_ENCODER) {
+ lp_mode |= inst->flags & VIDC_LOW_POWER;
+ cycles = lp_mode ?
+ inst->clk_data.entry->low_power_cycles :
+ inst->clk_data.entry->vpp_cycles;
+ } else {
+ continue;
+ }
+ if (inst->clk_data.core_id == VIDC_CORE_ID_3)
+ cycles = cycles / 2;
+
+ current_inst_mbs_per_sec = msm_comm_get_inst_load(inst,
+ LOAD_CALC_NO_QUIRKS);
+ load += current_inst_mbs_per_sec * cycles;
+ }
+ mutex_unlock(&core->lock);
+
+ return load;
+}
+
+int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
+{
+ int rc = 0, hier_mode = 0;
+ struct hfi_device *hdev;
+ struct msm_vidc_core *core;
+ unsigned long max_freq, lp_cycles = 0;
+ struct hal_videocores_usage_info core_info;
+ u32 core0_load = 0, core1_load = 0, core0_lp_load = 0,
+ core1_lp_load = 0;
+ u32 current_inst_load = 0, current_inst_lp_load = 0,
+ min_load = 0, min_lp_load = 0;
+ u32 min_core_id, min_lp_core_id;
+
+ if (!inst || !inst->core || !inst->core->device) {
+ dprintk(VIDC_ERR,
+ "%s Invalid args: Inst = %pK\n",
+ __func__, inst);
+ return -EINVAL;
+ }
+
+ core = inst->core;
+ hdev = core->device;
+ max_freq = msm_vidc_max_freq(inst->core);
+ inst->clk_data.core_id = 0;
+
+ core0_load = get_core_load(core, VIDC_CORE_ID_1, false);
+ core1_load = get_core_load(core, VIDC_CORE_ID_2, false);
+ core0_lp_load = get_core_load(core, VIDC_CORE_ID_1, true);
+ core1_lp_load = get_core_load(core, VIDC_CORE_ID_2, true);
+
+ min_load = min(core0_load, core1_load);
+ min_core_id = core0_load < core1_load ?
+ VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
+ min_lp_load = min(core0_lp_load, core1_lp_load);
+ min_lp_core_id = core0_lp_load < core1_lp_load ?
+ VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
+
+ lp_cycles = inst->session_type == MSM_VIDC_ENCODER ?
+ inst->clk_data.entry->low_power_cycles :
+ inst->clk_data.entry->vpp_cycles;
+
+ current_inst_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
+ inst->clk_data.entry->vpp_cycles;
+
+ current_inst_lp_load = msm_comm_get_inst_load(inst,
+ LOAD_CALC_NO_QUIRKS) * lp_cycles;
+
+ dprintk(VIDC_DBG, "Core 0 Load = %d Core 1 Load = %d\n",
+ core0_load, core1_load);
+ dprintk(VIDC_DBG, "Core 0 LP Load = %d Core 1 LP Load = %d\n",
+ core0_lp_load, core1_lp_load);
+ dprintk(VIDC_DBG, "Max Load = %lu\n", max_freq);
+ dprintk(VIDC_DBG, "Current Load = %d Current LP Load = %d\n",
+ current_inst_load, current_inst_lp_load);
+
+ /* Hier mode can be normal HP or Hybrid HP. */
+
+ hier_mode = msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS);
+ hier_mode |= msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE);
+
+ /* Try for preferred core based on settings. */
+ if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
+ if (current_inst_load / 2 + core0_load <= max_freq &&
+ current_inst_load / 2 + core1_load <= max_freq) {
+ inst->clk_data.core_id = VIDC_CORE_ID_3;
+ msm_vidc_power_save_mode_enable(inst, false);
+ goto decision_done;
+ }
+ }
+
+ if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
+ if (current_inst_lp_load / 2 +
+ core0_lp_load <= max_freq &&
+ current_inst_lp_load / 2 +
+ core1_lp_load <= max_freq) {
+ inst->clk_data.core_id = VIDC_CORE_ID_3;
+ msm_vidc_power_save_mode_enable(inst, true);
+ goto decision_done;
+ }
+ }
+
+ if (current_inst_load + min_load < max_freq) {
+ inst->clk_data.core_id = min_core_id;
+ dprintk(VIDC_DBG,
+ "Selected normally : Core ID = %d\n",
+ inst->clk_data.core_id);
+ msm_vidc_power_save_mode_enable(inst, false);
+ } else if (current_inst_lp_load + min_load < max_freq) {
+ /* Move current instance to LP and return */
+ inst->clk_data.core_id = min_core_id;
+ dprintk(VIDC_DBG,
+ "Selected by moving current to LP : Core ID = %d\n",
+ inst->clk_data.core_id);
+ msm_vidc_power_save_mode_enable(inst, true);
+
+ } else if (current_inst_lp_load + min_lp_load < max_freq) {
+ /* Move all instances to LP mode and return */
+ inst->clk_data.core_id = min_lp_core_id;
+ dprintk(VIDC_DBG,
+ "Moved all inst's to LP: Core ID = %d\n",
+ inst->clk_data.core_id);
+ msm_vidc_move_core_to_power_save_mode(core, min_lp_core_id);
+ } else {
+ rc = -EINVAL;
+ dprintk(VIDC_ERR,
+ "Sorry ... Core Can't support this load\n");
+ return rc;
+ }
+
+decision_done:
+ core_info.video_core_enable_mask = inst->clk_data.core_id;
+
+ rc = call_hfi_op(hdev, session_set_property,
+ (void *)inst->session,
+ HAL_PARAM_VIDEO_CORES_USAGE, &core_info);
+ if (rc)
+ dprintk(VIDC_WARN,
+ " Failed to configure CORE ID %pK\n", inst);
+
+ rc = msm_comm_scale_clocks_and_bus(inst);
+
+ return rc;
}
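
Note: for readers following msm_vidc_decide_core_and_power_mode above, the decision ladder reduces to the summary below; this is a descriptive sketch, not driver code, and pick_min_core is a hypothetical helper mirroring the min_core_id computation:

    /*
     * Decision order, as implemented above:
     * 1. hier-P encode whose halved load fits on both cores -> VIDC_CORE_ID_3
     *    in max-quality mode (retried in low-power if that fails);
     * 2. load fits on the least-loaded core                 -> that core, HQ;
     * 3. only the low-power load fits                       -> that core, LP;
     * 4. fits only with every encoder in LP                 -> move the core
     *    to power-save via msm_vidc_move_core_to_power_save_mode();
     * 5. otherwise the session is rejected with -EINVAL.
     */
    static inline u32 pick_min_core(u32 core0_load, u32 core1_load)
    {
    	return core0_load < core1_load ? VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
    }
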
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 0229ccbb..fe4822b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -31,12 +31,16 @@
/* Considering one safeguard buffer */
#define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
-void msm_dcvs_init(struct msm_vidc_inst *inst);
-int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
+void msm_clock_data_reset(struct msm_vidc_inst *inst);
+int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst);
+int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
+ enum hal_buffer buffer_type);
int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst);
void msm_comm_free_freq_table(struct msm_vidc_inst *inst);
+int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
ion_phys_addr_t device_addr);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 5e49f42..0efe93b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -23,20 +23,9 @@
#include "msm_vidc_debug.h"
#include "msm_vidc_clocks.h"
-#define IS_ALREADY_IN_STATE(__p, __d) ({\
- int __rc = (__p >= __d);\
- __rc; \
-})
-
-#define SUM_ARRAY(__arr, __start, __end) ({\
- int __index;\
- typeof((__arr)[0]) __sum = 0;\
- for (__index = (__start); __index <= (__end); __index++) {\
- if (__index >= 0 && __index < ARRAY_SIZE(__arr))\
- __sum += __arr[__index];\
- } \
- __sum;\
-})
+#define IS_ALREADY_IN_STATE(__p, __d) (\
+ (__p >= __d)\
+)
#define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT
@@ -52,6 +41,8 @@
"Extradata none",
"Extradata MB Quantization",
"Extradata Interlace Video",
+ "Reserved",
+ "Reserved",
"Extradata timestamp",
"Extradata S3D Frame Packing",
"Extradata Frame Rate",
@@ -129,7 +120,7 @@
};
rc = msm_comm_g_ctrl(inst, &ctrl);
- return rc ?: ctrl.value;
+ return rc ? rc : ctrl.value;
}
static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst,
@@ -593,9 +584,9 @@
capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT],
inst->prop.height[CAPTURE_PORT]);
- if (inst->operating_rate) {
- fps = (inst->operating_rate >> 16) ?
- inst->operating_rate >> 16 : 1;
+ if (inst->clk_data.operating_rate) {
+ fps = (inst->clk_data.operating_rate >> 16) ?
+ inst->clk_data.operating_rate >> 16 : 1;
/*
* Check if operating rate is less than fps.
* If Yes, then use fps to scale clocks
@@ -874,11 +865,13 @@
/* This should come from sys_init_done */
core->resources.max_inst_count =
- sys_init_msg->max_sessions_supported ? :
+ sys_init_msg->max_sessions_supported ?
+ sys_init_msg->max_sessions_supported :
MAX_SUPPORTED_INSTANCES;
core->resources.max_secure_inst_count =
- core->resources.max_secure_inst_count ? :
+ core->resources.max_secure_inst_count ?
+ core->resources.max_secure_inst_count :
core->resources.max_inst_count;
if (core->id == MSM_VIDC_CORE_VENUS &&
@@ -1174,50 +1167,70 @@
static void msm_vidc_comm_update_ctrl_limits(struct msm_vidc_inst *inst)
{
- msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE,
- &inst->capability.hier_p_hybrid);
- msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS,
- &inst->capability.hier_b);
- msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS,
- &inst->capability.hier_p);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
- &inst->capability.bitrate);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
- &inst->capability.peakbitrate);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP,
- &inst->capability.i_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP,
- &inst->capability.p_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP,
- &inst->capability.b_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN,
- &inst->capability.i_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN,
- &inst->capability.p_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN,
- &inst->capability.b_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX,
- &inst->capability.i_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX,
- &inst->capability.p_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX,
- &inst->capability.b_qp);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH,
- &inst->capability.blur_width);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT,
- &inst->capability.blur_height);
- msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
- &inst->capability.slice_bytes);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
- &inst->capability.slice_mbs);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT,
- &inst->capability.ltr_count);
- msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
- &inst->capability.bframe);
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE,
+ &inst->capability.hier_p_hybrid);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS,
+ &inst->capability.hier_b);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS,
+ &inst->capability.hier_p);
+ msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
+ &inst->capability.bitrate);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE,
+ &inst->capability.bitrate);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ &inst->capability.peakbitrate);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP,
+ &inst->capability.i_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP,
+ &inst->capability.p_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP,
+ &inst->capability.b_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN,
+ &inst->capability.i_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN,
+ &inst->capability.p_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN,
+ &inst->capability.b_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX,
+ &inst->capability.i_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX,
+ &inst->capability.p_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX,
+ &inst->capability.b_qp);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH,
+ &inst->capability.blur_width);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT,
+ &inst->capability.blur_height);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+ &inst->capability.slice_bytes);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ &inst->capability.slice_mbs);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT,
+ &inst->capability.ltr_count);
+ msm_vidc_comm_update_ctrl(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
+ &inst->capability.bframe);
+ }
}
static void handle_session_init_done(enum hal_command_response cmd, void *data)
@@ -1365,7 +1378,7 @@
switch (event_notify->hal_event_type) {
case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+ event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
break;
case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
@@ -1438,18 +1451,17 @@
}
/* Bit depth and pic struct changed event are combined into a single
- * event (insufficient event) for the userspace. Currently bitdepth
- * changes is only for HEVC and interlaced support is for all
- * codecs except HEVC
- * event data is now as follows:
- * u32 *ptr = seq_changed_event.u.data;
- * ptr[0] = height
- * ptr[1] = width
- * ptr[2] = flag to indicate bit depth or/and pic struct changed
- * ptr[3] = bit depth
- * ptr[4] = pic struct (progressive or interlaced)
- * ptr[5] = colour space
- */
+ * event (insufficient event) for the userspace. Currently bit-depth
+ * changes apply only to HEVC, and interlaced support covers all
+ * codecs except HEVC
+ * event data is now as follows:
+ * u32 *ptr = seq_changed_event.u.data;
+ * ptr[0] = height
+ * ptr[1] = width
+ * ptr[2] = bit depth
+ * ptr[3] = pic struct (progressive or interlaced)
+ * ptr[4] = colour space
+ */
inst->entropy_mode = msm_comm_hal_to_v4l2(
V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
@@ -1462,55 +1474,35 @@
event_notify->level);
ptr = (u32 *)seq_changed_event.u.data;
+ ptr[0] = event_notify->height;
+ ptr[1] = event_notify->width;
+ ptr[2] = event_notify->bit_depth;
+ ptr[3] = event_notify->pic_struct;
+ ptr[4] = event_notify->colour_space;
- if (ptr != NULL) {
- ptr[2] = 0x0;
- ptr[3] = inst->bit_depth;
- ptr[4] = inst->pic_struct;
- ptr[5] = inst->colour_space;
+ dprintk(VIDC_DBG,
+ "Event payload: height = %d width = %d\n",
+ event_notify->height, event_notify->width);
- if (inst->bit_depth != event_notify->bit_depth) {
- inst->bit_depth = event_notify->bit_depth;
- ptr[2] |= V4L2_EVENT_BITDEPTH_FLAG;
- ptr[3] = inst->bit_depth;
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
- dprintk(VIDC_DBG,
- "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to bit-depth change\n");
- }
+ dprintk(VIDC_DBG,
+ "Event payload: bit_depth = %d pic_struct = %d colour_space = %d\n",
+ event_notify->bit_depth, event_notify->pic_struct,
+ event_notify->colour_space);
- if (inst->pic_struct != event_notify->pic_struct) {
- inst->pic_struct = event_notify->pic_struct;
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
- ptr[2] |= V4L2_EVENT_PICSTRUCT_FLAG;
- ptr[4] = inst->pic_struct;
- dprintk(VIDC_DBG,
- "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to pic-struct change\n");
- }
-
- if (inst->bit_depth == MSM_VIDC_BIT_DEPTH_10
- && inst->colour_space !=
- event_notify->colour_space) {
- inst->colour_space = event_notify->colour_space;
- event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
- ptr[2] |= V4L2_EVENT_COLOUR_SPACE_FLAG;
- ptr[5] = inst->colour_space;
- dprintk(VIDC_DBG,
- "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT due to colour space change\n");
- }
-
- }
+ mutex_lock(&inst->lock);
+ inst->in_reconfig = true;
+ inst->reconfig_height = event_notify->height;
+ inst->reconfig_width = event_notify->width;
+ mutex_unlock(&inst->lock);
if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
dprintk(VIDC_DBG, "V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT\n");
- inst->reconfig_height = event_notify->height;
- inst->reconfig_width = event_notify->width;
- inst->in_reconfig = true;
} else {
dprintk(VIDC_DBG, "V4L2_EVENT_SEQ_CHANGED_SUFFICIENT\n");
dprintk(VIDC_DBG,
- "event_notify->height = %d event_notify->width = %d\n",
- event_notify->height,
- event_notify->width);
+ "event_notify->height = %d event_notify->width = %d\n",
+ event_notify->height,
+ event_notify->width);
inst->prop.height[OUTPUT_PORT] = event_notify->height;
inst->prop.width[OUTPUT_PORT] = event_notify->width;
}
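
Note: the reworked event payload above is position-based (ptr[0] through ptr[4]). A hypothetical userspace-side overlay of that layout, with field names chosen here for illustration only:

    /* Overlay of seq_changed_event.u.data[0..4] as filled above. */
    struct vidc_seq_changed_payload {
    	__u32 height;		/* ptr[0] */
    	__u32 width;		/* ptr[1] */
    	__u32 bit_depth;	/* ptr[2] */
    	__u32 pic_struct;	/* ptr[3] (progressive or interlaced) */
    	__u32 colour_space;	/* ptr[4] */
    };
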
@@ -1518,13 +1510,6 @@
rc = msm_vidc_check_session_supported(inst);
if (!rc) {
seq_changed_event.type = event;
- if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
- u32 *ptr = NULL;
-
- ptr = (u32 *)seq_changed_event.u.data;
- ptr[0] = event_notify->height;
- ptr[1] = event_notify->width;
- }
v4l2_event_queue_fh(&inst->event_handler, &seq_changed_event);
} else if (rc == -ENOTSUPP) {
msm_vidc_queue_v4l2_event(inst,
@@ -2525,7 +2510,7 @@
}
tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
- freq = core->freq;
+ freq = core->curr_freq;
is_turbo = is_core_turbo(core, freq);
dprintk(VIDC_DBG,
@@ -3655,6 +3640,11 @@
}
break;
}
+ case V4L2_QCOM_CMD_SESSION_CONTINUE:
+ {
+ rc = msm_comm_session_continue(inst);
+ break;
+ }
default:
dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd);
rc = -ENOTSUPP;
@@ -3850,17 +3840,17 @@
* Don't queue if:
* 1) Hardware isn't ready (that's simple)
*/
- defer = defer ?: inst->state != MSM_VIDC_START_DONE;
+ defer = defer ? defer : (inst->state != MSM_VIDC_START_DONE);
/*
* 2) The client explicitly tells us not to because it wants this
* buffer to be batched with future frames. The batch size (on both
* capabilities) is completely determined by the client.
*/
- defer = defer ?: vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER;
+ defer = defer ? defer : (vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER);
/* 3) If we're in batch mode, we must have full batches of both types */
- defer = defer ?: batch_mode && (!output_count || !capture_count);
+ defer = defer ? defer :
+ (batch_mode && (!output_count || !capture_count));
if (defer) {
dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb);
@@ -3998,32 +3988,122 @@
return rc;
}
+static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst)
+{
+ int extra_buffers;
+ struct hal_buffer_requirements *bufreq;
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_INPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_INPUT);
+ return -EINVAL;
+ }
+ extra_buffers = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
+
+ bufreq->buffer_count_min_host = bufreq->buffer_count_min +
+ extra_buffers;
+
+ if (msm_comm_get_stream_output_mode(inst) ==
+ HAL_VIDEO_DECODER_SECONDARY) {
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_OUTPUT);
+ return -EINVAL;
+ }
+
+ /* For DPB buffers, no need to add Extra buffers */
+
+ bufreq->buffer_count_actual = bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min;
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT2);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_OUTPUT2);
+ return -EINVAL;
+ }
+
+ extra_buffers = msm_vidc_get_extra_buff_count(inst,
+ HAL_BUFFER_OUTPUT);
+
+ bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min + extra_buffers;
+ } else {
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_ERR,
+ "Failed : No buffer requirements : %x\n",
+ HAL_BUFFER_OUTPUT);
+ return -EINVAL;
+ }
+
+ extra_buffers = msm_vidc_get_extra_buff_count(inst,
+ HAL_BUFFER_OUTPUT);
+
+ bufreq->buffer_count_actual = bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min + extra_buffers;
+ }
+
+ return 0;
+}
+
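+
Note: msm_vidc_update_host_buff_counts applies one rule to every non-DPB buffer type: the host minimum is the firmware minimum plus the DCVS head-room. A one-line sketch of that rule (hypothetical helper, not in the driver):

    /* min_host = fw minimum + DCVS extras (0 when DCVS is disabled). */
    static inline u32 host_min_buffer_count(u32 fw_min, u32 dcvs_extra)
    {
    	return fw_min + dcvs_extra;
    }
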
int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst)
{
int rc = 0, i = 0;
union hal_get_property hprop;
+ memset(&hprop, 0x0, sizeof(hprop));
+
rc = msm_comm_try_get_prop(inst, HAL_PARAM_GET_BUFFER_REQUIREMENTS,
- &hprop);
+ &hprop);
if (rc) {
dprintk(VIDC_ERR, "Failed getting buffer requirements: %d", rc);
return rc;
}
- dprintk(VIDC_DBG, "Buffer requirements:\n");
- dprintk(VIDC_DBG, "%15s %8s %8s\n", "buffer type", "count", "size");
+ dprintk(VIDC_DBG, "Buffer requirements from HW:\n");
+ dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+ "buffer type", "count", "mincount_host", "mincount_fw", "size");
for (i = 0; i < HAL_BUFFER_MAX; i++) {
struct hal_buffer_requirements req = hprop.buf_req.buffer[i];
inst->buff_req.buffer[i] = req;
- dprintk(VIDC_DBG, "%15s %8d %8d\n",
+ if (req.buffer_type != HAL_BUFFER_NONE) {
+ dprintk(VIDC_DBG, "%15s %8d %8d %8d %8d\n",
get_buffer_name(req.buffer_type),
- req.buffer_count_actual, req.buffer_size);
+ req.buffer_count_actual,
+ req.buffer_count_min_host,
+ req.buffer_count_min, req.buffer_size);
+ }
}
- dprintk(VIDC_PROF, "Input buffers: %d, Output buffers: %d\n",
- inst->buff_req.buffer[0].buffer_count_actual,
- inst->buff_req.buffer[1].buffer_count_actual);
+ rc = msm_vidc_update_host_buff_counts(inst);
+
+ dprintk(VIDC_DBG, "Buffer requirements host adjusted:\n");
+ dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+ "buffer type", "count", "mincount_host", "mincount_fw", "size");
+ for (i = 0; i < HAL_BUFFER_MAX; i++) {
+ struct hal_buffer_requirements req = inst->buff_req.buffer[i];
+
+ if (req.buffer_type != HAL_BUFFER_NONE) {
+ dprintk(VIDC_DBG, "%15s %8d %8d %8d %8d\n",
+ get_buffer_name(req.buffer_type),
+ req.buffer_count_actual,
+ req.buffer_count_min_host,
+ req.buffer_count_min, req.buffer_size);
+ }
+ }
return rc;
}
@@ -4657,9 +4737,9 @@
return 0;
}
- // Finish FLUSH As Soon As Possible.
- inst->dcvs.buffer_counter = 0;
- msm_comm_scale_clocks_and_bus(inst);
+ /* Finish FLUSH As Soon As Possible. */
+
+ msm_clock_data_reset(inst);
msm_comm_flush_dynamic_buffers(inst);
@@ -5298,21 +5378,18 @@
goto exit;
}
- if (inst->prop.fps != fps) {
- dprintk(VIDC_PROF, "reported fps changed for %pK: %d->%d\n",
- inst, inst->prop.fps, fps);
- inst->prop.fps = fps;
+ dprintk(VIDC_PROF, "reported fps changed for %pK: %d->%d\n",
+ inst, inst->prop.fps, fps);
+ inst->prop.fps = fps;
+ if (inst->session_type == MSM_VIDC_ENCODER) {
frame_rate.frame_rate = inst->prop.fps * BIT(16);
frame_rate.buffer_type = HAL_BUFFER_OUTPUT;
pdata = &frame_rate;
- if (inst->session_type == MSM_VIDC_ENCODER) {
- rc = call_hfi_op(hdev, session_set_property,
- inst->session, property_id, pdata);
-
- if (rc)
- dprintk(VIDC_WARN,
- "Failed to set frame rate %d\n", rc);
- }
+ rc = call_hfi_op(hdev, session_set_property,
+ inst->session, property_id, pdata);
+ if (rc)
+ dprintk(VIDC_WARN,
+ "Failed to set frame rate %d\n", rc);
}
exit:
return rc;
@@ -5396,7 +5473,7 @@
}
core = inst->core;
- dprintk(VIDC_ERR, "Venus core frequency = %lu", core->freq);
+ dprintk(VIDC_ERR, "Venus core frequency = %lu", core->curr_freq);
mutex_lock(&core->lock);
dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
msm_comm_print_inst_info(inst);
@@ -5409,3 +5486,61 @@
}
mutex_unlock(&core->lock);
}
+
+int msm_comm_session_continue(void *instance)
+{
+ struct msm_vidc_inst *inst = instance;
+ int rc = 0;
+ struct hfi_device *hdev;
+
+ if (!inst || !inst->core || !inst->core->device)
+ return -EINVAL;
+ hdev = inst->core->device;
+ mutex_lock(&inst->lock);
+ if (inst->session_type == MSM_VIDC_DECODER && inst->in_reconfig) {
+ dprintk(VIDC_DBG, "send session_continue\n");
+ rc = call_hfi_op(hdev, session_continue,
+ (void *)inst->session);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "failed to send session_continue\n");
+ rc = -EINVAL;
+ goto sess_continue_fail;
+ }
+ inst->in_reconfig = false;
+ } else if (inst->session_type == MSM_VIDC_ENCODER) {
+ dprintk(VIDC_DBG,
+ "session_continue not supported for encoder");
+ } else {
+ dprintk(VIDC_ERR,
+ "session_continue called in wrong state for decoder");
+ }
+sess_continue_fail:
+ mutex_unlock(&inst->lock);
+ return rc;
+}
+
+u32 get_frame_size_nv12(int plane, u32 height, u32 width)
+{
+ return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
+}
+
+u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
+{
+ return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
+}
+
+u32 get_frame_size_rgba(int plane, u32 height, u32 width)
+{
+ return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
+}
+
+u32 get_frame_size_nv21(int plane, u32 height, u32 width)
+{
+ return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
+}
+
+u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width)
+{
+ return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
+}
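
[Editor's note: the five helpers above deliberately ignore the plane index,
since VENUS_BUFFER_SIZE() folds plane padding into a single allocation and one
size covers the whole frame. A caller would typically dispatch on the
negotiated fourcc; a minimal sketch (msm_vidc_frame_size() and its switch are
illustrative, not part of this patch):

	/* Hypothetical dispatcher over the helpers exported above. */
	static u32 msm_vidc_frame_size(u32 fourcc, u32 height, u32 width)
	{
		switch (fourcc) {
		case V4L2_PIX_FMT_NV12:
			return get_frame_size_nv12(0, height, width);
		case V4L2_PIX_FMT_NV21:
			return get_frame_size_nv21(0, height, width);
		default:
			return 0;	/* unsupported format */
		}
	}
]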
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 39a28b3..098063d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -97,4 +97,10 @@
void msm_comm_print_inst_info(struct msm_vidc_inst *inst);
int msm_comm_v4l2_to_hal(int id, int value);
int msm_comm_hal_to_v4l2(int id, int value);
+int msm_comm_session_continue(void *instance);
+u32 get_frame_size_nv12(int plane, u32 height, u32 width);
+u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width);
+u32 get_frame_size_rgba(int plane, u32 height, u32 width);
+u32 get_frame_size_nv21(int plane, u32 height, u32 width);
+u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 15ee8a8..f62c132 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -26,12 +26,10 @@
int msm_vidc_fw_low_power_mode = 1;
int msm_vidc_hw_rsp_timeout = 2000;
bool msm_vidc_fw_coverage = !true;
-bool msm_vidc_dec_dcvs_mode = true;
-bool msm_vidc_enc_dcvs_mode = true;
bool msm_vidc_sys_idle_indicator = !true;
int msm_vidc_firmware_unload_delay = 15000;
bool msm_vidc_thermal_mitigation_disabled = !true;
-bool msm_vidc_bitrate_clock_scaling = true;
+bool msm_vidc_clock_scaling = true;
bool msm_vidc_debug_timeout = !true;
#define MAX_DBG_BUF_SIZE 4096
@@ -174,8 +172,6 @@
__debugfs_create(x32, "fw_level", &msm_vidc_fw_debug) &&
__debugfs_create(u32, "fw_debug_mode", &msm_vidc_fw_debug_mode) &&
__debugfs_create(bool, "fw_coverage", &msm_vidc_fw_coverage) &&
- __debugfs_create(bool, "dcvs_dec_mode", &msm_vidc_dec_dcvs_mode) &&
- __debugfs_create(bool, "dcvs_enc_mode", &msm_vidc_enc_dcvs_mode) &&
__debugfs_create(u32, "fw_low_power_mode",
&msm_vidc_fw_low_power_mode) &&
__debugfs_create(u32, "debug_output", &msm_vidc_debug_out) &&
@@ -186,8 +182,8 @@
&msm_vidc_firmware_unload_delay) &&
__debugfs_create(bool, "disable_thermal_mitigation",
&msm_vidc_thermal_mitigation_disabled) &&
- __debugfs_create(bool, "bitrate_clock_scaling",
- &msm_vidc_bitrate_clock_scaling) &&
+ __debugfs_create(bool, "clock_scaling",
+ &msm_vidc_clock_scaling) &&
__debugfs_create(bool, "debug_timeout",
&msm_vidc_debug_timeout);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index cf5ce22..f5c8e5a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -59,12 +59,10 @@
extern int msm_vidc_fw_low_power_mode;
extern int msm_vidc_hw_rsp_timeout;
extern bool msm_vidc_fw_coverage;
-extern bool msm_vidc_dec_dcvs_mode;
-extern bool msm_vidc_enc_dcvs_mode;
extern bool msm_vidc_sys_idle_indicator;
extern int msm_vidc_firmware_unload_delay;
extern bool msm_vidc_thermal_mitigation_disabled;
-extern bool msm_vidc_bitrate_clock_scaling;
+extern bool msm_vidc_clock_scaling;
extern bool msm_vidc_debug_timeout;
#define VIDC_MSG_PRIO2STRING(__level) ({ \
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 8562e8f..37bccbd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -52,7 +52,7 @@
/* Maintains the number of FTB's between each FBD over a window */
-#define DCVS_FTB_WINDOW 32
+#define DCVS_FTB_WINDOW 16
#define V4L2_EVENT_VIDC_BASE 10
@@ -205,15 +205,25 @@
int ebd;
};
-struct dcvs_stats {
+struct clock_data {
int buffer_counter;
int load;
int load_low;
int load_high;
int min_threshold;
int max_threshold;
- unsigned int extra_buffer_count;
+ unsigned int extra_capture_buffer_count;
+ unsigned int extra_output_buffer_count;
enum hal_buffer buffer_type;
+ bool dcvs_mode;
+ unsigned long bitrate;
+ unsigned long min_freq;
+ unsigned long curr_freq;
+ u32 operating_rate;
+ struct clock_profile_entry *entry;
+ u32 core_id;
+ enum hal_work_mode work_mode;
+ bool low_latency_mode;
};
struct profile_data {
@@ -258,7 +268,8 @@
struct msm_vidc_capability *capabilities;
struct delayed_work fw_unload_work;
bool smmu_fault_handled;
- unsigned long freq;
+ unsigned long min_freq;
+ unsigned long curr_freq;
};
struct msm_vidc_inst {
@@ -292,26 +303,21 @@
void *priv;
struct msm_vidc_debug debug;
struct buf_count count;
- struct dcvs_stats dcvs;
+ struct clock_data clk_data;
enum msm_vidc_modes flags;
struct msm_vidc_capability capability;
u32 buffer_size_limit;
enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM];
struct v4l2_ctrl **ctrls;
- bool dcvs_mode;
enum msm_vidc_pixel_depth bit_depth;
struct kref kref;
- unsigned long bitrate;
- unsigned long freq;
u32 buffers_held_in_driver;
atomic_t in_flush;
u32 pic_struct;
u32 colour_space;
- u32 operating_rate;
u32 profile;
u32 level;
u32 entropy_mode;
- struct clock_profile_entry *entry;
};
extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 8b9018c..0a6de41 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -499,7 +499,7 @@
"qcom,vpp-cycles-per-mb", NULL)) {
rc = of_property_read_u32(child_node,
"qcom,vpp-cycles-per-mb",
- &entry->vsp_cycles);
+ &entry->vpp_cycles);
if (rc) {
dprintk(VIDC_ERR,
"qcom,vpp-cycles-per-mb not found\n");
@@ -940,6 +940,24 @@
goto err_load_max_hw_load;
}
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-hq-mbs-per-frame",
+ &res->max_hq_mbs_per_frame);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to determine Max HQ mbs per frame: %d\n", rc);
+ goto err_load_HQ_values;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,max-hq-frames-per-sec",
+ &res->max_hq_fps);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to determine Max HQ fps: %d\n", rc);
+ goto err_load_HQ_values;
+ }
+
rc = msm_vidc_populate_legacy_context_bank(res);
if (rc) {
dprintk(VIDC_ERR,
@@ -985,6 +1003,7 @@
return rc;
err_setup_legacy_cb:
+err_load_HQ_values:
err_load_max_hw_load:
msm_vidc_free_allowed_clocks_table(res);
err_load_allowed_clocks_table:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 8fd43006..20b0ffc 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -165,6 +165,8 @@
uint32_t imem_size;
enum imem_type imem_type;
uint32_t max_load;
+ uint32_t max_hq_mbs_per_frame;
+ uint32_t max_hq_fps;
struct platform_device *pdev;
struct regulator_set regulator_set;
struct clock_set clock_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index eb36b33..1a1078d 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1195,44 +1195,6 @@
return prop;
}
-static int __halt_axi(struct venus_hfi_device *device)
-{
- u32 reg;
- int rc = 0;
-
- if (!device) {
- dprintk(VIDC_ERR, "Invalid input: %pK\n", device);
- return -EINVAL;
- }
-
- /*
- * Driver needs to make sure that clocks are enabled to read Venus AXI
- * registers. If not skip AXI HALT.
- */
- if (!device->power_enabled) {
- dprintk(VIDC_WARN,
- "Clocks are OFF, skipping AXI HALT\n");
- WARN_ON(1);
- return -EINVAL;
- }
-
- /* Halt AXI and AXI IMEM VBIF Access */
- reg = __read_register(device, VENUS_VBIF_AXI_HALT_CTRL0);
- reg |= VENUS_VBIF_AXI_HALT_CTRL0_HALT_REQ;
- __write_register(device, VENUS_VBIF_AXI_HALT_CTRL0, reg);
-
- /* Request for AXI bus port halt */
- rc = readl_poll_timeout(device->hal_data->register_base
- + VENUS_VBIF_AXI_HALT_CTRL1,
- reg, reg & VENUS_VBIF_AXI_HALT_CTRL1_HALT_ACK,
- POLL_INTERVAL_US,
- VENUS_VBIF_AXI_HALT_ACK_TIMEOUT_US);
- if (rc)
- dprintk(VIDC_WARN, "AXI bus port halt timeout\n");
-
- return rc;
-}
-
static int __set_clocks(struct venus_hfi_device *device, u32 freq)
{
struct clock_info *cl;
@@ -1432,12 +1394,6 @@
__strict_check(device);
- if (!__core_in_valid_state(device)) {
- dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
- rc = -EINVAL;
- goto dbg_error_null;
- }
-
if (device->iface_queues[VIDC_IFACEQ_DBGQ_IDX].
q_array.align_virtual_addr == 0) {
dprintk(VIDC_ERR, "cannot read from shared DBG Q's\n");
@@ -2009,6 +1965,8 @@
if (device->res->pm_qos_latency_us &&
pm_qos_request_active(&device->qos))
pm_qos_remove_request(&device->qos);
+
+ __resume(device);
__set_state(device, VENUS_STATE_DEINIT);
__unload_fw(device);
@@ -2921,9 +2879,17 @@
VIDC_WRAPPER_CPU_STATUS);
idle_status = __read_register(device,
VIDC_CPU_CS_SCIACMDARG0);
- if (!(wfi_status & BIT(0)) ||
- !(idle_status & BIT(30))) {
- dprintk(VIDC_WARN, "Skipping PC\n");
+ if (!(wfi_status & BIT(0))) {
+ dprintk(VIDC_WARN,
+ "Skipping PC as wfi_status (%#x) bit not set\n",
+ wfi_status);
+ goto skip_power_off;
+ }
+ if (device->res->sys_idle_indicator &&
+ !(idle_status & BIT(30))) {
+ dprintk(VIDC_WARN,
+ "Skipping PC as idle_status (%#x) bit not set\n",
+ idle_status);
goto skip_power_off;
}
@@ -2981,15 +2947,6 @@
__set_state(device, VENUS_STATE_DEINIT);
- /*
- * Once SYS_ERROR received from HW, it is safe to halt the AXI.
- * With SYS_ERROR, Venus FW may have crashed and HW might be
- * active and causing unnecessary transactions. Hence it is
- * safe to stop all AXI transactions from venus sub-system.
- */
- if (__halt_axi(device))
- dprintk(VIDC_WARN, "Failed to halt AXI after SYS_ERROR\n");
-
vsfr = (struct hfi_sfr_struct *)device->sfr.align_virtual_addr;
if (vsfr) {
void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
@@ -3425,6 +3382,7 @@
static inline void __disable_unprepare_clks(struct venus_hfi_device *device)
{
struct clock_info *cl;
+ int rc = 0;
if (!device) {
dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
@@ -3434,6 +3392,18 @@
venus_hfi_for_each_clock_reverse(device, cl) {
dprintk(VIDC_DBG, "Clock: %s disable and unprepare\n",
cl->name);
+ rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_PERIPH);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed set flag NORETAIN_PERIPH %s\n",
+ cl->name);
+ }
+ rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_MEM);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed set flag NORETAIN_MEM %s\n",
+ cl->name);
+ }
clk_disable_unprepare(cl->clk);
}
}
@@ -3457,22 +3427,18 @@
if (cl->has_scaling)
clk_set_rate(cl->clk, clk_round_rate(cl->clk, 0));
- if (cl->has_mem_retention) {
- rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_PERIPH);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed set flag NORETAIN_PERIPH %s\n",
+ rc = clk_set_flags(cl->clk, CLKFLAG_RETAIN_PERIPH);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed set flag RETAIN_PERIPH %s\n",
cl->name);
- }
-
- rc = clk_set_flags(cl->clk, CLKFLAG_NORETAIN_MEM);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed set flag NORETAIN_MEM %s\n",
- cl->name);
- }
}
-
+ rc = clk_set_flags(cl->clk, CLKFLAG_RETAIN_MEM);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed set flag RETAIN_MEM %s\n",
+ cl->name);
+ }
rc = clk_prepare_enable(cl->clk);
if (rc) {
dprintk(VIDC_ERR, "Failed to enable clocks\n");
@@ -3897,7 +3863,7 @@
return rc;
}
-static void __venus_power_off(struct venus_hfi_device *device, bool halt_axi)
+static void __venus_power_off(struct venus_hfi_device *device)
{
if (!device->power_enabled)
return;
@@ -3906,12 +3872,6 @@
disable_irq_nosync(device->hal_data->irq);
device->intr_status = 0;
- /* Halt the AXI to make sure there are no pending transactions.
- * Clocks should be unprepared after making sure axi is halted.
- */
- if (halt_axi && __halt_axi(device))
- dprintk(VIDC_WARN, "Failed to halt AXI\n");
-
__disable_unprepare_clks(device);
if (__disable_regulators(device))
dprintk(VIDC_WARN, "Failed to disable regulators\n");
@@ -3947,7 +3907,7 @@
goto err_tzbsp_suspend;
}
- __venus_power_off(device, true);
+ __venus_power_off(device);
dprintk(VIDC_PROF, "Venus power collapsed\n");
return rc;
@@ -4013,7 +3973,7 @@
err_reset_core:
__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
err_set_video_state:
- __venus_power_off(device, true);
+ __venus_power_off(device);
err_venus_power_on:
dprintk(VIDC_ERR, "Failed to resume from power collapse\n");
return rc;
@@ -4072,7 +4032,7 @@
subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;
fail_load_fw:
- __venus_power_off(device, true);
+ __venus_power_off(device);
fail_venus_power_on:
fail_init_pkt:
__deinit_resources(device);
@@ -4093,9 +4053,11 @@
__vote_buses(device, NULL, 0);
subsystem_put(device->resources.fw.cookie);
__interface_queues_release(device);
- __venus_power_off(device, false);
+ __venus_power_off(device);
device->resources.fw.cookie = NULL;
__deinit_resources(device);
+
+ dprintk(VIDC_PROF, "Firmware unloaded successfully\n");
}
static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 2a833dc..48a6f17 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -588,6 +588,7 @@
struct hfi_frame_cr_stats_type {
u32 frame_index;
struct hfi_ubwc_cr_stats_info_type ubwc_stats_info;
+ u32 complexity_number;
};
struct hfi_msg_session_empty_buffer_done_packet {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 8aa0bbb..8752378 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -20,17 +20,15 @@
#include <media/msm_vidc.h>
#include "msm_vidc_resources.h"
-#define CONTAINS(__a, __sz, __t) ({\
- int __rc = __t >= __a && \
- __t < __a + __sz; \
- __rc; \
-})
+#define CONTAINS(__a, __sz, __t) (\
+ (__t >= __a) && \
+ (__t < __a + __sz) \
+)
-#define OVERLAPS(__t, __tsz, __a, __asz) ({\
- int __rc = __t <= __a && \
- __t + __tsz >= __a + __asz; \
- __rc; \
-})
+#define OVERLAPS(__t, __tsz, __a, __asz) (\
+ (__t <= __a) && \
+ (__t + __tsz >= __a + __asz) \
+)
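
[Editor's note: the rewrite above drops the GNU statement-expression
({ ... }) form for a plain parenthesized expression: same truth table, but
side-effect free and usable in any expression context, including constant
initializers. A usage sketch with illustrative values:

	/* Is address 0x1010 inside the 0x1000-byte arena at 0x1000? */
	int inside = CONTAINS(0x1000, 0x1000, 0x1010);		/* 1 */
	/* Does the region at 0 (0x4000 bytes) fully cover that arena? */
	int covers = OVERLAPS(0x0000, 0x4000, 0x1000, 0x1000);	/* 1 */
]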
#define HAL_BUFFERFLAG_EOS 0x00000001
#define HAL_BUFFERFLAG_STARTTIME 0x00000002
@@ -191,7 +189,6 @@
HAL_CONFIG_VENC_MAX_BITRATE,
HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
HAL_PARAM_VENC_GENERATE_AUDNAL,
- HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
HAL_PARAM_BUFFER_ALLOC_MODE,
HAL_PARAM_VDEC_FRAME_ASSEMBLY,
HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
@@ -224,6 +221,9 @@
HAL_PARAM_VENC_H264_TRANSFORM_8x8,
HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
HAL_PARAM_VENC_IFRAMESIZE_TYPE,
+ HAL_PARAM_VIDEO_CORES_USAGE,
+ HAL_PARAM_VIDEO_WORK_MODE,
+ HAL_PARAM_SECURE,
};
enum hal_domain {
@@ -595,6 +595,7 @@
u32 qpp;
u32 qpb;
u32 layer_id;
+ u32 enable;
};
struct hal_quantization_range {
@@ -818,12 +819,30 @@
u32 time_scale;
};
-struct hal_h264_vui_bitstream_restrc {
+struct hal_preserve_text_quality {
u32 enable;
};
-struct hal_preserve_text_quality {
- u32 enable;
+enum hal_core_id {
+ VIDC_CORE_ID_DEFAULT = 0,
+ VIDC_CORE_ID_1 = 1, /* 0b01 */
+ VIDC_CORE_ID_2 = 2, /* 0b10 */
+ VIDC_CORE_ID_3 = 3, /* 0b11 */
+ VIDC_CORE_ID_UNUSED = 0x10000000,
+};
+
+struct hal_videocores_usage_info {
+ u32 video_core_enable_mask;
+};
+
+enum hal_work_mode {
+ VIDC_WORK_MODE_1,
+ VIDC_WORK_MODE_2,
+ VIDC_WORK_MODE_UNUSED = 0x10000000,
+};
+
+struct hal_video_work_mode {
+ u32 video_work_mode;
};
struct hal_vpe_color_space_conversion {
@@ -1018,7 +1037,6 @@
struct hal_multi_view_select multi_view_select;
struct hal_timestamp_scale timestamp_scale;
struct hal_h264_vui_timing_info h264_vui_timing_info;
- struct hal_h264_vui_bitstream_restrc h264_vui_bitstream_restrc;
struct hal_preserve_text_quality preserve_text_quality;
struct hal_buffer_info buffer_info;
struct hal_buffer_alloc_mode buffer_alloc_mode;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 0d73410..77164be 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -163,6 +163,9 @@
#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1
#define HFI_VENC_PERFMODE_POWER_SAVE 0x2
+#define HFI_WORKMODE_1 (HFI_COMMON_BASE + 0x1)
+#define HFI_WORKMODE_2 (HFI_COMMON_BASE + 0x2)
+
struct hfi_buffer_info {
u32 buffer_addr;
u32 extra_data_addr;
@@ -215,11 +218,17 @@
(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED \
(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
+#define HFI_PROPERTY_PARAM_SECURE_SESSION \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x011)
+#define HFI_PROPERTY_PARAM_WORK_MODE \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x015)
#define HFI_PROPERTY_CONFIG_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
#define HFI_PROPERTY_CONFIG_FRAME_RATE \
(HFI_PROPERTY_CONFIG_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE \
+ (HFI_PROPERTY_CONFIG_COMMON_START + 0x002)
#define HFI_PROPERTY_PARAM_VDEC_COMMON_START \
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
@@ -276,8 +285,6 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
-#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
- (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
#define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
@@ -465,10 +472,6 @@
u32 flip;
};
-struct hfi_max_num_b_frames {
- u32 max_num_b_frames;
-};
-
struct hfi_conceal_color {
u32 conceal_color;
};
@@ -533,7 +536,8 @@
struct hfi_quantization {
u32 qp_packed;
u32 layer_id;
- u32 reserved[4];
+ u32 enable;
+ u32 reserved[3];
};
struct hfi_quantization_range {
@@ -567,6 +571,14 @@
u32 height;
};
+struct hfi_videocores_usage_type {
+ u32 video_core_enable_mask;
+};
+
+struct hfi_video_work_mode {
+ u32 video_work_mode;
+};
+
struct hfi_video_signal_metadata {
u32 enable;
u32 video_format;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index a8e6624..a9bb2dd 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -1013,8 +1013,8 @@
void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = d->name;
- struct device dev = d->udev->dev;
+ const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
+ const char *drvname = d->name;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -1024,8 +1024,9 @@
dvb_usbv2_exit(d);
- dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
- KBUILD_MODNAME, name);
+ pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
+ KBUILD_MODNAME, drvname, devname);
+ kfree(devname);
}
EXPORT_SYMBOL(dvb_usbv2_disconnect);
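
[Editor's note: the change above fixes a use-after-free: dvb_usbv2_exit()
frees 'd', so the old code's 'd->name' pointer and its shallow 'struct device'
copy were dangling by the time the message printed. The general rule, sketched
(the NULL guard is belt-and-braces for a failed kstrdup):

	/* Duplicate anything still needed *before* teardown frees it. */
	const char *name = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);

	dvb_usbv2_exit(d);	/* 'd' and everything it owns is gone */
	pr_info("%s disconnected\n", name ? name : "(unknown)");
	kfree(name);
]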
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 2434030..9fd43a3 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -59,23 +59,24 @@
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
struct cxusb_state *st = d->priv;
- int ret, wo;
+ int ret;
if (1 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
- wo = (rbuf == NULL || rlen == 0); /* write-only */
+ if (rlen > MAX_XFER_SIZE) {
+ warn("i2c rd: len=%d is too big!\n", rlen);
+ return -EOPNOTSUPP;
+ }
mutex_lock(&d->data_mutex);
st->data[0] = cmd;
memcpy(&st->data[1], wbuf, wlen);
- if (wo)
- ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
- else
- ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
- rbuf, rlen, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, st->data, rlen, 0);
+ if (!ret && rbuf && rlen)
+ memcpy(rbuf, st->data, rlen);
mutex_unlock(&d->data_mutex);
return ret;
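
[Editor's note: two fixes land above: rlen is now validated against
MAX_XFER_SIZE before st->data is used as the read target, and the read
round-trips through the DMA-safe, mutex-protected st->data scratch buffer
rather than the caller's rbuf, which may live on the stack. The
bounded-scratch pattern, sketched with illustrative names (SCRATCH_SIZE,
scratch, do_usb_rw):

	if (1 + wlen > SCRATCH_SIZE || rlen > SCRATCH_SIZE)
		return -EOPNOTSUPP;	/* never index past the scratch */

	mutex_lock(&lock);
	scratch[0] = cmd;
	memcpy(&scratch[1], wbuf, wlen);	/* stage the write */
	ret = do_usb_rw(dev, scratch, 1 + wlen, scratch, rlen);
	if (!ret && rbuf && rlen)
		memcpy(rbuf, scratch, rlen);	/* copy out on success only */
	mutex_unlock(&lock);
]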
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index dd048a7..b8d2ac5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -35,42 +35,51 @@
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- struct hexline hx;
- u8 reset;
- int ret,pos=0;
+ struct hexline *hx;
+ u8 *buf;
+ int ret, pos = 0;
+ u16 cpu_cs_register = cypress[type].cpu_cs_register;
+
+ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ hx = (struct hexline *)buf;
/* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ buf[0] = 1;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
- if (ret != hx.len) {
+ if (ret != hx->len) {
err("error while transferring firmware "
"(transferred size: %d, block size: %d)",
- ret,hx.len);
+ ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
+ kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
- reset = 0;
- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+ buf[0] = 0;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
+ kfree(buf);
+
return ret;
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
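
[Editor's note: the rework above moves the hexline, and the one-byte CPU reset
flag, off the stack: buffers handed to the USB core must be DMA-capable, and
on-stack buffers are not (with VMAP_STACK, introduced in this kernel
generation, they cannot be DMA-mapped at all). The rule in miniature
(req/reqtype/val/idx/len are placeholders):

	u8 *buf = kmalloc(len, GFP_KERNEL);	/* heap, DMA-safe */
	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, reqtype,
			      val, idx, buf, len, 1000 /* ms timeout */);
	kfree(buf);
]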
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index bacecbd..f37d64c 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -45,27 +45,36 @@
compat_caddr_t bitmap;
};
-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+static int get_v4l2_window32(struct v4l2_window __user *kp,
+ struct v4l2_window32 __user *up)
{
+ u32 clipcount = 0;
+
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
- copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
- get_user(kp->field, &up->field) ||
- get_user(kp->chromakey, &up->chromakey) ||
- get_user(kp->clipcount, &up->clipcount))
+ !access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_window)) ||
+ copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
+ copy_in_user(&kp->field, &up->field, sizeof(up->field)) ||
+ copy_in_user(&kp->chromakey, &up->chromakey,
+ sizeof(up->chromakey)) ||
+ copy_in_user(&kp->clipcount, &up->clipcount,
+ sizeof(up->clipcount)))
return -EFAULT;
- if (kp->clipcount > 2048)
+ if (get_user(clipcount, &kp->clipcount))
+ return -EFAULT;
+ if (clipcount > 2048)
return -EINVAL;
- if (kp->clipcount) {
+ if (clipcount) {
struct v4l2_clip32 __user *uclips;
struct v4l2_clip __user *kclips;
- int n = kp->clipcount;
+ int n = clipcount;
compat_caddr_t p;
if (get_user(p, &up->clips))
return -EFAULT;
uclips = compat_ptr(p);
kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
- kp->clips = kclips;
+ if (put_user(kclips, &kp->clips))
+ return -EFAULT;
while (--n >= 0) {
if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
return -EFAULT;
@@ -74,89 +83,106 @@
uclips += 1;
kclips += 1;
}
- } else
- kp->clips = NULL;
- return 0;
-}
-
-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
-{
- if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
- put_user(kp->field, &up->field) ||
- put_user(kp->chromakey, &up->chromakey) ||
- put_user(kp->clipcount, &up->clipcount))
+ } else {
+ if (put_user(NULL, &kp->clips))
return -EFAULT;
+ }
return 0;
}
-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+static int put_v4l2_window32(struct v4l2_window __user *kp,
+ struct v4l2_window32 __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+ if (copy_in_user(&up->w, &kp->w, sizeof(up->w)) ||
+ copy_in_user(&up->field, &kp->field, sizeof(up->field)) ||
+ copy_in_user(&up->chromakey, &kp->chromakey,
+ sizeof(up->chromakey)) ||
+ copy_in_user(&up->clipcount, &kp->clipcount,
+ sizeof(up->clipcount)))
return -EFAULT;
return 0;
}
-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+static inline int get_v4l2_pix_format(struct v4l2_pix_format __user *kp,
+ struct v4l2_pix_format __user *up)
+{
+ if (copy_in_user(kp, up, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_pix_format_mplane(
+ struct v4l2_pix_format_mplane __user *kp,
struct v4l2_pix_format_mplane __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
+ if (copy_in_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+static inline int put_v4l2_pix_format(struct v4l2_pix_format __user *kp,
+ struct v4l2_pix_format __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ if (copy_in_user(up, kp, sizeof(struct v4l2_pix_format)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+static inline int put_v4l2_pix_format_mplane(
+ struct v4l2_pix_format_mplane __user *kp,
struct v4l2_pix_format_mplane __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
+ if (copy_in_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
return -EFAULT;
return 0;
}
-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static inline int get_v4l2_vbi_format(struct v4l2_vbi_format __user *kp,
+ struct v4l2_vbi_format __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
+ if (copy_in_user(kp, up, sizeof(struct v4l2_vbi_format)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+static inline int put_v4l2_vbi_format(struct v4l2_vbi_format __user *kp,
+ struct v4l2_vbi_format __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+ if (copy_in_user(up, kp, sizeof(struct v4l2_vbi_format)))
return -EFAULT;
return 0;
}
-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static inline int get_v4l2_sliced_vbi_format(
+ struct v4l2_sliced_vbi_format __user *kp,
+ struct v4l2_sliced_vbi_format __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
+ if (copy_in_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+static inline int put_v4l2_sliced_vbi_format(
+ struct v4l2_sliced_vbi_format __user *kp,
+ struct v4l2_sliced_vbi_format __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
+ if (copy_in_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
return -EFAULT;
return 0;
}
-static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+static inline int get_v4l2_sdr_format(struct v4l2_sdr_format __user *kp,
+ struct v4l2_sdr_format __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format)))
+ if (copy_in_user(kp, up, sizeof(struct v4l2_sdr_format)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up)
+static inline int put_v4l2_sdr_format(struct v4l2_sdr_format __user *kp,
+ struct v4l2_sdr_format __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format)))
+ if (copy_in_user(up, kp, sizeof(struct v4l2_sdr_format)))
return -EFAULT;
return 0;
}
@@ -191,12 +217,17 @@
__u32 reserved[8];
};
-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __get_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up)
{
- if (get_user(kp->type, &up->type))
+ u32 type;
+
+ if (copy_in_user(&kp->type, &up->type, sizeof(up->type)))
return -EFAULT;
- switch (kp->type) {
+ if (get_user(type, &kp->type))
+ return -EFAULT;
+ switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
@@ -223,27 +254,39 @@
}
}
-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int get_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+ !access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_format)))
return -EFAULT;
return __get_v4l2_format32(kp, up);
}
-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
+ struct v4l2_create_buffers32 __user *up)
{
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+ !access_ok(VERIFY_WRITE, kp,
+ sizeof(struct v4l2_create_buffers)) ||
+ copy_in_user(kp, up,
+ offsetof(struct v4l2_create_buffers32, format)))
return -EFAULT;
return __get_v4l2_format32(&kp->format, &up->format);
}
-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int __put_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up)
{
- if (put_user(kp->type, &up->type))
+ u32 type;
+
+ if (copy_in_user(&up->type, &kp->type, sizeof(up->type)))
return -EFAULT;
- switch (kp->type) {
+ if (get_user(type, &kp->type))
+ return -EFAULT;
+
+ switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
@@ -270,18 +313,24 @@
}
}
-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+static int put_v4l2_format32(struct v4l2_format __user *kp,
+ struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
+ !access_ok(VERIFY_READ, kp, sizeof(struct v4l2_format)))
return -EFAULT;
return __put_v4l2_format32(kp, up);
}
-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
+ struct v4l2_create_buffers32 __user *up)
{
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+ !access_ok(VERIFY_READ, kp,
+ sizeof(struct v4l2_create_buffers)) ||
+ copy_in_user(up, kp,
+ offsetof(struct v4l2_create_buffers32, format)) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
return -EFAULT;
return __put_v4l2_format32(&kp->format, &up->format);
}
@@ -295,24 +344,30 @@
__u32 reserved[4];
};
-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int get_v4l2_standard32(struct v4l2_standard __user *kp,
+ struct v4l2_standard32 __user *up)
{
/* other fields are not set by the user, nor used by the driver */
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
- get_user(kp->index, &up->index))
+ !access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_standard)) ||
+ copy_in_user(&kp->index, &up->index, sizeof(up->index)))
return -EFAULT;
return 0;
}
-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+static int put_v4l2_standard32(struct v4l2_standard __user *kp,
+ struct v4l2_standard32 __user *up)
{
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
- put_user(kp->index, &up->index) ||
- put_user(kp->id, &up->id) ||
- copy_to_user(up->name, kp->name, 24) ||
- copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
- put_user(kp->framelines, &up->framelines) ||
- copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
+ !access_ok(VERIFY_READ, kp, sizeof(struct v4l2_standard)) ||
+ copy_in_user(&up->index, &kp->index, sizeof(up->index)) ||
+ copy_in_user(&up->id, &kp->id, sizeof(up->id)) ||
+ copy_in_user(up->name, kp->name, 24) ||
+ copy_in_user(&up->frameperiod, &kp->frameperiod,
+ sizeof(up->frameperiod)) ||
+ copy_in_user(&up->framelines, &kp->framelines,
+ sizeof(up->framelines)) ||
+ copy_in_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
return -EFAULT;
return 0;
}
@@ -360,6 +415,10 @@
if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
copy_in_user(&up->data_offset, &up32->data_offset,
+ sizeof(__u32)) ||
+ copy_in_user(up->reserved, up32->reserved,
+ sizeof(up->reserved)) ||
+ copy_in_user(&up->length, &up32->length,
sizeof(__u32)))
return -EFAULT;
@@ -386,7 +445,9 @@
{
if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
copy_in_user(&up32->data_offset, &up->data_offset,
- sizeof(__u32)))
+ sizeof(__u32)) ||
+ copy_in_user(up32->reserved, up->reserved,
+ sizeof(up32->reserved)))
return -EFAULT;
/* For MMAP, driver might've set up the offset, so copy it back.
@@ -404,34 +465,48 @@
return 0;
}
-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
+ struct v4l2_buffer32 __user *up)
{
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
int num_planes;
+ struct timeval time;
+ u32 plane_count, memory, type;
int ret;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
- get_user(kp->index, &up->index) ||
- get_user(kp->type, &up->type) ||
- get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory) ||
- get_user(kp->length, &up->length))
+ !access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_buffer)) ||
+ copy_in_user(&kp->index, &up->index, sizeof(up->index)) ||
+ copy_in_user(&kp->type, &up->type, sizeof(up->type)) ||
+ copy_in_user(&kp->flags, &up->flags, sizeof(up->flags)) ||
+ copy_in_user(&kp->memory, &up->memory, sizeof(up->memory)) ||
+ copy_in_user(&kp->length, &up->length, sizeof(up->length)))
return -EFAULT;
- if (V4L2_TYPE_IS_OUTPUT(kp->type))
- if (get_user(kp->bytesused, &up->bytesused) ||
- get_user(kp->field, &up->field) ||
- get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- get_user(kp->timestamp.tv_usec,
- &up->timestamp.tv_usec))
+ if (get_user(type, &kp->type))
+ return -EFAULT;
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ if (copy_in_user(&kp->bytesused, &up->bytesused,
+ sizeof(up->bytesused)) ||
+ copy_in_user(&kp->field, &up->field,
+ sizeof(up->field)) ||
+ get_user(time.tv_sec, &up->timestamp.tv_sec) ||
+ get_user(time.tv_usec, &up->timestamp.tv_usec) ||
+ put_user(time.tv_sec, &kp->timestamp.tv_sec) ||
+ put_user(time.tv_usec, &kp->timestamp.tv_usec))
return -EFAULT;
- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
+ if (get_user(memory, &kp->memory))
+ return -EFAULT;
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ if (get_user(plane_count, &kp->length))
+ return -EFAULT;
+ num_planes = plane_count;
if (num_planes == 0) {
- kp->m.planes = NULL;
+ if (put_user(NULL, &kp->m.planes))
+ return -EFAULT;
/* num_planes == 0 is legal, e.g. when userspace doesn't
* need planes array on DQBUF*/
return 0;
@@ -449,37 +524,43 @@
* by passing a very big num_planes value */
uplane = compat_alloc_user_space(num_planes *
sizeof(struct v4l2_plane));
- kp->m.planes = (__force struct v4l2_plane *)uplane;
+ if (put_user(uplane, &kp->m.planes))
+ return -EFAULT;
while (--num_planes >= 0) {
- ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
+ ret = get_v4l2_plane32(uplane, uplane32, memory);
if (ret)
return ret;
++uplane;
++uplane32;
}
} else {
- switch (kp->memory) {
+ switch (memory) {
case V4L2_MEMORY_MMAP:
- if (get_user(kp->m.offset, &up->m.offset))
+ if (copy_in_user(&kp->m.offset, &up->m.offset,
+ sizeof(up->m.offset)))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
{
compat_long_t tmp;
+ unsigned long userptr;
if (get_user(tmp, &up->m.userptr))
return -EFAULT;
- kp->m.userptr = (unsigned long)compat_ptr(tmp);
+ userptr = (unsigned long)compat_ptr(tmp);
+ if (put_user(userptr, &kp->m.userptr))
+ return -EFAULT;
}
break;
case V4L2_MEMORY_OVERLAY:
- if (get_user(kp->m.offset, &up->m.offset))
+ if (copy_in_user(&kp->m.offset, &up->m.offset,
+ sizeof(up->m.offset)))
return -EFAULT;
break;
case V4L2_MEMORY_DMABUF:
- if (get_user(kp->m.fd, &up->m.fd))
+ if (copy_in_user(&kp->m.fd, &up->m.fd,
+ sizeof(up->m.fd)))
return -EFAULT;
break;
}
@@ -488,65 +569,86 @@
return 0;
}
-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
+ struct v4l2_buffer32 __user *up)
{
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
compat_caddr_t p;
int num_planes;
int ret;
+ struct timeval time;
+ u32 memory, type, length;
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
- put_user(kp->index, &up->index) ||
- put_user(kp->type, &up->type) ||
- put_user(kp->flags, &up->flags) ||
- put_user(kp->memory, &up->memory))
- return -EFAULT;
+ !access_ok(VERIFY_READ, kp, sizeof(struct v4l2_buffer)) ||
+ copy_in_user(&up->index, &kp->index, sizeof(up->index)) ||
+ copy_in_user(&up->type, &kp->type, sizeof(up->type)) ||
+ copy_in_user(&up->flags, &kp->flags, sizeof(up->flags)) ||
+ copy_in_user(&up->memory, &kp->memory, sizeof(up->memory)))
+ return -EFAULT;
- if (put_user(kp->bytesused, &up->bytesused) ||
- put_user(kp->field, &up->field) ||
- put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
- put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
- copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
- put_user(kp->sequence, &up->sequence) ||
- put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved) ||
- put_user(kp->length, &up->length))
- return -EFAULT;
+ if (copy_in_user(&up->bytesused, &kp->bytesused,
+ sizeof(up->bytesused)) ||
+ copy_in_user(&up->field, &kp->field, sizeof(up->field)) ||
+ get_user(time.tv_sec, &kp->timestamp.tv_sec) ||
+ get_user(time.tv_usec, &kp->timestamp.tv_usec) ||
+ put_user(time.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(time.tv_usec, &up->timestamp.tv_usec) ||
+ copy_in_user(&up->timecode, &kp->timecode,
+ sizeof(struct v4l2_timecode)) ||
+ copy_in_user(&up->sequence, &kp->sequence,
+ sizeof(up->sequence)) ||
+ copy_in_user(&up->reserved2, &kp->reserved2,
+ sizeof(up->reserved2)) ||
+ copy_in_user(&up->reserved, &kp->reserved,
+ sizeof(up->reserved)) ||
+ copy_in_user(&up->length, &kp->length, sizeof(up->length)))
+ return -EFAULT;
- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- num_planes = kp->length;
+ if (get_user(type, &kp->type) ||
+ get_user(memory, &kp->memory) ||
+ get_user(length, &kp->length))
+ return -EFAULT;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ num_planes = length;
if (num_planes == 0)
return 0;
- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
+ if (get_user(uplane, &kp->m.planes))
+ return -EFAULT;
if (get_user(p, &up->m.planes))
return -EFAULT;
uplane32 = compat_ptr(p);
while (--num_planes >= 0) {
- ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
+ ret = put_v4l2_plane32(uplane, uplane32, memory);
if (ret)
return ret;
++uplane;
++uplane32;
}
} else {
- switch (kp->memory) {
+ switch (memory) {
case V4L2_MEMORY_MMAP:
- if (put_user(kp->m.offset, &up->m.offset))
+ if (copy_in_user(&up->m.offset, &kp->m.offset,
+ sizeof(up->m.offset)))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
- if (put_user(kp->m.userptr, &up->m.userptr))
+ if (copy_in_user(&up->m.userptr, &kp->m.userptr,
+ sizeof(up->m.userptr)))
return -EFAULT;
break;
case V4L2_MEMORY_OVERLAY:
- if (put_user(kp->m.offset, &up->m.offset))
+ if (copy_in_user(&up->m.offset, &kp->m.offset,
+ sizeof(up->m.offset)))
return -EFAULT;
break;
case V4L2_MEMORY_DMABUF:
- if (put_user(kp->m.fd, &up->m.fd))
+ if (copy_in_user(&up->m.fd, &kp->m.fd,
+ sizeof(up->m.fd)))
return -EFAULT;
break;
}
@@ -571,29 +673,39 @@
} fmt;
};
-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
+ struct v4l2_framebuffer32 __user *up)
{
u32 tmp;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
+ !access_ok(VERIFY_WRITE, kp,
+ sizeof(struct v4l2_framebuffer)) ||
get_user(tmp, &up->base) ||
- get_user(kp->capability, &up->capability) ||
- get_user(kp->flags, &up->flags) ||
- copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
+ put_user(compat_ptr(tmp), &kp->base) ||
+ copy_in_user(&kp->capability, &up->capability,
+ sizeof(up->capability)) ||
+ copy_in_user(&kp->flags, &up->flags, sizeof(up->flags)) ||
+ copy_in_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
return -EFAULT;
- kp->base = (__force void *)compat_ptr(tmp);
+
return 0;
}
-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
+ struct v4l2_framebuffer32 __user *up)
{
- u32 tmp = (u32)((unsigned long)kp->base);
+ unsigned long base;
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
- put_user(tmp, &up->base) ||
- put_user(kp->capability, &up->capability) ||
- put_user(kp->flags, &up->flags) ||
- copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
+ !access_ok(VERIFY_READ, kp,
+ sizeof(struct v4l2_framebuffer)) ||
+ copy_from_user(&base, &kp->base, sizeof(base)) ||
+ put_user((u32)base, &up->base) ||
+ copy_in_user(&up->capability, &kp->capability,
+ sizeof(up->capability)) ||
+ copy_in_user(&up->flags, &kp->flags, sizeof(up->flags)) ||
+ copy_in_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
return -EFAULT;
return 0;
}
@@ -611,16 +723,18 @@
/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
Otherwise it is identical to the 32-bit version. */
-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int get_v4l2_input32(struct v4l2_input __user *kp,
+ struct v4l2_input32 __user *up)
{
- if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
+ if (copy_in_user(kp, up, sizeof(struct v4l2_input32)))
return -EFAULT;
return 0;
}
-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
+static inline int put_v4l2_input32(struct v4l2_input __user *kp,
+ struct v4l2_input32 __user *up)
{
- if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
+ if (copy_in_user(up, kp, sizeof(struct v4l2_input32)))
return -EFAULT;
return 0;
}
@@ -661,23 +775,33 @@
}
}
-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int get_v4l2_ext_controls32(struct v4l2_ext_controls __user *kp,
+ struct v4l2_ext_controls32 __user *up)
{
struct v4l2_ext_control32 __user *ucontrols;
struct v4l2_ext_control __user *kcontrols;
int n;
compat_caddr_t p;
+ u32 count;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
- get_user(kp->which, &up->which) ||
- get_user(kp->count, &up->count) ||
- get_user(kp->error_idx, &up->error_idx) ||
- copy_from_user(kp->reserved, up->reserved,
- sizeof(kp->reserved)))
+ !access_ok(VERIFY_WRITE, kp,
+ sizeof(struct v4l2_ext_controls)) ||
+ copy_in_user(&kp->which, &up->which,
+ sizeof(up->which)) ||
+ copy_in_user(&kp->count, &up->count, sizeof(up->count)) ||
+ copy_in_user(&kp->error_idx, &up->error_idx,
+ sizeof(up->error_idx)) ||
+ copy_in_user(kp->reserved, up->reserved,
+ sizeof(up->reserved)))
return -EFAULT;
- n = kp->count;
+
+ if (get_user(count, &kp->count))
+ return -EFAULT;
+ n = count;
if (n == 0) {
- kp->controls = NULL;
+ if (put_user(NULL, &kp->controls))
+ return -EFAULT;
return 0;
}
if (get_user(p, &up->controls))
@@ -687,7 +811,9 @@
n * sizeof(struct v4l2_ext_control32)))
return -EFAULT;
kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
+ if (put_user(kcontrols, &kp->controls))
+ return -EFAULT;
+
while (--n >= 0) {
u32 id;
@@ -710,23 +836,33 @@
return 0;
}
-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
+static int put_v4l2_ext_controls32(struct v4l2_ext_controls __user *kp,
+ struct v4l2_ext_controls32 __user *up)
{
struct v4l2_ext_control32 __user *ucontrols;
- struct v4l2_ext_control __user *kcontrols =
- (__force struct v4l2_ext_control __user *)kp->controls;
- int n = kp->count;
+ struct v4l2_ext_control __user *kcontrols;
+ int n;
+ u32 count;
compat_caddr_t p;
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
- put_user(kp->which, &up->which) ||
- put_user(kp->count, &up->count) ||
- put_user(kp->error_idx, &up->error_idx) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ !access_ok(VERIFY_READ, kp,
+ sizeof(struct v4l2_ext_controls)) ||
+ copy_in_user(&up->which, &kp->which,
+ sizeof(up->which)) ||
+ copy_in_user(&up->count, &kp->count,
+ sizeof(up->count)) ||
+ copy_in_user(&up->error_idx, &kp->error_idx,
+ sizeof(up->error_idx)) ||
+ copy_in_user(up->reserved, kp->reserved,
+ sizeof(up->reserved)) ||
+ get_user(count, &kp->count) ||
+ get_user(kcontrols, &kp->controls))
return -EFAULT;
- if (!kp->count)
+ if (!count)
return 0;
+ n = count;
if (get_user(p, &up->controls))
return -EFAULT;
ucontrols = compat_ptr(p);
@@ -766,16 +902,22 @@
__u32 reserved[8];
};
-static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+static int put_v4l2_event32(struct v4l2_event __user *kp,
+ struct v4l2_event32 __user *up)
{
+ struct timespec ts;
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
- put_user(kp->type, &up->type) ||
- copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
- put_user(kp->pending, &up->pending) ||
- put_user(kp->sequence, &up->sequence) ||
- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
- put_user(kp->id, &up->id) ||
- copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ !access_ok(VERIFY_READ, kp, sizeof(struct v4l2_event)) ||
+ copy_in_user(&up->type, &kp->type, sizeof(up->type)) ||
+ copy_in_user(&up->u, &kp->u, sizeof(up->u)) ||
+ copy_in_user(&up->pending, &kp->pending,
+ sizeof(up->pending)) ||
+ copy_in_user(&up->sequence, &kp->sequence,
+ sizeof(up->sequence)) ||
+ copy_from_user(&ts, &kp->timestamp, sizeof(ts)) ||
+ compat_put_timespec(&ts, &up->timestamp) ||
+ copy_in_user(&up->id, &kp->id, sizeof(up->id)) ||
+ copy_in_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
return 0;
}
@@ -788,31 +930,39 @@
compat_caddr_t edid;
};
-static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int get_v4l2_edid32(struct v4l2_edid __user *kp,
+ struct v4l2_edid32 __user *up)
{
u32 tmp;
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
- get_user(kp->pad, &up->pad) ||
- get_user(kp->start_block, &up->start_block) ||
- get_user(kp->blocks, &up->blocks) ||
+ !access_ok(VERIFY_WRITE, kp, sizeof(struct v4l2_edid)) ||
+ copy_in_user(&kp->pad, &up->pad, sizeof(up->pad)) ||
+ copy_in_user(&kp->start_block, &up->start_block,
+ sizeof(up->start_block)) ||
+ copy_in_user(&kp->blocks, &up->blocks, sizeof(up->blocks)) ||
get_user(tmp, &up->edid) ||
- copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ put_user(compat_ptr(tmp), &kp->edid) ||
+ copy_in_user(kp->reserved, up->reserved,
+ sizeof(kp->reserved)))
return -EFAULT;
- kp->edid = (__force u8 *)compat_ptr(tmp);
return 0;
}
-static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+static int put_v4l2_edid32(struct v4l2_edid __user *kp,
+ struct v4l2_edid32 __user *up)
{
- u32 tmp = (u32)((unsigned long)kp->edid);
+ unsigned long ptr;
if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
- put_user(kp->pad, &up->pad) ||
- put_user(kp->start_block, &up->start_block) ||
- put_user(kp->blocks, &up->blocks) ||
- put_user(tmp, &up->edid) ||
- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+ !access_ok(VERIFY_READ, kp, sizeof(struct v4l2_edid)) ||
+ copy_in_user(&up->pad, &kp->pad, sizeof(up->pad)) ||
+ copy_in_user(&up->start_block, &kp->start_block,
+ sizeof(up->start_block)) ||
+ copy_in_user(&up->blocks, &kp->blocks, sizeof(up->blocks)) ||
+ copy_from_user(&ptr, &kp->edid, sizeof(ptr)) ||
+ put_user((u32)ptr, &up->edid) ||
+ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
return -EFAULT;
return 0;
}
@@ -859,11 +1009,16 @@
struct v4l2_edid v2edid;
unsigned long vx;
int vi;
- } karg;
+ } *karg;
void __user *up = compat_ptr(arg);
int compatible_arg = 1;
long err = 0;
+ karg = compat_alloc_user_space(sizeof(*karg));
+ if (!karg)
+ return -EFAULT;
+
/* First, convert the command. */
switch (cmd) {
case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
@@ -899,7 +1054,8 @@
case VIDIOC_STREAMOFF:
case VIDIOC_S_INPUT:
case VIDIOC_S_OUTPUT:
- err = get_user(karg.vi, (s32 __user *)up);
+ err = copy_in_user(&karg->vi, (s32 __user *)up,
+ sizeof(karg->vi));
compatible_arg = 0;
break;
@@ -910,19 +1066,19 @@
case VIDIOC_G_EDID:
case VIDIOC_S_EDID:
- err = get_v4l2_edid32(&karg.v2edid, up);
+ err = get_v4l2_edid32(&karg->v2edid, up);
compatible_arg = 0;
break;
case VIDIOC_G_FMT:
case VIDIOC_S_FMT:
case VIDIOC_TRY_FMT:
- err = get_v4l2_format32(&karg.v2f, up);
+ err = get_v4l2_format32(&karg->v2f, up);
compatible_arg = 0;
break;
case VIDIOC_CREATE_BUFS:
- err = get_v4l2_create32(&karg.v2crt, up);
+ err = get_v4l2_create32(&karg->v2crt, up);
compatible_arg = 0;
break;
@@ -930,12 +1086,12 @@
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF:
- err = get_v4l2_buffer32(&karg.v2b, up);
+ err = get_v4l2_buffer32(&karg->v2b, up);
compatible_arg = 0;
break;
case VIDIOC_S_FBUF:
- err = get_v4l2_framebuffer32(&karg.v2fb, up);
+ err = get_v4l2_framebuffer32(&karg->v2fb, up);
compatible_arg = 0;
break;
@@ -944,19 +1100,19 @@
break;
case VIDIOC_ENUMSTD:
- err = get_v4l2_standard32(&karg.v2s, up);
+ err = get_v4l2_standard32(&karg->v2s, up);
compatible_arg = 0;
break;
case VIDIOC_ENUMINPUT:
- err = get_v4l2_input32(&karg.v2i, up);
+ err = get_v4l2_input32(&karg->v2i, up);
compatible_arg = 0;
break;
case VIDIOC_G_EXT_CTRLS:
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_TRY_EXT_CTRLS:
- err = get_v4l2_ext_controls32(&karg.v2ecs, up);
+ err = get_v4l2_ext_controls32(&karg->v2ecs, up);
compatible_arg = 0;
break;
case VIDIOC_DQEVENT:
@@ -969,11 +1125,7 @@
if (compatible_arg)
err = native_ioctl(file, cmd, (unsigned long)up);
else {
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- err = native_ioctl(file, cmd, (unsigned long)&karg);
- set_fs(old_fs);
+ err = native_ioctl(file, cmd, (unsigned long)karg);
}
/* Special case: even after an error we need to put the
@@ -983,7 +1135,7 @@
case VIDIOC_G_EXT_CTRLS:
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_TRY_EXT_CTRLS:
- if (put_v4l2_ext_controls32(&karg.v2ecs, up))
+ if (put_v4l2_ext_controls32(&karg->v2ecs, up))
err = -EFAULT;
break;
}
@@ -995,44 +1147,44 @@
case VIDIOC_S_OUTPUT:
case VIDIOC_G_INPUT:
case VIDIOC_G_OUTPUT:
- err = put_user(((s32)karg.vi), (s32 __user *)up);
+ err = copy_in_user(up, &karg->vi, sizeof(s32));
break;
case VIDIOC_G_FBUF:
- err = put_v4l2_framebuffer32(&karg.v2fb, up);
+ err = put_v4l2_framebuffer32(&karg->v2fb, up);
break;
case VIDIOC_DQEVENT:
- err = put_v4l2_event32(&karg.v2ev, up);
+ err = put_v4l2_event32(&karg->v2ev, up);
break;
case VIDIOC_G_EDID:
case VIDIOC_S_EDID:
- err = put_v4l2_edid32(&karg.v2edid, up);
+ err = put_v4l2_edid32(&karg->v2edid, up);
break;
case VIDIOC_G_FMT:
case VIDIOC_S_FMT:
case VIDIOC_TRY_FMT:
- err = put_v4l2_format32(&karg.v2f, up);
+ err = put_v4l2_format32(&karg->v2f, up);
break;
case VIDIOC_CREATE_BUFS:
- err = put_v4l2_create32(&karg.v2crt, up);
+ err = put_v4l2_create32(&karg->v2crt, up);
break;
case VIDIOC_QUERYBUF:
case VIDIOC_QBUF:
case VIDIOC_DQBUF:
- err = put_v4l2_buffer32(&karg.v2b, up);
+ err = put_v4l2_buffer32(&karg->v2b, up);
break;
case VIDIOC_ENUMSTD:
- err = put_v4l2_standard32(&karg.v2s, up);
+ err = put_v4l2_standard32(&karg->v2s, up);
break;
case VIDIOC_ENUMINPUT:
- err = put_v4l2_input32(&karg.v2i, up);
+ err = put_v4l2_input32(&karg->v2i, up);
break;
}
return err;
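
[Editor's note: the conversion above retires the old set_fs(KERNEL_DS) trick,
which made the kernel-stack 'karg' pass the user-pointer checks and was
exploitable through crafted 32-bit ioctls. Instead, 'karg' now points into
compat_alloc_user_space(), a scratch area on the *user* stack, so every helper
treats both sides as __user memory and moves fields with
copy_in_user()/get_user()/put_user(). The shape of the pattern, sketched with
illustrative struct names:

	struct req32 __user *up32 = compat_ptr(arg);	/* 32-bit layout */
	struct req __user *kp =
		compat_alloc_user_space(sizeof(*kp));	/* native layout */

	if (copy_in_user(&kp->field, &up32->field, sizeof(kp->field)))
		return -EFAULT;		/* user-to-user copy, fully checked */
	return native_ioctl(file, cmd, (unsigned long)kp);
]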
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index adc2147..0898414 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -337,6 +337,7 @@
"4.2",
"5",
"5.1",
+ "5.2",
NULL,
};
static const char * const h264_loop_filter[] = {
@@ -363,6 +364,7 @@
"Scalable High Intra",
"Stereo High",
"Multiview High",
+ "Constrained High",
NULL,
};
static const char * const vui_sar_idc[] = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9c9d130..0ac1cf7 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -479,6 +479,14 @@
the genalloc API. It is supposed to be used for small on-chip SRAM
areas found on many SoCs.
+config QSEECOM
+ tristate "QTI Secure Execution Communicator driver"
+ help
+ Provides a communication interface between userspace and
+ QTI Secure Execution Environment (QSEE) using Secure Channel
+ Manager (SCM) interface. It exposes APIs for both userspace and
+ kernel clients.
+
config VEXPRESS_SYSCFG
bool "Versatile Express System Configuration driver"
depends on VEXPRESS_CONFIG
@@ -780,6 +788,15 @@
help
Memory time statistics exported to /sys/kernel/memory_state_time
+config QPNP_MISC
+ tristate "QPNP Misc Peripheral"
+ depends on MFD_SPMI_PMIC
+ help
+ Say 'y' here to include support for the QTI QPNP MISC
+ peripheral. The MISC peripheral holds the USB ID interrupt
+ and the driver provides an API to check if this interrupt
+ is available on the current PMIC chip.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 19f9e1d..e1c6ae1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,10 +49,12 @@
obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
+obj-$(CONFIG_QSEECOM) += qseecom.o
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
+obj-$(CONFIG_QPNP_MISC) += qpnp-misc.o
obj-y += qcom/
obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
diff --git a/drivers/misc/compat_qseecom.c b/drivers/misc/compat_qseecom.c
new file mode 100644
index 0000000..96d200f
--- /dev/null
+++ b/drivers/misc/compat_qseecom.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/qseecom.h>
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+static int compat_get_qseecom_register_listener_req(
+ struct compat_qseecom_register_listener_req __user *data32,
+ struct qseecom_register_listener_req __user *data)
+{
+ int err;
+ compat_ulong_t listener_id;
+ compat_long_t ifd_data_fd;
+ compat_uptr_t virt_sb_base;
+ compat_ulong_t sb_size;
+
+ err = get_user(listener_id, &data32->listener_id);
+ err |= put_user(listener_id, &data->listener_id);
+ err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+ err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+
+ err |= get_user(virt_sb_base, &data32->virt_sb_base);
+ /* upper bits won't get set, zero them */
+ err |= put_user(NULL, &data->virt_sb_base);
+ err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+
+ err |= get_user(sb_size, &data32->sb_size);
+ err |= put_user(sb_size, &data->sb_size);
+ return err;
+}
+
+static int compat_get_qseecom_load_img_req(
+ struct compat_qseecom_load_img_req __user *data32,
+ struct qseecom_load_img_req __user *data)
+{
+ int err;
+ compat_ulong_t mdt_len;
+ compat_ulong_t img_len;
+ compat_long_t ifd_data_fd;
+ compat_ulong_t app_arch;
+ compat_uint_t app_id;
+
+ err = get_user(mdt_len, &data32->mdt_len);
+ err |= put_user(mdt_len, &data->mdt_len);
+ err |= get_user(img_len, &data32->img_len);
+ err |= put_user(img_len, &data->img_len);
+ err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+ err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+ err |= copy_in_user(data->img_name, data32->img_name,
+ MAX_APP_NAME_SIZE);
+ err |= get_user(app_arch, &data32->app_arch);
+ err |= put_user(app_arch, &data->app_arch);
+ err |= get_user(app_id, &data32->app_id);
+ err |= put_user(app_id, &data->app_id);
+ return err;
+}
+
+static int compat_get_qseecom_send_cmd_req(
+ struct compat_qseecom_send_cmd_req __user *data32,
+ struct qseecom_send_cmd_req __user *data)
+{
+ int err;
+ compat_uptr_t cmd_req_buf;
+ compat_uint_t cmd_req_len;
+ compat_uptr_t resp_buf;
+ compat_uint_t resp_len;
+
+ err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+ err |= put_user(NULL, &data->cmd_req_buf);
+ err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+ err |= get_user(cmd_req_len, &data32->cmd_req_len);
+ err |= put_user(cmd_req_len, &data->cmd_req_len);
+
+ err |= get_user(resp_buf, &data32->resp_buf);
+ err |= put_user(NULL, &data->resp_buf);
+ err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+ err |= get_user(resp_len, &data32->resp_len);
+ err |= put_user(resp_len, &data->resp_len);
+ return err;
+}
+
+static int compat_get_qseecom_send_modfd_cmd_req(
+ struct compat_qseecom_send_modfd_cmd_req __user *data32,
+ struct qseecom_send_modfd_cmd_req __user *data)
+{
+ int err;
+ unsigned int i;
+ compat_uptr_t cmd_req_buf;
+ compat_uint_t cmd_req_len;
+ compat_uptr_t resp_buf;
+ compat_uint_t resp_len;
+ compat_long_t fd;
+ compat_ulong_t cmd_buf_offset;
+
+ err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+ err |= put_user(NULL, &data->cmd_req_buf);
+ err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+ err |= get_user(cmd_req_len, &data32->cmd_req_len);
+ err |= put_user(cmd_req_len, &data->cmd_req_len);
+ err |= get_user(resp_buf, &data32->resp_buf);
+ err |= put_user(NULL, &data->resp_buf);
+ err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+ err |= get_user(resp_len, &data32->resp_len);
+ err |= put_user(resp_len, &data->resp_len);
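+ /* copy the ion fd / command buffer offset pair for each buffer */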
+ for (i = 0; i < MAX_ION_FD; i++) {
+ err |= get_user(fd, &data32->ifd_data[i].fd);
+ err |= put_user(fd, &data->ifd_data[i].fd);
+ err |= get_user(cmd_buf_offset,
+ &data32->ifd_data[i].cmd_buf_offset);
+ err |= put_user(cmd_buf_offset,
+ &data->ifd_data[i].cmd_buf_offset);
+ }
+ return err;
+}
+
+static int compat_get_qseecom_set_sb_mem_param_req(
+ struct compat_qseecom_set_sb_mem_param_req __user *data32,
+ struct qseecom_set_sb_mem_param_req __user *data)
+{
+ int err;
+ compat_long_t ifd_data_fd;
+ compat_uptr_t virt_sb_base;
+ compat_ulong_t sb_len;
+
+ err = get_user(ifd_data_fd, &data32->ifd_data_fd);
+ err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+ err |= get_user(virt_sb_base, &data32->virt_sb_base);
+ err |= put_user(NULL, &data->virt_sb_base);
+ err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+ err |= get_user(sb_len, &data32->sb_len);
+ err |= put_user(sb_len, &data->sb_len);
+ return err;
+}
+
+static int compat_get_qseecom_qseos_version_req(
+ struct compat_qseecom_qseos_version_req __user *data32,
+ struct qseecom_qseos_version_req __user *data)
+{
+ int err;
+ compat_uint_t qseos_version;
+
+ err = get_user(qseos_version, &data32->qseos_version);
+ err |= put_user(qseos_version, &data->qseos_version);
+ return err;
+}
+
+static int compat_get_qseecom_qseos_app_load_query(
+ struct compat_qseecom_qseos_app_load_query __user *data32,
+ struct qseecom_qseos_app_load_query __user *data)
+{
+ int err = 0;
+ unsigned int i;
+ compat_uint_t app_id;
+ char app_name;
+ compat_ulong_t app_arch;
+
+ for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+ err |= get_user(app_name, &(data32->app_name[i]));
+ err |= put_user(app_name, &(data->app_name[i]));
+ }
+ err |= get_user(app_id, &data32->app_id);
+ err |= put_user(app_id, &data->app_id);
+ err |= get_user(app_arch, &data32->app_arch);
+ err |= put_user(app_arch, &data->app_arch);
+ return err;
+}
+
+static int compat_get_qseecom_send_svc_cmd_req(
+ struct compat_qseecom_send_svc_cmd_req __user *data32,
+ struct qseecom_send_svc_cmd_req __user *data)
+{
+ int err;
+ compat_ulong_t cmd_id;
+ compat_uptr_t cmd_req_buf;
+ compat_uint_t cmd_req_len;
+ compat_uptr_t resp_buf;
+ compat_uint_t resp_len;
+
+ err = get_user(cmd_id, &data32->cmd_id);
+ err |= put_user(cmd_id, &data->cmd_id);
+ err |= get_user(cmd_req_buf, &data32->cmd_req_buf);
+ err |= put_user(NULL, &data->cmd_req_buf);
+ err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+ err |= get_user(cmd_req_len, &data32->cmd_req_len);
+ err |= put_user(cmd_req_len, &data->cmd_req_len);
+ err |= get_user(resp_buf, &data32->resp_buf);
+ err |= put_user(NULL, &data->resp_buf);
+ err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+ err |= get_user(resp_len, &data32->resp_len);
+ err |= put_user(resp_len, &data->resp_len);
+ return err;
+}
+
+static int compat_get_qseecom_create_key_req(
+ struct compat_qseecom_create_key_req __user *data32,
+ struct qseecom_create_key_req __user *data)
+{
+ int err;
+ compat_uint_t usage;
+
+ err = copy_in_user(data->hash32, data32->hash32, QSEECOM_HASH_SIZE);
+ err |= get_user(usage, &data32->usage);
+ err |= put_user(usage, &data->usage);
+
+ return err;
+}
+
+static int compat_get_qseecom_wipe_key_req(
+ struct compat_qseecom_wipe_key_req __user *data32,
+ struct qseecom_wipe_key_req __user *data)
+{
+ int err;
+ compat_uint_t usage;
+ compat_int_t wipe_key_flag;
+
+ err = get_user(usage, &data32->usage);
+ err |= put_user(usage, &data->usage);
+ err |= get_user(wipe_key_flag, &data32->wipe_key_flag);
+ err |= put_user(wipe_key_flag, &data->wipe_key_flag);
+
+ return err;
+}
+
+static int compat_get_qseecom_update_key_userinfo_req(
+ struct compat_qseecom_update_key_userinfo_req __user *data32,
+ struct qseecom_update_key_userinfo_req __user *data)
+{
+ int err = 0;
+ compat_uint_t usage;
+
+ err = copy_in_user(data->current_hash32, data32->current_hash32,
+ QSEECOM_HASH_SIZE);
+ err |= copy_in_user(data->new_hash32, data32->new_hash32,
+ QSEECOM_HASH_SIZE);
+ err |= get_user(usage, &data32->usage);
+ err |= put_user(usage, &data->usage);
+
+ return err;
+}
+
+static int compat_get_qseecom_save_partition_hash_req(
+ struct compat_qseecom_save_partition_hash_req __user *data32,
+ struct qseecom_save_partition_hash_req __user *data)
+{
+ int err;
+ compat_int_t partition_id;
+
+ err = get_user(partition_id, &data32->partition_id);
+ err |= put_user(partition_id, &data->partition_id);
+ err |= copy_in_user(data->digest, data32->digest,
+ SHA256_DIGEST_LENGTH);
+ return err;
+}
+
+static int compat_get_qseecom_is_es_activated_req(
+ struct compat_qseecom_is_es_activated_req __user *data32,
+ struct qseecom_is_es_activated_req __user *data)
+{
+ compat_int_t is_activated;
+ int err;
+
+ err = get_user(is_activated, &data32->is_activated);
+ err |= put_user(is_activated, &data->is_activated);
+ return err;
+}
+
+static int compat_get_qseecom_mdtp_cipher_dip_req(
+ struct compat_qseecom_mdtp_cipher_dip_req __user *data32,
+ struct qseecom_mdtp_cipher_dip_req __user *data)
+{
+ int err;
+ compat_int_t in_buf_size;
+ compat_uptr_t in_buf;
+ compat_int_t out_buf_size;
+ compat_uptr_t out_buf;
+ compat_int_t direction;
+
+ err = get_user(in_buf_size, &data32->in_buf_size);
+ err |= put_user(in_buf_size, &data->in_buf_size);
+ err |= get_user(out_buf_size, &data32->out_buf_size);
+ err |= put_user(out_buf_size, &data->out_buf_size);
+ err |= get_user(direction, &data32->direction);
+ err |= put_user(direction, &data->direction);
+ err |= get_user(in_buf, &data32->in_buf);
+ err |= put_user(NULL, &data->in_buf);
+ err |= put_user(in_buf, (compat_uptr_t *)&data->in_buf);
+ err |= get_user(out_buf, &data32->out_buf);
+ err |= put_user(NULL, &data->out_buf);
+ err |= put_user(out_buf, (compat_uptr_t *)&data->out_buf);
+
+ return err;
+}
+
+static int compat_get_qseecom_send_modfd_listener_resp(
+ struct compat_qseecom_send_modfd_listener_resp __user *data32,
+ struct qseecom_send_modfd_listener_resp __user *data)
+{
+ int err;
+ unsigned int i;
+ compat_uptr_t resp_buf_ptr;
+ compat_uint_t resp_len;
+ compat_long_t fd;
+ compat_ulong_t cmd_buf_offset;
+
+ err = get_user(resp_buf_ptr, &data32->resp_buf_ptr);
+ err |= put_user(NULL, &data->resp_buf_ptr);
+ err |= put_user(resp_buf_ptr, (compat_uptr_t *)&data->resp_buf_ptr);
+ err |= get_user(resp_len, &data32->resp_len);
+ err |= put_user(resp_len, &data->resp_len);
+
+ for (i = 0; i < MAX_ION_FD; i++) {
+ err |= get_user(fd, &data32->ifd_data[i].fd);
+ err |= put_user(fd, &data->ifd_data[i].fd);
+ err |= get_user(cmd_buf_offset,
+ &data32->ifd_data[i].cmd_buf_offset);
+ err |= put_user(cmd_buf_offset,
+ &data->ifd_data[i].cmd_buf_offset);
+ }
+ return err;
+}
+
+
+static int compat_get_qseecom_qteec_req(
+ struct compat_qseecom_qteec_req __user *data32,
+ struct qseecom_qteec_req __user *data)
+{
+ compat_uptr_t req_ptr;
+ compat_ulong_t req_len;
+ compat_uptr_t resp_ptr;
+ compat_ulong_t resp_len;
+ int err;
+
+ err = get_user(req_ptr, &data32->req_ptr);
+ err |= put_user(NULL, &data->req_ptr);
+ err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+ err |= get_user(req_len, &data32->req_len);
+ err |= put_user(req_len, &data->req_len);
+
+ err |= get_user(resp_ptr, &data32->resp_ptr);
+ err |= put_user(NULL, &data->resp_ptr);
+ err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+ err |= get_user(resp_len, &data32->resp_len);
+ err |= put_user(resp_len, &data->resp_len);
+ return err;
+}
+
+static int compat_get_qseecom_qteec_modfd_req(
+ struct compat_qseecom_qteec_modfd_req __user *data32,
+ struct qseecom_qteec_modfd_req __user *data)
+{
+ compat_uptr_t req_ptr;
+ compat_ulong_t req_len;
+ compat_uptr_t resp_ptr;
+ compat_ulong_t resp_len;
+ compat_long_t fd;
+ compat_ulong_t cmd_buf_offset;
+ int err, i;
+
+ err = get_user(req_ptr, &data32->req_ptr);
+ err |= put_user(NULL, &data->req_ptr);
+ err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+ err |= get_user(req_len, &data32->req_len);
+ err |= put_user(req_len, &data->req_len);
+
+ err |= get_user(resp_ptr, &data32->resp_ptr);
+ err |= put_user(NULL, &data->resp_ptr);
+ err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+ err |= get_user(resp_len, &data32->resp_len);
+ err |= put_user(resp_len, &data->resp_len);
+
+ for (i = 0; i < MAX_ION_FD; i++) {
+ err |= get_user(fd, &data32->ifd_data[i].fd);
+ err |= put_user(fd, &data->ifd_data[i].fd);
+ err |= get_user(cmd_buf_offset,
+ &data32->ifd_data[i].cmd_buf_offset);
+ err |= put_user(cmd_buf_offset,
+ &data->ifd_data[i].cmd_buf_offset);
+ }
+ return err;
+}
+
+static int compat_get_int(compat_int_t __user *data32,
+ int __user *data)
+{
+ compat_int_t x;
+ int err;
+
+ err = get_user(x, data32);
+ err |= put_user(x, data);
+ return err;
+}
+
+static int compat_put_qseecom_load_img_req(
+ struct compat_qseecom_load_img_req __user *data32,
+ struct qseecom_load_img_req __user *data)
+{
+ int err;
+ compat_ulong_t mdt_len;
+ compat_ulong_t img_len;
+ compat_long_t ifd_data_fd;
+ compat_ulong_t app_arch;
+ compat_int_t app_id;
+
+ err = get_user(mdt_len, &data->mdt_len);
+ err |= put_user(mdt_len, &data32->mdt_len);
+ err |= get_user(img_len, &data->img_len);
+ err |= put_user(img_len, &data32->img_len);
+ err |= get_user(ifd_data_fd, &data->ifd_data_fd);
+ err |= put_user(ifd_data_fd, &data32->ifd_data_fd);
+ err |= copy_in_user(data32->img_name, data->img_name,
+ MAX_APP_NAME_SIZE);
+ err |= get_user(app_arch, &data->app_arch);
+ err |= put_user(app_arch, &data32->app_arch);
+ err |= get_user(app_id, &data->app_id);
+ err |= put_user(app_id, &data32->app_id);
+ return err;
+}
+
+static int compat_put_qseecom_qseos_version_req(
+ struct compat_qseecom_qseos_version_req __user *data32,
+ struct qseecom_qseos_version_req __user *data)
+{
+ compat_uint_t qseos_version;
+ int err;
+
+ err = get_user(qseos_version, &data->qseos_version);
+ err |= put_user(qseos_version, &data32->qseos_version);
+ return err;
+}
+
+static int compat_put_qseecom_qseos_app_load_query(
+ struct compat_qseecom_qseos_app_load_query __user *data32,
+ struct qseecom_qseos_app_load_query __user *data)
+{
+ int err = 0;
+ unsigned int i;
+ compat_int_t app_id;
+ compat_ulong_t app_arch;
+ char app_name;
+
+ for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+ err |= get_user(app_name, &(data->app_name[i]));
+ err |= put_user(app_name, &(data32->app_name[i]));
+ }
+ err |= get_user(app_id, &data->app_id);
+ err |= put_user(app_id, &data32->app_id);
+ err |= get_user(app_arch, &data->app_arch);
+ err |= put_user(app_arch, &data32->app_arch);
+
+ return err;
+}
+
+static int compat_put_qseecom_is_es_activated_req(
+ struct compat_qseecom_is_es_activated_req __user *data32,
+ struct qseecom_is_es_activated_req __user *data)
+{
+ compat_int_t is_activated;
+ int err;
+
+ err = get_user(is_activated, &data->is_activated);
+ err |= put_user(is_activated, &data32->is_activated);
+ return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+ switch (cmd) {
+ case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ:
+ return QSEECOM_IOCTL_REGISTER_LISTENER_REQ;
+ case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+ return QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ;
+ case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ:
+ return QSEECOM_IOCTL_LOAD_APP_REQ;
+ case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+ return QSEECOM_IOCTL_RECEIVE_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+ return QSEECOM_IOCTL_SEND_RESP_REQ;
+ case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+ return QSEECOM_IOCTL_UNLOAD_APP_REQ;
+ case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+ return QSEECOM_IOCTL_PERF_ENABLE_REQ;
+ case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+ return QSEECOM_IOCTL_PERF_DISABLE_REQ;
+ case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ:
+ return QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ;
+ case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ:
+ return QSEECOM_IOCTL_SET_BUS_SCALING_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ:
+ return QSEECOM_IOCTL_SEND_CMD_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+ return QSEECOM_IOCTL_SEND_MODFD_CMD_REQ;
+ case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ:
+ return QSEECOM_IOCTL_SET_MEM_PARAM_REQ;
+ case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ:
+ return QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ;
+ case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ:
+ return QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ;
+ case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ:
+ return QSEECOM_IOCTL_APP_LOADED_QUERY_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ:
+ return QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ;
+ case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ:
+ return QSEECOM_IOCTL_CREATE_KEY_REQ;
+ case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ:
+ return QSEECOM_IOCTL_WIPE_KEY_REQ;
+ case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ:
+ return QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ;
+ case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ:
+ return QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ;
+ case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ:
+ return QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+ return QSEECOM_IOCTL_SEND_MODFD_RESP;
+ case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+ return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ;
+ case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ:
+ return QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ;
+ case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+ return QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ;
+ case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ:
+ return QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ;
+ case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ:
+ return QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ:
+ return QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ;
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64:
+ return QSEECOM_IOCTL_SEND_MODFD_RESP_64;
+
+ default:
+ return cmd;
+ }
+}
+
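+/*
+ * 32-bit ioctl entry point. Arguments are repacked into a native-layout
+ * buffer carved out of the user stack via compat_alloc_user_space(), the
+ * native qseecom_ioctl() handler is invoked on it, and, for commands with
+ * output fields, the results are copied back into the caller's compat
+ * structure.
+ */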
+long compat_qseecom_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+
+ case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+ case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+ case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+ case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+ case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+ case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+ case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+ return qseecom_ioctl(file, convert_cmd(cmd), 0);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+ struct compat_qseecom_register_listener_req __user *data32;
+ struct qseecom_register_listener_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_register_listener_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: {
+ struct compat_qseecom_load_img_req __user *data32;
+ struct qseecom_load_img_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_load_img_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ err = compat_put_qseecom_load_img_req(data32, data);
+ return ret ? ret : err;
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: {
+ struct compat_qseecom_send_cmd_req __user *data32;
+ struct qseecom_send_cmd_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_send_cmd_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+ struct compat_qseecom_send_modfd_cmd_req __user *data32;
+ struct qseecom_send_modfd_cmd_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_send_modfd_cmd_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+ struct compat_qseecom_set_sb_mem_param_req __user *data32;
+ struct qseecom_set_sb_mem_param_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_set_sb_mem_param_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+ struct compat_qseecom_qseos_version_req __user *data32;
+ struct qseecom_qseos_version_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_qseos_version_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ err = compat_put_qseecom_qseos_version_req(data32, data);
+
+ return ret ? ret : err;
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+ compat_int_t __user *data32;
+ int __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+ err = compat_get_int(data32, data);
+ if (err)
+ return err;
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+ struct compat_qseecom_load_img_req __user *data32;
+ struct qseecom_load_img_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_load_img_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+ struct compat_qseecom_qseos_app_load_query __user *data32;
+ struct qseecom_qseos_app_load_query __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_qseos_app_load_query(data32, data);
+ if (err)
+ return err;
+
+ ret = qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ err = compat_put_qseecom_qseos_app_load_query(data32, data);
+ return ret ? ret : err;
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+ struct compat_qseecom_send_svc_cmd_req __user *data32;
+ struct qseecom_send_svc_cmd_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_send_svc_cmd_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: {
+ struct compat_qseecom_create_key_req __user *data32;
+ struct qseecom_create_key_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_create_key_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: {
+ struct compat_qseecom_wipe_key_req __user *data32;
+ struct qseecom_wipe_key_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_wipe_key_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+ struct compat_qseecom_update_key_userinfo_req __user *data32;
+ struct qseecom_update_key_userinfo_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_update_key_userinfo_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+ struct compat_qseecom_save_partition_hash_req __user *data32;
+ struct qseecom_save_partition_hash_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_save_partition_hash_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+ struct compat_qseecom_is_es_activated_req __user *data32;
+ struct qseecom_is_es_activated_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_is_es_activated_req(data32, data);
+ if (err)
+ return err;
+
+ ret = qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ err = compat_put_qseecom_is_es_activated_req(data32, data);
+ return ret ? ret : err;
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+ struct compat_qseecom_mdtp_cipher_dip_req __user *data32;
+ struct qseecom_mdtp_cipher_dip_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_mdtp_cipher_dip_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+ case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+ struct compat_qseecom_send_modfd_listener_resp __user *data32;
+ struct qseecom_send_modfd_listener_resp __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_send_modfd_listener_resp(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+ struct compat_qseecom_qteec_req __user *data32;
+ struct qseecom_qteec_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_qteec_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+ case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+ case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+ struct compat_qseecom_qteec_modfd_req __user *data32;
+ struct qseecom_qteec_modfd_req __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_qseecom_qteec_modfd_req(data32, data);
+ if (err)
+ return err;
+
+ return qseecom_ioctl(file, convert_cmd(cmd),
+ (unsigned long)data);
+ }
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ break;
+ }
+ return 0;
+}
diff --git a/drivers/misc/compat_qseecom.h b/drivers/misc/compat_qseecom.h
new file mode 100644
index 0000000..fa76d4c
--- /dev/null
+++ b/drivers/misc/compat_qseecom.h
@@ -0,0 +1,333 @@
+#ifndef _UAPI_COMPAT_QSEECOM_H_
+#define _UAPI_COMPAT_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/*
+ * struct compat_qseecom_register_listener_req -
+ * for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSEE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct compat_qseecom_register_listener_req {
+ compat_ulong_t listener_id; /* in */
+ compat_long_t ifd_data_fd; /* in */
+ compat_uptr_t virt_sb_base; /* in */
+ compat_ulong_t sb_size; /* in */
+};
+
+/*
+ * struct compat_qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct compat_qseecom_send_cmd_req {
+ compat_uptr_t cmd_req_buf; /* in */
+ compat_uint_t cmd_req_len; /* in */
+ compat_uptr_t resp_buf; /* in/out */
+ compat_uint_t resp_len; /* in/out */
+};
+
+/*
+ * struct compat_qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_ion_fd_info {
+ compat_long_t fd;
+ compat_ulong_t cmd_buf_offset;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data - ion fd / command buffer offset pairs for the buffers
+ * referenced by the command
+ */
+struct compat_qseecom_send_modfd_cmd_req {
+ compat_uptr_t cmd_req_buf; /* in */
+ compat_uint_t cmd_req_len; /* in */
+ compat_uptr_t resp_buf; /* in/out */
+ compat_uint_t resp_len; /* in/out */
+ struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct compat_qseecom_send_resp_req
+ * signal to continue the send_cmd req.
+ * Used as a trigger from an HLOS service to notify QSEECOM that it is done
+ * with its operation, and to provide the response so that QSEECOM can
+ * continue the incomplete command execution.
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct compat_qseecom_send_resp_req {
+ compat_uptr_t resp_buf; /* in */
+ compat_uint_t resp_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_load_img_req
+ * for sending image length information and the ion file descriptor to the
+ * qseecom driver. The ion file descriptor is used to retrieve the ion file
+ * handle and, in turn, the physical address of the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 + .. + .bxx image files in bytes.
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ * @app_arch - Architecture of the app image.
+ * @app_id - App id returned by the driver.
+ */
+struct compat_qseecom_load_img_req {
+ compat_ulong_t mdt_len; /* in */
+ compat_ulong_t img_len; /* in */
+ compat_long_t ifd_data_fd; /* in */
+ char img_name[MAX_APP_NAME_SIZE]; /* in */
+ compat_ulong_t app_arch; /* in */
+ compat_uint_t app_id; /* out*/
+};
+
+struct compat_qseecom_set_sb_mem_param_req {
+ compat_long_t ifd_data_fd; /* in */
+ compat_uptr_t virt_sb_base; /* in */
+ compat_ulong_t sb_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct compat_qseecom_qseos_version_req {
+ compat_uint_t qseos_version; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name - name of the app.
+ * @app_id - app id.
+ * @app_arch - architecture of the app.
+ */
+struct compat_qseecom_qseos_app_load_query {
+ char app_name[MAX_APP_NAME_SIZE]; /* in */
+ compat_uint_t app_id; /* out */
+ compat_ulong_t app_arch;
+};
+
+struct compat_qseecom_send_svc_cmd_req {
+ compat_ulong_t cmd_id;
+ compat_uptr_t cmd_req_buf; /* in */
+ compat_uint_t cmd_req_len; /* in */
+ compat_uptr_t resp_buf; /* in/out */
+ compat_uint_t resp_len; /* in/out */
+};
+
+struct compat_qseecom_create_key_req {
+ unsigned char hash32[QSEECOM_HASH_SIZE];
+ enum qseecom_key_management_usage_type usage;
+};
+
+struct compat_qseecom_wipe_key_req {
+ enum qseecom_key_management_usage_type usage;
+ compat_int_t wipe_key_flag;
+};
+
+struct compat_qseecom_update_key_userinfo_req {
+ unsigned char current_hash32[QSEECOM_HASH_SIZE];
+ unsigned char new_hash32[QSEECOM_HASH_SIZE];
+ enum qseecom_key_management_usage_type usage;
+};
+
+/*
+ * struct compat_qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @digest - sha256 digest.
+ */
+struct compat_qseecom_save_partition_hash_req {
+ compat_int_t partition_id; /* in */
+ char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct compat_qseecom_is_es_activated_req
+ * @is_activated - 1 = true, 0 = false
+ */
+struct compat_qseecom_is_es_activated_req {
+ compat_int_t is_activated; /* out */
+};
+
+/*
+ * struct compat_qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct compat_qseecom_mdtp_cipher_dip_req {
+ compat_uptr_t in_buf;
+ compat_uint_t in_buf_size;
+ compat_uptr_t out_buf;
+ compat_uint_t out_buf_size;
+ compat_uint_t direction;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_listener_resp - for send modfd listener
+ * response ioctl request
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion fd / command buffer offset pairs for the buffers
+ * referenced by the response
+ */
+struct compat_qseecom_send_modfd_listener_resp {
+ compat_uptr_t resp_buf_ptr; /* in */
+ compat_uint_t resp_len; /* in */
+ struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct compat_qseecom_qteec_req {
+ compat_uptr_t req_ptr;
+ compat_ulong_t req_len;
+ compat_uptr_t resp_ptr;
+ compat_ulong_t resp_len;
+};
+
+struct compat_qseecom_qteec_modfd_req {
+ compat_uptr_t req_ptr;
+ compat_ulong_t req_len;
+ compat_uptr_t resp_ptr;
+ compat_ulong_t resp_len;
+ struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct compat_qseecom_ce_pipe_entry {
+ compat_int_t valid;
+ compat_uint_t ce_num;
+ compat_uint_t ce_pipe_pair;
+};
+
+struct compat_qseecom_ce_info_req {
+ unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+ compat_uint_t usage;
+ compat_uint_t unit_num;
+ compat_uint_t num_ce_pipe_entries;
+ struct compat_qseecom_ce_pipe_entry
+ ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct file;
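+/*
+ * Meant to be wired up as the .compat_ioctl handler in the driver's
+ * file_operations (the hookup itself lives in qseecom.c).
+ */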
+extern long compat_qseecom_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 1, struct compat_qseecom_register_listener_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 2)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 3, struct compat_qseecom_send_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 4, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_RECEIVE_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 5)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 6)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 7, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 8, struct compat_qseecom_set_sb_mem_param_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 9)
+
+#define COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 10, struct compat_qseecom_qseos_version_req)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 11)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 12)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 13, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+ _IO(QSEECOM_IOC_MAGIC, 14)
+
+#define COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 15, struct compat_qseecom_qseos_app_load_query)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 16, struct compat_qseecom_send_svc_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 17, struct compat_qseecom_create_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 18, struct compat_qseecom_wipe_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 19, \
+ struct compat_qseecom_save_partition_hash_req)
+
+#define COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 20, struct compat_qseecom_is_es_activated_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP \
+ _IOWR(QSEECOM_IOC_MAGIC, 21, \
+ struct compat_qseecom_send_modfd_listener_resp)
+
+#define COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 24, \
+ struct compat_qseecom_update_key_userinfo_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 30, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 31, struct compat_qseecom_qteec_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 32, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 33, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 34, struct compat_qseecom_mdtp_cipher_dip_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+ _IOWR(QSEECOM_IOC_MAGIC, 35, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+ _IOWR(QSEECOM_IOC_MAGIC, 36, \
+ struct compat_qseecom_send_modfd_listener_resp)
+#define COMPAT_QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+ _IOWR(QSEECOM_IOC_MAGIC, 40, \
+ struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+ _IOWR(QSEECOM_IOC_MAGIC, 41, \
+ struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+ _IOWR(QSEECOM_IOC_MAGIC, 42, \
+ struct compat_qseecom_ce_info_req)
+
+#endif
+#endif /* _UAPI_COMPAT_QSEECOM_H_ */
diff --git a/drivers/misc/qpnp-misc.c b/drivers/misc/qpnp-misc.c
new file mode 100644
index 0000000..3c11de0
--- /dev/null
+++ b/drivers/misc/qpnp-misc.c
@@ -0,0 +1,352 @@
+/* Copyright (c) 2013-2014,2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/qpnp/qpnp-misc.h>
+
+#define QPNP_MISC_DEV_NAME "qcom,qpnp-misc"
+
+#define REG_DIG_MAJOR_REV 0x01
+#define REG_SUBTYPE 0x05
+#define REG_PWM_SEL 0x49
+#define REG_GP_DRIVER_EN 0x4C
+
+#define PWM_SEL_MAX 0x03
+#define GP_DRIVER_EN_BIT BIT(0)
+
+static DEFINE_MUTEX(qpnp_misc_dev_list_mutex);
+static LIST_HEAD(qpnp_misc_dev_list);
+
+struct qpnp_misc_version {
+ u8 subtype;
+ u8 dig_major_rev;
+};
+
+/**
+ * struct qpnp_misc_dev - holds controller device specific information
+ * @list: Doubly-linked list parameter linking to other
+ * qpnp_misc devices.
+ * @mutex: Mutex lock that is used to ensure mutual
+ * exclusion between probing and accessing misc
+ * driver information
+ * @dev: Device pointer to the misc device
+ * @regmap: Regmap pointer to the misc device
+ * @version: struct that holds the subtype and dig_major_rev
+ * of the chip.
+ */
+struct qpnp_misc_dev {
+ struct list_head list;
+ struct mutex mutex;
+ struct device *dev;
+ struct regmap *regmap;
+ struct qpnp_misc_version version;
+
+ u32 base;
+ u8 pwm_sel;
+ bool enable_gp_driver;
+};
+
+static const struct of_device_id qpnp_misc_match_table[] = {
+ { .compatible = QPNP_MISC_DEV_NAME },
+ {}
+};
+
+enum qpnp_misc_version_name {
+ INVALID,
+ PM8941,
+ PM8226,
+ PMA8084,
+ PMDCALIFORNIUM,
+};
+
+static struct qpnp_misc_version irq_support_version[] = {
+ {0x00, 0x00}, /* INVALID */
+ {0x01, 0x02}, /* PM8941 */
+ {0x07, 0x00}, /* PM8226 */
+ {0x09, 0x00}, /* PMA8084 */
+ {0x16, 0x00}, /* PMDCALIFORNIUM */
+};
+
+static int qpnp_write_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 val)
+{
+ int rc;
+
+ rc = regmap_write(mdev->regmap, mdev->base + addr, val);
+ if (rc)
+ pr_err("regmap write failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qpnp_read_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 *val)
+{
+ unsigned int temp;
+ int rc;
+
+ rc = regmap_read(mdev->regmap, mdev->base + addr, &temp);
+ if (rc) {
+ pr_err("regmap read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = (u8)temp;
+ return rc;
+}
+
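+/*
+ * Map the chip's (subtype, dig_major_rev) pair to a version name. An entry
+ * matches when the subtype is equal and the revision is at least the table
+ * value; e.g. a chip reporting subtype 0x01, rev 0x03 resolves to PM8941.
+ */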
+static int get_qpnp_misc_version_name(struct qpnp_misc_dev *dev)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(irq_support_version); i++)
+ if (dev->version.subtype == irq_support_version[i].subtype &&
+ dev->version.dig_major_rev >=
+ irq_support_version[i].dig_major_rev)
+ return i;
+
+ return INVALID;
+}
+
+static bool __misc_irqs_available(struct qpnp_misc_dev *dev)
+{
+ int version_name = get_qpnp_misc_version_name(dev);
+
+ return version_name != INVALID;
+}
+
+int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val)
+{
+ struct qpnp_misc_dev *mdev = NULL;
+ struct qpnp_misc_dev *mdev_found = NULL;
+ int rc;
+ u8 temp;
+
+ if (IS_ERR_OR_NULL(node)) {
+ pr_err("Invalid device node pointer\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&qpnp_misc_dev_list_mutex);
+ list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+ if (mdev->dev->of_node == node) {
+ mdev_found = mdev;
+ break;
+ }
+ }
+ mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+ if (!mdev_found) {
+ /*
+ * No MISC device was found. This API should only
+ * be called by drivers which have specified the
+ * misc phandle in their device tree node.
+ */
+ pr_err("no probed misc device found\n");
+ return -EPROBE_DEFER;
+ }
+
+ rc = qpnp_read_byte(mdev, addr, &temp);
+ if (rc < 0) {
+ dev_err(mdev->dev, "Failed to read addr %x, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ *val = temp;
+ return 0;
+}
+
+int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+ struct device_node *misc_node = NULL;
+ struct qpnp_misc_dev *mdev = NULL;
+ struct qpnp_misc_dev *mdev_found = NULL;
+
+ if (IS_ERR_OR_NULL(consumer_dev)) {
+ pr_err("Invalid consumer device pointer\n");
+ return -EINVAL;
+ }
+
+ misc_node = of_parse_phandle(consumer_dev->of_node, "qcom,misc-ref", 0);
+ if (!misc_node) {
+ pr_debug("Could not find qcom,misc-ref property in %s\n",
+ consumer_dev->of_node->full_name);
+ return 0;
+ }
+
+ mutex_lock(&qpnp_misc_dev_list_mutex);
+ list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+ if (mdev->dev->of_node == misc_node) {
+ mdev_found = mdev;
+ break;
+ }
+ }
+ mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+ if (!mdev_found) {
+ /*
+ * No MISC device was found. This API should only
+ * be called by drivers which have specified the
+ * misc phandle in their device tree node.
+ */
+ pr_err("no probed misc device found\n");
+ return -EPROBE_DEFER;
+ }
+
+ return __misc_irqs_available(mdev_found);
+}
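+
+/*
+ * Illustrative (hypothetical) consumer usage, e.g. from another driver's
+ * probe path, after adding a "qcom,misc-ref" phandle to its DT node:
+ *
+ * rc = qpnp_misc_irqs_available(&pdev->dev);
+ * if (rc < 0)
+ * return rc; (may be -EPROBE_DEFER until the MISC device probes)
+ * usb_id_irq_available = !!rc;
+ */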
+
+static int qpnp_misc_dt_init(struct qpnp_misc_dev *mdev)
+{
+ struct device_node *node = mdev->dev->of_node;
+ u32 val;
+ int rc;
+
+ rc = of_property_read_u32(node, "reg", &mdev->base);
+ if (rc < 0 || !mdev->base) {
+ dev_err(mdev->dev, "Base address not defined or invalid\n");
+ return -EINVAL;
+ }
+
+ if (!of_property_read_u32(node, "qcom,pwm-sel", &val)) {
+ if (val > PWM_SEL_MAX) {
+ dev_err(mdev->dev, "Invalid value for pwm-sel\n");
+ return -EINVAL;
+ }
+ mdev->pwm_sel = (u8)val;
+ }
+ mdev->enable_gp_driver = of_property_read_bool(node,
+ "qcom,enable-gp-driver");
+
+ WARN((mdev->pwm_sel > 0 && !mdev->enable_gp_driver),
+ "Setting PWM source without enabling gp driver\n");
+ WARN((mdev->pwm_sel == 0 && mdev->enable_gp_driver),
+ "Enabling gp driver without setting PWM source\n");
+
+ return 0;
+}
+
+static int qpnp_misc_config(struct qpnp_misc_dev *mdev)
+{
+ int rc, version_name;
+
+ version_name = get_qpnp_misc_version_name(mdev);
+
+ switch (version_name) {
+ case PMDCALIFORNIUM:
+ if (mdev->pwm_sel > 0 && mdev->enable_gp_driver) {
+ rc = qpnp_write_byte(mdev, REG_PWM_SEL, mdev->pwm_sel);
+ if (rc < 0) {
+ dev_err(mdev->dev,
+ "Failed to write PWM_SEL reg\n");
+ return rc;
+ }
+
+ rc = qpnp_write_byte(mdev, REG_GP_DRIVER_EN,
+ GP_DRIVER_EN_BIT);
+ if (rc < 0) {
+ dev_err(mdev->dev,
+ "Failed to write GP_DRIVER_EN reg\n");
+ return rc;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int qpnp_misc_probe(struct platform_device *pdev)
+{
+ struct qpnp_misc_dev *mdev = ERR_PTR(-EINVAL);
+ int rc;
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->dev = &pdev->dev;
+ mdev->regmap = dev_get_regmap(mdev->dev->parent, NULL);
+ if (!mdev->regmap) {
+ dev_err(mdev->dev, "Parent regmap is unavailable\n");
+ return -ENXIO;
+ }
+
+ rc = qpnp_misc_dt_init(mdev);
+ if (rc < 0) {
+ dev_err(mdev->dev,
+ "Error reading device tree properties, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_read_byte(mdev, REG_SUBTYPE, &mdev->version.subtype);
+ if (rc < 0) {
+ dev_err(mdev->dev, "Failed to read subtype, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_read_byte(mdev, REG_DIG_MAJOR_REV,
+ &mdev->version.dig_major_rev);
+ if (rc < 0) {
+ dev_err(mdev->dev, "Failed to read dig_major_rev, rc=%d\n", rc);
+ return rc;
+ }
+
+ mutex_lock(&qpnp_misc_dev_list_mutex);
+ list_add_tail(&mdev->list, &qpnp_misc_dev_list);
+ mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+ rc = qpnp_misc_config(mdev);
+ if (rc < 0) {
+ dev_err(mdev->dev,
+ "Error configuring module registers, rc=%d\n", rc);
+ return rc;
+ }
+
+ dev_info(mdev->dev, "probe successful\n");
+ return 0;
+}
+
+static struct platform_driver qpnp_misc_driver = {
+ .probe = qpnp_misc_probe,
+ .driver = {
+ .name = QPNP_MISC_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qpnp_misc_match_table,
+ },
+};
+
+static int __init qpnp_misc_init(void)
+{
+ return platform_driver_register(&qpnp_misc_driver);
+}
+
+static void __exit qpnp_misc_exit(void)
+{
+ platform_driver_unregister(&qpnp_misc_driver);
+}
+
+subsys_initcall(qpnp_misc_init);
+module_exit(qpnp_misc_exit);
+
+MODULE_DESCRIPTION(QPNP_MISC_DEV_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_MISC_DEV_NAME);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
new file mode 100644
index 0000000..877c4d1
--- /dev/null
+++ b/drivers/misc/qseecom.c
@@ -0,0 +1,8928 @@
+/*
+ * QTI Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/qseecom.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/freezer.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/qseecomi.h>
+#include <asm/cacheflush.h>
+#include "qseecom_kernel.h"
+#include <crypto/ice.h>
+#include <linux/delay.h>
+
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+#define QSEECOM_DEV "qseecom"
+#define QSEOS_VERSION_14 0x14
+#define QSEE_VERSION_00 0x400000
+#define QSEE_VERSION_01 0x401000
+#define QSEE_VERSION_02 0x402000
+#define QSEE_VERSION_03 0x403000
+#define QSEE_VERSION_04 0x404000
+#define QSEE_VERSION_05 0x405000
+#define QSEE_VERSION_20 0x800000
+#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
+
+#define QSEE_CE_CLK_100MHZ 100000000
+#define CE_CLK_DIV 1000000
+
+#define QSEECOM_MAX_SG_ENTRY 512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
+ (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
+#define QSEECOM_INVALID_KEY_ID 0xff
+
+/* Save partition image hash for authentication check */
+#define SCM_SAVE_PARTITION_HASH_ID 0x01
+
+/* Check if enterprise security is activated */
+#define SCM_IS_ACTIVATED_ID 0x02
+
+/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
+#define SCM_MDTP_CIPHER_DIP 0x01
+
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP 0x20000
+
+#define RPMB_SERVICE 0x2000
+#define SSD_SERVICE 0x3000
+
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
+#define TWO 2
+#define QSEECOM_UFS_ICE_CE_NUM 10
+#define QSEECOM_SDCC_ICE_CE_NUM 20
+#define QSEECOM_ICE_FDE_KEY_INDEX 0
+
+#define PHY_ADDR_4G (1ULL<<32)
+
+#define QSEECOM_STATE_NOT_READY 0
+#define QSEECOM_STATE_SUSPEND 1
+#define QSEECOM_STATE_READY 2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
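+/*
+ * Note: despite the name, this value is a bit position used as a shift
+ * amount by enum qseecom_ice_key_size_type below.
+ */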
+
+/*
+ * Default the CE info unit to 0 for services which support only a single
+ * instance. Most services fall into this category.
+ */
+#define DEFAULT_CE_INFO_UNIT 0
+#define DEFAULT_NUM_CE_INFO_UNIT 1
+
+enum qseecom_clk_definitions {
+ CLK_DFAB = 0,
+ CLK_SFPB,
+};
+
+enum qseecom_ice_key_size_type {
+ QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+ (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+ QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+ (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+ QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+ (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
+
+enum qseecom_client_handle_type {
+ QSEECOM_CLIENT_APP = 1,
+ QSEECOM_LISTENER_SERVICE,
+ QSEECOM_SECURE_SERVICE,
+ QSEECOM_GENERIC,
+ QSEECOM_UNAVAILABLE_CLIENT_APP,
+};
+
+enum qseecom_ce_hw_instance {
+ CLK_QSEE = 0,
+ CLK_CE_DRV,
+ CLK_INVALID,
+};
+
+static struct class *driver_class;
+static dev_t qseecom_device_no;
+
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
+
+struct sglist_info {
+ uint32_t indexAndFlags;
+ uint32_t sizeOrCount;
+};
+
+/*
+ * Bit 31 indicates whether the request buffer holds a single physical
+ * address or multiple. If it is set, the index locates a single physical addr
+ * inside the request buffer, and `sizeOrCount` is the size of the memory being
+ * shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * Bit 30 selects between 64-bit and 32-bit addressing; when it is set,
+ * physical addresses and scatter/gather entry sizes are 64-bit values.
+ * Otherwise they are 32-bit values.
+ *
+ * Bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
+ ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
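+
+/*
+ * Worked example (illustrative values): a single physical address entry
+ * (c = 1), packed as 64-bit (s = 1), at offset 0x10 in the request buffer
+ * encodes as SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x10) == 0xC0000010.
+ */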
+
+#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
+
+#define FEATURE_ID_WHITELIST 15 /* whitelist feature id */
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
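+
+/* e.g. MAKE_WHITELIST_VERSION(1, 2, 3) packs to 0x402003 (illustrative). */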
+
+struct qseecom_registered_listener_list {
+ struct list_head list;
+ struct qseecom_register_listener_req svc;
+ void *user_virt_sb_base;
+ u8 *sb_virt;
+ phys_addr_t sb_phys;
+ size_t sb_length;
+ struct ion_handle *ihandle; /* Retrieve phy addr */
+ wait_queue_head_t rcv_req_wq;
+ int rcv_req_flag;
+ int send_resp_flag;
+ bool listener_in_use;
+ /* wq for thread blocked on this listener*/
+ wait_queue_head_t listener_block_app_wq;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+ uint32_t sglist_cnt;
+};
+
+struct qseecom_registered_app_list {
+ struct list_head list;
+ u32 app_id;
+ u32 ref_cnt;
+ char app_name[MAX_APP_NAME_SIZE];
+ u32 app_arch;
+ bool app_blocked;
+ u32 blocked_on_listener_id;
+};
+
+struct qseecom_registered_kclient_list {
+ struct list_head list;
+ struct qseecom_handle *handle;
+};
+
+struct qseecom_ce_info_use {
+ unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+ unsigned int unit_num;
+ unsigned int num_ce_pipe_entries;
+ struct qseecom_ce_pipe_entry *ce_pipe_entry;
+ bool alloc;
+ uint32_t type;
+};
+
+struct ce_hw_usage_info {
+ uint32_t qsee_ce_hw_instance;
+ uint32_t num_fde;
+ struct qseecom_ce_info_use *fde;
+ uint32_t num_pfe;
+ struct qseecom_ce_info_use *pfe;
+};
+
+struct qseecom_clk {
+ enum qseecom_ce_hw_instance instance;
+ struct clk *ce_core_clk;
+ struct clk *ce_clk;
+ struct clk *ce_core_src_clk;
+ struct clk *ce_bus_clk;
+ uint32_t clk_access_cnt;
+};
+
+struct qseecom_control {
+ struct ion_client *ion_clnt; /* Ion client */
+ struct list_head registered_listener_list_head;
+ spinlock_t registered_listener_list_lock;
+
+ struct list_head registered_app_list_head;
+ spinlock_t registered_app_list_lock;
+
+ struct list_head registered_kclient_list_head;
+ spinlock_t registered_kclient_list_lock;
+
+ wait_queue_head_t send_resp_wq;
+ int send_resp_flag;
+
+ uint32_t qseos_version;
+ uint32_t qsee_version;
+ struct device *pdev;
+ bool whitelist_support;
+ bool commonlib_loaded;
+ bool commonlib64_loaded;
+ struct ion_handle *cmnlib_ion_handle;
+ struct ce_hw_usage_info ce_info;
+
+ int qsee_bw_count;
+ int qsee_sfpb_bw_count;
+
+ uint32_t qsee_perf_client;
+ struct qseecom_clk qsee;
+ struct qseecom_clk ce_drv;
+
+ bool support_bus_scaling;
+ bool support_fde;
+ bool support_pfe;
+ bool fde_key_size;
+ uint32_t cumulative_mode;
+ enum qseecom_bandwidth_request_mode current_mode;
+ struct timer_list bw_scale_down_timer;
+ struct work_struct bw_inactive_req_ws;
+ struct cdev cdev;
+ bool timer_running;
+ bool no_clock_support;
+ unsigned int ce_opp_freq_hz;
+ bool appsbl_qseecom_support;
+ uint32_t qsee_reentrancy_support;
+
+ uint32_t app_block_ref_cnt;
+ wait_queue_head_t app_block_wq;
+ atomic_t qseecom_state;
+ int is_apps_region_protected;
+};
+
+struct qseecom_sec_buf_fd_info {
+ bool is_sec_buf_fd;
+ size_t size;
+ void *vbase;
+ dma_addr_t pbase;
+};
+
+struct qseecom_param_memref {
+ uint32_t buffer;
+ uint32_t size;
+};
+
+struct qseecom_client_handle {
+ u32 app_id;
+ u8 *sb_virt;
+ phys_addr_t sb_phys;
+ unsigned long user_virt_sb_base;
+ size_t sb_length;
+ struct ion_handle *ihandle; /* Retrieve phy addr */
+ char app_name[MAX_APP_NAME_SIZE];
+ u32 app_arch;
+ struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+};
+
+struct qseecom_listener_handle {
+ u32 id;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+ enum qseecom_client_handle_type type;
+ union {
+ struct qseecom_client_handle client;
+ struct qseecom_listener_handle listener;
+ };
+ bool released;
+ int abort;
+ wait_queue_head_t abort_wq;
+ atomic_t ioctl_count;
+ bool perf_enabled;
+ bool fast_load_enabled;
+ enum qseecom_bandwidth_request_mode mode;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+ uint32_t sglist_cnt;
+ bool use_legacy_cmd;
+};
+
+struct qseecom_key_id_usage_desc {
+ uint8_t desc[QSEECOM_KEY_ID_SIZE];
+};
+
+struct qseecom_crypto_info {
+ unsigned int unit_num;
+ unsigned int ce;
+ unsigned int pipe_pair;
+};
+
+static struct qseecom_key_id_usage_desc key_id_array[] = {
+ {
+ .desc = "Undefined Usage Index",
+ },
+
+ {
+ .desc = "Full Disk Encryption",
+ },
+
+ {
+ .desc = "Per File Encryption",
+ },
+
+ {
+ .desc = "UFS ICE Full Disk Encryption",
+ },
+
+ {
+ .desc = "SDCC ICE Full Disk Encryption",
+ },
+};
+
+/* Function proto types */
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+ char *cmnlib_name);
+static int qseecom_enable_ice_setup(int usage);
+static int qseecom_disable_ice_setup(int usage);
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+ void __user *argp);
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+ void __user *argp);
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+ void __user *argp);
+
+static int get_qseecom_keymaster_status(char *str)
+{
+ get_option(&str, &qseecom.is_apps_region_protected);
+ return 1;
+}
+__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
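+/*
+ * e.g. booting with "androidboot.keymaster=1" on the kernel command line
+ * sets qseecom.is_apps_region_protected to 1.
+ */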
+
+static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
+ const void *req_buf, void *resp_buf)
+{
+ int ret = 0;
+ uint32_t smc_id = 0;
+ uint32_t qseos_cmd_id = 0;
+ struct scm_desc desc = {0};
+ struct qseecom_command_scm_resp *scm_resp = NULL;
+
+ if (!req_buf || !resp_buf) {
+ pr_err("Invalid buffer pointer\n");
+ return -EINVAL;
+ }
+ qseos_cmd_id = *(uint32_t *)req_buf;
+ scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
+
+ switch (svc_id) {
+ case 6: {
+ if (tz_cmd_id == 3) {
+ smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+ desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+ desc.args[0] = *(uint32_t *)req_buf;
+ } else {
+ pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
+ svc_id, tz_cmd_id);
+ return -EINVAL;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case SCM_SVC_ES: {
+ switch (tz_cmd_id) {
+ case SCM_SAVE_PARTITION_HASH_ID: {
+ u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
+ struct qseecom_save_partition_hash_req *p_hash_req =
+ (struct qseecom_save_partition_hash_req *)
+ req_buf;
+ char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+ if (!tzbuf)
+ return -ENOMEM;
+ memcpy(tzbuf, p_hash_req->digest,
+ SHA256_DIGEST_LENGTH);
+ dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+ smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
+ desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
+ desc.args[0] = p_hash_req->partition_id;
+ desc.args[1] = virt_to_phys(tzbuf);
+ desc.args[2] = SHA256_DIGEST_LENGTH;
+ ret = scm_call2(smc_id, &desc);
+ kzfree(tzbuf);
+ break;
+ }
+ default: {
+ pr_err("tz_cmd_id %d is not supported by scm_call2\n",
+ tz_cmd_id);
+ ret = -EINVAL;
+ break;
+ }
+ } /* end of switch (tz_cmd_id) */
+ break;
+ } /* end of case SCM_SVC_ES */
+ case SCM_SVC_TZSCHEDULER: {
+ switch (qseos_cmd_id) {
+ case QSEOS_APP_START_COMMAND: {
+ struct qseecom_load_app_ireq *req;
+ struct qseecom_load_app_64bit_ireq *req_64bit;
+
+ smc_id = TZ_OS_APP_START_ID;
+ desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_load_app_ireq *)req_buf;
+ desc.args[0] = req->mdt_len;
+ desc.args[1] = req->img_len;
+ desc.args[2] = req->phy_addr;
+ } else {
+ req_64bit =
+ (struct qseecom_load_app_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->mdt_len;
+ desc.args[1] = req_64bit->img_len;
+ desc.args[2] = req_64bit->phy_addr;
+ }
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_APP_SHUTDOWN_COMMAND: {
+ struct qseecom_unload_app_ireq *req;
+
+ req = (struct qseecom_unload_app_ireq *)req_buf;
+ smc_id = TZ_OS_APP_SHUTDOWN_ID;
+ desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
+ desc.args[0] = req->app_id;
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_APP_LOOKUP_COMMAND: {
+ struct qseecom_check_app_ireq *req;
+ u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
+ char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+ if (!tzbuf)
+ return -ENOMEM;
+ req = (struct qseecom_check_app_ireq *)req_buf;
+ pr_debug("Lookup app_name = %s\n", req->app_name);
+ strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
+ dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+ smc_id = TZ_OS_APP_LOOKUP_ID;
+ desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
+ desc.args[0] = virt_to_phys(tzbuf);
+ desc.args[1] = strlen(req->app_name);
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ kzfree(tzbuf);
+ break;
+ }
+ case QSEOS_APP_REGION_NOTIFICATION: {
+ struct qsee_apps_region_info_ireq *req;
+ struct qsee_apps_region_info_64bit_ireq *req_64bit;
+
+ smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
+ desc.arginfo =
+ TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qsee_apps_region_info_ireq *)
+ req_buf;
+ desc.args[0] = req->addr;
+ desc.args[1] = req->size;
+ } else {
+ req_64bit =
+ (struct qsee_apps_region_info_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->addr;
+ desc.args[1] = req_64bit->size;
+ }
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
+ struct qseecom_load_lib_image_ireq *req;
+ struct qseecom_load_lib_image_64bit_ireq *req_64bit;
+
+ smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
+ desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_load_lib_image_ireq *)
+ req_buf;
+ desc.args[0] = req->mdt_len;
+ desc.args[1] = req->img_len;
+ desc.args[2] = req->phy_addr;
+ } else {
+ req_64bit =
+ (struct qseecom_load_lib_image_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->mdt_len;
+ desc.args[1] = req_64bit->img_len;
+ desc.args[2] = req_64bit->phy_addr;
+ }
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
+ smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
+ desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_REGISTER_LISTENER: {
+ struct qseecom_register_listener_ireq *req;
+ struct qseecom_register_listener_64bit_ireq *req_64bit;
+
+ desc.arginfo =
+ TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_register_listener_ireq *)
+ req_buf;
+ desc.args[0] = req->listener_id;
+ desc.args[1] = req->sb_ptr;
+ desc.args[2] = req->sb_len;
+ } else {
+ req_64bit =
+ (struct qseecom_register_listener_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->listener_id;
+ desc.args[1] = req_64bit->sb_ptr;
+ desc.args[2] = req_64bit->sb_len;
+ }
+ smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ if (ret) {
+ smc_id = TZ_OS_REGISTER_LISTENER_ID;
+ __qseecom_reentrancy_check_if_no_app_blocked(
+ smc_id);
+ ret = scm_call2(smc_id, &desc);
+ }
+ break;
+ }
+ case QSEOS_DEREGISTER_LISTENER: {
+ struct qseecom_unregister_listener_ireq *req;
+
+ req = (struct qseecom_unregister_listener_ireq *)
+ req_buf;
+ smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
+ desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
+ desc.args[0] = req->listener_id;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_LISTENER_DATA_RSP_COMMAND: {
+ struct qseecom_client_listener_data_irsp *req;
+
+ req = (struct qseecom_client_listener_data_irsp *)
+ req_buf;
+ smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
+ desc.arginfo =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
+ desc.args[0] = req->listener_id;
+ desc.args[1] = req->status;
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+ struct qseecom_client_listener_data_irsp *req;
+ struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+ smc_id =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req =
+ (struct qseecom_client_listener_data_irsp *)
+ req_buf;
+ desc.args[0] = req->listener_id;
+ desc.args[1] = req->status;
+ desc.args[2] = req->sglistinfo_ptr;
+ desc.args[3] = req->sglistinfo_len;
+ } else {
+ req_64 =
+ (struct qseecom_client_listener_data_64bit_irsp *)
+ req_buf;
+ desc.args[0] = req_64->listener_id;
+ desc.args[1] = req_64->status;
+ desc.args[2] = req_64->sglistinfo_ptr;
+ desc.args[3] = req_64->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
+ struct qseecom_load_app_ireq *req;
+ struct qseecom_load_app_64bit_ireq *req_64bit;
+
+ smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
+ desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_load_app_ireq *)req_buf;
+ desc.args[0] = req->mdt_len;
+ desc.args[1] = req->img_len;
+ desc.args[2] = req->phy_addr;
+ } else {
+ req_64bit =
+ (struct qseecom_load_app_64bit_ireq *)req_buf;
+ desc.args[0] = req_64bit->mdt_len;
+ desc.args[1] = req_64bit->img_len;
+ desc.args[2] = req_64bit->phy_addr;
+ }
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
+ smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
+ desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+
+ case QSEOS_CLIENT_SEND_DATA_COMMAND: {
+ struct qseecom_client_send_data_ireq *req;
+ struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
+ desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_client_send_data_ireq *)
+ req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->rsp_ptr;
+ desc.args[4] = req->rsp_len;
+ } else {
+ req_64bit =
+ (struct qseecom_client_send_data_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->rsp_ptr;
+ desc.args[4] = req_64bit->rsp_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+ struct qseecom_client_send_data_ireq *req;
+ struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_client_send_data_ireq *)
+ req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->rsp_ptr;
+ desc.args[4] = req->rsp_len;
+ desc.args[5] = req->sglistinfo_ptr;
+ desc.args[6] = req->sglistinfo_len;
+ } else {
+ req_64bit =
+ (struct qseecom_client_send_data_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->rsp_ptr;
+ desc.args[4] = req_64bit->rsp_len;
+ desc.args[5] = req_64bit->sglistinfo_ptr;
+ desc.args[6] = req_64bit->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
+ struct qseecom_client_send_service_ireq *req;
+
+ req = (struct qseecom_client_send_service_ireq *)
+ req_buf;
+ smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
+ desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
+ desc.args[0] = req->key_type;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_RPMB_ERASE_COMMAND: {
+ smc_id = TZ_OS_RPMB_ERASE_ID;
+ desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
+ smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
+ desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_GENERATE_KEY: {
+ u32 tzbuflen = PAGE_ALIGN(sizeof
+ (struct qseecom_key_generate_ireq) -
+ sizeof(uint32_t));
+ char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+ if (!tzbuf)
+ return -ENOMEM;
+ memcpy(tzbuf, req_buf + sizeof(uint32_t),
+ (sizeof(struct qseecom_key_generate_ireq) -
+ sizeof(uint32_t)));
+ dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+ smc_id = TZ_OS_KS_GEN_KEY_ID;
+ desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
+ desc.args[0] = virt_to_phys(tzbuf);
+ desc.args[1] = tzbuflen;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ kzfree(tzbuf);
+ break;
+ }
+ case QSEOS_DELETE_KEY: {
+ u32 tzbuflen = PAGE_ALIGN(sizeof
+ (struct qseecom_key_delete_ireq) -
+ sizeof(uint32_t));
+ char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+ if (!tzbuf)
+ return -ENOMEM;
+ memcpy(tzbuf, req_buf + sizeof(uint32_t),
+ (sizeof(struct qseecom_key_delete_ireq) -
+ sizeof(uint32_t)));
+ dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+ smc_id = TZ_OS_KS_DEL_KEY_ID;
+ desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
+ desc.args[0] = virt_to_phys(tzbuf);
+ desc.args[1] = tzbuflen;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ kzfree(tzbuf);
+ break;
+ }
+ case QSEOS_SET_KEY: {
+ u32 tzbuflen = PAGE_ALIGN(sizeof
+ (struct qseecom_key_select_ireq) -
+ sizeof(uint32_t));
+ char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+ if (!tzbuf)
+ return -ENOMEM;
+ memcpy(tzbuf, req_buf + sizeof(uint32_t),
+ (sizeof(struct qseecom_key_select_ireq) -
+ sizeof(uint32_t)));
+ dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+ smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
+ desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
+ desc.args[0] = virt_to_phys(tzbuf);
+ desc.args[1] = tzbuflen;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ kzfree(tzbuf);
+ break;
+ }
+ case QSEOS_UPDATE_KEY_USERINFO: {
+ u32 tzbuflen = PAGE_ALIGN(sizeof
+ (struct qseecom_key_userinfo_update_ireq) -
+ sizeof(uint32_t));
+ char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+ if (!tzbuf)
+ return -ENOMEM;
+ memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
+ (struct qseecom_key_userinfo_update_ireq) -
+ sizeof(uint32_t)));
+ dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+ smc_id = TZ_OS_KS_UPDATE_KEY_ID;
+ desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
+ desc.args[0] = virt_to_phys(tzbuf);
+ desc.args[1] = tzbuflen;
+ __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+ ret = scm_call2(smc_id, &desc);
+ kzfree(tzbuf);
+ break;
+ }
+ case QSEOS_TEE_OPEN_SESSION: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
+ desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ desc.args[5] = req->sglistinfo_ptr;
+ desc.args[6] = req->sglistinfo_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ desc.args[5] = req_64bit->sglistinfo_ptr;
+ desc.args[6] = req_64bit->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_TEE_INVOKE_COMMAND: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
+ desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ desc.args[5] = req->sglistinfo_ptr;
+ desc.args[6] = req->sglistinfo_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ desc.args[5] = req_64bit->sglistinfo_ptr;
+ desc.args[6] = req_64bit->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_TEE_CLOSE_SESSION: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
+ desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_TEE_REQUEST_CANCELLATION: {
+ struct qseecom_qteec_ireq *req;
+ struct qseecom_qteec_64bit_ireq *req_64bit;
+
+ smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
+ desc.arginfo =
+ TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req = (struct qseecom_qteec_ireq *)req_buf;
+ desc.args[0] = req->app_id;
+ desc.args[1] = req->req_ptr;
+ desc.args[2] = req->req_len;
+ desc.args[3] = req->resp_ptr;
+ desc.args[4] = req->resp_len;
+ } else {
+ req_64bit = (struct qseecom_qteec_64bit_ireq *)
+ req_buf;
+ desc.args[0] = req_64bit->app_id;
+ desc.args[1] = req_64bit->req_ptr;
+ desc.args[2] = req_64bit->req_len;
+ desc.args[3] = req_64bit->resp_ptr;
+ desc.args[4] = req_64bit->resp_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
+ struct qseecom_continue_blocked_request_ireq *req =
+ (struct qseecom_continue_blocked_request_ireq *)
+ req_buf;
+ smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+ desc.arginfo =
+ TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
+ desc.args[0] = req->app_id;
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
+ default: {
+ pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
+ qseos_cmd_id);
+ ret = -EINVAL;
+ break;
+ }
+	} /* end of switch (qseos_cmd_id) */
+ break;
+	} /* end of case SCM_SVC_TZSCHEDULER */
+ default: {
+ pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
+ svc_id);
+ ret = -EINVAL;
+ break;
+ }
+	} /* end of switch (svc_id) */
+ scm_resp->result = desc.ret[0];
+ scm_resp->resp_type = desc.ret[1];
+ scm_resp->data = desc.ret[2];
+ pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
+ svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
+ pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
+ scm_resp->result, scm_resp->resp_type, scm_resp->data);
+ return ret;
+}
+
+
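+/*
+ * Dispatch an SCM call through the legacy scm_call() interface on
+ * pre-ARMv8 targets, or through qseecom_scm_call2() on ARMv8; the
+ * explicit buffer lengths are only used by the legacy interface.
+ */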
+static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+ if (!is_scm_armv8())
+ return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
+ resp_buf, resp_len);
+ else
+ return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
+}
+
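+/*
+ * Return 1 if no listener with this listener_id is registered yet, 0
+ * otherwise; walks the registered listener list under its spinlock.
+ */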
+static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
+ struct qseecom_register_listener_req *svc)
+{
+ struct qseecom_registered_listener_list *ptr;
+ int unique = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+ list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
+ if (ptr->svc.listener_id == svc->listener_id) {
+ pr_err("Service id: %u is already registered\n",
+ ptr->svc.listener_id);
+ unique = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+ return unique;
+}
+
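+/*
+ * Look up a registered listener entry by listener_id; returns NULL
+ * (after logging an error) when no matching service is registered.
+ */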
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+ int32_t listener_id)
+{
+ struct qseecom_registered_listener_list *entry = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_listener_list_head, list) {
+ if (entry->svc.listener_id == listener_id)
+ break;
+ }
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+ if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
+ pr_err("Service id: %u is not found\n", listener_id);
+ return NULL;
+ }
+
+ return entry;
+}
+
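+/*
+ * Import and map the listener's ION shared buffer, then register it
+ * with QSEE via a QSEOS_REGISTER_LISTENER scm call, using the 32-bit
+ * or 64-bit request layout depending on the QSEE version.
+ */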
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+ struct qseecom_dev_handle *handle,
+ struct qseecom_register_listener_req *listener)
+{
+ int ret = 0;
+ struct qseecom_register_listener_ireq req;
+ struct qseecom_register_listener_64bit_ireq req_64bit;
+ struct qseecom_command_scm_resp resp;
+ ion_phys_addr_t pa;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+
+ /* Get the handle of the shared fd */
+ svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ listener->ifd_data_fd);
+ if (IS_ERR_OR_NULL(svc->ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ return -ENOMEM;
+ }
+
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
+ if (ret) {
+ pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+ ret);
+ return ret;
+ }
+ /* Populate the structure for sending scm call to load image */
+ svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
+ if (IS_ERR_OR_NULL(svc->sb_virt)) {
+ pr_err("ION memory mapping for listener shared buffer failed\n");
+ return -ENOMEM;
+ }
+ svc->sb_phys = (phys_addr_t)pa;
+
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+ req.listener_id = svc->svc.listener_id;
+ req.sb_len = svc->sb_length;
+ req.sb_ptr = (uint32_t)svc->sb_phys;
+ cmd_buf = (void *)&req;
+ cmd_len = sizeof(struct qseecom_register_listener_ireq);
+ } else {
+ req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+ req_64bit.listener_id = svc->svc.listener_id;
+ req_64bit.sb_len = svc->sb_length;
+ req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
+ cmd_buf = (void *)&req_64bit;
+ cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
+ }
+
+ resp.result = QSEOS_RESULT_INCOMPLETE;
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("qseecom_scm_call failed with err: %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("Error SB registration req: resp.result = %d\n",
+ resp.result);
+ return -EPERM;
+ }
+ return 0;
+}
+
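+/*
+ * Register a new listener: copy and validate the userspace request,
+ * reject duplicate listener ids, set up the shared buffer, and add
+ * the entry to the registered listener list.
+ */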
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct qseecom_register_listener_req rcvd_lstnr;
+ struct qseecom_registered_listener_list *new_entry;
+
+ ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
+ rcvd_lstnr.sb_size))
+ return -EFAULT;
+
+ data->listener.id = 0;
+ if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
+ pr_err("Service is not unique and is already registered\n");
+ data->released = true;
+ return -EBUSY;
+ }
+
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+ new_entry->rcv_req_flag = 0;
+
+ new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+ new_entry->sb_length = rcvd_lstnr.sb_size;
+ new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
+ if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("qseecom_set_sb_memory failed\n");
+ kzfree(new_entry);
+ return -ENOMEM;
+ }
+
+ data->listener.id = rcvd_lstnr.listener_id;
+ init_waitqueue_head(&new_entry->rcv_req_wq);
+ init_waitqueue_head(&new_entry->listener_block_app_wq);
+ new_entry->send_resp_flag = 0;
+ new_entry->listener_in_use = false;
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+ list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+ return ret;
+}
+
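+/*
+ * Deregister the listener in QSEE, abort and wake any waiters on it,
+ * drain outstanding ioctls, remove it from the registered listener
+ * list, and release its ION shared buffer mapping.
+ */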
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+ unsigned long flags;
+ uint32_t unmap_mem = 0;
+ struct qseecom_register_listener_ireq req;
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ struct qseecom_command_scm_resp resp;
+ struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
+
+ req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+ req.listener_id = data->listener.id;
+ resp.result = QSEOS_RESULT_INCOMPLETE;
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+ sizeof(req), &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
+ ret, data->listener.id);
+ return ret;
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
+ resp.result, data->listener.id);
+ return -EPERM;
+ }
+
+ data->abort = 1;
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+ list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
+ list) {
+ if (ptr_svc->svc.listener_id == data->listener.id) {
+ wake_up_all(&ptr_svc->rcv_req_wq);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+ while (atomic_read(&data->ioctl_count) > 1) {
+ if (wait_event_freezable(data->abort_wq,
+ atomic_read(&data->ioctl_count) <= 1)) {
+ pr_err("Interrupted from abort\n");
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+ list_for_each_entry(ptr_svc,
+ &qseecom.registered_listener_list_head, list) {
+ if (ptr_svc->svc.listener_id == data->listener.id) {
+ if (ptr_svc->sb_virt) {
+ unmap_mem = 1;
+ ihandle = ptr_svc->ihandle;
+ }
+ list_del(&ptr_svc->list);
+ kzfree(ptr_svc);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+ /* Unmap the memory */
+ if (unmap_mem) {
+ if (!IS_ERR_OR_NULL(ihandle)) {
+ ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+ ion_free(qseecom.ion_clnt, ihandle);
+ }
+ }
+ data->released = true;
+ return ret;
+}
+
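+/*
+ * Apply a bus bandwidth mode: gate the QSEE core source clock for
+ * INACTIVE (ungate it otherwise), then update the msm_bus_scale vote
+ * when the mode changes, rolling the clock state back on failure.
+ */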
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+ int ret = 0;
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ if (qclk->ce_core_src_clk != NULL) {
+ if (mode == INACTIVE) {
+ __qseecom_disable_clk(CLK_QSEE);
+ } else {
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ pr_err("CLK enabling failed (%d) MODE (%d)\n",
+ ret, mode);
+ }
+ }
+
+ if ((!ret) && (qseecom.current_mode != mode)) {
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, mode);
+ if (ret) {
+ pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+ ret, mode);
+ if (qclk->ce_core_src_clk != NULL) {
+ if (mode == INACTIVE) {
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ pr_err("CLK enable failed\n");
+ } else
+ __qseecom_disable_clk(CLK_QSEE);
+ }
+ }
+ qseecom.current_mode = mode;
+ }
+ return ret;
+}
+
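+/*
+ * Workqueue handler scheduled by the bw scale-down timer: drop the
+ * bandwidth vote to INACTIVE if the inactivity timer is still marked
+ * as running.
+ */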
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+ mutex_lock(&app_access_lock);
+ mutex_lock(&qsee_bw_mutex);
+ if (qseecom.timer_running)
+ __qseecom_set_msm_bus_request(INACTIVE);
+ pr_debug("current_mode = %d, cumulative_mode = %d\n",
+ qseecom.current_mode, qseecom.cumulative_mode);
+ qseecom.timer_running = false;
+ mutex_unlock(&qsee_bw_mutex);
+ mutex_unlock(&app_access_lock);
+}
+
+static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
+{
+ schedule_work(&qseecom.bw_inactive_req_ws);
+}
+
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+ struct qseecom_clk *qclk;
+ int ret = 0;
+
+ mutex_lock(&clk_access_lock);
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ if (qclk->clk_access_cnt > 2) {
+ pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+ ret = -EINVAL;
+ goto err_dec_ref_cnt;
+ }
+ if (qclk->clk_access_cnt == 2)
+ qclk->clk_access_cnt--;
+
+err_dec_ref_cnt:
+ mutex_unlock(&clk_access_lock);
+ return ret;
+}
+
+
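+/*
+ * Request a bandwidth mode (mode 0 means "use the cumulative mode,
+ * capped at HIGH") and cancel any pending scale-down timer so the new
+ * vote is not undone by a stale inactivity timeout.
+ */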
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
+{
+ int32_t ret = 0;
+ int32_t request_mode = INACTIVE;
+
+ mutex_lock(&qsee_bw_mutex);
+ if (mode == 0) {
+ if (qseecom.cumulative_mode > MEDIUM)
+ request_mode = HIGH;
+ else
+ request_mode = qseecom.cumulative_mode;
+ } else {
+ request_mode = mode;
+ }
+
+ ret = __qseecom_set_msm_bus_request(request_mode);
+ if (ret) {
+ pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+ ret, request_mode);
+ goto err_scale_timer;
+ }
+
+ if (qseecom.timer_running) {
+ ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+ if (ret) {
+ pr_err("Failed to decrease clk ref count.\n");
+ goto err_scale_timer;
+ }
+ del_timer_sync(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = false;
+ }
+err_scale_timer:
+ mutex_unlock(&qsee_bw_mutex);
+ return ret;
+}
+
+
+static int qseecom_unregister_bus_bandwidth_needs(
+ struct qseecom_dev_handle *data)
+{
+ int32_t ret = 0;
+
+ qseecom.cumulative_mode -= data->mode;
+ data->mode = INACTIVE;
+
+ return ret;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+ struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+ int32_t ret = 0;
+
+ if (data->mode == INACTIVE) {
+ qseecom.cumulative_mode += request_mode;
+ data->mode = request_mode;
+ } else {
+ if (data->mode != request_mode) {
+ qseecom.cumulative_mode -= data->mode;
+ qseecom.cumulative_mode += request_mode;
+ data->mode = request_mode;
+ }
+ }
+ return ret;
+}
+
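+/*
+ * Vote for both the DFAB and SFPB clocks on behalf of this client;
+ * the DFAB vote is dropped again if the SFPB vote fails.
+ */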
+static int qseecom_perf_enable(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+
+ ret = qsee_vote_for_clock(data, CLK_DFAB);
+ if (ret) {
+ pr_err("Failed to vote for DFAB clock with err %d\n", ret);
+ goto perf_enable_exit;
+ }
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
+ if (ret) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ pr_err("Failed to vote for SFPB clock with err %d\n", ret);
+ goto perf_enable_exit;
+ }
+
+perf_enable_exit:
+ return ret;
+}
+
+static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int32_t ret = 0;
+ int32_t req_mode;
+
+ if (qseecom.no_clock_support)
+ return 0;
+
+ ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ if (req_mode > HIGH) {
+ pr_err("Invalid bandwidth mode (%d)\n", req_mode);
+ return -EINVAL;
+ }
+
+ /*
+ * Register bus bandwidth needs if bus scaling feature is enabled;
+	 * otherwise, qseecom enables/disables clocks for the client directly.
+ */
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ pr_debug("Bus scaling feature is NOT enabled\n");
+ pr_debug("request bandwidth mode %d for the client\n",
+ req_mode);
+ if (req_mode != INACTIVE) {
+ ret = qseecom_perf_enable(data);
+ if (ret)
+ pr_err("Failed to vote for clock with err %d\n",
+ ret);
+ } else {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ }
+ }
+ return ret;
+}
+
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+ if (qseecom.no_clock_support)
+ return;
+
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(duration);
+ mod_timer(&(qseecom.bw_scale_down_timer),
+ qseecom.bw_scale_down_timer.expires);
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
+}
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+ if (!qseecom.support_bus_scaling)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ else
+ __qseecom_add_bw_scale_down_timer(
+ QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+
+ if (qseecom.support_bus_scaling) {
+ ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+ if (ret)
+ pr_err("Failed to set bw MEDIUM.\n");
+ } else {
+ ret = qsee_vote_for_clock(data, CLK_SFPB);
+ if (ret)
+ pr_err("Fail vote for clk SFPB ret %d\n", ret);
+ }
+ return ret;
+}
+
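+/*
+ * Set up the client's shared buffer: validate the user-supplied fd,
+ * base address and length, import the ION buffer, and record its
+ * physical and kernel virtual addresses in the client handle.
+ */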
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ ion_phys_addr_t pa;
+ int32_t ret;
+ struct qseecom_set_sb_mem_param_req req;
+ size_t len;
+
+ /* Copy the relevant information needed for loading the image */
+ if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
+ return -EFAULT;
+
+ if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
+ (req.sb_len == 0)) {
+		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
+ req.ifd_data_fd, req.sb_len, req.virt_sb_base);
+ return -EFAULT;
+ }
+ if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
+ req.sb_len))
+ return -EFAULT;
+
+ /* Get the handle of the shared fd */
+ data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ req.ifd_data_fd);
+ if (IS_ERR_OR_NULL(data->client.ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ return -ENOMEM;
+ }
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+ if (ret) {
+ pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ if (len < req.sb_len) {
+ pr_err("Requested length (0x%x) is > allocated (%zu)\n",
+ req.sb_len, len);
+ return -EINVAL;
+ }
+ /* Populate the structure for sending scm call to load image */
+ data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+ data->client.ihandle);
+ if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+ pr_err("ION memory mapping for client shared buf failed\n");
+ return -ENOMEM;
+ }
+ data->client.sb_phys = (phys_addr_t)pa;
+ data->client.sb_length = req.sb_len;
+ data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
+ return 0;
+}
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
+{
+ int ret;
+
+ ret = (qseecom.send_resp_flag != 0);
+ return ret || data->abort;
+}
+
+static int __qseecom_reentrancy_listener_has_sent_rsp(
+ struct qseecom_dev_handle *data,
+ struct qseecom_registered_listener_list *ptr_svc)
+{
+ int ret;
+
+ ret = (ptr_svc->send_resp_flag != 0);
+ return ret || data->abort;
+}
+
+static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_client_listener_data_irsp *send_data_rsp,
+ struct qseecom_registered_listener_list *ptr_svc,
+			uint32_t lstnr)
+{
+ int ret = 0;
+
+ send_data_rsp->status = QSEOS_RESULT_FAILURE;
+ qseecom.send_resp_flag = 0;
+ send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ send_data_rsp->listener_id = lstnr;
+ if (ptr_svc)
+ pr_warn("listener_id:%x, lstnr: %x\n",
+ ptr_svc->svc.listener_id, lstnr);
+ if (ptr_svc && ptr_svc->ihandle) {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
+ ptr_svc->sb_virt, ptr_svc->sb_length,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (lstnr == RPMB_SERVICE) {
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+ }
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
+				sizeof(*send_data_rsp), resp, sizeof(*resp));
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+ ret, data->client.app_id);
+ if (lstnr == RPMB_SERVICE)
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+ }
+ if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+ (resp->result != QSEOS_RESULT_INCOMPLETE)) {
+		pr_err("fail: resp res = %d, app_id = %d, lstnr = %d\n",
+ resp->result, data->client.app_id, lstnr);
+ ret = -EINVAL;
+ }
+ if (lstnr == RPMB_SERVICE)
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+}
+
+static void __qseecom_clean_listener_sglistinfo(
+ struct qseecom_registered_listener_list *ptr_svc)
+{
+ if (ptr_svc->sglist_cnt) {
+ memset(ptr_svc->sglistinfo_ptr, 0,
+ SGLISTINFO_TABLE_SIZE);
+ ptr_svc->sglist_cnt = 0;
+ }
+}
+
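+/*
+ * Service QSEOS_RESULT_INCOMPLETE responses: wake the listener named
+ * in resp->data, block all signals while waiting (freezably) for its
+ * response, then send the listener's status back to QSEE and loop
+ * until the result is no longer INCOMPLETE.
+ */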
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+ struct qseecom_command_scm_resp *resp)
+{
+ int ret = 0;
+ int rc = 0;
+ uint32_t lstnr;
+ unsigned long flags;
+ struct qseecom_client_listener_data_irsp send_data_rsp;
+ struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ sigset_t new_sigset;
+ sigset_t old_sigset;
+ uint32_t status;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = NULL;
+
+ while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+ lstnr = resp->data;
+ /*
+		 * Wake up the blocked listener service with this lstnr id
+ */
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+ flags);
+ list_for_each_entry(ptr_svc,
+ &qseecom.registered_listener_list_head, list) {
+ if (ptr_svc->svc.listener_id == lstnr) {
+ ptr_svc->listener_in_use = true;
+ ptr_svc->rcv_req_flag = 1;
+ wake_up_interruptible(&ptr_svc->rcv_req_wq);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+ flags);
+
+ if (ptr_svc == NULL) {
+ pr_err("Listener Svc %d does not exist\n", lstnr);
+ __qseecom_qseos_fail_return_resp_tz(data, resp,
+ &send_data_rsp, ptr_svc, lstnr);
+ return -EINVAL;
+ }
+
+ if (!ptr_svc->ihandle) {
+ pr_err("Client handle is not initialized\n");
+ __qseecom_qseos_fail_return_resp_tz(data, resp,
+ &send_data_rsp, ptr_svc, lstnr);
+ return -EINVAL;
+ }
+
+ if (ptr_svc->svc.listener_id != lstnr) {
+ pr_warn("Service requested does not exist\n");
+ __qseecom_qseos_fail_return_resp_tz(data, resp,
+ &send_data_rsp, ptr_svc, lstnr);
+ return -ERESTARTSYS;
+ }
+ pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals */
+ sigfillset(&new_sigset);
+ /* block all signals */
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+ do {
+ /*
+ * When reentrancy is not supported, check global
+ * send_resp_flag; otherwise, check this listener's
+ * send_resp_flag.
+ */
+ if (!qseecom.qsee_reentrancy_support &&
+ !wait_event_freezable(qseecom.send_resp_wq,
+ __qseecom_listener_has_sent_rsp(data))) {
+ break;
+ }
+
+ if (qseecom.qsee_reentrancy_support &&
+ !wait_event_freezable(qseecom.send_resp_wq,
+ __qseecom_reentrancy_listener_has_sent_rsp(
+ data, ptr_svc))) {
+ break;
+ }
+ } while (1);
+
+ /* restore signal mask */
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+ if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+ data->client.app_id, lstnr, ret);
+ rc = -ENODEV;
+ status = QSEOS_RESULT_FAILURE;
+ } else {
+ status = QSEOS_RESULT_SUCCESS;
+ }
+
+ qseecom.send_resp_flag = 0;
+ ptr_svc->send_resp_flag = 0;
+ table = ptr_svc->sglistinfo_ptr;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_rsp.listener_id = lstnr;
+ send_data_rsp.status = status;
+ send_data_rsp.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_rsp.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp;
+ cmd_len = sizeof(send_data_rsp);
+ } else {
+ send_data_rsp_64bit.listener_id = lstnr;
+ send_data_rsp_64bit.status = status;
+ send_data_rsp_64bit.sglistinfo_ptr =
+ virt_to_phys(table);
+ send_data_rsp_64bit.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp_64bit;
+ cmd_len = sizeof(send_data_rsp_64bit);
+ }
+		if (!qseecom.whitelist_support)
+ *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ else
+ *(uint32_t *)cmd_buf =
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+ if (ptr_svc) {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ptr_svc->ihandle,
+ ptr_svc->sb_virt, ptr_svc->sb_length,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+ }
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len, resp, sizeof(*resp));
+ ptr_svc->listener_in_use = false;
+ __qseecom_clean_listener_sglistinfo(ptr_svc);
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+ ret, data->client.app_id);
+ if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+ }
+ if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+ (resp->result != QSEOS_RESULT_INCOMPLETE)) {
+			pr_err("fail: resp res = %d, app_id = %d, lstnr = %d\n",
+ resp->result, data->client.app_id, lstnr);
+ ret = -EINVAL;
+ }
+ if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+ __qseecom_disable_clk(CLK_QSEE);
+
+ }
+ if (rc)
+ return rc;
+
+ return ret;
+}
+
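+/*
+ * Called when QSEE reports an app blocked on a busy listener: sleep
+ * (with app_access_lock dropped) until the listener is free, then send
+ * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ unblocks the app.
+ */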
+int __qseecom_process_reentrancy_blocked_on_listener(
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ struct qseecom_registered_listener_list *list_ptr;
+ int ret = 0;
+ struct qseecom_continue_blocked_request_ireq ireq;
+ struct qseecom_command_scm_resp continue_resp;
+ sigset_t new_sigset, old_sigset;
+ unsigned long flags;
+ bool found_app = false;
+
+ if (!resp || !data) {
+ pr_err("invalid resp or data pointer\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* find app_id & img_name from list */
+ if (!ptr_app) {
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+ list) {
+ if ((ptr_app->app_id == data->client.app_id) &&
+ (!strcmp(ptr_app->app_name,
+ data->client.app_name))) {
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags);
+ if (!found_app) {
+ pr_err("app_id %d (%s) is not found\n",
+ data->client.app_id,
+ (char *)data->client.app_name);
+ ret = -ENOENT;
+ goto exit;
+ }
+ }
+
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID\n");
+ ret = -ENODATA;
+ goto exit;
+ }
+ pr_debug("lsntr %d in_use = %d\n",
+ resp->data, list_ptr->listener_in_use);
+ ptr_app->blocked_on_listener_id = resp->data;
+ /* sleep until listener is available */
+ do {
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+ mutex_unlock(&app_access_lock);
+ do {
+ if (!wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ break;
+ }
+ } while (1);
+ mutex_lock(&app_access_lock);
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+	} while (list_ptr->listener_in_use);
+ ptr_app->blocked_on_listener_id = 0;
+ /* notify the blocked app that listener is available */
+ pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
+ resp->data, data->client.app_id,
+ data->client.app_name);
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+ ireq.app_id = data->client.app_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret) {
+ pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
+ data->client.app_id,
+ data->client.app_name, ret);
+ goto exit;
+ }
+ /*
+	 * After the TZ app is unblocked, continue to the next case to
+	 * process the incomplete request.
+ */
+ resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+ return ret;
+}
+
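+/*
+ * Reentrant variant of __qseecom_process_incomplete_cmd(): it drops
+ * app_access_lock while waiting for the listener response and also
+ * handles QSEOS_RESULT_BLOCKED_ON_LISTENER from the follow-up call.
+ */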
+static int __qseecom_reentrancy_process_incomplete_cmd(
+ struct qseecom_dev_handle *data,
+ struct qseecom_command_scm_resp *resp)
+{
+ int ret = 0;
+ int rc = 0;
+ uint32_t lstnr;
+ unsigned long flags;
+ struct qseecom_client_listener_data_irsp send_data_rsp;
+ struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ sigset_t new_sigset;
+ sigset_t old_sigset;
+ uint32_t status;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = NULL;
+
+ while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
+ lstnr = resp->data;
+ /*
+		 * Wake up the blocked listener service with this lstnr id
+ */
+ spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+ flags);
+ list_for_each_entry(ptr_svc,
+ &qseecom.registered_listener_list_head, list) {
+ if (ptr_svc->svc.listener_id == lstnr) {
+ ptr_svc->listener_in_use = true;
+ ptr_svc->rcv_req_flag = 1;
+ wake_up_interruptible(&ptr_svc->rcv_req_wq);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+ flags);
+
+ if (ptr_svc == NULL) {
+ pr_err("Listener Svc %d does not exist\n", lstnr);
+ return -EINVAL;
+ }
+
+ if (!ptr_svc->ihandle) {
+ pr_err("Client handle is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (ptr_svc->svc.listener_id != lstnr) {
+ pr_warn("Service requested does not exist\n");
+ return -ERESTARTSYS;
+ }
+ pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals */
+ sigfillset(&new_sigset);
+
+ /* block all signals */
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		/* unlock mutex between waking listener and sleep-wait */
+ mutex_unlock(&app_access_lock);
+ do {
+ if (!wait_event_freezable(qseecom.send_resp_wq,
+ __qseecom_reentrancy_listener_has_sent_rsp(
+ data, ptr_svc))) {
+ break;
+ }
+ } while (1);
+ /* lock mutex again after resp sent */
+ mutex_lock(&app_access_lock);
+ ptr_svc->send_resp_flag = 0;
+ qseecom.send_resp_flag = 0;
+
+ /* restore signal mask */
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+ if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+ data->client.app_id, lstnr, ret);
+ rc = -ENODEV;
+ status = QSEOS_RESULT_FAILURE;
+ } else {
+ status = QSEOS_RESULT_SUCCESS;
+ }
+ table = ptr_svc->sglistinfo_ptr;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_rsp.listener_id = lstnr;
+ send_data_rsp.status = status;
+ send_data_rsp.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp;
+ cmd_len = sizeof(send_data_rsp);
+ } else {
+ send_data_rsp_64bit.listener_id = lstnr;
+ send_data_rsp_64bit.status = status;
+ send_data_rsp_64bit.sglistinfo_ptr =
+ virt_to_phys(table);
+ send_data_rsp_64bit.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp_64bit;
+ cmd_len = sizeof(send_data_rsp_64bit);
+ }
+		if (!qseecom.whitelist_support)
+ *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ else
+ *(uint32_t *)cmd_buf =
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+ if (ptr_svc) {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ptr_svc->ihandle,
+ ptr_svc->sb_virt, ptr_svc->sb_length,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+ }
+ if (lstnr == RPMB_SERVICE) {
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+ }
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len, resp, sizeof(*resp));
+ ptr_svc->listener_in_use = false;
+ __qseecom_clean_listener_sglistinfo(ptr_svc);
+ wake_up_interruptible(&ptr_svc->listener_block_app_wq);
+
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+ ret, data->client.app_id);
+ goto exit;
+ }
+
+ switch (resp->result) {
+ case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_warn("sent lstnr %d rsp, but app %d is blocked on lstnr %d\n",
+ lstnr, data->client.app_id, resp->data);
+ if (lstnr == resp->data) {
+ pr_err("lstnr %d should not be blocked!\n",
+ lstnr);
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = __qseecom_process_reentrancy_blocked_on_listener(
+ resp, NULL, data);
+ if (ret) {
+ pr_err("failed to process App(%d) %s blocked on listener %d\n",
+ data->client.app_id,
+ data->client.app_name, resp->data);
+ goto exit;
+ }
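+			/* fall through: resp->result is now INCOMPLETE */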
+ case QSEOS_RESULT_SUCCESS:
+ case QSEOS_RESULT_INCOMPLETE:
+ break;
+ default:
+			pr_err("fail: resp res = %d, app_id = %d, lstnr = %d\n",
+ resp->result, data->client.app_id, lstnr);
+ ret = -EINVAL;
+ goto exit;
+ }
+exit:
+ if (lstnr == RPMB_SERVICE)
+ __qseecom_disable_clk(CLK_QSEE);
+
+ }
+ if (rc)
+ return rc;
+
+ return ret;
+}
+
+/*
+ * QSEE does not support OS-level command reentrancy until RE phase-3,
+ * and QSEE OS-level scm_call commands fail while any TZ app is blocked.
+ * So, before sending an OS-level scm call, first check that no app is
+ * blocked; if one is, wait until all apps are unblocked.
+ */
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
+{
+ sigset_t new_sigset, old_sigset;
+
+ if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
+ qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
+ IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
+ /* thread sleep until this app unblocked */
+ while (qseecom.app_block_ref_cnt > 0) {
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+ mutex_unlock(&app_access_lock);
+ do {
+ if (!wait_event_freezable(qseecom.app_block_wq,
+ (qseecom.app_block_ref_cnt == 0)))
+ break;
+ } while (1);
+ mutex_lock(&app_access_lock);
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+ }
+ }
+}
+
+/*
+ * An scm_call to send data will fail if this TA is blocked or if more
+ * than one TA is requesting listener services; so first check whether
+ * this thread needs to wait.
+ */
+static void __qseecom_reentrancy_check_if_this_app_blocked(
+ struct qseecom_registered_app_list *ptr_app)
+{
+ sigset_t new_sigset, old_sigset;
+
+ if (qseecom.qsee_reentrancy_support) {
+ while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
+ /* thread sleep until this app unblocked */
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+ mutex_unlock(&app_access_lock);
+ do {
+ if (!wait_event_freezable(qseecom.app_block_wq,
+ (!ptr_app->app_blocked &&
+ qseecom.app_block_ref_cnt <= 1)))
+ break;
+ } while (1);
+ mutex_lock(&app_access_lock);
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+ }
+ }
+}
+
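+/*
+ * Look up an app id by name, first in the local registered app list,
+ * then via a QSEOS_APP_LOOKUP_COMMAND scm call; *app_id remains 0 if
+ * the app is not loaded.
+ */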
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
+ uint32_t *app_id)
+{
+ int32_t ret;
+ struct qseecom_command_scm_resp resp;
+ bool found_app = false;
+ struct qseecom_registered_app_list *entry = NULL;
+ unsigned long flags = 0;
+
+ if (!app_id) {
+ pr_err("Null pointer to app_id\n");
+ return -EINVAL;
+ }
+ *app_id = 0;
+
+ /* check if app exists and has been registered locally */
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list) {
+ if (!strcmp(entry->app_name, req.app_name)) {
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+ if (found_app) {
+ pr_debug("Found app with id %d\n", entry->app_id);
+ *app_id = entry->app_id;
+ return 0;
+ }
+
+ memset((void *)&resp, 0, sizeof(resp));
+
+ /* SCM_CALL to check if app_id for the mentioned app exists */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+ sizeof(struct qseecom_check_app_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to check if app is already loaded failed\n");
+ return -EINVAL;
+ }
+
+ if (resp.result == QSEOS_RESULT_FAILURE)
+ return 0;
+
+ switch (resp.resp_type) {
+	/* qsee returned a listener-type response */
+ case QSEOS_LISTENER_ID:
+		pr_err("resp type is of listener type instead of app\n");
+ return -EINVAL;
+ case QSEOS_APP_ID:
+ *app_id = resp.data;
+ return 0;
+ default:
+		pr_err("invalid resp type (%d) from qsee\n",
+ resp.resp_type);
+ return -ENODEV;
+ }
+}
+
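+/*
+ * Load a trusted app: ensure the matching cmnlib variant is loaded,
+ * vote for bus/clocks, reuse an already-loaded app by bumping its
+ * ref_cnt, or else send QSEOS_APP_START_COMMAND with the app image
+ * and add the new app to the registered app list.
+ */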
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+ struct qseecom_registered_app_list *entry = NULL;
+ unsigned long flags = 0;
+ u32 app_id = 0;
+ struct ion_handle *ihandle; /* Ion handle */
+ struct qseecom_load_img_req load_img_req;
+ int32_t ret = 0;
+ ion_phys_addr_t pa = 0;
+ size_t len;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_check_app_ireq req;
+ struct qseecom_load_app_ireq load_req;
+ struct qseecom_load_app_64bit_ireq load_req_64bit;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ bool first_time = false;
+
+ /* Copy the relevant information needed for loading the image */
+ if (copy_from_user(&load_img_req,
+ (void __user *)argp,
+ sizeof(struct qseecom_load_img_req))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ /* Check and load cmnlib */
+ if (qseecom.qsee_version > QSEEE_VERSION_00) {
+ if (!qseecom.commonlib_loaded &&
+ load_img_req.app_arch == ELFCLASS32) {
+ ret = qseecom_load_commonlib_image(data, "cmnlib");
+ if (ret) {
+ pr_err("failed to load cmnlib\n");
+ return -EIO;
+ }
+ qseecom.commonlib_loaded = true;
+ pr_debug("cmnlib is loaded\n");
+ }
+
+ if (!qseecom.commonlib64_loaded &&
+ load_img_req.app_arch == ELFCLASS64) {
+ ret = qseecom_load_commonlib_image(data, "cmnlib64");
+ if (ret) {
+ pr_err("failed to load cmnlib64\n");
+ return -EIO;
+ }
+ qseecom.commonlib64_loaded = true;
+ pr_debug("cmnlib64 is loaded\n");
+ }
+ }
+
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+ mutex_unlock(&qsee_bw_mutex);
+ if (ret)
+ return ret;
+ }
+
+ /* Vote for the SFPB clock */
+ ret = __qseecom_enable_clk_scale_up(data);
+ if (ret)
+ goto enable_clk_err;
+
+ req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+ load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
+ strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+ ret = __qseecom_check_app_exists(req, &app_id);
+ if (ret < 0)
+ goto loadapp_err;
+
+ if (app_id) {
+ pr_debug("App id %d (%s) already exists\n", app_id,
+ (char *)(req.app_name));
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list){
+ if (entry->app_id == app_id) {
+ entry->ref_cnt++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ ret = 0;
+ } else {
+ first_time = true;
+		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
+ (char *)(load_img_req.img_name));
+ /* Get the handle of the shared fd */
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ load_img_req.ifd_data_fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ ret = -ENOMEM;
+ goto loadapp_err;
+ }
+
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+ if (ret) {
+ pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+ ret);
+ goto loadapp_err;
+ }
+ if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+ pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+ len, load_img_req.mdt_len,
+ load_img_req.img_len);
+ ret = -EINVAL;
+ goto loadapp_err;
+ }
+ /* Populate the structure for sending scm call to load image */
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+ load_req.mdt_len = load_img_req.mdt_len;
+ load_req.img_len = load_img_req.img_len;
+ strlcpy(load_req.app_name, load_img_req.img_name,
+ MAX_APP_NAME_SIZE);
+ load_req.phy_addr = (uint32_t)pa;
+ cmd_buf = (void *)&load_req;
+ cmd_len = sizeof(struct qseecom_load_app_ireq);
+ } else {
+ load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+ load_req_64bit.mdt_len = load_img_req.mdt_len;
+ load_req_64bit.img_len = load_img_req.img_len;
+ strlcpy(load_req_64bit.app_name, load_img_req.img_name,
+ MAX_APP_NAME_SIZE);
+ load_req_64bit.phy_addr = (uint64_t)pa;
+ cmd_buf = (void *)&load_req_64bit;
+ cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+ }
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto loadapp_err;
+ }
+
+ /* SCM_CALL to load the app and get the app_id back */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
+ cmd_len, &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to load app failed\n");
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ ret = -EINVAL;
+ goto loadapp_err;
+ }
+
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ ret = -EFAULT;
+ goto loadapp_err;
+ }
+
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ ret = -EFAULT;
+ goto loadapp_err;
+ }
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("scm_call failed resp.result unknown, %d\n",
+ resp.result);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ ret = -EFAULT;
+ goto loadapp_err;
+ }
+
+ app_id = resp.data;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto loadapp_err;
+ }
+ entry->app_id = app_id;
+ entry->ref_cnt = 1;
+ entry->app_arch = load_img_req.app_arch;
+ /*
+ * keymaster app may be first loaded as "keymaste" by qseecomd,
+ * and then used as "keymaster" on some targets. To avoid app
+ * name checking error, register "keymaster" into app_list and
+ * thread private data.
+ */
+ if (!strcmp(load_img_req.img_name, "keymaste"))
+ strlcpy(entry->app_name, "keymaster",
+ MAX_APP_NAME_SIZE);
+ else
+ strlcpy(entry->app_name, load_img_req.img_name,
+ MAX_APP_NAME_SIZE);
+ entry->app_blocked = false;
+ entry->blocked_on_listener_id = 0;
+
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags);
+
+ pr_warn("App with id %u (%s) now loaded\n", app_id,
+ (char *)(load_img_req.img_name));
+ }
+ data->client.app_id = app_id;
+ data->client.app_arch = load_img_req.app_arch;
+ if (!strcmp(load_img_req.img_name, "keymaste"))
+ strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
+ else
+ strlcpy(data->client.app_name, load_img_req.img_name,
+ MAX_APP_NAME_SIZE);
+ load_img_req.app_id = app_id;
+ if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+ pr_err("copy_to_user failed\n");
+ ret = -EFAULT;
+		if (first_time) {
+ spin_lock_irqsave(
+ &qseecom.registered_app_list_lock, flags);
+ list_del(&entry->list);
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ kzfree(entry);
+ }
+ }
+
+loadapp_err:
+ __qseecom_disable_clk_scale_down(data);
+enable_clk_err:
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom_unregister_bus_bandwidth_needs(data);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+ return ret;
+}
+
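+/*
+ * Wait for all in-flight ioctls on this handle to drain before the app
+ * is unloaded; when reentrancy is supported, app_access_lock is dropped
+ * while waiting so blocked commands can complete.
+ */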
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	int ret = 1;	/* default: proceed with app unload */
+
+ wake_up_all(&qseecom.send_resp_wq);
+ if (qseecom.qsee_reentrancy_support)
+ mutex_unlock(&app_access_lock);
+ while (atomic_read(&data->ioctl_count) > 1) {
+ if (wait_event_freezable(data->abort_wq,
+ atomic_read(&data->ioctl_count) <= 1)) {
+ pr_err("Interrupted from abort\n");
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ if (qseecom.qsee_reentrancy_support)
+ mutex_lock(&app_access_lock);
+ return ret;
+}
+
+static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+
+ if (!IS_ERR_OR_NULL(data->client.ihandle)) {
+ ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
+ ion_free(qseecom.ion_clnt, data->client.ihandle);
+ data->client.ihandle = NULL;
+ }
+ return ret;
+}
+
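+/*
+ * Drop one reference to the app; the APP_SHUTDOWN SCM call is issued
+ * only when this is the last reference or the app crashed. The
+ * keymaster app is never unloaded from TZ.
+ */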
+static int qseecom_unload_app(struct qseecom_dev_handle *data,
+ bool app_crash)
+{
+ unsigned long flags;
+ unsigned long flags1;
+ int ret = 0;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_registered_app_list *ptr_app = NULL;
+ bool unload = false;
+ bool found_app = false;
+ bool found_dead_app = false;
+
+ if (!data) {
+ pr_err("Invalid/uninitialized device handle\n");
+ return -EINVAL;
+ }
+
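+	/* Prefix match intentionally covers both the "keymaste" and
+	 * "keymaster" spellings under which the app may be registered.
+	 */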
+ if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+ pr_debug("Do not unload keymaster app from tz\n");
+ goto unload_exit;
+ }
+
+ __qseecom_cleanup_app(data);
+ __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
+
+ if (data->client.app_id > 0) {
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+ list) {
+ if (ptr_app->app_id == data->client.app_id) {
+ if (!strcmp((void *)ptr_app->app_name,
+ (void *)data->client.app_name)) {
+ found_app = true;
+ if (app_crash || ptr_app->ref_cnt == 1)
+ unload = true;
+ break;
+ }
+ found_dead_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags);
+ if (found_app == false && found_dead_app == false) {
+ pr_err("Cannot find app with id = %d (%s)\n",
+ data->client.app_id,
+ (char *)data->client.app_name);
+ ret = -EINVAL;
+ goto unload_exit;
+ }
+ }
+
+ if (found_dead_app)
+ pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
+ (char *)data->client.app_name);
+
+ if (unload) {
+ struct qseecom_unload_app_ireq req;
+		/* Populate the structure for sending scm call to unload the app */
+ req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+ req.app_id = data->client.app_id;
+
+ /* SCM_CALL to unload the app */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+ sizeof(struct qseecom_unload_app_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to unload app (id = %d) failed\n",
+ req.app_id);
+ ret = -EFAULT;
+ goto unload_exit;
+ } else {
+ pr_warn("App id %d now unloaded\n", req.app_id);
+ }
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ pr_err("app (%d) unload_failed!!\n",
+ data->client.app_id);
+ ret = -EFAULT;
+ goto unload_exit;
+ }
+ if (resp.result == QSEOS_RESULT_SUCCESS)
+ pr_debug("App (%d) is unloaded!!\n",
+ data->client.app_id);
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd fail err: %d\n",
+ ret);
+ goto unload_exit;
+ }
+ }
+ }
+
+ if (found_app) {
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
+ if (app_crash) {
+ ptr_app->ref_cnt = 0;
+ pr_debug("app_crash: ref_count = 0\n");
+ } else {
+ if (ptr_app->ref_cnt == 1) {
+ ptr_app->ref_cnt = 0;
+ pr_debug("ref_count set to 0\n");
+ } else {
+ ptr_app->ref_cnt--;
+ pr_debug("Can't unload app(%d) inuse\n",
+ ptr_app->app_id);
+ }
+ }
+ if (unload) {
+ list_del(&ptr_app->list);
+ kzfree(ptr_app);
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags1);
+ }
+unload_exit:
+ qseecom_unmap_ion_allocated_memory(data);
+ data->released = true;
+ return ret;
+}
+
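+/*
+ * Translate a user-space virtual address within the client's shared
+ * buffer to the corresponding physical / kernel virtual address. Valid
+ * only for addresses the callers have already checked against
+ * [user_virt_sb_base, user_virt_sb_base + sb_length).
+ */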
+static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+ unsigned long virt)
+{
+ return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
+static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
+ unsigned long virt)
+{
+ return (uintptr_t)data->client.sb_virt +
+ (virt - data->client.user_virt_sb_base);
+}
+
+int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+ struct qseecom_send_svc_cmd_req *req_ptr,
+ struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+ int ret = 0;
+ void *req_buf = NULL;
+
+ if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+ pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+ req_ptr, send_svc_ireq_ptr);
+ return -EINVAL;
+ }
+
+ /* Clients need to ensure req_buf is at base offset of shared buffer */
+ if ((uintptr_t)req_ptr->cmd_req_buf !=
+ data_ptr->client.user_virt_sb_base) {
+ pr_err("cmd buf not pointing to base offset of shared buffer\n");
+ return -EINVAL;
+ }
+
+ if (data_ptr->client.sb_length <
+ sizeof(struct qseecom_rpmb_provision_key)) {
+ pr_err("shared buffer is too small to hold key type\n");
+ return -EINVAL;
+ }
+ req_buf = data_ptr->client.sb_virt;
+
+ send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+ send_svc_ireq_ptr->key_type =
+ ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
+ send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+ send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+ data_ptr, (uintptr_t)req_ptr->resp_buf));
+ send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+ return ret;
+}
+
+int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
+ struct qseecom_send_svc_cmd_req *req_ptr,
+ struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
+{
+ int ret = 0;
+ uint32_t reqd_len_sb_in = 0;
+
+ if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+ pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+ req_ptr, send_svc_ireq_ptr);
+ return -EINVAL;
+ }
+
+ reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
+ if (reqd_len_sb_in > data_ptr->client.sb_length) {
+		pr_err("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+			reqd_len_sb_in, data_ptr->client.sb_length);
+ return -ENOMEM;
+ }
+
+ send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+ send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+ send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+ data_ptr, (uintptr_t)req_ptr->resp_buf));
+ send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+ send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+ data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
+
+ return ret;
+}
+
+static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
+ struct qseecom_send_svc_cmd_req *req)
+{
+ if (!req || !req->resp_buf || !req->cmd_req_buf) {
+ pr_err("req or cmd buffer or response buffer is null\n");
+ return -EINVAL;
+ }
+
+ if (!data || !data->client.ihandle) {
+ pr_err("Client or client handle is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (data->client.sb_virt == NULL) {
+ pr_err("sb_virt null\n");
+ return -EINVAL;
+ }
+
+ if (data->client.user_virt_sb_base == 0) {
+ pr_err("user_virt_sb_base is null\n");
+ return -EINVAL;
+ }
+
+ if (data->client.sb_length == 0) {
+ pr_err("sb_length is 0\n");
+ return -EINVAL;
+ }
+
+ if (((uintptr_t)req->cmd_req_buf <
+ data->client.user_virt_sb_base) ||
+ ((uintptr_t)req->cmd_req_buf >=
+ (data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+ if (((uintptr_t)req->resp_buf <
+ data->client.user_virt_sb_base) ||
+ ((uintptr_t)req->resp_buf >=
+ (data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+ if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
+ (req->cmd_req_len > data->client.sb_length) ||
+ (req->resp_len > data->client.sb_length)) {
+ pr_err("cmd buf length or response buf length not valid\n");
+ return -EINVAL;
+ }
+ if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+ pr_err("Integer overflow detected in req_len & rsp_len\n");
+ return -EINVAL;
+ }
+
+ if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+				data->client.sb_length);
+ return -ENOMEM;
+ }
+ if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+ pr_err("Integer overflow in req_len & cmd_req_buf\n");
+ return -EINVAL;
+ }
+ if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+ pr_err("Integer overflow in resp_len & resp_buf\n");
+ return -EINVAL;
+ }
+ if (data->client.user_virt_sb_base >
+ (ULONG_MAX - data->client.sb_length)) {
+ pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+ return -EINVAL;
+ }
+ if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+ ((uintptr_t)data->client.user_virt_sb_base +
+ data->client.sb_length)) ||
+ (((uintptr_t)req->resp_buf + req->resp_len) >
+ ((uintptr_t)data->client.user_virt_sb_base +
+ data->client.sb_length))) {
+ pr_err("cmd buf or resp buf is out of shared buffer region\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int ret = 0;
+ struct qseecom_client_send_service_ireq send_svc_ireq;
+ struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_send_svc_cmd_req req;
+ void *send_req_ptr;
+ size_t req_buf_size;
+
+ if (copy_from_user(&req,
+ (void __user *)argp,
+ sizeof(req))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if (__validate_send_service_cmd_inputs(data, &req))
+ return -EINVAL;
+
+ data->type = QSEECOM_SECURE_SERVICE;
+
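+	/* Marshal the user request into the internal (ireq) layout the
+	 * secure OS expects: RPMB commands carry a key type read from the
+	 * shared buffer, FSM key commands carry both request and response
+	 * pointers.
+	 */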
+ switch (req.cmd_id) {
+ case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+ case QSEOS_RPMB_ERASE_COMMAND:
+ case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
+ send_req_ptr = &send_svc_ireq;
+ req_buf_size = sizeof(send_svc_ireq);
+ if (__qseecom_process_rpmb_svc_cmd(data, &req,
+ send_req_ptr))
+ return -EINVAL;
+ break;
+ case QSEOS_FSM_LTEOTA_REQ_CMD:
+ case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
+ case QSEOS_FSM_IKE_REQ_CMD:
+ case QSEOS_FSM_IKE_REQ_RSP_CMD:
+ case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
+ case QSEOS_FSM_OEM_FUSE_READ_ROW:
+ case QSEOS_FSM_ENCFS_REQ_CMD:
+ case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
+ send_req_ptr = &send_fsm_key_svc_ireq;
+ req_buf_size = sizeof(send_fsm_key_svc_ireq);
+ if (__qseecom_process_fsm_key_svc_cmd(data, &req,
+ send_req_ptr))
+ return -EINVAL;
+ break;
+ default:
+ pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+ return -EINVAL;
+ }
+
+ if (qseecom.support_bus_scaling) {
+ ret = qseecom_scale_bus_bandwidth_timer(HIGH);
+ if (ret) {
+ pr_err("Fail to set bw HIGH\n");
+ return ret;
+ }
+ } else {
+ ret = qseecom_perf_enable(data);
+ if (ret) {
+ pr_err("Failed to vote for clocks with err %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt, data->client.sb_length,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto exit;
+ }
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ (const void *)send_req_ptr,
+ req_buf_size, &resp, sizeof(resp));
+ if (ret) {
+ pr_err("qseecom_scm_call failed with err: %d\n", ret);
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ } else {
+ __qseecom_add_bw_scale_down_timer(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ }
+ goto exit;
+ }
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt, data->client.sb_length,
+ ION_IOC_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto exit;
+ }
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ pr_debug("qseos_result_incomplete\n");
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd fail with result: %d\n",
+ resp.result);
+ }
+ if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
+ pr_warn("RPMB key status is 0x%x\n", resp.result);
+ *(uint32_t *)req.resp_buf = resp.result;
+ ret = 0;
+ }
+ break;
+ case QSEOS_RESULT_FAILURE:
+ pr_err("scm call failed with resp.result: %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ default:
+ pr_err("Response result %d not supported\n",
+ resp.result);
+ ret = -EINVAL;
+ break;
+ }
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ } else {
+ __qseecom_add_bw_scale_down_timer(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ }
+
+exit:
+ return ret;
+}
+
+static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
+ struct qseecom_send_cmd_req *req)
+{
+ if (!data || !data->client.ihandle) {
+ pr_err("Client or client handle is not initialized\n");
+ return -EINVAL;
+ }
+ if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
+ (req->cmd_req_buf == NULL)) {
+ pr_err("cmd buffer or response buffer is null\n");
+ return -EINVAL;
+ }
+ if (((uintptr_t)req->cmd_req_buf <
+ data->client.user_virt_sb_base) ||
+ ((uintptr_t)req->cmd_req_buf >=
+ (data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+ if (((uintptr_t)req->resp_buf <
+ data->client.user_virt_sb_base) ||
+ ((uintptr_t)req->resp_buf >=
+ (data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+ if ((req->cmd_req_len == 0) ||
+ (req->cmd_req_len > data->client.sb_length) ||
+ (req->resp_len > data->client.sb_length)) {
+ pr_err("cmd buf length or response buf length not valid\n");
+ return -EINVAL;
+ }
+ if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+ pr_err("Integer overflow detected in req_len & rsp_len\n");
+ return -EINVAL;
+ }
+
+ if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+				data->client.sb_length);
+ return -ENOMEM;
+ }
+ if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+ pr_err("Integer overflow in req_len & cmd_req_buf\n");
+ return -EINVAL;
+ }
+ if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+ pr_err("Integer overflow in resp_len & resp_buf\n");
+ return -EINVAL;
+ }
+ if (data->client.user_virt_sb_base >
+ (ULONG_MAX - data->client.sb_length)) {
+ pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+ return -EINVAL;
+ }
+ if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+ ((uintptr_t)data->client.user_virt_sb_base +
+ data->client.sb_length)) ||
+ (((uintptr_t)req->resp_buf + req->resp_len) >
+ ((uintptr_t)data->client.user_virt_sb_base +
+ data->client.sb_length))) {
+ pr_err("cmd buf or resp buf is out of shared buffer region\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+
+ switch (resp->result) {
+ case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+ pr_warn("App(%d) %s is blocked on listener %d\n",
+ data->client.app_id, data->client.app_name,
+ resp->data);
+ ret = __qseecom_process_reentrancy_blocked_on_listener(
+ resp, ptr_app, data);
+ if (ret) {
+ pr_err("failed to process App(%d) %s is blocked on listener %d\n",
+ data->client.app_id, data->client.app_name, resp->data);
+ return ret;
+ }
+
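+		/* fall through to process the incomplete response */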
+ case QSEOS_RESULT_INCOMPLETE:
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+ wake_up_interruptible(&qseecom.app_block_wq);
+ if (ret)
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ return ret;
+ case QSEOS_RESULT_SUCCESS:
+ return ret;
+ default:
+ pr_err("Response result %d not supported\n",
+ resp->result);
+ return -EINVAL;
+ }
+}
+
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+ struct qseecom_send_cmd_req *req)
+{
+ int ret = 0;
+ u32 reqd_len_sb_in = 0;
+ struct qseecom_client_send_data_ireq send_data_req = {0};
+ struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+ struct qseecom_command_scm_resp resp;
+ unsigned long flags;
+ struct qseecom_registered_app_list *ptr_app;
+ bool found_app = false;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = data->sglistinfo_ptr;
+
+ reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+ /* find app_id & img_name from list */
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+ list) {
+ if ((ptr_app->app_id == data->client.app_id) &&
+ (!strcmp(ptr_app->app_name, data->client.app_name))) {
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+ if (!found_app) {
+ pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+ (char *)data->client.app_name);
+ return -ENOENT;
+ }
+
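+	/* Build the send-data request in the layout this QSEE version
+	 * expects: pre-4.0 firmware takes 32-bit physical addresses,
+	 * 4.0+ takes the 64-bit layout (with an extra check that a
+	 * 32-bit app's buffers stay below 4GB).
+	 */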
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_req.app_id = data->client.app_id;
+ send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+ data, (uintptr_t)req->cmd_req_buf));
+ send_data_req.req_len = req->cmd_req_len;
+ send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+ data, (uintptr_t)req->resp_buf));
+ send_data_req.rsp_len = req->resp_len;
+ send_data_req.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_req;
+ cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+ } else {
+ send_data_req_64bit.app_id = data->client.app_id;
+ send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->cmd_req_buf);
+ send_data_req_64bit.req_len = req->cmd_req_len;
+ send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->resp_buf);
+ send_data_req_64bit.rsp_len = req->resp_len;
+		/* Check if the 32-bit app's phys_addr region is under 4GB */
+ if ((data->client.app_arch == ELFCLASS32) &&
+ ((send_data_req_64bit.req_ptr >=
+ PHY_ADDR_4G - send_data_req_64bit.req_len) ||
+ (send_data_req_64bit.rsp_ptr >=
+ PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
+ pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
+ data->client.app_name,
+ send_data_req_64bit.req_ptr,
+ send_data_req_64bit.req_len,
+ send_data_req_64bit.rsp_ptr,
+ send_data_req_64bit.rsp_len);
+ return -EFAULT;
+ }
+ send_data_req_64bit.sglistinfo_ptr =
+ (uint64_t)virt_to_phys(table);
+ send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_req_64bit;
+ cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+ }
+
+	if (!qseecom.whitelist_support || data->use_legacy_cmd)
+ *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+ else
+ *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt,
+ reqd_len_sb_in,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+
+ __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+ ret, data->client.app_id);
+ return ret;
+ }
+
+ if (qseecom.qsee_reentrancy_support) {
+ ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+ } else {
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("Response result %d not supported\n",
+ resp.result);
+ ret = -EINVAL;
+ }
+ }
+ }
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt, data->client.sb_length,
+ ION_IOC_INV_CACHES);
+ if (ret)
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+}
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+ int ret = 0;
+ struct qseecom_send_cmd_req req;
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ if (__validate_send_cmd_inputs(data, &req))
+ return -EINVAL;
+
+	return __qseecom_send_cmd(data, &req);
+}
+
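+/*
+ * Verify that cmd_buf_offset leaves room for at least one 32-bit
+ * physical address to be written into the request (or listener
+ * response) buffer without running past its end.
+ */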
+int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
+ struct qseecom_send_modfd_listener_resp *lstnr_resp,
+			struct qseecom_dev_handle *data, int i)
+{
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (req->ifd_data[i].fd > 0)) {
+ if ((req->cmd_req_len < sizeof(uint32_t)) ||
+ (req->ifd_data[i].cmd_buf_offset >
+ req->cmd_req_len - sizeof(uint32_t))) {
+ pr_err("Invalid offset (req len) 0x%x\n",
+ req->ifd_data[i].cmd_buf_offset);
+ return -EINVAL;
+ }
+ } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+ (lstnr_resp->ifd_data[i].fd > 0)) {
+ if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
+ (lstnr_resp->ifd_data[i].cmd_buf_offset >
+ lstnr_resp->resp_len - sizeof(uint32_t))) {
+ pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+ lstnr_resp->ifd_data[i].cmd_buf_offset);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
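+/*
+ * Walk the modfd ion fds attached to the request (or listener
+ * response), import each buffer, and patch its physical address (or a
+ * scatter-gather entry list) into the command buffer at the supplied
+ * offset. With cleanup == true the patched fields are zeroed again and
+ * the caches invalidated after the command completes.
+ */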
+static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
+ struct qseecom_dev_handle *data)
+{
+ struct ion_handle *ihandle;
+ char *field;
+ int ret = 0;
+ int i = 0;
+ uint32_t len = 0;
+ struct scatterlist *sg;
+ struct qseecom_send_modfd_cmd_req *req = NULL;
+ struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+ struct qseecom_registered_listener_list *this_lstnr = NULL;
+ uint32_t offset;
+ struct sg_table *sg_ptr;
+
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (data->type != QSEECOM_CLIENT_APP))
+ return -EFAULT;
+
+ if (msg == NULL) {
+ pr_err("Invalid address\n");
+ return -EINVAL;
+ }
+ if (data->type == QSEECOM_LISTENER_SERVICE) {
+ lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+ this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (IS_ERR_OR_NULL(this_lstnr)) {
+ pr_err("Invalid listener ID\n");
+ return -ENOMEM;
+ }
+ } else {
+ req = (struct qseecom_send_modfd_cmd_req *)msg;
+ }
+
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (req->ifd_data[i].fd > 0)) {
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ req->ifd_data[i].fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client can't retrieve the handle\n");
+ return -ENOMEM;
+ }
+ field = (char *) req->cmd_req_buf +
+ req->ifd_data[i].cmd_buf_offset;
+ } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+ (lstnr_resp->ifd_data[i].fd > 0)) {
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ lstnr_resp->ifd_data[i].fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client can't retrieve the handle\n");
+ return -ENOMEM;
+ }
+ field = lstnr_resp->resp_buf_ptr +
+ lstnr_resp->ifd_data[i].cmd_buf_offset;
+ } else {
+ continue;
+ }
+ /* Populate the cmd data structure with the phys_addr */
+ sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+ if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+ goto err;
+ }
+ if (sg_ptr->nents == 0) {
+ pr_err("Num of scattered entries is 0\n");
+ goto err;
+ }
+ if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+ goto err;
+ }
+ sg = sg_ptr->sgl;
+ if (sg_ptr->nents == 1) {
+ uint32_t *update;
+
+ if (__boundary_checks_offset(req, lstnr_resp, data, i))
+ goto err;
+ if ((data->type == QSEECOM_CLIENT_APP &&
+ (data->client.app_arch == ELFCLASS32 ||
+ data->client.app_arch == ELFCLASS64)) ||
+ (data->type == QSEECOM_LISTENER_SERVICE)) {
+ /*
+				 * Check if sg list phys addr region is under 4GB
+ */
+ if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
+ (!cleanup) &&
+ ((uint64_t)sg_dma_address(sg_ptr->sgl)
+ >= PHY_ADDR_4G - sg->length)) {
+ pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+ data->client.app_name,
+ &(sg_dma_address(sg_ptr->sgl)),
+ sg->length);
+ goto err;
+ }
+ update = (uint32_t *) field;
+ *update = cleanup ? 0 :
+ (uint32_t)sg_dma_address(sg_ptr->sgl);
+ } else {
+ pr_err("QSEE app arch %u is not supported\n",
+ data->client.app_arch);
+ goto err;
+ }
+ len += (uint32_t)sg->length;
+ } else {
+ struct qseecom_sg_entry *update;
+ int j = 0;
+
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (req->ifd_data[i].fd > 0)) {
+
+ if ((req->cmd_req_len <
+ SG_ENTRY_SZ * sg_ptr->nents) ||
+ (req->ifd_data[i].cmd_buf_offset >
+ (req->cmd_req_len -
+ SG_ENTRY_SZ * sg_ptr->nents))) {
+ pr_err("Invalid offset = 0x%x\n",
+ req->ifd_data[i].cmd_buf_offset);
+ goto err;
+ }
+
+ } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+ (lstnr_resp->ifd_data[i].fd > 0)) {
+
+ if ((lstnr_resp->resp_len <
+ SG_ENTRY_SZ * sg_ptr->nents) ||
+ (lstnr_resp->ifd_data[i].cmd_buf_offset >
+ (lstnr_resp->resp_len -
+ SG_ENTRY_SZ * sg_ptr->nents))) {
+ goto err;
+ }
+ }
+ if ((data->type == QSEECOM_CLIENT_APP &&
+ (data->client.app_arch == ELFCLASS32 ||
+ data->client.app_arch == ELFCLASS64)) ||
+ (data->type == QSEECOM_LISTENER_SERVICE)) {
+ update = (struct qseecom_sg_entry *)field;
+ for (j = 0; j < sg_ptr->nents; j++) {
+ /*
+ * Check if sg list PA is under 4GB
+ */
+ if ((qseecom.qsee_version >=
+ QSEE_VERSION_40) &&
+ (!cleanup) &&
+ ((uint64_t)(sg_dma_address(sg))
+ >= PHY_ADDR_4G - sg->length)) {
+ pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+ data->client.app_name,
+ &(sg_dma_address(sg)),
+ sg->length);
+ goto err;
+ }
+ update->phys_addr = cleanup ? 0 :
+ (uint32_t)sg_dma_address(sg);
+ update->len = cleanup ? 0 : sg->length;
+ update++;
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+ } else {
+ pr_err("QSEE app arch %u is not supported\n",
+ data->client.app_arch);
+ goto err;
+ }
+ }
+
+ if (cleanup) {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ihandle, NULL, len,
+ ION_IOC_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto err;
+ }
+ } else {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ihandle, NULL, len,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto err;
+ }
+ if (data->type == QSEECOM_CLIENT_APP) {
+ offset = req->ifd_data[i].cmd_buf_offset;
+ data->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0, offset);
+ data->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ data->sglist_cnt = i + 1;
+ } else {
+ offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+ + (uintptr_t)lstnr_resp->resp_buf_ptr -
+ (uintptr_t)this_lstnr->sb_virt);
+ this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0, offset);
+ this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ this_lstnr->sglist_cnt = i + 1;
+ }
+ }
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ }
+ return ret;
+err:
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ return -ENOMEM;
+}
+
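+/*
+ * When a buffer has more scatter-gather entries than fit in the
+ * command buffer (QSEECOM_MAX_SG_ENTRY), spill the full entry list
+ * into a DMA-coherent buffer and leave a version-2 header in the
+ * command buffer pointing at it.
+ */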
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+ char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+ struct scatterlist *sg = sg_ptr->sgl;
+ struct qseecom_sg_entry_64bit *sg_entry;
+ struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+ void *buf;
+ uint i;
+ size_t size;
+ dma_addr_t coh_pmem;
+
+ if (fd_idx >= MAX_ION_FD) {
+ pr_err("fd_idx [%d] is invalid\n", fd_idx);
+ return -ENOMEM;
+ }
+ buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+ memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+ /* Allocate a contiguous kernel buffer */
+ size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+ size = (size + PAGE_SIZE) & PAGE_MASK;
+ buf = dma_alloc_coherent(qseecom.pdev,
+ size, &coh_pmem, GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("failed to alloc memory for sg buf\n");
+ return -ENOMEM;
+ }
+ /* update qseecom_sg_list_buf_hdr_64bit */
+ buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+ buf_hdr->new_buf_phys_addr = coh_pmem;
+ buf_hdr->nents_total = sg_ptr->nents;
+ /* save the left sg entries into new allocated buf */
+ sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+ for (i = 0; i < sg_ptr->nents; i++) {
+ sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+ sg_entry->len = sg->length;
+ sg_entry++;
+ sg = sg_next(sg);
+ }
+
+ data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+ data->client.sec_buf_fd[fd_idx].vbase = buf;
+ data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+ data->client.sec_buf_fd[fd_idx].size = size;
+
+ return 0;
+}
+
+static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
+ struct qseecom_dev_handle *data)
+{
+ struct ion_handle *ihandle;
+ char *field;
+ int ret = 0;
+ int i = 0;
+ uint32_t len = 0;
+ struct scatterlist *sg;
+ struct qseecom_send_modfd_cmd_req *req = NULL;
+ struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+ struct qseecom_registered_listener_list *this_lstnr = NULL;
+ uint32_t offset;
+ struct sg_table *sg_ptr;
+
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (data->type != QSEECOM_CLIENT_APP))
+ return -EFAULT;
+
+ if (msg == NULL) {
+ pr_err("Invalid address\n");
+ return -EINVAL;
+ }
+ if (data->type == QSEECOM_LISTENER_SERVICE) {
+ lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+ this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (IS_ERR_OR_NULL(this_lstnr)) {
+ pr_err("Invalid listener ID\n");
+ return -ENOMEM;
+ }
+ } else {
+ req = (struct qseecom_send_modfd_cmd_req *)msg;
+ }
+
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (req->ifd_data[i].fd > 0)) {
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ req->ifd_data[i].fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client can't retrieve the handle\n");
+ return -ENOMEM;
+ }
+ field = (char *) req->cmd_req_buf +
+ req->ifd_data[i].cmd_buf_offset;
+ } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+ (lstnr_resp->ifd_data[i].fd > 0)) {
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ lstnr_resp->ifd_data[i].fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client can't retrieve the handle\n");
+ return -ENOMEM;
+ }
+ field = lstnr_resp->resp_buf_ptr +
+ lstnr_resp->ifd_data[i].cmd_buf_offset;
+ } else {
+ continue;
+ }
+ /* Populate the cmd data structure with the phys_addr */
+ sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+ if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+ goto err;
+ }
+ if (sg_ptr->nents == 0) {
+ pr_err("Num of scattered entries is 0\n");
+ goto err;
+ }
+ if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_warn("Num of scattered entries (%d) is greater than %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+ if (cleanup) {
+ if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+ data->client.sec_buf_fd[i].vbase)
+ dma_free_coherent(qseecom.pdev,
+ data->client.sec_buf_fd[i].size,
+ data->client.sec_buf_fd[i].vbase,
+ data->client.sec_buf_fd[i].pbase);
+ } else {
+ ret = __qseecom_allocate_sg_list_buffer(data,
+ field, i, sg_ptr);
+ if (ret) {
+ pr_err("Failed to allocate sg list buffer\n");
+ goto err;
+ }
+ }
+ len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+ sg = sg_ptr->sgl;
+ goto cleanup;
+ }
+ sg = sg_ptr->sgl;
+ if (sg_ptr->nents == 1) {
+ uint64_t *update_64bit;
+
+ if (__boundary_checks_offset(req, lstnr_resp, data, i))
+ goto err;
+ /* 64bit app uses 64bit address */
+ update_64bit = (uint64_t *) field;
+ *update_64bit = cleanup ? 0 :
+ (uint64_t)sg_dma_address(sg_ptr->sgl);
+ len += (uint32_t)sg->length;
+ } else {
+ struct qseecom_sg_entry_64bit *update_64bit;
+ int j = 0;
+
+ if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+ (req->ifd_data[i].fd > 0)) {
+
+ if ((req->cmd_req_len <
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+ (req->ifd_data[i].cmd_buf_offset >
+ (req->cmd_req_len -
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+ pr_err("Invalid offset = 0x%x\n",
+ req->ifd_data[i].cmd_buf_offset);
+ goto err;
+ }
+
+ } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+ (lstnr_resp->ifd_data[i].fd > 0)) {
+
+ if ((lstnr_resp->resp_len <
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+ (lstnr_resp->ifd_data[i].cmd_buf_offset >
+ (lstnr_resp->resp_len -
+ SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+ goto err;
+ }
+ }
+ /* 64bit app uses 64bit address */
+ update_64bit = (struct qseecom_sg_entry_64bit *)field;
+ for (j = 0; j < sg_ptr->nents; j++) {
+ update_64bit->phys_addr = cleanup ? 0 :
+ (uint64_t)sg_dma_address(sg);
+ update_64bit->len = cleanup ? 0 :
+ (uint32_t)sg->length;
+ update_64bit++;
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+ }
+cleanup:
+ if (cleanup) {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ihandle, NULL, len,
+ ION_IOC_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto err;
+ }
+ } else {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ihandle, NULL, len,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto err;
+ }
+ if (data->type == QSEECOM_CLIENT_APP) {
+ offset = req->ifd_data[i].cmd_buf_offset;
+ data->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 1, offset);
+ data->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ data->sglist_cnt = i + 1;
+ } else {
+ offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+ + (uintptr_t)lstnr_resp->resp_buf_ptr -
+ (uintptr_t)this_lstnr->sb_virt);
+ this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 1, offset);
+ this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ this_lstnr->sglist_cnt = i + 1;
+ }
+ }
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ }
+ return ret;
+err:
+ for (i = 0; i < MAX_ION_FD; i++)
+ if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+ data->client.sec_buf_fd[i].vbase)
+ dma_free_coherent(qseecom.pdev,
+ data->client.sec_buf_fd[i].size,
+ data->client.sec_buf_fd[i].vbase,
+ data->client.sec_buf_fd[i].pbase);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ return -ENOMEM;
+}
+
+static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+ void __user *argp,
+ bool is_64bit_addr)
+{
+ int ret = 0;
+ int i;
+ struct qseecom_send_modfd_cmd_req req;
+ struct qseecom_send_cmd_req send_cmd_req;
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+ send_cmd_req.cmd_req_len = req.cmd_req_len;
+ send_cmd_req.resp_buf = req.resp_buf;
+ send_cmd_req.resp_len = req.resp_len;
+
+ if (__validate_send_cmd_inputs(data, &send_cmd_req))
+ return -EINVAL;
+
+ /* validate offsets */
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
+ pr_err("Invalid offset %d = 0x%x\n",
+ i, req.ifd_data[i].cmd_buf_offset);
+ return -EINVAL;
+ }
+ }
+ req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.cmd_req_buf);
+ req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.resp_buf);
+
+ if (!is_64bit_addr) {
+ ret = __qseecom_update_cmd_buf(&req, false, data);
+ if (ret)
+ return ret;
+ ret = __qseecom_send_cmd(data, &send_cmd_req);
+ if (ret)
+ return ret;
+ ret = __qseecom_update_cmd_buf(&req, true, data);
+ if (ret)
+ return ret;
+ } else {
+ ret = __qseecom_update_cmd_buf_64(&req, false, data);
+ if (ret)
+ return ret;
+ ret = __qseecom_send_cmd(data, &send_cmd_req);
+ if (ret)
+ return ret;
+ ret = __qseecom_update_cmd_buf_64(&req, true, data);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ return __qseecom_send_modfd_cmd(data, argp, false);
+}
+
+static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ return __qseecom_send_modfd_cmd(data, argp, true);
+}
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+ struct qseecom_registered_listener_list *svc)
+{
+ int ret;
+
+ ret = (svc->rcv_req_flag != 0);
+ return ret || data->abort;
+}
+
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+ struct qseecom_registered_listener_list *this_lstnr;
+
+ this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (!this_lstnr) {
+ pr_err("Invalid listener ID\n");
+ return -ENODATA;
+ }
+
+ while (1) {
+ if (wait_event_freezable(this_lstnr->rcv_req_wq,
+ __qseecom_listener_has_rcvd_req(data,
+ this_lstnr))) {
+ pr_debug("Interrupted: exiting Listener Service = %d\n",
+ (uint32_t)data->listener.id);
+ /* woken up for different reason */
+ return -ERESTARTSYS;
+ }
+
+ if (data->abort) {
+ pr_err("Aborting Listener Service = %d\n",
+ (uint32_t)data->listener.id);
+ return -ENODEV;
+ }
+ this_lstnr->rcv_req_flag = 0;
+ break;
+ }
+ return ret;
+}
+
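+/*
+ * Sanity-check a firmware .mdt image: correct ELF magic for its
+ * declared class, at least one program header, and all program
+ * headers contained within the file.
+ */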
+static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
+{
+ unsigned char app_arch = 0;
+ struct elf32_hdr *ehdr;
+ struct elf64_hdr *ehdr64;
+
+ app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+
+ switch (app_arch) {
+ case ELFCLASS32: {
+ ehdr = (struct elf32_hdr *)fw_entry->data;
+ if (fw_entry->size < sizeof(*ehdr)) {
+ pr_err("%s: Not big enough to be an elf32 header\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ pr_err("%s: Not an elf32 header\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ if (ehdr->e_phnum == 0) {
+ pr_err("%s: No loadable segments\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+ sizeof(struct elf32_hdr) > fw_entry->size) {
+ pr_err("%s: Program headers not within mdt\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ break;
+ }
+ case ELFCLASS64: {
+ ehdr64 = (struct elf64_hdr *)fw_entry->data;
+ if (fw_entry->size < sizeof(*ehdr64)) {
+ pr_err("%s: Not big enough to be an elf64 header\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
+ pr_err("%s: Not an elf64 header\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ if (ehdr64->e_phnum == 0) {
+ pr_err("%s: No loadable segments\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
+ sizeof(struct elf64_hdr) > fw_entry->size) {
+ pr_err("%s: Program headers not within mdt\n",
+ qseecom.pdev->init_name);
+ return false;
+ }
+ break;
+ }
+ default: {
+ pr_err("QSEE app arch %u is not supported\n", app_arch);
+ return false;
+ }
+ }
+ return true;
+}
+
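+/*
+ * A QSEE app image is split into <appname>.mdt plus <appname>.bNN
+ * blobs, one per ELF program header. Sum their sizes (with overflow
+ * checks) to size the load buffer, and report the app's ELF class.
+ */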
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
+ uint32_t *app_arch)
+{
+ int ret = -1;
+ int i = 0, rc = 0;
+ const struct firmware *fw_entry = NULL;
+ char fw_name[MAX_APP_NAME_SIZE];
+ struct elf32_hdr *ehdr;
+ struct elf64_hdr *ehdr64;
+ int num_images = 0;
+
+ snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+ rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+ if (rc) {
+ pr_err("error with request_firmware\n");
+ ret = -EIO;
+ goto err;
+ }
+ if (!__qseecom_is_fw_image_valid(fw_entry)) {
+ ret = -EIO;
+ goto err;
+ }
+ *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+ *fw_size = fw_entry->size;
+ if (*app_arch == ELFCLASS32) {
+ ehdr = (struct elf32_hdr *)fw_entry->data;
+ num_images = ehdr->e_phnum;
+ } else if (*app_arch == ELFCLASS64) {
+ ehdr64 = (struct elf64_hdr *)fw_entry->data;
+ num_images = ehdr64->e_phnum;
+ } else {
+ pr_err("QSEE %s app, arch %u is not supported\n",
+ appname, *app_arch);
+ ret = -EIO;
+ goto err;
+ }
+ pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
+ release_firmware(fw_entry);
+ fw_entry = NULL;
+ for (i = 0; i < num_images; i++) {
+ memset(fw_name, 0, sizeof(fw_name));
+ snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+ ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+ if (ret)
+ goto err;
+ if (*fw_size > U32_MAX - fw_entry->size) {
+ pr_err("QSEE %s app file size overflow\n", appname);
+ ret = -EINVAL;
+ goto err;
+ }
+ *fw_size += fw_entry->size;
+ release_firmware(fw_entry);
+ fw_entry = NULL;
+ }
+
+ return ret;
+err:
+ if (fw_entry)
+ release_firmware(fw_entry);
+ *fw_size = 0;
+ return ret;
+}
+
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+ uint32_t fw_size,
+ struct qseecom_load_app_ireq *load_req)
+{
+ int ret = -1;
+ int i = 0, rc = 0;
+ const struct firmware *fw_entry = NULL;
+ char fw_name[MAX_APP_NAME_SIZE];
+ u8 *img_data_ptr = img_data;
+ struct elf32_hdr *ehdr;
+ struct elf64_hdr *ehdr64;
+ int num_images = 0;
+ unsigned char app_arch = 0;
+
+ snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+ rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+ if (rc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ load_req->img_len = fw_entry->size;
+ if (load_req->img_len > fw_size) {
+ pr_err("app %s size %zu is larger than buf size %u\n",
+ appname, fw_entry->size, fw_size);
+ ret = -EINVAL;
+ goto err;
+ }
+ memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+ img_data_ptr = img_data_ptr + fw_entry->size;
+	load_req->mdt_len = fw_entry->size; /* mdt length = size of the .mdt file */
+
+ app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+ if (app_arch == ELFCLASS32) {
+ ehdr = (struct elf32_hdr *)fw_entry->data;
+ num_images = ehdr->e_phnum;
+ } else if (app_arch == ELFCLASS64) {
+ ehdr64 = (struct elf64_hdr *)fw_entry->data;
+ num_images = ehdr64->e_phnum;
+ } else {
+ pr_err("QSEE %s app, arch %u is not supported\n",
+ appname, app_arch);
+ ret = -EIO;
+ goto err;
+ }
+ release_firmware(fw_entry);
+ fw_entry = NULL;
+ for (i = 0; i < num_images; i++) {
+ snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+ ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+ if (ret) {
+ pr_err("Failed to locate blob %s\n", fw_name);
+ goto err;
+ }
+ if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+ (fw_entry->size + load_req->img_len > fw_size)) {
+ pr_err("Invalid file size for %s\n", fw_name);
+ ret = -EINVAL;
+ goto err;
+ }
+ memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+ img_data_ptr = img_data_ptr + fw_entry->size;
+ load_req->img_len += fw_entry->size;
+ release_firmware(fw_entry);
+ fw_entry = NULL;
+ }
+ return ret;
+err:
+ release_firmware(fw_entry);
+ return ret;
+}
+
+static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
+ u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
+{
+ size_t len = 0;
+ int ret = 0;
+ ion_phys_addr_t pa;
+ struct ion_handle *ihandle = NULL;
+ u8 *img_data = NULL;
+
+ ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
+ SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("ION alloc failed\n");
+ return -ENOMEM;
+ }
+ img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
+ ihandle);
+
+ if (IS_ERR_OR_NULL(img_data)) {
+ pr_err("ION memory mapping for image loading failed\n");
+ ret = -ENOMEM;
+ goto exit_ion_free;
+ }
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+ if (ret) {
+ pr_err("physical memory retrieval failure\n");
+ ret = -EIO;
+ goto exit_ion_unmap_kernel;
+ }
+
+ *pihandle = ihandle;
+ *data = img_data;
+ *paddr = pa;
+ return ret;
+
+exit_ion_unmap_kernel:
+ ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+exit_ion_free:
+ ion_free(qseecom.ion_clnt, ihandle);
+ ihandle = NULL;
+ return ret;
+}
+
+static void __qseecom_free_img_data(struct ion_handle **ihandle)
+{
+ ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
+ ion_free(qseecom.ion_clnt, *ihandle);
+ *ihandle = NULL;
+}
+
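+/*
+ * Load a trusted app directly from the firmware partition: size the
+ * image, make sure the matching cmnlib is resident, concatenate the
+ * .mdt and .bNN blobs into an ION buffer, then issue the APP_START
+ * SCM call with clocks and bus bandwidth voted.
+ */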
+static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
+ uint32_t *app_id)
+{
+ int ret = -1;
+ uint32_t fw_size = 0;
+ struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+ struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+ struct qseecom_command_scm_resp resp;
+ u8 *img_data = NULL;
+ ion_phys_addr_t pa = 0;
+ struct ion_handle *ihandle = NULL;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ uint32_t app_arch = 0;
+
+ if (!data || !appname || !app_id) {
+ pr_err("Null pointer to data or appname or appid\n");
+ return -EINVAL;
+ }
+ *app_id = 0;
+ if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
+ return -EIO;
+ data->client.app_arch = app_arch;
+
+ /* Check and load cmnlib */
+ if (qseecom.qsee_version > QSEEE_VERSION_00) {
+ if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
+ ret = qseecom_load_commonlib_image(data, "cmnlib");
+ if (ret) {
+ pr_err("failed to load cmnlib\n");
+ return -EIO;
+ }
+ qseecom.commonlib_loaded = true;
+ pr_debug("cmnlib is loaded\n");
+ }
+
+ if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
+ ret = qseecom_load_commonlib_image(data, "cmnlib64");
+ if (ret) {
+ pr_err("failed to load cmnlib64\n");
+ return -EIO;
+ }
+ qseecom.commonlib64_loaded = true;
+ pr_debug("cmnlib64 is loaded\n");
+ }
+ }
+
+ ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
+ if (ret)
+ return ret;
+
+ ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
+ if (ret) {
+ ret = -EIO;
+ goto exit_free_img_data;
+ }
+
+ /* Populate the load_req parameters */
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		/* mdt_len and img_len are already set by __qseecom_get_fw_data() */
+ strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
+ load_req.phy_addr = (uint32_t)pa;
+ cmd_buf = (void *)&load_req;
+ cmd_len = sizeof(struct qseecom_load_app_ireq);
+ } else {
+ load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+ load_req_64bit.mdt_len = load_req.mdt_len;
+ load_req_64bit.img_len = load_req.img_len;
+ strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
+ load_req_64bit.phy_addr = (uint64_t)pa;
+ cmd_buf = (void *)&load_req_64bit;
+ cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+ }
+
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+ mutex_unlock(&qsee_bw_mutex);
+ if (ret) {
+ ret = -EIO;
+ goto exit_free_img_data;
+ }
+ }
+
+ ret = __qseecom_enable_clk_scale_up(data);
+ if (ret) {
+ ret = -EIO;
+ goto exit_unregister_bus_bw_need;
+ }
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
+ img_data, fw_size,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto exit_disable_clk_vote;
+ }
+
+ /* SCM_CALL to load the image */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to load failed : ret %d\n", ret);
+ ret = -EIO;
+ goto exit_disable_clk_vote;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ *app_id = resp.data;
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret)
+ pr_err("process_incomplete_cmd FAILED\n");
+ else
+ *app_id = resp.data;
+ break;
+ case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
+ break;
+ default:
+ pr_err("scm call return unknown response %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ }
+
+exit_disable_clk_vote:
+ __qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom_unregister_bus_bandwidth_needs(data);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+
+exit_free_img_data:
+ __qseecom_free_img_data(&ihandle);
+ return ret;
+}
+
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+ char *cmnlib_name)
+{
+ int ret = 0;
+ uint32_t fw_size = 0;
+ struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+ struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+ struct qseecom_command_scm_resp resp;
+ u8 *img_data = NULL;
+ ion_phys_addr_t pa = 0;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ uint32_t app_arch = 0;
+
+ if (!cmnlib_name) {
+ pr_err("cmnlib_name is NULL\n");
+ return -EINVAL;
+ }
+ if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
+ pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
+ cmnlib_name, strlen(cmnlib_name));
+ return -EINVAL;
+ }
+
+ if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
+ return -EIO;
+
+ ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
+ &img_data, fw_size, &pa);
+ if (ret)
+ return -EIO;
+
+ ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
+ if (ret) {
+ ret = -EIO;
+ goto exit_free_img_data;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ load_req.phy_addr = (uint32_t)pa;
+ load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+ cmd_buf = (void *)&load_req;
+ cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
+ } else {
+ load_req_64bit.phy_addr = (uint64_t)pa;
+ load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+ load_req_64bit.img_len = load_req.img_len;
+ load_req_64bit.mdt_len = load_req.mdt_len;
+ cmd_buf = (void *)&load_req_64bit;
+ cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
+ }
+
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+ mutex_unlock(&qsee_bw_mutex);
+ if (ret) {
+ ret = -EIO;
+ goto exit_free_img_data;
+ }
+ }
+
+ /* Vote for the SFPB clock */
+ ret = __qseecom_enable_clk_scale_up(data);
+ if (ret) {
+ ret = -EIO;
+ goto exit_unregister_bus_bw_need;
+ }
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
+ img_data, fw_size,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto exit_disable_clk_vote;
+ }
+
+ /* SCM_CALL to load the image */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to load failed : ret %d\n", ret);
+ ret = -EIO;
+ goto exit_disable_clk_vote;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response result %d\n", resp.result);
+ ret = -EINVAL;
+ goto exit_disable_clk_vote;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n", ret);
+ goto exit_disable_clk_vote;
+ }
+ break;
+ default:
+ pr_err("scm call return unknown response %d\n", resp.result);
+ ret = -EINVAL;
+ goto exit_disable_clk_vote;
+ }
+
+exit_disable_clk_vote:
+ __qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom_unregister_bus_bandwidth_needs(data);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+
+exit_free_img_data:
+ __qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
+ return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+ int ret = -EINVAL;
+ struct qseecom_unload_lib_image_ireq unload_req = {0};
+ struct qseecom_command_scm_resp resp;
+
+ /* Populate the remaining parameters */
+ unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+
+	/* SCM_CALL to unload the image */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+ sizeof(struct qseecom_unload_lib_image_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to unload lib failed : ret %d\n", ret);
+ ret = -EIO;
+ } else {
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_FAILURE:
+			pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
+ break;
+ default:
+ pr_err("scm call return unknown response %d\n",
+ resp.result);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
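+/*
+ * Kernel-client entry point: allocate a qseecom handle and shared
+ * buffer, reuse the app if it is already resident (bumping its
+ * refcount), otherwise load it from firmware via __qseecom_load_fw().
+ */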
+int qseecom_start_app(struct qseecom_handle **handle,
+ char *app_name, uint32_t size)
+{
+ int32_t ret = 0;
+ unsigned long flags = 0;
+ struct qseecom_dev_handle *data = NULL;
+ struct qseecom_check_app_ireq app_ireq;
+ struct qseecom_registered_app_list *entry = NULL;
+ struct qseecom_registered_kclient_list *kclient_entry = NULL;
+ bool found_app = false;
+ size_t len;
+ ion_phys_addr_t pa;
+ uint32_t fw_size, app_arch;
+ uint32_t app_id = 0;
+
+ if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+ pr_err("Not allowed to be called in %d state\n",
+ atomic_read(&qseecom.qseecom_state));
+ return -EPERM;
+ }
+ if (!app_name) {
+ pr_err("failed to get the app name\n");
+ return -EINVAL;
+ }
+
+ if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
+ pr_err("The app_name (%s) with length %zu is not valid\n",
+ app_name, strlen(app_name));
+ return -EINVAL;
+ }
+
+ *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+ if (!(*handle))
+ return -ENOMEM;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		kfree(*handle);
+		*handle = NULL;
+		return -ENOMEM;
+	}
+ data->abort = 0;
+ data->type = QSEECOM_CLIENT_APP;
+ data->released = false;
+ data->client.sb_length = size;
+ data->client.user_virt_sb_base = 0;
+ data->client.ihandle = NULL;
+
+ init_waitqueue_head(&data->abort_wq);
+
+ data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
+ ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(data->client.ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ kfree(data);
+ kfree(*handle);
+ *handle = NULL;
+ return -EINVAL;
+ }
+ mutex_lock(&app_access_lock);
+
+ app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+ strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
+ ret = __qseecom_check_app_exists(app_ireq, &app_id);
+ if (ret)
+ goto err;
+
+ strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
+ if (app_id) {
+ pr_warn("App id %d for [%s] app exists\n", app_id,
+ (char *)app_ireq.app_name);
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list){
+ if (entry->app_id == app_id) {
+ entry->ref_cnt++;
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ if (!found_app)
+			pr_warn("App_id %d [%s] was loaded but not registered\n",
+				app_id, (char *)app_ireq.app_name);
+ } else {
+ /* load the app and get the app_id */
+		pr_debug("%s: Loading app for the first time\n",
+ qseecom.pdev->init_name);
+ ret = __qseecom_load_fw(data, app_name, &app_id);
+ if (ret < 0)
+ goto err;
+ }
+ data->client.app_id = app_id;
+ if (!found_app) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("kmalloc for app entry failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ entry->app_id = app_id;
+ entry->ref_cnt = 1;
+ strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
+ if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
+ ret = -EIO;
+ kfree(entry);
+ goto err;
+ }
+ entry->app_arch = app_arch;
+ entry->app_blocked = false;
+ entry->blocked_on_listener_id = 0;
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags);
+ }
+
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+ if (ret) {
+ pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ /* Populate the structure for sending scm call to load image */
+ data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+ data->client.ihandle);
+ if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+ pr_err("ION memory mapping for client shared buf failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
+ data->client.sb_phys = (phys_addr_t)pa;
+ (*handle)->dev = (void *)data;
+ (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
+ (*handle)->sbuf_len = data->client.sb_length;
+
+ kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
+ if (!kclient_entry) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ kclient_entry->handle = *handle;
+
+ spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+ list_add_tail(&kclient_entry->list,
+ &qseecom.registered_kclient_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+ mutex_unlock(&app_access_lock);
+ return 0;
+
+err:
+ kfree(data);
+ kfree(*handle);
+ *handle = NULL;
+ mutex_unlock(&app_access_lock);
+ return ret;
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
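+/*
+ * qseecom_shutdown_app() - unload the TZ app bound to *@handle and drop
+ * the kernel-client bookkeeping. On success the handle memory is zeroed
+ * and freed and *@handle is set to NULL, so the pointer can safely be
+ * reused with qseecom_start_app().
+ */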
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+ int ret = -EINVAL;
+ struct qseecom_dev_handle *data;
+
+ struct qseecom_registered_kclient_list *kclient = NULL;
+ unsigned long flags = 0;
+ bool found_handle = false;
+
+ if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+ pr_err("Not allowed to be called in %d state\n",
+ atomic_read(&qseecom.qseecom_state));
+ return -EPERM;
+ }
+
+ if ((handle == NULL) || (*handle == NULL)) {
+ pr_err("Handle is not initialized\n");
+ return -EINVAL;
+ }
+ data = (struct qseecom_dev_handle *) ((*handle)->dev);
+ mutex_lock(&app_access_lock);
+
+ spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+ list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+ list) {
+ if (kclient->handle == (*handle)) {
+ list_del(&kclient->list);
+ found_handle = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+ if (!found_handle)
+ pr_err("Unable to find the handle, exiting\n");
+ else
+ ret = qseecom_unload_app(data, false);
+
+ mutex_unlock(&app_access_lock);
+ if (ret == 0) {
+ kzfree(data);
+ kzfree(*handle);
+ kzfree(kclient);
+ *handle = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
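+/*
+ * qseecom_send_command() - synchronously send a command to the TZ app
+ * behind @handle. The request and response buffers are validated by
+ * __validate_send_cmd_inputs() and are expected to live inside the
+ * shared buffer set up by qseecom_start_app(). Illustrative exchange
+ * (a sketch; the request/response layout is app-defined):
+ *
+ *	void *req = qhandle->sbuf;
+ *	void *rsp = qhandle->sbuf + 64;
+ *	rc = qseecom_send_command(qhandle, req, 64, rsp, 64);
+ */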
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+ uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+ int ret = 0;
+ struct qseecom_send_cmd_req req = {0, 0, 0, 0};
+ struct qseecom_dev_handle *data;
+ bool perf_enabled = false;
+
+ if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+ pr_err("Not allowed to be called in %d state\n",
+ atomic_read(&qseecom.qseecom_state));
+ return -EPERM;
+ }
+
+ if (handle == NULL) {
+ pr_err("Handle is not initialized\n");
+ return -EINVAL;
+ }
+ data = handle->dev;
+
+ req.cmd_req_len = sbuf_len;
+ req.resp_len = rbuf_len;
+ req.cmd_req_buf = send_buf;
+ req.resp_buf = resp_buf;
+
+ if (__validate_send_cmd_inputs(data, &req))
+ return -EINVAL;
+
+ mutex_lock(&app_access_lock);
+ if (qseecom.support_bus_scaling) {
+ ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+ if (ret) {
+ pr_err("Failed to set bw.\n");
+ mutex_unlock(&app_access_lock);
+ return ret;
+ }
+ }
+	/*
+	 * On targets where the crypto clock is handled by HLOS, if
+	 * clk_access_cnt is zero and perf_enabled is false, the crypto
+	 * clock was not enabled before sending the cmd to TZ, so qseecom
+	 * enables the clock itself to avoid a service failure.
+	 */
+ if (!qseecom.no_clock_support &&
+ !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+ pr_debug("ce clock is not enabled!\n");
+ ret = qseecom_perf_enable(data);
+ if (ret) {
+ pr_err("Failed to vote for clock with err %d\n",
+ ret);
+ mutex_unlock(&app_access_lock);
+ return -EINVAL;
+ }
+ perf_enabled = true;
+ }
+ if (!strcmp(data->client.app_name, "securemm"))
+ data->use_legacy_cmd = true;
+
+ ret = __qseecom_send_cmd(data, &req);
+ data->use_legacy_cmd = false;
+ if (qseecom.support_bus_scaling)
+ __qseecom_add_bw_scale_down_timer(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+
+ if (perf_enabled) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ }
+
+ mutex_unlock(&app_access_lock);
+
+ if (ret)
+ return ret;
+
+ pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
+ req.resp_len, req.resp_buf);
+ return ret;
+}
+EXPORT_SYMBOL(qseecom_send_command);
+
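+/*
+ * qseecom_set_bandwidth() - vote (@high) or unvote (!@high) crypto bus
+ * bandwidth for a kernel client. With bus scaling support this registers
+ * or unregisters a HIGH bandwidth need; otherwise it falls back to
+ * direct DFAB/SFPB clock votes. Illustrative use around a burst of
+ * commands (a sketch):
+ *
+ *	qseecom_set_bandwidth(qhandle, true);
+ *	rc = qseecom_send_command(qhandle, req, req_len, rsp, rsp_len);
+ *	qseecom_set_bandwidth(qhandle, false);
+ */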
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+ int ret = 0;
+
+ if ((handle == NULL) || (handle->dev == NULL)) {
+ pr_err("No valid kernel client\n");
+ return -EINVAL;
+ }
+ if (high) {
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_register_bus_bandwidth_needs(handle->dev,
+ HIGH);
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ ret = qseecom_perf_enable(handle->dev);
+ if (ret)
+ pr_err("Failed to vote for clock with err %d\n",
+ ret);
+ }
+ } else {
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+ qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+ } else {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom_unregister_bus_bandwidth_needs(handle->dev);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
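+/*
+ * Entry point for the smcinvoke driver: desc->ret[0..2] carry the TZ
+ * request command, app id and listener id. They are wrapped in dummy
+ * app and client structures so the regular reentrancy path can service
+ * the listener request, and the updated response is written back into
+ * desc->ret[] for the caller.
+ */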
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
+{
+ struct qseecom_registered_app_list dummy_app_entry = { {0} };
+ struct qseecom_dev_handle dummy_private_data = {0};
+ struct qseecom_command_scm_resp resp;
+ int ret = 0;
+
+ if (!desc) {
+ pr_err("desc is NULL\n");
+ return -EINVAL;
+ }
+
+ resp.result = desc->ret[0]; /*req_cmd*/
+ resp.resp_type = desc->ret[1]; /*app_id*/
+ resp.data = desc->ret[2]; /*listener_id*/
+
+ dummy_private_data.client.app_id = desc->ret[1];
+ dummy_app_entry.app_id = desc->ret[1];
+
+ mutex_lock(&app_access_lock);
+ ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+ &dummy_private_data);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+ (int)desc->ret[0], (int)desc->ret[2],
+ (int)desc->ret[1], ret);
+ desc->ret[0] = resp.result;
+ desc->ret[1] = resp.resp_type;
+ desc->ret[2] = resp.data;
+ return ret;
+}
+EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
+
+static int qseecom_send_resp(void)
+{
+ qseecom.send_resp_flag = 1;
+ wake_up_interruptible(&qseecom.send_resp_wq);
+ return 0;
+}
+
+static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
+{
+ struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+ pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
+ this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (this_lstnr == NULL)
+ return -EINVAL;
+ qseecom.send_resp_flag = 1;
+ this_lstnr->send_resp_flag = 1;
+ wake_up_interruptible(&qseecom.send_resp_wq);
+ return 0;
+}
+
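+/*
+ * Sanity-check a modfd listener response before it is handed back to
+ * TZ: the response buffer must be non-empty, sit entirely inside the
+ * listener's shared buffer, and every ifd_data cmd_buf_offset must fall
+ * within resp_len; the pointer arithmetic is checked for overflow first.
+ */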
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+ struct qseecom_send_modfd_listener_resp *resp,
+ struct qseecom_registered_listener_list *this_lstnr)
+{
+ int i;
+
+ if (!data || !resp || !this_lstnr) {
+ pr_err("listener handle or resp msg is null\n");
+ return -EINVAL;
+ }
+
+ if (resp->resp_buf_ptr == NULL) {
+ pr_err("resp buffer is null\n");
+ return -EINVAL;
+ }
+ /* validate resp buf length */
+ if ((resp->resp_len == 0) ||
+ (resp->resp_len > this_lstnr->sb_length)) {
+ pr_err("resp buf length %d not valid\n", resp->resp_len);
+ return -EINVAL;
+ }
+
+ if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+ pr_err("Integer overflow in resp_len & resp_buf\n");
+ return -EINVAL;
+ }
+ if ((uintptr_t)this_lstnr->user_virt_sb_base >
+ (ULONG_MAX - this_lstnr->sb_length)) {
+ pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+ return -EINVAL;
+ }
+ /* validate resp buf */
+ if (((uintptr_t)resp->resp_buf_ptr <
+ (uintptr_t)this_lstnr->user_virt_sb_base) ||
+ ((uintptr_t)resp->resp_buf_ptr >=
+ ((uintptr_t)this_lstnr->user_virt_sb_base +
+ this_lstnr->sb_length)) ||
+ (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+ ((uintptr_t)this_lstnr->user_virt_sb_base +
+ this_lstnr->sb_length))) {
+ pr_err("resp buf is out of shared buffer region\n");
+ return -EINVAL;
+ }
+
+ /* validate offsets */
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
+ pr_err("Invalid offset %d = 0x%x\n",
+ i, resp->ifd_data[i].cmd_buf_offset);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+ void __user *argp, bool is_64bit_addr)
+{
+ struct qseecom_send_modfd_listener_resp resp;
+ struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+ if (copy_from_user(&resp, argp, sizeof(resp))) {
+		pr_err("copy_from_user failed\n");
+ return -EINVAL;
+ }
+
+ this_lstnr = __qseecom_find_svc(data->listener.id);
+ if (this_lstnr == NULL)
+ return -EINVAL;
+
+ if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+ return -EINVAL;
+
+ resp.resp_buf_ptr = this_lstnr->sb_virt +
+ (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
+
+ if (!is_64bit_addr)
+ __qseecom_update_cmd_buf(&resp, false, data);
+ else
+ __qseecom_update_cmd_buf_64(&resp, false, data);
+ qseecom.send_resp_flag = 1;
+ this_lstnr->send_resp_flag = 1;
+ wake_up_interruptible(&qseecom.send_resp_wq);
+ return 0;
+}
+
+static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ return __qseecom_send_modfd_resp(data, argp, false);
+}
+
+static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ return __qseecom_send_modfd_resp(data, argp, true);
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_qseos_version_req req;
+
+ if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+ return -EINVAL;
+ }
+ req.qseos_version = qseecom.qseos_version;
+ if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
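+/*
+ * Reference-counted enable of the CE clocks for @ce (CLK_QSEE or
+ * CLK_CE_DRV): the core, interface and bus clocks are only prepared and
+ * enabled on the 0 -> 1 transition of clk_access_cnt; later callers just
+ * bump the count under clk_access_lock. __qseecom_disable_clk() is the
+ * mirror image, tearing the clocks down on the 1 -> 0 transition.
+ */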
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
+{
+ int rc = 0;
+ struct qseecom_clk *qclk = NULL;
+
+ if (qseecom.no_clock_support)
+ return 0;
+
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ if (ce == CLK_CE_DRV)
+ qclk = &qseecom.ce_drv;
+
+ if (qclk == NULL) {
+ pr_err("CLK type not supported\n");
+ return -EINVAL;
+ }
+ mutex_lock(&clk_access_lock);
+
+ if (qclk->clk_access_cnt == ULONG_MAX) {
+ pr_err("clk_access_cnt beyond limitation\n");
+ goto err;
+ }
+ if (qclk->clk_access_cnt > 0) {
+ qclk->clk_access_cnt++;
+ mutex_unlock(&clk_access_lock);
+ return rc;
+ }
+
+ /* Enable CE core clk */
+ if (qclk->ce_core_clk != NULL) {
+ rc = clk_prepare_enable(qclk->ce_core_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE core clk\n");
+ goto err;
+ }
+ }
+ /* Enable CE clk */
+ if (qclk->ce_clk != NULL) {
+ rc = clk_prepare_enable(qclk->ce_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE iface clk\n");
+ goto ce_clk_err;
+ }
+ }
+ /* Enable AXI clk */
+ if (qclk->ce_bus_clk != NULL) {
+ rc = clk_prepare_enable(qclk->ce_bus_clk);
+ if (rc) {
+ pr_err("Unable to enable/prepare CE bus clk\n");
+ goto ce_bus_clk_err;
+ }
+ }
+ qclk->clk_access_cnt++;
+ mutex_unlock(&clk_access_lock);
+ return 0;
+
+ce_bus_clk_err:
+ if (qclk->ce_clk != NULL)
+ clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+ if (qclk->ce_core_clk != NULL)
+ clk_disable_unprepare(qclk->ce_core_clk);
+err:
+ mutex_unlock(&clk_access_lock);
+ return -EIO;
+}
+
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
+{
+ struct qseecom_clk *qclk;
+
+ if (qseecom.no_clock_support)
+ return;
+
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ mutex_lock(&clk_access_lock);
+
+ if (qclk->clk_access_cnt == 0) {
+ mutex_unlock(&clk_access_lock);
+ return;
+ }
+
+ if (qclk->clk_access_cnt == 1) {
+ if (qclk->ce_clk != NULL)
+ clk_disable_unprepare(qclk->ce_clk);
+ if (qclk->ce_core_clk != NULL)
+ clk_disable_unprepare(qclk->ce_core_clk);
+ if (qclk->ce_bus_clk != NULL)
+ clk_disable_unprepare(qclk->ce_bus_clk);
+ }
+ qclk->clk_access_cnt--;
+ mutex_unlock(&clk_access_lock);
+}
+
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+ int32_t clk_type)
+{
+ int ret = 0;
+ struct qseecom_clk *qclk;
+
+ if (qseecom.no_clock_support)
+ return 0;
+
+ qclk = &qseecom.qsee;
+ if (!qseecom.qsee_perf_client)
+ return ret;
+
+ switch (clk_type) {
+ case CLK_DFAB:
+ mutex_lock(&qsee_bw_mutex);
+ if (!qseecom.qsee_bw_count) {
+ if (qseecom.qsee_sfpb_bw_count > 0)
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 3);
+ else {
+ if (qclk->ce_core_src_clk != NULL)
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (!ret) {
+ ret =
+ msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 1);
+ if ((ret) &&
+ (qclk->ce_core_src_clk != NULL))
+ __qseecom_disable_clk(CLK_QSEE);
+ }
+ }
+ if (ret)
+ pr_err("DFAB Bandwidth req failed (%d)\n",
+ ret);
+ else {
+ qseecom.qsee_bw_count++;
+ data->perf_enabled = true;
+ }
+ } else {
+ qseecom.qsee_bw_count++;
+ data->perf_enabled = true;
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ break;
+ case CLK_SFPB:
+ mutex_lock(&qsee_bw_mutex);
+ if (!qseecom.qsee_sfpb_bw_count) {
+ if (qseecom.qsee_bw_count > 0)
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 3);
+ else {
+ if (qclk->ce_core_src_clk != NULL)
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (!ret) {
+ ret =
+ msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 2);
+ if ((ret) &&
+ (qclk->ce_core_src_clk != NULL))
+ __qseecom_disable_clk(CLK_QSEE);
+ }
+ }
+
+ if (ret)
+ pr_err("SFPB Bandwidth req failed (%d)\n",
+ ret);
+ else {
+ qseecom.qsee_sfpb_bw_count++;
+ data->fast_load_enabled = true;
+ }
+ } else {
+ qseecom.qsee_sfpb_bw_count++;
+ data->fast_load_enabled = true;
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ break;
+ default:
+ pr_err("Clock type not defined\n");
+ break;
+ }
+ return ret;
+}
+
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+ int32_t clk_type)
+{
+ int32_t ret = 0;
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+
+ if (qseecom.no_clock_support)
+ return;
+ if (!qseecom.qsee_perf_client)
+ return;
+
+ switch (clk_type) {
+ case CLK_DFAB:
+ mutex_lock(&qsee_bw_mutex);
+ if (qseecom.qsee_bw_count == 0) {
+			pr_err("Client error. Extra call to disable DFAB clk\n");
+ mutex_unlock(&qsee_bw_mutex);
+ return;
+ }
+
+ if (qseecom.qsee_bw_count == 1) {
+ if (qseecom.qsee_sfpb_bw_count > 0)
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 2);
+ else {
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 0);
+ if ((!ret) && (qclk->ce_core_src_clk != NULL))
+ __qseecom_disable_clk(CLK_QSEE);
+ }
+ if (ret)
+				pr_err("DFAB Bandwidth req fail (%d)\n",
+ ret);
+ else {
+ qseecom.qsee_bw_count--;
+ data->perf_enabled = false;
+ }
+ } else {
+ qseecom.qsee_bw_count--;
+ data->perf_enabled = false;
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ break;
+ case CLK_SFPB:
+ mutex_lock(&qsee_bw_mutex);
+ if (qseecom.qsee_sfpb_bw_count == 0) {
+			pr_err("Client error. Extra call to disable SFPB clk\n");
+ mutex_unlock(&qsee_bw_mutex);
+ return;
+ }
+ if (qseecom.qsee_sfpb_bw_count == 1) {
+ if (qseecom.qsee_bw_count > 0)
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 1);
+ else {
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, 0);
+ if ((!ret) && (qclk->ce_core_src_clk != NULL))
+ __qseecom_disable_clk(CLK_QSEE);
+ }
+ if (ret)
+ pr_err("SFPB Bandwidth req fail (%d)\n",
+ ret);
+ else {
+ qseecom.qsee_sfpb_bw_count--;
+ data->fast_load_enabled = false;
+ }
+ } else {
+ qseecom.qsee_sfpb_bw_count--;
+ data->fast_load_enabled = false;
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ break;
+ default:
+ pr_err("Clock type not defined\n");
+ break;
+ }
+}
+
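+/*
+ * Load an external (non-QSEE-app) ELF image into TZ: the image arrives
+ * as an ion fd, its physical address and mdt/img lengths are packed into
+ * a 32-bit or 64-bit load request depending on the QSEE version, and the
+ * buffer is cache-cleaned before the SCM call.
+ */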
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct ion_handle *ihandle; /* Ion handle */
+ struct qseecom_load_img_req load_img_req;
+ int uret = 0;
+ int ret;
+ ion_phys_addr_t pa = 0;
+ size_t len;
+ struct qseecom_load_app_ireq load_req;
+ struct qseecom_load_app_64bit_ireq load_req_64bit;
+ struct qseecom_command_scm_resp resp;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ /* Copy the relevant information needed for loading the image */
+ if (copy_from_user(&load_img_req,
+ (void __user *)argp,
+ sizeof(struct qseecom_load_img_req))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ /* Get the handle of the shared fd */
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ load_img_req.ifd_data_fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ return -ENOMEM;
+ }
+
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+ if (ret) {
+ pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+ ret);
+ return ret;
+ }
+ if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+ pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+ len, load_img_req.mdt_len,
+ load_img_req.img_len);
+		return -EINVAL;
+ }
+ /* Populate the structure for sending scm call to load image */
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+ load_req.mdt_len = load_img_req.mdt_len;
+ load_req.img_len = load_img_req.img_len;
+ load_req.phy_addr = (uint32_t)pa;
+ cmd_buf = (void *)&load_req;
+ cmd_len = sizeof(struct qseecom_load_app_ireq);
+ } else {
+ load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+ load_req_64bit.mdt_len = load_img_req.mdt_len;
+ load_req_64bit.img_len = load_img_req.img_len;
+ load_req_64bit.phy_addr = (uint64_t)pa;
+ cmd_buf = (void *)&load_req_64bit;
+ cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+ }
+
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+ mutex_unlock(&qsee_bw_mutex);
+ if (ret) {
+ ret = -EIO;
+ goto exit_cpu_restore;
+ }
+ }
+
+ /* Vote for the SFPB clock */
+ ret = __qseecom_enable_clk_scale_up(data);
+ if (ret) {
+ ret = -EIO;
+ goto exit_register_bus_bandwidth_needs;
+ }
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto exit_disable_clock;
+ }
+ /* SCM_CALL to load the external elf */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to load failed : ret %d\n",
+ ret);
+ ret = -EFAULT;
+ goto exit_disable_clock;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ pr_err("%s: qseos result incomplete\n", __func__);
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret)
+ pr_err("process_incomplete_cmd failed: err: %d\n", ret);
+ break;
+ case QSEOS_RESULT_FAILURE:
+ pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+ ret = -EFAULT;
+ break;
+ default:
+ pr_err("scm_call response result %d not supported\n",
+ resp.result);
+ ret = -EFAULT;
+ break;
+ }
+
+exit_disable_clock:
+ __qseecom_disable_clk_scale_down(data);
+
+exit_register_bus_bandwidth_needs:
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ uret = qseecom_unregister_bus_bandwidth_needs(data);
+ mutex_unlock(&qsee_bw_mutex);
+ if (uret)
+ pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
+ uret, ret);
+ }
+
+exit_cpu_restore:
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+ int ret = 0;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_unload_app_ireq req;
+
+ /* unavailable client app */
+ data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+
+ /* Populate the structure for sending scm call to unload image */
+ req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+ /* SCM_CALL to unload the external elf */
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+ sizeof(struct qseecom_unload_app_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to unload failed : ret %d\n",
+ ret);
+ ret = -EFAULT;
+ goto qseecom_unload_external_elf_scm_err;
+ }
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret)
+ pr_err("process_incomplete_cmd fail err: %d\n",
+ ret);
+ } else {
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("scm_call to unload image failed resp.result =%d\n",
+ resp.result);
+ ret = -EFAULT;
+ }
+ }
+
+qseecom_unload_external_elf_scm_err:
+ return ret;
+}
+
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int32_t ret;
+ struct qseecom_qseos_app_load_query query_req;
+ struct qseecom_check_app_ireq req;
+ struct qseecom_registered_app_list *entry = NULL;
+ unsigned long flags = 0;
+ uint32_t app_arch = 0, app_id = 0;
+ bool found_app = false;
+
+ /* Copy the relevant information needed for loading the image */
+ if (copy_from_user(&query_req,
+ (void __user *)argp,
+ sizeof(struct qseecom_qseos_app_load_query))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+ query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
+ strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+ ret = __qseecom_check_app_exists(req, &app_id);
+ if (ret) {
+		pr_err("scm call to check if app is loaded failed\n");
+ return ret; /* scm call failed */
+ }
+ if (app_id) {
+ pr_debug("App id %d (%s) already exists\n", app_id,
+ (char *)(req.app_name));
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list){
+ if (entry->app_id == app_id) {
+ app_arch = entry->app_arch;
+ entry->ref_cnt++;
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ data->client.app_id = app_id;
+ query_req.app_id = app_id;
+ if (app_arch) {
+ data->client.app_arch = app_arch;
+ query_req.app_arch = app_arch;
+ } else {
+ data->client.app_arch = 0;
+ query_req.app_arch = 0;
+ }
+ strlcpy(data->client.app_name, query_req.app_name,
+ MAX_APP_NAME_SIZE);
+		/*
+		 * If the app was loaded by appsbl before and was not
+		 * registered, register it now.
+		 */
+ if (!found_app) {
+			pr_debug("Register app %d [%s] which was loaded before\n",
+					app_id, (char *)query_req.app_name);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("kmalloc for app entry failed\n");
+ return -ENOMEM;
+ }
+ entry->app_id = app_id;
+ entry->ref_cnt = 1;
+ entry->app_arch = data->client.app_arch;
+ strlcpy(entry->app_name, data->client.app_name,
+ MAX_APP_NAME_SIZE);
+ entry->app_blocked = false;
+ entry->blocked_on_listener_id = 0;
+ spin_lock_irqsave(&qseecom.registered_app_list_lock,
+ flags);
+ list_add_tail(&entry->list,
+ &qseecom.registered_app_list_head);
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ }
+ if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
+ return -EEXIST; /* app already loaded */
+ } else {
+ return 0; /* app not loaded */
+ }
+}
+
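+/*
+ * Look up the CE HW instance numbers and pipe pair for @usage in the
+ * FDE or PFE ce_info tables, matching on @unit: one (*ce_hw)[] slot is
+ * filled per pipe entry of the matching unit.
+ */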
+static int __qseecom_get_ce_pipe_info(
+ enum qseecom_key_management_usage_type usage,
+ uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
+{
+ int ret = -EINVAL;
+ int i, j;
+ struct qseecom_ce_info_use *p = NULL;
+ int total = 0;
+ struct qseecom_ce_pipe_entry *pcepipe;
+
+ switch (usage) {
+ case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+ if (qseecom.support_fde) {
+ p = qseecom.ce_info.fde;
+ total = qseecom.ce_info.num_fde;
+ } else {
+ pr_err("system does not support fde\n");
+ return -EINVAL;
+ }
+ break;
+ case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+ if (qseecom.support_pfe) {
+ p = qseecom.ce_info.pfe;
+ total = qseecom.ce_info.num_pfe;
+ } else {
+ pr_err("system does not support pfe\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("unsupported usage %d\n", usage);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < total; j++) {
+ if (p->unit_num == unit) {
+ pcepipe = p->ce_pipe_entry;
+ for (i = 0; i < p->num_ce_pipe_entries; i++) {
+ (*ce_hw)[i] = pcepipe->ce_num;
+ *pipe = pcepipe->ce_pipe_pair;
+ pcepipe++;
+ }
+ ret = 0;
+ break;
+ }
+ p++;
+ }
+ return ret;
+}
+
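+/*
+ * Ask TZ to generate and persist a key for @usage. A key id that
+ * already exists (QSEOS_RESULT_FAIL_KEY_ID_EXISTS) is treated as
+ * success, which keeps the call idempotent across reboots.
+ */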
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ struct qseecom_key_generate_ireq *ireq)
+{
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ ireq, sizeof(struct qseecom_key_generate_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ if (ret == -EINVAL &&
+ resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+ pr_debug("Key ID exists.\n");
+ ret = 0;
+ } else {
+ pr_err("scm call to generate key failed : %d\n", ret);
+ ret = -EFAULT;
+ }
+ goto generate_key_exit;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+ pr_debug("Key ID exists.\n");
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+ pr_debug("Key ID exists.\n");
+ ret = 0;
+ } else {
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ }
+ }
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("gen key scm call failed resp.result %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ }
+generate_key_exit:
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ struct qseecom_key_delete_ireq *ireq)
+{
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ ireq, sizeof(struct qseecom_key_delete_ireq),
+ &resp, sizeof(struct qseecom_command_scm_resp));
+ if (ret) {
+ if (ret == -EINVAL &&
+ resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+ pr_debug("Max attempts to input password reached.\n");
+ ret = -ERANGE;
+ } else {
+ pr_err("scm call to delete key failed : %d\n", ret);
+ ret = -EFAULT;
+ }
+ goto del_key_exit;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+ pr_debug("Max attempts to input password reached.\n");
+ ret = -ERANGE;
+ }
+ }
+ break;
+ case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+ pr_debug("Max attempts to input password reached.\n");
+ ret = -ERANGE;
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("Delete key scm call failed resp.result %d\n",
+ resp.result);
+ ret = -EINVAL;
+ break;
+ }
+del_key_exit:
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+}
+
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ struct qseecom_key_select_ireq *ireq)
+{
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+
+ if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+ ret = __qseecom_enable_clk(CLK_CE_DRV);
+ if (ret)
+ return ret;
+ }
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ ireq, sizeof(struct qseecom_key_select_ireq),
+ &resp, sizeof(struct qseecom_command_scm_resp));
+ if (ret) {
+ if (ret == -EINVAL &&
+ resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+ pr_debug("Max attempts to input password reached.\n");
+ ret = -ERANGE;
+ } else if (ret == -EINVAL &&
+ resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+ pr_debug("Set Key operation under processing...\n");
+ ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+ } else {
+ pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
+ ret);
+ ret = -EFAULT;
+ }
+ goto set_key_exit;
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ if (resp.result ==
+ QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+ pr_debug("Set Key operation under processing...\n");
+ ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+ }
+ if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+ pr_debug("Max attempts to input password reached.\n");
+ ret = -ERANGE;
+ }
+ }
+ break;
+ case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+ pr_debug("Max attempts to input password reached.\n");
+ ret = -ERANGE;
+ break;
+ case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+ pr_debug("Set Key operation under processing...\n");
+ ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("Set key scm call failed resp.result %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ }
+set_key_exit:
+ __qseecom_disable_clk(CLK_QSEE);
+ if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+ __qseecom_disable_clk(CLK_CE_DRV);
+ return ret;
+}
+
+static int __qseecom_update_current_key_user_info(
+ struct qseecom_dev_handle *data,
+ enum qseecom_key_management_usage_type usage,
+ struct qseecom_key_userinfo_update_ireq *ireq)
+{
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+ return -EFAULT;
+ }
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ return ret;
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
+ &resp, sizeof(struct qseecom_command_scm_resp));
+ if (ret) {
+ if (ret == -EINVAL &&
+ resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+ pr_debug("Set Key operation under processing...\n");
+ ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+ } else {
+ pr_err("scm call to update key userinfo failed: %d\n",
+ ret);
+ __qseecom_disable_clk(CLK_QSEE);
+ return -EFAULT;
+ }
+ }
+
+ switch (resp.result) {
+ case QSEOS_RESULT_SUCCESS:
+ break;
+ case QSEOS_RESULT_INCOMPLETE:
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (resp.result ==
+ QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+ pr_debug("Set Key operation under processing...\n");
+ ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+ }
+ if (ret)
+ pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+ resp.result);
+ break;
+ case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+ pr_debug("Update Key operation under processing...\n");
+ ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+ break;
+ case QSEOS_RESULT_FAILURE:
+ default:
+ pr_err("Set key scm call failed resp.result %d\n", resp.result);
+ ret = -EINVAL;
+ break;
+ }
+
+ __qseecom_disable_clk(CLK_QSEE);
+ return ret;
+}
+
+static int qseecom_enable_ice_setup(int usage)
+{
+ int ret = 0;
+
+ if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+ ret = qcom_ice_setup_ice_hw("ufs", true);
+ else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+ ret = qcom_ice_setup_ice_hw("sdcc", true);
+
+ return ret;
+}
+
+static int qseecom_disable_ice_setup(int usage)
+{
+ int ret = 0;
+
+ if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+ ret = qcom_ice_setup_ice_hw("ufs", false);
+ else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+ ret = qcom_ice_setup_ice_hw("sdcc", false);
+
+ return ret;
+}
+
+static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
+{
+ struct qseecom_ce_info_use *pce_info_use, *p;
+ int total = 0;
+ int i;
+
+ switch (usage) {
+ case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+ p = qseecom.ce_info.fde;
+ total = qseecom.ce_info.num_fde;
+ break;
+ case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+ p = qseecom.ce_info.pfe;
+ total = qseecom.ce_info.num_pfe;
+ break;
+ default:
+ pr_err("unsupported usage %d\n", usage);
+ return -EINVAL;
+ }
+
+ pce_info_use = NULL;
+
+ for (i = 0; i < total; i++) {
+ if (p->unit_num == unit) {
+ pce_info_use = p;
+ break;
+ }
+ p++;
+ }
+ if (!pce_info_use) {
+		pr_err("cannot find ce info unit %d\n", unit);
+ return -EINVAL;
+ }
+ return pce_info_use->num_ce_pipe_entries;
+}
+
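+/*
+ * Create a storage-encryption key: generate and save the key in TZ,
+ * then program it into every CE pipe reported by
+ * __qseecom_get_ce_pipe_info() (or into the fixed ICE key slot for
+ * UFS/SDCC ICE usage), retrying while TZ reports the set-key operation
+ * as still pending.
+ */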
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int i;
+ uint32_t *ce_hw = NULL;
+ uint32_t pipe = 0;
+ int ret = 0;
+ uint32_t flags = 0;
+ struct qseecom_create_key_req create_key_req;
+ struct qseecom_key_generate_ireq generate_key_ireq;
+ struct qseecom_key_select_ireq set_key_ireq;
+	int entries = 0;
+
+ ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+ pr_err("unsupported usage %d\n", create_key_req.usage);
+ ret = -EFAULT;
+ return ret;
+ }
+ entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+ create_key_req.usage);
+ if (entries <= 0) {
+ pr_err("no ce instance for usage %d instance %d\n",
+ DEFAULT_CE_INFO_UNIT, create_key_req.usage);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+ if (!ce_hw) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+ DEFAULT_CE_INFO_UNIT);
+ if (ret) {
+ pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ if (qseecom.fde_key_size)
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+ else
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+ generate_key_ireq.flags = flags;
+ generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+ memset((void *)generate_key_ireq.key_id,
+ 0, QSEECOM_KEY_ID_SIZE);
+ memset((void *)generate_key_ireq.hash32,
+ 0, QSEECOM_HASH_SIZE);
+ memcpy((void *)generate_key_ireq.key_id,
+ (void *)key_id_array[create_key_req.usage].desc,
+ QSEECOM_KEY_ID_SIZE);
+ memcpy((void *)generate_key_ireq.hash32,
+ (void *)create_key_req.hash32,
+ QSEECOM_HASH_SIZE);
+
+ ret = __qseecom_generate_and_save_key(data,
+ create_key_req.usage, &generate_key_ireq);
+ if (ret) {
+ pr_err("Failed to generate key on storage: %d\n", ret);
+ goto free_buf;
+ }
+
+ for (i = 0; i < entries; i++) {
+ set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+ if (create_key_req.usage ==
+ QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+ set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+ set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+ } else if (create_key_req.usage ==
+ QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+ set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+ set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+ } else {
+ set_key_ireq.ce = ce_hw[i];
+ set_key_ireq.pipe = pipe;
+ }
+ set_key_ireq.flags = flags;
+
+		/* set both PIPE_ENC and PIPE_ENC_XTS */
+ set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+ memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+ memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+ memcpy((void *)set_key_ireq.key_id,
+ (void *)key_id_array[create_key_req.usage].desc,
+ QSEECOM_KEY_ID_SIZE);
+ memcpy((void *)set_key_ireq.hash32,
+ (void *)create_key_req.hash32,
+ QSEECOM_HASH_SIZE);
+		/*
+		 * qseecom_enable_ice_setup() returns 0 either for a GPCE
+		 * based crypto instance or once ICE is set up properly.
+		 */
+ ret = qseecom_enable_ice_setup(create_key_req.usage);
+ if (ret)
+ goto free_buf;
+
+ do {
+ ret = __qseecom_set_clear_ce_key(data,
+ create_key_req.usage,
+ &set_key_ireq);
+ /*
+ * wait a little before calling scm again to let other
+ * processes run
+ */
+ if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+ msleep(50);
+
+ } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+ qseecom_disable_ice_setup(create_key_req.usage);
+
+ if (ret) {
+ pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+ pipe, ce_hw[i], ret);
+ goto free_buf;
+ } else {
+			pr_debug("Set the key successfully\n");
+ if ((create_key_req.usage ==
+ QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+ (create_key_req.usage ==
+ QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+ goto free_buf;
+ }
+ }
+
+free_buf:
+ kzfree(ce_hw);
+ return ret;
+}
+
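+/*
+ * Wipe a storage-encryption key: optionally delete the saved key in TZ
+ * (wipe_key_flag), then clear the key slot in every CE pipe by
+ * programming QSEECOM_INVALID_KEY_ID into it.
+ */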
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ uint32_t *ce_hw = NULL;
+ uint32_t pipe = 0;
+ int ret = 0;
+ uint32_t flags = 0;
+ int i, j;
+ struct qseecom_wipe_key_req wipe_key_req;
+ struct qseecom_key_delete_ireq delete_key_ireq;
+ struct qseecom_key_select_ireq clear_key_ireq;
+	int entries = 0;
+
+ ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+ pr_err("unsupported usage %d\n", wipe_key_req.usage);
+ ret = -EFAULT;
+ return ret;
+ }
+
+ entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+ wipe_key_req.usage);
+ if (entries <= 0) {
+ pr_err("no ce instance for usage %d instance %d\n",
+ DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+ if (!ce_hw) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
+ DEFAULT_CE_INFO_UNIT);
+ if (ret) {
+ pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ if (wipe_key_req.wipe_key_flag) {
+ delete_key_ireq.flags = flags;
+ delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
+ memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+ memcpy((void *)delete_key_ireq.key_id,
+ (void *)key_id_array[wipe_key_req.usage].desc,
+ QSEECOM_KEY_ID_SIZE);
+ memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+ ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
+ &delete_key_ireq);
+ if (ret) {
+ pr_err("Failed to delete key from ssd storage: %d\n",
+ ret);
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ }
+
+ for (j = 0; j < entries; j++) {
+ clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+ if (wipe_key_req.usage ==
+ QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+ clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+ clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+ } else if (wipe_key_req.usage ==
+ QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+ clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+ clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+ } else {
+ clear_key_ireq.ce = ce_hw[j];
+ clear_key_ireq.pipe = pipe;
+ }
+ clear_key_ireq.flags = flags;
+ clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+ for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+ clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
+ memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		/*
+		 * qseecom_enable_ice_setup() returns 0 either for a GPCE
+		 * based crypto instance or once ICE is set up properly.
+		 */
+ ret = qseecom_enable_ice_setup(wipe_key_req.usage);
+ if (ret)
+ goto free_buf;
+
+ ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+ &clear_key_ireq);
+
+ qseecom_disable_ice_setup(wipe_key_req.usage);
+
+ if (ret) {
+ pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+ pipe, ce_hw[j], ret);
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ }
+
+free_buf:
+ kzfree(ce_hw);
+ return ret;
+}
+
+static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ int ret = 0;
+ uint32_t flags = 0;
+ struct qseecom_update_key_userinfo_req update_key_req;
+ struct qseecom_key_userinfo_update_ireq ireq;
+
+ ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+ update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", update_key_req.usage);
+ return -EFAULT;
+ }
+
+ ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+ if (qseecom.fde_key_size)
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+ else
+ flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+ ireq.flags = flags;
+ memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+ memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
+ memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
+ memcpy((void *)ireq.key_id,
+ (void *)key_id_array[update_key_req.usage].desc,
+ QSEECOM_KEY_ID_SIZE);
+ memcpy((void *)ireq.current_hash32,
+ (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
+ memcpy((void *)ireq.new_hash32,
+ (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
+
+ do {
+ ret = __qseecom_update_current_key_user_info(data,
+ update_key_req.usage,
+ &ireq);
+ /*
+ * wait a little before calling scm again to let other
+ * processes run
+ */
+ if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+ msleep(50);
+
+ } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+	if (ret)
+		pr_err("Failed to update key info: %d\n", ret);
+	return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+ struct qseecom_is_es_activated_req req;
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ if (qseecom.qsee_version < QSEE_VERSION_04) {
+ pr_err("invalid qsee version\n");
+ return -ENODEV;
+ }
+
+ if (argp == NULL) {
+ pr_err("arg is null\n");
+ return -EINVAL;
+ }
+
+ ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call failed\n");
+ return ret;
+ }
+
+ req.is_activated = resp.result;
+ ret = copy_to_user(argp, &req, sizeof(req));
+ if (ret) {
+ pr_err("copy_to_user failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+ struct qseecom_save_partition_hash_req req;
+ struct qseecom_command_scm_resp resp;
+ int ret;
+
+ memset(&resp, 0x00, sizeof(resp));
+
+ if (qseecom.qsee_version < QSEE_VERSION_04) {
+ pr_err("invalid qsee version\n");
+ return -ENODEV;
+ }
+
+ if (argp == NULL) {
+ pr_err("arg is null\n");
+ return -EINVAL;
+ }
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+ (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
+ if (ret) {
+ pr_err("qseecom_scm_call failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
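+/*
+ * MDTP DIP cipher: bounce the user buffers through page-aligned kernel
+ * buffers, flush them around the TZ access, and invoke
+ * TZ_MDTP_CIPHER_DIP_ID via scm_call2(). The do { ... } while (0) block
+ * exists only so error paths can break out to the common kzfree()
+ * cleanup.
+ */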
+static int qseecom_mdtp_cipher_dip(void __user *argp)
+{
+ struct qseecom_mdtp_cipher_dip_req req;
+ u32 tzbuflenin, tzbuflenout;
+ char *tzbufin = NULL, *tzbufout = NULL;
+ struct scm_desc desc = {0};
+ int ret;
+
+ do {
+ /* Copy the parameters from userspace */
+ if (argp == NULL) {
+ pr_err("arg is null\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ pr_err("copy_from_user failed, ret= %d\n", ret);
+ break;
+ }
+
+ if (req.in_buf == NULL || req.out_buf == NULL ||
+ req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+ req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
+ req.direction > 1) {
+ pr_err("invalid parameters\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Copy the input buffer from userspace to kernel space */
+ tzbuflenin = PAGE_ALIGN(req.in_buf_size);
+ tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
+ if (!tzbufin) {
+ pr_err("error allocating in buffer\n");
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
+ if (ret) {
+ pr_err("copy_from_user failed, ret=%d\n", ret);
+ break;
+ }
+
+ dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
+
+ /* Prepare the output buffer in kernel space */
+ tzbuflenout = PAGE_ALIGN(req.out_buf_size);
+ tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
+ if (!tzbufout) {
+ pr_err("error allocating out buffer\n");
+ ret = -ENOMEM;
+ break;
+ }
+
+ dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+
+ /* Send the command to TZ */
+ desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
+ desc.args[0] = virt_to_phys(tzbufin);
+ desc.args[1] = req.in_buf_size;
+ desc.args[2] = virt_to_phys(tzbufout);
+ desc.args[3] = req.out_buf_size;
+ desc.args[4] = req.direction;
+
+ ret = __qseecom_enable_clk(CLK_QSEE);
+ if (ret)
+ break;
+
+ ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+
+ __qseecom_disable_clk(CLK_QSEE);
+
+ if (ret) {
+ pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
+ ret);
+ break;
+ }
+
+ /* Copy the output buffer from kernel space to userspace */
+ dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+ ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
+ if (ret) {
+ pr_err("copy_to_user failed, ret=%d\n", ret);
+ break;
+ }
+ } while (0);
+
+ kzfree(tzbufin);
+ kzfree(tzbufout);
+
+ return ret;
+}
+
+static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
+ struct qseecom_qteec_req *req)
+{
+ if (!data || !data->client.ihandle) {
+ pr_err("Client or client handle is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (data->type != QSEECOM_CLIENT_APP)
+ return -EFAULT;
+
+ if (req->req_len > UINT_MAX - req->resp_len) {
+ pr_err("Integer overflow detected in req_len & rsp_len\n");
+ return -EINVAL;
+ }
+
+ if (req->req_len + req->resp_len > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+			(req->req_len + req->resp_len), data->client.sb_length);
+ return -ENOMEM;
+ }
+
+ if (req->req_ptr == NULL || req->resp_ptr == NULL) {
+ pr_err("cmd buffer or response buffer is null\n");
+ return -EINVAL;
+ }
+ if (((uintptr_t)req->req_ptr <
+ data->client.user_virt_sb_base) ||
+ ((uintptr_t)req->req_ptr >=
+ (data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+
+ if (((uintptr_t)req->resp_ptr <
+ data->client.user_virt_sb_base) ||
+ ((uintptr_t)req->resp_ptr >=
+ (data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+ return -EINVAL;
+ }
+
+ if ((req->req_len == 0) || (req->resp_len == 0)) {
+		pr_err("cmd buf length/response buf length not valid\n");
+ return -EINVAL;
+ }
+
+ if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
+ pr_err("Integer overflow in req_len & req_ptr\n");
+ return -EINVAL;
+ }
+
+ if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
+ pr_err("Integer overflow in resp_len & resp_ptr\n");
+ return -EINVAL;
+ }
+
+ if (data->client.user_virt_sb_base >
+ (ULONG_MAX - data->client.sb_length)) {
+ pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+ return -EINVAL;
+ }
+ if ((((uintptr_t)req->req_ptr + req->req_len) >
+ ((uintptr_t)data->client.user_virt_sb_base +
+ data->client.sb_length)) ||
+ (((uintptr_t)req->resp_ptr + req->resp_len) >
+ ((uintptr_t)data->client.user_virt_sb_base +
+ data->client.sb_length))) {
+ pr_err("cmd buf or resp buf is out of shared buffer region\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
+ uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+ struct scatterlist *sg = sg_ptr->sgl;
+ struct qseecom_sg_entry *sg_entry;
+ void *buf;
+ uint i;
+ size_t size;
+ dma_addr_t coh_pmem;
+
+ if (fd_idx >= MAX_ION_FD) {
+ pr_err("fd_idx [%d] is invalid\n", fd_idx);
+ return -ENOMEM;
+ }
+ /*
+	 * Allocate a buffer, populate it with the number of entries plus
+	 * each sg entry's physical address and length, then return the
+	 * physical address of the buffer.
+ */
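+	/*
+	 * Resulting buffer layout (illustrative):
+	 *	u32 nents | sg_entry[0] {phys_addr, len} | sg_entry[1] | ...
+	 */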
+ size = sizeof(uint32_t) +
+ sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
+ size = (size + PAGE_SIZE) & PAGE_MASK;
+ buf = dma_alloc_coherent(qseecom.pdev,
+ size, &coh_pmem, GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("failed to alloc memory for sg buf\n");
+ return -ENOMEM;
+ }
+ *(uint32_t *)buf = sg_ptr->nents;
+ sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
+ for (i = 0; i < sg_ptr->nents; i++) {
+ sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
+ sg_entry->len = sg->length;
+ sg_entry++;
+ sg = sg_next(sg);
+ }
+ data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+ data->client.sec_buf_fd[fd_idx].vbase = buf;
+ data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+ data->client.sec_buf_fd[fd_idx].size = size;
+ return 0;
+}
+
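+/*
+ * Walk the modfd request's ion fds and patch the word at each
+ * cmd_buf_offset with a physical address: a pre-allocated secure-heap
+ * fd (*update == 0) gets a freshly built sg-entry buffer, while a
+ * normal qseecom-heap fd gets its single sg segment's dma address.
+ * With @cleanup the patching is undone and the caches invalidated.
+ */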
+static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
+ struct qseecom_dev_handle *data, bool cleanup)
+{
+ struct ion_handle *ihandle;
+ int ret = 0;
+ int i = 0;
+ uint32_t *update;
+ struct sg_table *sg_ptr = NULL;
+ struct scatterlist *sg;
+ struct qseecom_param_memref *memref;
+
+ if (req == NULL) {
+ pr_err("Invalid address\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if (req->ifd_data[i].fd > 0) {
+ ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+ req->ifd_data[i].fd);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client can't retrieve the handle\n");
+ return -ENOMEM;
+ }
+ if ((req->req_len < sizeof(uint32_t)) ||
+ (req->ifd_data[i].cmd_buf_offset >
+ req->req_len - sizeof(uint32_t))) {
+ pr_err("Invalid offset/req len 0x%x/0x%x\n",
+ req->req_len,
+ req->ifd_data[i].cmd_buf_offset);
+ return -EINVAL;
+ }
+ update = (uint32_t *)((char *) req->req_ptr +
+ req->ifd_data[i].cmd_buf_offset);
+ if (!update) {
+ pr_err("update pointer is NULL\n");
+ return -EINVAL;
+ }
+ } else {
+ continue;
+ }
+ /* Populate the cmd data structure with the phys_addr */
+ sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+ if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+ goto err;
+ }
+ sg = sg_ptr->sgl;
+ if (sg == NULL) {
+ pr_err("sg is NULL\n");
+ goto err;
+ }
+ if ((sg_ptr->nents == 0) || (sg->length == 0)) {
+			pr_err("Num of scat entr (%d) or length (%d) invalid\n",
+ sg_ptr->nents, sg->length);
+ goto err;
+ }
+ /* clean up buf for pre-allocated fd */
+ if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
+ (*update)) {
+ if (data->client.sec_buf_fd[i].vbase)
+ dma_free_coherent(qseecom.pdev,
+ data->client.sec_buf_fd[i].size,
+ data->client.sec_buf_fd[i].vbase,
+ data->client.sec_buf_fd[i].pbase);
+ memset((void *)update, 0,
+ sizeof(struct qseecom_param_memref));
+ memset(&(data->client.sec_buf_fd[i]), 0,
+ sizeof(struct qseecom_sec_buf_fd_info));
+ goto clean;
+ }
+
+ if (*update == 0) {
+ /* update buf for pre-allocated fd from secure heap*/
+ ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
+ sg_ptr);
+ if (ret) {
+ pr_err("Failed to handle buf for fd[%d]\n", i);
+ goto err;
+ }
+ memref = (struct qseecom_param_memref *)update;
+ memref->buffer =
+ (uint32_t)(data->client.sec_buf_fd[i].pbase);
+ memref->size =
+ (uint32_t)(data->client.sec_buf_fd[i].size);
+ } else {
+ /* update buf for fd from non-secure qseecom heap */
+ if (sg_ptr->nents != 1) {
+ pr_err("Num of scat entr (%d) invalid\n",
+ sg_ptr->nents);
+ goto err;
+ }
+ if (cleanup)
+ *update = 0;
+ else
+ *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+ }
+clean:
+ if (cleanup) {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ihandle, NULL, sg->length,
+ ION_IOC_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto err;
+ }
+ } else {
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ihandle, NULL, sg->length,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ goto err;
+ }
+ data->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0,
+ req->ifd_data[i].cmd_buf_offset);
+ data->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ data->sglist_cnt = i + 1;
+ }
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ }
+ return ret;
+err:
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ return -ENOMEM;
+}
+
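+/*
+ * Common QTEEC command path: validate the request, patch in any modfd
+ * buffers for OPEN_SESSION/REQUEST_CANCELLATION, build a 32-bit or
+ * 64-bit ireq depending on the QSEE version, clean the shared buffer
+ * cache, and issue the SCM call, handling reentrancy or incomplete
+ * results afterwards.
+ */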
+static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
+ struct qseecom_qteec_req *req, uint32_t cmd_id)
+{
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_qteec_ireq ireq;
+ struct qseecom_qteec_64bit_ireq ireq_64bit;
+ struct qseecom_registered_app_list *ptr_app;
+ bool found_app = false;
+ unsigned long flags;
+ int ret = 0;
+ uint32_t reqd_len_sb_in = 0;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = data->sglistinfo_ptr;
+
+ ret = __qseecom_qteec_validate_msg(data, req);
+ if (ret)
+ return ret;
+
+ /* find app_id & img_name from list */
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+ list) {
+ if ((ptr_app->app_id == data->client.app_id) &&
+ (!strcmp(ptr_app->app_name, data->client.app_name))) {
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+ if (!found_app) {
+ pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+ (char *)data->client.app_name);
+ return -ENOENT;
+ }
+
+ if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+ (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+ ret = __qseecom_update_qteec_req_buf(
+ (struct qseecom_qteec_modfd_req *)req, data, false);
+ if (ret)
+ return ret;
+ }
+
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ ireq.app_id = data->client.app_id;
+ ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->req_ptr);
+ ireq.req_len = req->req_len;
+ ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->resp_ptr);
+ ireq.resp_len = req->resp_len;
+ ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+ ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&ireq;
+ cmd_len = sizeof(struct qseecom_qteec_ireq);
+ } else {
+ ireq_64bit.app_id = data->client.app_id;
+ ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->req_ptr);
+ ireq_64bit.req_len = req->req_len;
+ ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->resp_ptr);
+ ireq_64bit.resp_len = req->resp_len;
+ if ((data->client.app_arch == ELFCLASS32) &&
+ ((ireq_64bit.req_ptr >=
+ PHY_ADDR_4G - ireq_64bit.req_len) ||
+ (ireq_64bit.resp_ptr >=
+ PHY_ADDR_4G - ireq_64bit.resp_len))){
+ pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
+ data->client.app_name, data->client.app_id);
+ pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
+ ireq_64bit.req_ptr, ireq_64bit.req_len,
+ ireq_64bit.resp_ptr, ireq_64bit.resp_len);
+ return -EFAULT;
+ }
+ ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+ ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&ireq_64bit;
+ cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+ }
+ if (qseecom.whitelist_support == true
+ && cmd_id == QSEOS_TEE_OPEN_SESSION)
+ *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+ else
+ *(uint32_t *)cmd_buf = cmd_id;
+
+ reqd_len_sb_in = req->req_len + req->resp_len;
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt,
+ reqd_len_sb_in,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+
+ __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+ ret, data->client.app_id);
+ return ret;
+ }
+
+ if (qseecom.qsee_reentrancy_support) {
+ ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+ } else {
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("Response result %d not supported\n",
+ resp.result);
+ ret = -EINVAL;
+ }
+ }
+ }
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt, data->client.sb_length,
+ ION_IOC_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+
+ if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+ (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+ ret = __qseecom_update_qteec_req_buf(
+ (struct qseecom_qteec_modfd_req *)req, data, true);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
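+/*
+ * The handlers below are thin ioctl wrappers: each copies the userspace
+ * request and maps one GP TEE client operation onto
+ * __qseecom_qteec_issue_cmd() with the matching QSEOS_TEE_* command id.
+ */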
+static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_qteec_modfd_req req;
+ int ret = 0;
+
+ ret = copy_from_user(&req, argp,
+ sizeof(struct qseecom_qteec_modfd_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+ QSEOS_TEE_OPEN_SESSION);
+
+ return ret;
+}
+
+static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_qteec_req req;
+ int ret = 0;
+
+ ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
+ return ret;
+}
+
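+/*
+ * Invoke-command variant carrying modifiable ION fds: every populated
+ * ifd_data[] entry has its cmd_buf_offset validated against req_len, the
+ * referenced ION buffers are patched into the command buffer before the
+ * SCM call, and the same buffers are cleaned up once the call returns.
+ */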
+static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_qteec_modfd_req req;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_qteec_ireq ireq;
+ struct qseecom_qteec_64bit_ireq ireq_64bit;
+ struct qseecom_registered_app_list *ptr_app;
+ bool found_app = false;
+ unsigned long flags;
+ int ret = 0;
+ int i = 0;
+ uint32_t reqd_len_sb_in = 0;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = data->sglistinfo_ptr;
+ void *req_ptr = NULL;
+ void *resp_ptr = NULL;
+
+ ret = copy_from_user(&req, argp,
+ sizeof(struct qseecom_qteec_modfd_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ ret = __qseecom_qteec_validate_msg(data,
+ (struct qseecom_qteec_req *)(&req));
+ if (ret)
+ return ret;
+ req_ptr = req.req_ptr;
+ resp_ptr = req.resp_ptr;
+
+ /* find app_id & img_name from list */
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+ list) {
+ if ((ptr_app->app_id == data->client.app_id) &&
+ (!strcmp(ptr_app->app_name, data->client.app_name))) {
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+ if (!found_app) {
+ pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+ (char *)data->client.app_name);
+ return -ENOENT;
+ }
+
+ /* validate offsets */
+ for (i = 0; i < MAX_ION_FD; i++) {
+ if (req.ifd_data[i].fd) {
+ if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+ return -EINVAL;
+ }
+ }
+ req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.req_ptr);
+ req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.resp_ptr);
+ ret = __qseecom_update_qteec_req_buf(&req, data, false);
+ if (ret)
+ return ret;
+
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ ireq.app_id = data->client.app_id;
+ ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req_ptr);
+ ireq.req_len = req.req_len;
+ ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)resp_ptr);
+ ireq.resp_len = req.resp_len;
+ cmd_buf = (void *)&ireq;
+ cmd_len = sizeof(struct qseecom_qteec_ireq);
+ ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+ ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ } else {
+ ireq_64bit.app_id = data->client.app_id;
+ ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req_ptr);
+ ireq_64bit.req_len = req.req_len;
+ ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+ (uintptr_t)resp_ptr);
+ ireq_64bit.resp_len = req.resp_len;
+ cmd_buf = (void *)&ireq_64bit;
+ cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+ ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+ ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ }
+ reqd_len_sb_in = req.req_len + req.resp_len;
+ if (qseecom.whitelist_support == true)
+ *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+ else
+ *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt,
+ reqd_len_sb_in,
+ ION_IOC_CLEAN_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+
+ __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+ ret, data->client.app_id);
+ return ret;
+ }
+
+ if (qseecom.qsee_reentrancy_support) {
+ ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+ } else {
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("Response result %d not supported\n",
+ resp.result);
+ ret = -EINVAL;
+ }
+ }
+ }
+ ret = __qseecom_update_qteec_req_buf(&req, data, true);
+ if (ret)
+ return ret;
+
+ ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+ data->client.sb_virt, data->client.sb_length,
+ ION_IOC_INV_CACHES);
+ if (ret) {
+ pr_err("cache operation failed %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_qteec_modfd_req req;
+ int ret = 0;
+
+ ret = copy_from_user(&req, argp,
+ sizeof(struct qseecom_qteec_modfd_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+ ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+ QSEOS_TEE_REQUEST_CANCELLATION);
+
+ return ret;
+}
+
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+ if (data->sglist_cnt) {
+ memset(data->sglistinfo_ptr, 0,
+ SGLISTINFO_TABLE_SIZE);
+ data->sglist_cnt = 0;
+ }
+}
+
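+/*
+ * Top-level ioctl dispatcher. An illustrative userspace flow for a client
+ * app (sketch only; assumes the usual /dev node named after QSEECOM_DEV):
+ *
+ *	fd = open("/dev/qseecom", O_RDWR);
+ *	ioctl(fd, QSEECOM_IOCTL_SET_MEM_PARAM_REQ, &mem_req);
+ *	ioctl(fd, QSEECOM_IOCTL_LOAD_APP_REQ, &load_req);
+ *	ioctl(fd, QSEECOM_IOCTL_SEND_CMD_REQ, &cmd_req);
+ *	ioctl(fd, QSEECOM_IOCTL_UNLOAD_APP_REQ, NULL);
+ *
+ * Each case validates the handle type, holds app_access_lock across any
+ * entry into TZ, and brackets the work with ioctl_count/abort_wq so an
+ * abort can wait for in-flight ioctls to drain.
+ */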
+static long qseecom_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct qseecom_dev_handle *data = file->private_data;
+ void __user *argp = (void __user *) arg;
+ bool perf_enabled = false;
+
+ if (!data) {
+ pr_err("Invalid/uninitialized device handle\n");
+ return -EINVAL;
+ }
+
+ if (data->abort) {
+ pr_err("Aborting qseecom driver\n");
+ return -ENODEV;
+ }
+
+ switch (cmd) {
+ case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("reg lstnr req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ pr_debug("ioctl register_listener_req()\n");
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ data->type = QSEECOM_LISTENER_SERVICE;
+ ret = qseecom_register_listener(data, argp);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed qseecom_register_listener: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+ if ((data->listener.id == 0) ||
+ (data->type != QSEECOM_LISTENER_SERVICE)) {
+ pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
+ data->type, data->listener.id);
+ ret = -EINVAL;
+ break;
+ }
+ pr_debug("ioctl unregister_listener_req()\n");
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_unregister_listener(data);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed qseecom_unregister_listener: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_SEND_CMD_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ /* Only one client allowed here at a time */
+ mutex_lock(&app_access_lock);
+ if (qseecom.support_bus_scaling) {
+ /* register bus bw in case the client doesn't do it */
+ if (!data->mode) {
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_register_bus_bandwidth_needs(
+ data, HIGH);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+ ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+ if (ret) {
+ pr_err("Failed to set bw.\n");
+ ret = -EINVAL;
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ }
+ /*
+ * On targets where crypto clock is handled by HLOS,
+ * if clk_access_cnt is zero and perf_enabled is false,
+ * then the crypto clock was not enabled before sending cmd to
+ * tz, qseecom will enable the clock to avoid service failure.
+ */
+ if (!qseecom.no_clock_support &&
+ !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+ pr_debug("ce clock is not enabled!\n");
+ ret = qseecom_perf_enable(data);
+ if (ret) {
+ pr_err("Failed to vote for clock with err %d\n",
+ ret);
+ mutex_unlock(&app_access_lock);
+ ret = -EINVAL;
+ break;
+ }
+ perf_enabled = true;
+ }
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_send_cmd(data, argp);
+ if (qseecom.support_bus_scaling)
+ __qseecom_add_bw_scale_down_timer(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ if (perf_enabled) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ }
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed qseecom_send_cmd: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+ case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ /* Only one client allowed here at a time */
+ mutex_lock(&app_access_lock);
+ if (qseecom.support_bus_scaling) {
+ if (!data->mode) {
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_register_bus_bandwidth_needs(
+ data, HIGH);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+ ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+ if (ret) {
+ pr_err("Failed to set bw.\n");
+ mutex_unlock(&app_access_lock);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ /*
+ * On targets where crypto clock is handled by HLOS,
+ * if clk_access_cnt is zero and perf_enabled is false,
+ * then the crypto clock was not enabled before sending cmd to
+ * tz, qseecom will enable the clock to avoid service failure.
+ */
+ if (!qseecom.no_clock_support &&
+ !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+ pr_debug("ce clock is not enabled!\n");
+ ret = qseecom_perf_enable(data);
+ if (ret) {
+ pr_err("Failed to vote for clock with err %d\n",
+ ret);
+ mutex_unlock(&app_access_lock);
+ ret = -EINVAL;
+ break;
+ }
+ perf_enabled = true;
+ }
+ atomic_inc(&data->ioctl_count);
+ if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
+ ret = qseecom_send_modfd_cmd(data, argp);
+ else
+ ret = qseecom_send_modfd_cmd_64(data, argp);
+ if (qseecom.support_bus_scaling)
+ __qseecom_add_bw_scale_down_timer(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ if (perf_enabled) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ }
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+			pr_err("failed qseecom_send_modfd_cmd: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
+ break;
+ }
+ case QSEECOM_IOCTL_RECEIVE_REQ: {
+ if ((data->listener.id == 0) ||
+ (data->type != QSEECOM_LISTENER_SERVICE)) {
+ pr_err("receive req: invalid handle (%d), lid(%d)\n",
+ data->type, data->listener.id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_receive_req(data);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ if (ret && (ret != -ERESTARTSYS))
+ pr_err("failed qseecom_receive_req: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_SEND_RESP_REQ: {
+ if ((data->listener.id == 0) ||
+ (data->type != QSEECOM_LISTENER_SERVICE)) {
+ pr_err("send resp req: invalid handle (%d), lid(%d)\n",
+ data->type, data->listener.id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ if (!qseecom.qsee_reentrancy_support)
+ ret = qseecom_send_resp();
+ else
+ ret = qseecom_reentrancy_send_resp(data);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ if (ret)
+ pr_err("failed qseecom_send_resp: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+ if ((data->type != QSEECOM_CLIENT_APP) &&
+ (data->type != QSEECOM_GENERIC) &&
+ (data->type != QSEECOM_SECURE_SERVICE)) {
+ pr_err("set mem param req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_set_client_mem_param(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+			pr_err("failed qseecom_set_mem_param request: %d\n",
+ ret);
+ break;
+ }
+ case QSEECOM_IOCTL_LOAD_APP_REQ: {
+ if ((data->type != QSEECOM_GENERIC) &&
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("load app req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->type = QSEECOM_CLIENT_APP;
+ pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_load_app(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed load_app request: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_unload_app(data, false);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed unload_app request: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_get_qseos_version(data, argp);
+ if (ret)
+ pr_err("qseecom_get_qseos_version: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+ case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+ if ((data->type != QSEECOM_GENERIC) &&
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("perf enable req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ if ((data->type == QSEECOM_CLIENT_APP) &&
+ (data->client.app_id == 0)) {
+ pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ __qseecom_register_bus_bandwidth_needs(data, HIGH);
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ ret = qseecom_perf_enable(data);
+ if (ret)
+ pr_err("Fail to vote for clocks %d\n", ret);
+ }
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+ case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+ if ((data->type != QSEECOM_SECURE_SERVICE) &&
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("perf disable req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ if ((data->type == QSEECOM_CLIENT_APP) &&
+ (data->client.app_id == 0)) {
+ pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ } else {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom_unregister_bus_bandwidth_needs(data);
+ mutex_unlock(&qsee_bw_mutex);
+ }
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+
+ case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+ /* If crypto clock is not handled by HLOS, return directly. */
+ if (qseecom.no_clock_support) {
+ pr_debug("crypto clock is not handled by HLOS\n");
+ break;
+ }
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_scale_bus_bandwidth(data, argp);
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+ case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("load ext elf req: invalid client handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_load_external_elf(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed load_external_elf request: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+ if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
+ pr_err("unload ext elf req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_unload_external_elf(data);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+			pr_err("failed unload_external_elf request: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+ data->type = QSEECOM_CLIENT_APP;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
+ ret = qseecom_query_app_loaded(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("send cmd svc req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->type = QSEECOM_SECURE_SERVICE;
+ if (qseecom.qsee_version < QSEE_VERSION_03) {
+ pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_send_service_cmd(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+ if (!(qseecom.support_pfe || qseecom.support_fde))
+ pr_err("Features requiring key init not supported\n");
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("create key req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_05) {
+ pr_err("Create Key feature unsupported: qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_create_key(data, argp);
+ if (ret)
+ pr_err("failed to create encryption key: %d\n", ret);
+
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+ if (!(qseecom.support_pfe || qseecom.support_fde))
+ pr_err("Features requiring key init not supported\n");
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("wipe key req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_05) {
+ pr_err("Wipe Key feature unsupported in qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_wipe_key(data, argp);
+ if (ret)
+ pr_err("failed to wipe encryption key: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+ if (!(qseecom.support_pfe || qseecom.support_fde))
+ pr_err("Features requiring key init not supported\n");
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("update key req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_05) {
+ pr_err("Update Key feature unsupported in qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_update_key_user_info(data, argp);
+ if (ret)
+ pr_err("failed to update key user info: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("save part hash req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_save_partition_hash(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("ES activated req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_is_es_activated(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+ if (data->type != QSEECOM_GENERIC) {
+ pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
+ data->released = true;
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_mdtp_cipher_dip(argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
+ break;
+ }
+ case QSEECOM_IOCTL_SEND_MODFD_RESP:
+ case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+ if ((data->listener.id == 0) ||
+ (data->type != QSEECOM_LISTENER_SERVICE)) {
+ pr_err("receive req: invalid handle (%d), lid(%d)\n",
+ data->type, data->listener.id);
+ ret = -EINVAL;
+ break;
+ }
+ atomic_inc(&data->ioctl_count);
+ if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
+ ret = qseecom_send_modfd_resp(data, argp);
+ else
+ ret = qseecom_send_modfd_resp_64(data, argp);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ if (ret)
+ pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
+ break;
+ }
+ case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("Open session: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ pr_err("GP feature unsupported: qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ /* Only one client allowed here at a time */
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_qteec_open_session(data, argp);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed open_session_cmd: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
+ break;
+ }
+ case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("Close session: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ pr_err("GP feature unsupported: qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ /* Only one client allowed here at a time */
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_qteec_close_session(data, argp);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed close_session_cmd: %d\n", ret);
+ break;
+ }
+ case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ pr_err("GP feature unsupported: qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ /* Only one client allowed here at a time */
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed Invoke cmd: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
+ break;
+ }
+ case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+ if ((data->client.app_id == 0) ||
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
+ data->type, data->client.app_id);
+ ret = -EINVAL;
+ break;
+ }
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ pr_err("GP feature unsupported: qsee ver %u\n",
+ qseecom.qsee_version);
+ return -EINVAL;
+ }
+ /* Only one client allowed here at a time */
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_qteec_request_cancellation(data, argp);
+ atomic_dec(&data->ioctl_count);
+ wake_up_all(&data->abort_wq);
+ mutex_unlock(&app_access_lock);
+ if (ret)
+ pr_err("failed request_cancellation: %d\n", ret);
+ break;
+ }
+ case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_get_ce_info(data, argp);
+ if (ret)
+			pr_err("failed get ce pipe info: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+ case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_free_ce_info(data, argp);
+ if (ret)
+			pr_err("failed free ce pipe info: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+ case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
+ atomic_inc(&data->ioctl_count);
+ ret = qseecom_query_ce_info(data, argp);
+ if (ret)
+			pr_err("failed query ce pipe info: %d\n", ret);
+ atomic_dec(&data->ioctl_count);
+ break;
+ }
+ default:
+ pr_err("Invalid IOCTL: 0x%x\n", cmd);
+ return -EINVAL;
+ }
+ return ret;
+}
+
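+/*
+ * Each open() allocates a qseecom_dev_handle that starts as
+ * QSEECOM_GENERIC; the first meaningful ioctl specializes it into a
+ * listener service, client app, or secure service, and release() tears it
+ * down according to that type.
+ */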
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+ struct qseecom_dev_handle *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ file->private_data = data;
+ data->abort = 0;
+ data->type = QSEECOM_GENERIC;
+ data->released = false;
+ memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+ data->mode = INACTIVE;
+ init_waitqueue_head(&data->abort_wq);
+ atomic_set(&data->ioctl_count, 0);
+ return ret;
+}
+
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+ struct qseecom_dev_handle *data = file->private_data;
+ int ret = 0;
+
+ if (data->released == false) {
+ pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
+ data->type, data->mode, data);
+ switch (data->type) {
+ case QSEECOM_LISTENER_SERVICE:
+ mutex_lock(&app_access_lock);
+ ret = qseecom_unregister_listener(data);
+ mutex_unlock(&app_access_lock);
+ break;
+ case QSEECOM_CLIENT_APP:
+ mutex_lock(&app_access_lock);
+ ret = qseecom_unload_app(data, true);
+ mutex_unlock(&app_access_lock);
+ break;
+ case QSEECOM_SECURE_SERVICE:
+ case QSEECOM_GENERIC:
+ ret = qseecom_unmap_ion_allocated_memory(data);
+ if (ret)
+ pr_err("Ion Unmap failed\n");
+ break;
+ case QSEECOM_UNAVAILABLE_CLIENT_APP:
+ break;
+ default:
+ pr_err("Unsupported clnt_handle_type %d",
+ data->type);
+ break;
+ }
+ }
+
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ if (data->mode != INACTIVE) {
+ qseecom_unregister_bus_bandwidth_needs(data);
+ if (qseecom.cumulative_mode == INACTIVE) {
+ ret = __qseecom_set_msm_bus_request(INACTIVE);
+ if (ret)
+ pr_err("Fail to scale down bus\n");
+ }
+ }
+ mutex_unlock(&qsee_bw_mutex);
+ } else {
+ if (data->fast_load_enabled == true)
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ if (data->perf_enabled == true)
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ }
+ kfree(data);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+#include "compat_qseecom.c"
+#else
+#define compat_qseecom_ioctl NULL
+#endif
+
+static const struct file_operations qseecom_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qseecom_ioctl,
+ .compat_ioctl = compat_qseecom_ioctl,
+ .open = qseecom_open,
+ .release = qseecom_release
+};
+
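+/*
+ * Acquire the named core-src/core/iface/bus clocks for the given CE
+ * instance (or leave them all NULL when no_clock_support is set). The
+ * core src clock is optional: a failed lookup only logs a warning, but
+ * when it is present a failure to set its rate to the configured CE OPP
+ * frequency is fatal. Clocks acquired before a failure are put again on
+ * the error paths.
+ */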
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
+{
+ int rc = 0;
+ struct device *pdev;
+ struct qseecom_clk *qclk;
+ char *core_clk_src = NULL;
+ char *core_clk = NULL;
+ char *iface_clk = NULL;
+ char *bus_clk = NULL;
+
+ switch (ce) {
+ case CLK_QSEE: {
+ core_clk_src = "core_clk_src";
+ core_clk = "core_clk";
+ iface_clk = "iface_clk";
+ bus_clk = "bus_clk";
+ qclk = &qseecom.qsee;
+ qclk->instance = CLK_QSEE;
+ break;
+	}
+ case CLK_CE_DRV: {
+ core_clk_src = "ce_drv_core_clk_src";
+ core_clk = "ce_drv_core_clk";
+ iface_clk = "ce_drv_iface_clk";
+ bus_clk = "ce_drv_bus_clk";
+ qclk = &qseecom.ce_drv;
+ qclk->instance = CLK_CE_DRV;
+ break;
+	}
+ default:
+ pr_err("Invalid ce hw instance: %d!\n", ce);
+ return -EIO;
+ }
+
+ if (qseecom.no_clock_support) {
+ qclk->ce_core_clk = NULL;
+ qclk->ce_clk = NULL;
+ qclk->ce_bus_clk = NULL;
+ qclk->ce_core_src_clk = NULL;
+ return 0;
+ }
+
+ pdev = qseecom.pdev;
+
+ /* Get CE3 src core clk. */
+ qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
+ if (!IS_ERR(qclk->ce_core_src_clk)) {
+ rc = clk_set_rate(qclk->ce_core_src_clk,
+ qseecom.ce_opp_freq_hz);
+ if (rc) {
+ clk_put(qclk->ce_core_src_clk);
+ qclk->ce_core_src_clk = NULL;
+			pr_err("Unable to set the core src clk @%uMHz.\n",
+ qseecom.ce_opp_freq_hz/CE_CLK_DIV);
+ return -EIO;
+ }
+ } else {
+ pr_warn("Unable to get CE core src clk, set to NULL\n");
+ qclk->ce_core_src_clk = NULL;
+ }
+
+ /* Get CE core clk */
+ qclk->ce_core_clk = clk_get(pdev, core_clk);
+ if (IS_ERR(qclk->ce_core_clk)) {
+ rc = PTR_ERR(qclk->ce_core_clk);
+ pr_err("Unable to get CE core clk\n");
+ if (qclk->ce_core_src_clk != NULL)
+ clk_put(qclk->ce_core_src_clk);
+ return -EIO;
+ }
+
+ /* Get CE Interface clk */
+ qclk->ce_clk = clk_get(pdev, iface_clk);
+ if (IS_ERR(qclk->ce_clk)) {
+ rc = PTR_ERR(qclk->ce_clk);
+ pr_err("Unable to get CE interface clk\n");
+ if (qclk->ce_core_src_clk != NULL)
+ clk_put(qclk->ce_core_src_clk);
+ clk_put(qclk->ce_core_clk);
+ return -EIO;
+ }
+
+ /* Get CE AXI clk */
+ qclk->ce_bus_clk = clk_get(pdev, bus_clk);
+ if (IS_ERR(qclk->ce_bus_clk)) {
+ rc = PTR_ERR(qclk->ce_bus_clk);
+ pr_err("Unable to get CE BUS interface clk\n");
+ if (qclk->ce_core_src_clk != NULL)
+ clk_put(qclk->ce_core_src_clk);
+ clk_put(qclk->ce_core_clk);
+ clk_put(qclk->ce_clk);
+ return -EIO;
+ }
+
+ return rc;
+}
+
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
+{
+ struct qseecom_clk *qclk;
+
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ if (qclk->ce_clk != NULL) {
+ clk_put(qclk->ce_clk);
+ qclk->ce_clk = NULL;
+ }
+ if (qclk->ce_core_clk != NULL) {
+ clk_put(qclk->ce_core_clk);
+ qclk->ce_core_clk = NULL;
+ }
+ if (qclk->ce_bus_clk != NULL) {
+ clk_put(qclk->ce_bus_clk);
+ qclk->ce_bus_clk = NULL;
+ }
+ if (qclk->ce_core_src_clk != NULL) {
+ clk_put(qclk->ce_core_src_clk);
+ qclk->ce_core_src_clk = NULL;
+ }
+ qclk->instance = CLK_INVALID;
+}
+
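+/*
+ * Parse the CE (crypto engine) pipe-pair configuration from device tree.
+ * Two schemes are handled: new-style tables ("qcom,full-disk-encrypt-info"
+ * and "qcom,per-file-encrypt-info"), read as arrays of
+ * struct qseecom_crypto_info and grouped into per-unit
+ * qseecom_ce_info_use records, and the legacy layout ("old db"), which
+ * supplies a single pipe pair per usage. Illustrative fragment, with
+ * hypothetical values and assuming <unit ce pipe-pair> field order:
+ *
+ *	qcom,support-fde;
+ *	qcom,full-disk-encrypt-info = <0 0 1>, <0 1 1>;
+ *
+ * would yield one FDE unit (unit 0) with two CE pipe entries.
+ */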
+static int qseecom_retrieve_ce_data(struct platform_device *pdev)
+{
+ int rc = 0;
+ uint32_t hlos_num_ce_hw_instances;
+ uint32_t disk_encrypt_pipe;
+ uint32_t file_encrypt_pipe;
+ uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT];
+ int i;
+ const int *tbl;
+ int size;
+ int entry;
+ struct qseecom_crypto_info *pfde_tbl = NULL;
+ struct qseecom_crypto_info *p;
+ int tbl_size;
+ int j;
+ bool old_db = true;
+ struct qseecom_ce_info_use *pce_info_use;
+ uint32_t *unit_tbl = NULL;
+ int total_units = 0;
+ struct qseecom_ce_pipe_entry *pce_entry;
+
+ qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
+ qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,qsee-ce-hw-instance",
+ &qseecom.ce_info.qsee_ce_hw_instance)) {
+ pr_err("Fail to get qsee ce hw instance information.\n");
+ rc = -EINVAL;
+ goto out;
+ } else {
+ pr_debug("qsee-ce-hw-instance=0x%x\n",
+ qseecom.ce_info.qsee_ce_hw_instance);
+ }
+
+ qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,support-fde");
+ qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,support-pfe");
+
+ if (!qseecom.support_pfe && !qseecom.support_fde) {
+ pr_warn("Device does not support PFE/FDE");
+ goto out;
+ }
+
+ if (qseecom.support_fde)
+ tbl = of_get_property((&pdev->dev)->of_node,
+ "qcom,full-disk-encrypt-info", &size);
+ else
+ tbl = NULL;
+ if (tbl) {
+ old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("invalid full-disk-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+ pfde_tbl = kzalloc(size, GFP_KERNEL);
+ unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+ total_units = 0;
+
+ if (!pfde_tbl || !unit_tbl) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (of_property_read_u32_array((&pdev->dev)->of_node,
+ "qcom,full-disk-encrypt-info",
+ (u32 *)pfde_tbl, size/sizeof(u32))) {
+ pr_err("failed to read full-disk-encrypt-info tbl\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+ for (j = 0; j < total_units; j++) {
+ if (p->unit_num == *(unit_tbl + j))
+ break;
+ }
+ if (j == total_units) {
+ *(unit_tbl + total_units) = p->unit_num;
+ total_units++;
+ }
+ }
+
+ qseecom.ce_info.num_fde = total_units;
+ pce_info_use = qseecom.ce_info.fde = kcalloc(
+ total_units, sizeof(struct qseecom_ce_info_use),
+ GFP_KERNEL);
+ if (!pce_info_use) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < total_units; j++, pce_info_use++) {
+ pce_info_use->unit_num = *(unit_tbl + j);
+ pce_info_use->alloc = false;
+ pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+ pce_info_use->num_ce_pipe_entries = 0;
+ pce_info_use->ce_pipe_entry = NULL;
+ for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+ if (p->unit_num == pce_info_use->unit_num)
+ pce_info_use->num_ce_pipe_entries++;
+ }
+
+ entry = pce_info_use->num_ce_pipe_entries;
+ pce_entry = pce_info_use->ce_pipe_entry =
+ kcalloc(entry,
+ sizeof(struct qseecom_ce_pipe_entry),
+ GFP_KERNEL);
+ if (pce_entry == NULL) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+ if (p->unit_num == pce_info_use->unit_num) {
+ pce_entry->ce_num = p->ce;
+ pce_entry->ce_pipe_pair =
+ p->pipe_pair;
+ pce_entry->valid = true;
+ pce_entry++;
+ }
+ }
+ }
+ kfree(unit_tbl);
+ unit_tbl = NULL;
+ kfree(pfde_tbl);
+ pfde_tbl = NULL;
+ }
+
+ if (qseecom.support_pfe)
+ tbl = of_get_property((&pdev->dev)->of_node,
+ "qcom,per-file-encrypt-info", &size);
+ else
+ tbl = NULL;
+ if (tbl) {
+ old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("invalid per-file-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+ pfde_tbl = kzalloc(size, GFP_KERNEL);
+ unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+ total_units = 0;
+ if (!pfde_tbl || !unit_tbl) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (of_property_read_u32_array((&pdev->dev)->of_node,
+ "qcom,per-file-encrypt-info",
+ (u32 *)pfde_tbl, size/sizeof(u32))) {
+ pr_err("failed to read per-file-encrypt-info tbl\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+ for (j = 0; j < total_units; j++) {
+ if (p->unit_num == *(unit_tbl + j))
+ break;
+ }
+ if (j == total_units) {
+ *(unit_tbl + total_units) = p->unit_num;
+ total_units++;
+ }
+ }
+
+ qseecom.ce_info.num_pfe = total_units;
+ pce_info_use = qseecom.ce_info.pfe = kcalloc(
+ total_units, sizeof(struct qseecom_ce_info_use),
+ GFP_KERNEL);
+ if (!pce_info_use) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < total_units; j++, pce_info_use++) {
+ pce_info_use->unit_num = *(unit_tbl + j);
+ pce_info_use->alloc = false;
+ pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+ pce_info_use->num_ce_pipe_entries = 0;
+ pce_info_use->ce_pipe_entry = NULL;
+ for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+ if (p->unit_num == pce_info_use->unit_num)
+ pce_info_use->num_ce_pipe_entries++;
+ }
+
+ entry = pce_info_use->num_ce_pipe_entries;
+ pce_entry = pce_info_use->ce_pipe_entry =
+ kcalloc(entry,
+ sizeof(struct qseecom_ce_pipe_entry),
+ GFP_KERNEL);
+ if (pce_entry == NULL) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+ if (p->unit_num == pce_info_use->unit_num) {
+ pce_entry->ce_num = p->ce;
+ pce_entry->ce_pipe_pair =
+ p->pipe_pair;
+ pce_entry->valid = true;
+ pce_entry++;
+ }
+ }
+ }
+ kfree(unit_tbl);
+ unit_tbl = NULL;
+ kfree(pfde_tbl);
+ pfde_tbl = NULL;
+ }
+
+ if (!old_db)
+ goto out1;
+
+ if (of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,support-multiple-ce-hw-instance")) {
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,hlos-num-ce-hw-instances",
+ &hlos_num_ce_hw_instances)) {
+ pr_err("Fail: get hlos number of ce hw instance\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ } else {
+ hlos_num_ce_hw_instances = 1;
+ }
+
+ if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
+ pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
+ MAX_CE_PIPE_PAIR_PER_UNIT);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array((&pdev->dev)->of_node,
+ "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
+ hlos_num_ce_hw_instances)) {
+ pr_err("Fail: get hlos ce hw instance info\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (qseecom.support_fde) {
+ pce_info_use = qseecom.ce_info.fde =
+ kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+ if (!pce_info_use) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* by default for old db */
+ qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
+ pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+ pce_info_use->alloc = false;
+ pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+ pce_info_use->ce_pipe_entry = NULL;
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,disk-encrypt-pipe-pair",
+ &disk_encrypt_pipe)) {
+ pr_err("Fail to get FDE pipe information.\n");
+ rc = -EINVAL;
+ goto out;
+ } else {
+ pr_debug("disk-encrypt-pipe-pair=0x%x",
+ disk_encrypt_pipe);
+ }
+ entry = pce_info_use->num_ce_pipe_entries =
+ hlos_num_ce_hw_instances;
+ pce_entry = pce_info_use->ce_pipe_entry =
+ kcalloc(entry,
+ sizeof(struct qseecom_ce_pipe_entry),
+ GFP_KERNEL);
+ if (pce_entry == NULL) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < entry; i++) {
+ pce_entry->ce_num = hlos_ce_hw_instance[i];
+ pce_entry->ce_pipe_pair = disk_encrypt_pipe;
+ pce_entry->valid = 1;
+ pce_entry++;
+ }
+ } else {
+ pr_warn("Device does not support FDE");
+ disk_encrypt_pipe = 0xff;
+ }
+ if (qseecom.support_pfe) {
+ pce_info_use = qseecom.ce_info.pfe =
+ kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+ if (!pce_info_use) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* by default for old db */
+ qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
+ pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+ pce_info_use->alloc = false;
+ pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+ pce_info_use->ce_pipe_entry = NULL;
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,file-encrypt-pipe-pair",
+ &file_encrypt_pipe)) {
+ pr_err("Fail to get PFE pipe information.\n");
+ rc = -EINVAL;
+ goto out;
+ } else {
+ pr_debug("file-encrypt-pipe-pair=0x%x",
+ file_encrypt_pipe);
+ }
+ entry = pce_info_use->num_ce_pipe_entries =
+ hlos_num_ce_hw_instances;
+ pce_entry = pce_info_use->ce_pipe_entry =
+ kcalloc(entry,
+ sizeof(struct qseecom_ce_pipe_entry),
+ GFP_KERNEL);
+ if (pce_entry == NULL) {
+ pr_err("failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < entry; i++) {
+ pce_entry->ce_num = hlos_ce_hw_instance[i];
+ pce_entry->ce_pipe_pair = file_encrypt_pipe;
+ pce_entry->valid = 1;
+ pce_entry++;
+ }
+ } else {
+ pr_warn("Device does not support PFE");
+ file_encrypt_pipe = 0xff;
+ }
+
+out1:
+ qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+ qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
+out:
+ if (rc) {
+ if (qseecom.ce_info.fde) {
+ pce_info_use = qseecom.ce_info.fde;
+ for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+ pce_entry = pce_info_use->ce_pipe_entry;
+ kfree(pce_entry);
+ pce_info_use++;
+ }
+ }
+ kfree(qseecom.ce_info.fde);
+ qseecom.ce_info.fde = NULL;
+ if (qseecom.ce_info.pfe) {
+ pce_info_use = qseecom.ce_info.pfe;
+ for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+ pce_entry = pce_info_use->ce_pipe_entry;
+ kfree(pce_entry);
+ pce_info_use++;
+ }
+ }
+ kfree(qseecom.ce_info.pfe);
+ qseecom.ce_info.pfe = NULL;
+ }
+ kfree(unit_tbl);
+ kfree(pfde_tbl);
+ return rc;
+}
+
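+/*
+ * Allocate-or-look-up semantics: if the caller's handle already owns a
+ * unit for the requested usage it is reused; otherwise an unallocated
+ * unit is claimed and tagged with the handle. Up to
+ * MAX_CE_PIPE_PAIR_PER_UNIT pipe entries are copied back to userspace.
+ */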
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_ce_info_req req;
+ struct qseecom_ce_info_req *pinfo = &req;
+ int ret = 0;
+ int i;
+ unsigned int entries;
+ struct qseecom_ce_info_use *pce_info_use, *p;
+ int total = 0;
+ bool found = false;
+ struct qseecom_ce_pipe_entry *pce_entry;
+
+ ret = copy_from_user(pinfo, argp,
+ sizeof(struct qseecom_ce_info_req));
+ if (ret) {
+ pr_err("copy_from_user failed\n");
+ return ret;
+ }
+
+ switch (pinfo->usage) {
+ case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+ if (qseecom.support_fde) {
+ p = qseecom.ce_info.fde;
+ total = qseecom.ce_info.num_fde;
+ } else {
+ pr_err("system does not support fde\n");
+ return -EINVAL;
+ }
+ break;
+ case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+ if (qseecom.support_pfe) {
+ p = qseecom.ce_info.pfe;
+ total = qseecom.ce_info.num_pfe;
+ } else {
+ pr_err("system does not support pfe\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("unsupported usage %d\n", pinfo->usage);
+ return -EINVAL;
+ }
+
+ pce_info_use = NULL;
+ for (i = 0; i < total; i++) {
+ if (!p->alloc)
+ pce_info_use = p;
+ else if (!memcmp(p->handle, pinfo->handle,
+ MAX_CE_INFO_HANDLE_SIZE)) {
+ pce_info_use = p;
+ found = true;
+ break;
+ }
+ p++;
+ }
+
+ if (pce_info_use == NULL)
+ return -EBUSY;
+
+ pinfo->unit_num = pce_info_use->unit_num;
+ if (!pce_info_use->alloc) {
+ pce_info_use->alloc = true;
+ memcpy(pce_info_use->handle,
+ pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
+ }
+ if (pce_info_use->num_ce_pipe_entries >
+ MAX_CE_PIPE_PAIR_PER_UNIT)
+ entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+ else
+ entries = pce_info_use->num_ce_pipe_entries;
+ pinfo->num_ce_pipe_entries = entries;
+ pce_entry = pce_info_use->ce_pipe_entry;
+ for (i = 0; i < entries; i++, pce_entry++)
+ pinfo->ce_pipe_entry[i] = *pce_entry;
+ for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+ pinfo->ce_pipe_entry[i].valid = 0;
+
+ if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+ pr_err("copy_to_user failed\n");
+ ret = -EFAULT;
+ }
+ return ret;
+}
+
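+/*
+ * Release a unit previously claimed through qseecom_get_ce_info(): the
+ * matching handle is cleared and the unit returns to the unallocated pool.
+ */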
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_ce_info_req req;
+ struct qseecom_ce_info_req *pinfo = &req;
+ int ret = 0;
+ struct qseecom_ce_info_use *p;
+ int total = 0;
+ int i;
+ bool found = false;
+
+ ret = copy_from_user(pinfo, argp,
+ sizeof(struct qseecom_ce_info_req));
+ if (ret)
+ return ret;
+
+ switch (pinfo->usage) {
+ case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+ if (qseecom.support_fde) {
+ p = qseecom.ce_info.fde;
+ total = qseecom.ce_info.num_fde;
+ } else {
+ pr_err("system does not support fde\n");
+ return -EINVAL;
+ }
+ break;
+ case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+ if (qseecom.support_pfe) {
+ p = qseecom.ce_info.pfe;
+ total = qseecom.ce_info.num_pfe;
+ } else {
+ pr_err("system does not support pfe\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("unsupported usage %d\n", pinfo->usage);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total; i++) {
+ if (p->alloc &&
+ !memcmp(p->handle, pinfo->handle,
+ MAX_CE_INFO_HANDLE_SIZE)) {
+ memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
+ p->alloc = false;
+ found = true;
+ break;
+ }
+ p++;
+ }
+ return ret;
+}
+
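+/*
+ * Read-only query: reports the unit number and pipe entries only when the
+ * caller's handle already owns a unit; otherwise INVALID_CE_INFO_UNIT_NUM
+ * and zero pipe entries are returned.
+ */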
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+ void __user *argp)
+{
+ struct qseecom_ce_info_req req;
+ struct qseecom_ce_info_req *pinfo = &req;
+ int ret = 0;
+ int i;
+ unsigned int entries;
+ struct qseecom_ce_info_use *pce_info_use, *p;
+ int total = 0;
+ bool found = false;
+ struct qseecom_ce_pipe_entry *pce_entry;
+
+ ret = copy_from_user(pinfo, argp,
+ sizeof(struct qseecom_ce_info_req));
+ if (ret)
+ return ret;
+
+ switch (pinfo->usage) {
+ case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+ case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+ if (qseecom.support_fde) {
+ p = qseecom.ce_info.fde;
+ total = qseecom.ce_info.num_fde;
+ } else {
+ pr_err("system does not support fde\n");
+ return -EINVAL;
+ }
+ break;
+ case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+ if (qseecom.support_pfe) {
+ p = qseecom.ce_info.pfe;
+ total = qseecom.ce_info.num_pfe;
+ } else {
+ pr_err("system does not support pfe\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("unsupported usage %d\n", pinfo->usage);
+ return -EINVAL;
+ }
+
+ pce_info_use = NULL;
+ pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
+ pinfo->num_ce_pipe_entries = 0;
+ for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+ pinfo->ce_pipe_entry[i].valid = 0;
+
+ for (i = 0; i < total; i++) {
+ if (p->alloc && !memcmp(p->handle,
+ pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
+ pce_info_use = p;
+ found = true;
+ break;
+ }
+ p++;
+ }
+ if (!pce_info_use)
+ goto out;
+ pinfo->unit_num = pce_info_use->unit_num;
+ if (pce_info_use->num_ce_pipe_entries >
+ MAX_CE_PIPE_PAIR_PER_UNIT)
+ entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+ else
+ entries = pce_info_use->num_ce_pipe_entries;
+ pinfo->num_ce_pipe_entries = entries;
+ pce_entry = pce_info_use->ce_pipe_entry;
+ for (i = 0; i < entries; i++, pce_entry++)
+ pinfo->ce_pipe_entry[i] = *pce_entry;
+ for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+ pinfo->ce_pipe_entry[i].valid = 0;
+out:
+ if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+ pr_err("copy_to_user failed\n");
+ ret = -EFAULT;
+ }
+ return ret;
+}
+
+/*
+ * Check whitelist feature, and if TZ feature version is < 1.0.0,
+ * then whitelist feature is not supported.
+ */
+static int qseecom_check_whitelist_feature(void)
+{
+ int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
+
+ return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+}
+
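+/*
+ * Probe sequence: char device region, class and device creation, cdev
+ * registration, list/waitqueue setup, an SCM call to read the QSEE
+ * version, ION client creation, device-tree parsing (CE info, bus
+ * scaling, clock and reentrancy flags), clock init, optional secure-app
+ * region notification to TZ, and bus-scaling registration before the
+ * state is marked QSEECOM_STATE_READY. Error paths unwind in reverse via
+ * the exit_* labels.
+ */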
+static int qseecom_probe(struct platform_device *pdev)
+{
+ int rc;
+ int i;
+ uint32_t feature = 10;
+ struct device *class_dev;
+ struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
+ struct qseecom_command_scm_resp resp;
+ struct qseecom_ce_info_use *pce_info_use = NULL;
+
+ qseecom.qsee_bw_count = 0;
+ qseecom.qsee_perf_client = 0;
+ qseecom.qsee_sfpb_bw_count = 0;
+
+ qseecom.qsee.ce_core_clk = NULL;
+ qseecom.qsee.ce_clk = NULL;
+ qseecom.qsee.ce_core_src_clk = NULL;
+ qseecom.qsee.ce_bus_clk = NULL;
+
+ qseecom.cumulative_mode = 0;
+ qseecom.current_mode = INACTIVE;
+ qseecom.support_bus_scaling = false;
+ qseecom.support_fde = false;
+ qseecom.support_pfe = false;
+
+ qseecom.ce_drv.ce_core_clk = NULL;
+ qseecom.ce_drv.ce_clk = NULL;
+ qseecom.ce_drv.ce_core_src_clk = NULL;
+ qseecom.ce_drv.ce_bus_clk = NULL;
+ atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+
+ qseecom.app_block_ref_cnt = 0;
+ init_waitqueue_head(&qseecom.app_block_wq);
+ qseecom.whitelist_support = true;
+
+ rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
+ if (rc < 0) {
+ pr_err("alloc_chrdev_region failed %d\n", rc);
+ return rc;
+ }
+
+ driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+ if (IS_ERR(driver_class)) {
+ rc = -ENOMEM;
+ pr_err("class_create failed %d\n", rc);
+ goto exit_unreg_chrdev_region;
+ }
+
+ class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
+ QSEECOM_DEV);
+ if (IS_ERR(class_dev)) {
+		rc = -ENOMEM;
+		pr_err("class_device_create failed %d\n", rc);
+ goto exit_destroy_class;
+ }
+
+ cdev_init(&qseecom.cdev, &qseecom_fops);
+ qseecom.cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+ if (rc < 0) {
+ pr_err("cdev_add failed %d\n", rc);
+ goto exit_destroy_device;
+ }
+
+ INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+ spin_lock_init(&qseecom.registered_listener_list_lock);
+ INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+ spin_lock_init(&qseecom.registered_app_list_lock);
+ INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
+ spin_lock_init(&qseecom.registered_kclient_list_lock);
+ init_waitqueue_head(&qseecom.send_resp_wq);
+ qseecom.send_resp_flag = 0;
+
+ qseecom.qsee_version = QSEEE_VERSION_00;
+ rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
+ &resp, sizeof(resp));
+	if (rc) {
+		pr_err("Failed to get QSEE version info %d\n", rc);
+		goto exit_del_cdev;
+	}
+	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
+	qseecom.qsee_version = resp.result;
+ qseecom.qseos_version = QSEOS_VERSION_14;
+ qseecom.commonlib_loaded = false;
+ qseecom.commonlib64_loaded = false;
+ qseecom.pdev = class_dev;
+ /* Create ION msm client */
+ qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
+ if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
+ pr_err("Ion client cannot be created\n");
+ rc = -ENOMEM;
+ goto exit_del_cdev;
+ }
+
+ /* register client for bus scaling */
+ if (pdev->dev.of_node) {
+ qseecom.pdev->of_node = pdev->dev.of_node;
+ qseecom.support_bus_scaling =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,support-bus-scaling");
+ rc = qseecom_retrieve_ce_data(pdev);
+ if (rc)
+ goto exit_destroy_ion_client;
+ qseecom.appsbl_qseecom_support =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,appsbl-qseecom-support");
+ pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
+ qseecom.appsbl_qseecom_support);
+
+ qseecom.commonlib64_loaded =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,commonlib64-loaded-by-uefi");
+ pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
+ qseecom.commonlib64_loaded);
+ qseecom.fde_key_size =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,fde-key-size");
+ qseecom.no_clock_support =
+ of_property_read_bool((&pdev->dev)->of_node,
+ "qcom,no-clock-support");
+		if (qseecom.no_clock_support)
+			pr_info("qseecom clocks handled by other subsystem\n");
+		else
+			pr_info("qseecom clocks handled by HLOS\n");
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,qsee-reentrancy-support",
+ &qseecom.qsee_reentrancy_support)) {
+ pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
+ qseecom.qsee_reentrancy_support = 0;
+ } else {
+ pr_warn("qseecom.qsee_reentrancy_support = %d\n",
+ qseecom.qsee_reentrancy_support);
+ }
+
+		/*
+		 * The qseecom bus scaling flag cannot be enabled when the
+		 * crypto clock is not handled by HLOS.
+		 */
+		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
+			pr_err("support_bus_scaling flag cannot be enabled.\n");
+ rc = -EINVAL;
+ goto exit_destroy_ion_client;
+ }
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,ce-opp-freq",
+ &qseecom.ce_opp_freq_hz)) {
+ pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
+ qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
+ }
+ rc = __qseecom_init_clk(CLK_QSEE);
+ if (rc)
+ goto exit_destroy_ion_client;
+
+ if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+ (qseecom.support_pfe || qseecom.support_fde)) {
+ rc = __qseecom_init_clk(CLK_CE_DRV);
+ if (rc) {
+ __qseecom_deinit_clk(CLK_QSEE);
+ goto exit_destroy_ion_client;
+ }
+ } else {
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+ qseecom.ce_drv.ce_clk = qclk->ce_clk;
+ qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+ qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+ }
+
+ qseecom_platform_support = (struct msm_bus_scale_pdata *)
+ msm_bus_cl_get_pdata(pdev);
+ if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
+ (!qseecom.is_apps_region_protected &&
+ !qseecom.appsbl_qseecom_support)) {
+ struct resource *resource = NULL;
+ struct qsee_apps_region_info_ireq req;
+ struct qsee_apps_region_info_64bit_ireq req_64bit;
+ struct qseecom_command_scm_resp resp;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+
+ resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "secapp-region");
+ if (resource) {
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req.qsee_cmd_id =
+ QSEOS_APP_REGION_NOTIFICATION;
+ req.addr = (uint32_t)resource->start;
+ req.size = resource_size(resource);
+ cmd_buf = (void *)&req;
+ cmd_len = sizeof(struct
+ qsee_apps_region_info_ireq);
+ pr_warn("secure app region addr=0x%x size=0x%x",
+ req.addr, req.size);
+ } else {
+ req_64bit.qsee_cmd_id =
+ QSEOS_APP_REGION_NOTIFICATION;
+ req_64bit.addr = resource->start;
+ req_64bit.size = resource_size(
+ resource);
+ cmd_buf = (void *)&req_64bit;
+ cmd_len = sizeof(struct
+ qsee_apps_region_info_64bit_ireq);
+ pr_warn("secure app region addr=0x%llx size=0x%x",
+ req_64bit.addr, req_64bit.size);
+ }
+ } else {
+ pr_err("Fail to get secure app region info\n");
+ rc = -EINVAL;
+ goto exit_deinit_clock;
+ }
+ rc = __qseecom_enable_clk(CLK_QSEE);
+ if (rc) {
+ pr_err("CLK_QSEE enabling failed (%d)\n", rc);
+ rc = -EIO;
+ goto exit_deinit_clock;
+ }
+ rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ cmd_buf, cmd_len,
+ &resp, sizeof(resp));
+ __qseecom_disable_clk(CLK_QSEE);
+ if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
+ pr_err("send secapp reg fail %d resp.res %d\n",
+ rc, resp.result);
+ rc = -EINVAL;
+ goto exit_deinit_clock;
+ }
+ }
+	/*
+	 * By default, appsbl only loads cmnlib. If an OEM changes appsbl to
+	 * also load cmnlib64 while the cmnlib64 image is not present in
+	 * non_hlos.bin, set "qseecom.commonlib64_loaded = true" here as well.
+	 */
+ if (qseecom.is_apps_region_protected ||
+ qseecom.appsbl_qseecom_support)
+ qseecom.commonlib_loaded = true;
+ } else {
+ qseecom_platform_support = (struct msm_bus_scale_pdata *)
+ pdev->dev.platform_data;
+ }
+ if (qseecom.support_bus_scaling) {
+ init_timer(&(qseecom.bw_scale_down_timer));
+ INIT_WORK(&qseecom.bw_inactive_req_ws,
+ qseecom_bw_inactive_req_work);
+ qseecom.bw_scale_down_timer.function =
+ qseecom_scale_bus_bandwidth_timer_callback;
+ }
+ qseecom.timer_running = false;
+ qseecom.qsee_perf_client = msm_bus_scale_register_client(
+ qseecom_platform_support);
+
+ qseecom.whitelist_support = qseecom_check_whitelist_feature();
+ pr_warn("qseecom.whitelist_support = %d\n",
+ qseecom.whitelist_support);
+
+ if (!qseecom.qsee_perf_client)
+ pr_err("Unable to register bus client\n");
+
+ atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+ return 0;
+
+exit_deinit_clock:
+ __qseecom_deinit_clk(CLK_QSEE);
+ if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+ (qseecom.support_pfe || qseecom.support_fde))
+ __qseecom_deinit_clk(CLK_CE_DRV);
+exit_destroy_ion_client:
+ if (qseecom.ce_info.fde) {
+ pce_info_use = qseecom.ce_info.fde;
+ for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+ kzfree(pce_info_use->ce_pipe_entry);
+ pce_info_use++;
+ }
+ kfree(qseecom.ce_info.fde);
+ }
+ if (qseecom.ce_info.pfe) {
+ pce_info_use = qseecom.ce_info.pfe;
+ for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+ kzfree(pce_info_use->ce_pipe_entry);
+ pce_info_use++;
+ }
+ kfree(qseecom.ce_info.pfe);
+ }
+ ion_client_destroy(qseecom.ion_clnt);
+exit_del_cdev:
+ cdev_del(&qseecom.cdev);
+exit_destroy_device:
+ device_destroy(driver_class, qseecom_device_no);
+exit_destroy_class:
+ class_destroy(driver_class);
+exit_unreg_chrdev_region:
+ unregister_chrdev_region(qseecom_device_no, 1);
+ return rc;
+}
+
+static int qseecom_remove(struct platform_device *pdev)
+{
+ struct qseecom_registered_kclient_list *kclient = NULL;
+ unsigned long flags = 0;
+ int ret = 0;
+ int i;
+ struct qseecom_ce_pipe_entry *pce_entry;
+ struct qseecom_ce_info_use *pce_info_use;
+
+ atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+ spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+
+ list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+ list) {
+ if (!kclient)
+ goto exit_irqrestore;
+
+		/* Bail out and free the kclient if its handle is NULL */
+ if (!kclient->handle)
+ goto exit_free_kclient;
+
+ if (list_empty(&kclient->list))
+ goto exit_free_kc_handle;
+
+ list_del(&kclient->list);
+ mutex_lock(&app_access_lock);
+ ret = qseecom_unload_app(kclient->handle->dev, false);
+ mutex_unlock(&app_access_lock);
+ if (!ret) {
+ kzfree(kclient->handle->dev);
+ kzfree(kclient->handle);
+ kzfree(kclient);
+ }
+ }
+
+exit_free_kc_handle:
+ kzfree(kclient->handle);
+exit_free_kclient:
+ kzfree(kclient);
+exit_irqrestore:
+ spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+ if (qseecom.qseos_version > QSEEE_VERSION_00)
+ qseecom_unload_commonlib_image();
+
+ if (qseecom.qsee_perf_client)
+ msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
+ 0);
+ if (pdev->dev.platform_data != NULL)
+ msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+
+ if (qseecom.support_bus_scaling) {
+ cancel_work_sync(&qseecom.bw_inactive_req_ws);
+ del_timer_sync(&qseecom.bw_scale_down_timer);
+ }
+
+ if (qseecom.ce_info.fde) {
+ pce_info_use = qseecom.ce_info.fde;
+ for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+ pce_entry = pce_info_use->ce_pipe_entry;
+ kfree(pce_entry);
+ pce_info_use++;
+ }
+ }
+ kfree(qseecom.ce_info.fde);
+ if (qseecom.ce_info.pfe) {
+ pce_info_use = qseecom.ce_info.pfe;
+ for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+ pce_entry = pce_info_use->ce_pipe_entry;
+ kfree(pce_entry);
+ pce_info_use++;
+ }
+ }
+ kfree(qseecom.ce_info.pfe);
+
+	/* deinitialize clocks on DT-based targets */
+ if (pdev->dev.of_node) {
+ __qseecom_deinit_clk(CLK_QSEE);
+ if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+ (qseecom.support_pfe || qseecom.support_fde))
+ __qseecom_deinit_clk(CLK_CE_DRV);
+ }
+
+ ion_client_destroy(qseecom.ion_clnt);
+
+ cdev_del(&qseecom.cdev);
+
+ device_destroy(driver_class, qseecom_device_no);
+
+ class_destroy(driver_class);
+
+ unregister_chrdev_region(qseecom_device_no, 1);
+
+ return ret;
+}
+
+static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret = 0;
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
+ if (qseecom.no_clock_support)
+ return 0;
+
+ mutex_lock(&qsee_bw_mutex);
+ mutex_lock(&clk_access_lock);
+
+ if (qseecom.current_mode != INACTIVE) {
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, INACTIVE);
+ if (ret)
+			pr_err("Failed to scale down bus\n");
+ else
+ qseecom.current_mode = INACTIVE;
+ }
+
+ if (qclk->clk_access_cnt) {
+ if (qclk->ce_clk != NULL)
+ clk_disable_unprepare(qclk->ce_clk);
+ if (qclk->ce_core_clk != NULL)
+ clk_disable_unprepare(qclk->ce_core_clk);
+ if (qclk->ce_bus_clk != NULL)
+ clk_disable_unprepare(qclk->ce_bus_clk);
+ }
+
+ del_timer_sync(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = false;
+
+ mutex_unlock(&clk_access_lock);
+ mutex_unlock(&qsee_bw_mutex);
+ cancel_work_sync(&qseecom.bw_inactive_req_ws);
+
+ return 0;
+}
+
+static int qseecom_resume(struct platform_device *pdev)
+{
+ int mode = 0;
+ int ret = 0;
+ struct qseecom_clk *qclk;
+
+ qclk = &qseecom.qsee;
+ if (qseecom.no_clock_support)
+ goto exit;
+
+ mutex_lock(&qsee_bw_mutex);
+ mutex_lock(&clk_access_lock);
+ if (qseecom.cumulative_mode >= HIGH)
+ mode = HIGH;
+ else
+ mode = qseecom.cumulative_mode;
+
+ if (qseecom.cumulative_mode != INACTIVE) {
+ ret = msm_bus_scale_client_update_request(
+ qseecom.qsee_perf_client, mode);
+ if (ret)
+			pr_err("Failed to scale up bus to %d\n", mode);
+ else
+ qseecom.current_mode = mode;
+ }
+
+ if (qclk->clk_access_cnt) {
+ if (qclk->ce_core_clk != NULL) {
+ ret = clk_prepare_enable(qclk->ce_core_clk);
+ if (ret) {
+ pr_err("Unable to enable/prep CE core clk\n");
+ qclk->clk_access_cnt = 0;
+ goto err;
+ }
+ }
+ if (qclk->ce_clk != NULL) {
+ ret = clk_prepare_enable(qclk->ce_clk);
+ if (ret) {
+ pr_err("Unable to enable/prep CE iface clk\n");
+ qclk->clk_access_cnt = 0;
+ goto ce_clk_err;
+ }
+ }
+ if (qclk->ce_bus_clk != NULL) {
+ ret = clk_prepare_enable(qclk->ce_bus_clk);
+ if (ret) {
+ pr_err("Unable to enable/prep CE bus clk\n");
+ qclk->clk_access_cnt = 0;
+ goto ce_bus_clk_err;
+ }
+ }
+ }
+
+ if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ mod_timer(&(qseecom.bw_scale_down_timer),
+ qseecom.bw_scale_down_timer.expires);
+ qseecom.timer_running = true;
+ }
+
+ mutex_unlock(&clk_access_lock);
+ mutex_unlock(&qsee_bw_mutex);
+ goto exit;
+
+ce_bus_clk_err:
+ if (qclk->ce_clk)
+ clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+ if (qclk->ce_core_clk)
+ clk_disable_unprepare(qclk->ce_core_clk);
+err:
+ mutex_unlock(&clk_access_lock);
+ mutex_unlock(&qsee_bw_mutex);
+ ret = -EIO;
+exit:
+ atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+ return ret;
+}
+
+static const struct of_device_id qseecom_match[] = {
+ {
+ .compatible = "qcom,qseecom",
+ },
+ {}
+};
+
+static struct platform_driver qseecom_plat_driver = {
+ .probe = qseecom_probe,
+ .remove = qseecom_remove,
+ .suspend = qseecom_suspend,
+ .resume = qseecom_resume,
+ .driver = {
+ .name = "qseecom",
+ .owner = THIS_MODULE,
+ .of_match_table = qseecom_match,
+ },
+};
+
+static int qseecom_init(void)
+{
+ return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void qseecom_exit(void)
+{
+ platform_driver_unregister(&qseecom_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
new file mode 100644
index 0000000..5ca5839
--- /dev/null
+++ b/drivers/misc/qseecom_kernel.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOM_KERNEL_H_
+#define __QSEECOM_KERNEL_H_
+
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+
+#define QSEECOM_ALIGN_SIZE 0x40
+#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x) \
+ ((x + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
+
+/*
+ * struct qseecom_handle -
+ *	Handle to the qseecom device for kernel clients
+ * @dev  - opaque device handle used internally by the qseecom driver
+ * @sbuf - shared buffer pointer
+ * @sbuf_len - shared buffer size
+ */
+struct qseecom_handle {
+ void *dev; /* in/out */
+ unsigned char *sbuf; /* in/out */
+ uint32_t sbuf_len; /* in/out */
+};
+
+int qseecom_start_app(struct qseecom_handle **handle,
+ char *app_name, uint32_t size);
+int qseecom_shutdown_app(struct qseecom_handle **handle);
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+ uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc);
+
+#endif /* __QSEECOM_KERNEL_H_ */
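
For context, a minimal sketch of how a kernel client might drive this API. The
TA name "sampleapp", the include path, and the 16-byte message sizes are
illustrative only; the actual request/response layout is whatever the loaded
trusted application defines. QSEECOM_ALIGN() rounds each length up to the
0x40-byte boundary the macro above encodes (e.g. QSEECOM_ALIGN(16) == 0x40).

	#include "qseecom_kernel.h"	/* illustrative include path */

	static int sample_ta_ping(void)
	{
		struct qseecom_handle *handle = NULL;
		uint32_t req_len = QSEECOM_ALIGN(16);
		uint32_t rsp_len = QSEECOM_ALIGN(16);
		void *req, *rsp;
		int rc;

		/* Loads the TA and allocates the shared buffer (sbuf). */
		rc = qseecom_start_app(&handle, "sampleapp",
				       req_len + rsp_len);
		if (rc)
			return rc;

		/* Request and response both live inside handle->sbuf. */
		req = handle->sbuf;
		rsp = handle->sbuf + req_len;

		rc = qseecom_send_command(handle, req, req_len,
					  rsp, rsp_len);

		qseecom_shutdown_app(&handle);
		return rc;
	}
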
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 33fc2b9..127a052 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -96,9 +96,11 @@
{
struct uid_entry *uid_entry;
struct task_struct *task, *temp;
+ struct user_namespace *user_ns = current_user_ns();
cputime_t utime;
cputime_t stime;
unsigned long bkt;
+ uid_t uid;
rt_mutex_lock(&uid_lock);
@@ -109,14 +111,13 @@
read_lock(&tasklist_lock);
do_each_thread(temp, task) {
- uid_entry = find_or_register_uid(from_kuid_munged(
- current_user_ns(), task_uid(task)));
+ uid = from_kuid_munged(user_ns, task_uid(task));
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
read_unlock(&tasklist_lock);
rt_mutex_unlock(&uid_lock);
pr_err("%s: failed to find the uid_entry for uid %d\n",
- __func__, from_kuid_munged(current_user_ns(),
- task_uid(task)));
+ __func__, uid);
return -ENOMEM;
}
task_cputime_adjusted(task, &utime, &stime);
@@ -238,28 +239,28 @@
io_last->fsync -= task->ioac.syscfs;
}
-static void update_io_stats_locked(void)
+static void update_io_stats_all_locked(void)
{
struct uid_entry *uid_entry;
struct task_struct *task, *temp;
struct io_stats *io_bucket, *io_curr, *io_last;
+ struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
-
- BUG_ON(!rt_mutex_is_locked(&uid_lock));
+ uid_t uid;
hash_for_each(hash_table, bkt, uid_entry, hash)
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
- read_lock(&tasklist_lock);
+ rcu_read_lock();
do_each_thread(temp, task) {
- uid_entry = find_or_register_uid(from_kuid_munged(
- current_user_ns(), task_uid(task)));
+ uid = from_kuid_munged(user_ns, task_uid(task));
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry)
continue;
add_uid_io_curr_stats(uid_entry, task);
} while_each_thread(temp, task);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
io_bucket = &uid_entry->io[uid_entry->state];
@@ -282,6 +283,47 @@
}
}
+static void update_io_stats_uid_locked(uid_t target_uid)
+{
+ struct uid_entry *uid_entry;
+ struct task_struct *task, *temp;
+ struct io_stats *io_bucket, *io_curr, *io_last;
+ struct user_namespace *user_ns = current_user_ns();
+
+ uid_entry = find_or_register_uid(target_uid);
+ if (!uid_entry)
+ return;
+
+ memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+ sizeof(struct io_stats));
+
+ rcu_read_lock();
+ do_each_thread(temp, task) {
+ if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+ continue;
+ add_uid_io_curr_stats(uid_entry, task);
+ } while_each_thread(temp, task);
+ rcu_read_unlock();
+
+ io_bucket = &uid_entry->io[uid_entry->state];
+ io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+ io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+
+ io_bucket->read_bytes +=
+ io_curr->read_bytes - io_last->read_bytes;
+ io_bucket->write_bytes +=
+ io_curr->write_bytes - io_last->write_bytes;
+ io_bucket->rchar += io_curr->rchar - io_last->rchar;
+ io_bucket->wchar += io_curr->wchar - io_last->wchar;
+ io_bucket->fsync += io_curr->fsync - io_last->fsync;
+
+ io_last->read_bytes = io_curr->read_bytes;
+ io_last->write_bytes = io_curr->write_bytes;
+ io_last->rchar = io_curr->rchar;
+ io_last->wchar = io_curr->wchar;
+ io_last->fsync = io_curr->fsync;
+}
+
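
The accounting above follows a snapshot/delta pattern: the UID_STATE_TOTAL_CURR
slot is rebuilt from live tasks, its difference against UID_STATE_TOTAL_LAST is
folded into the bucket for the UID's current state, and the LAST snapshot is
then advanced. A standalone sketch of that invariant, reduced to one field (the
struct here is a simplified stand-in, not the driver's io_stats):

	#include <stdio.h>

	/* Simplified stand-in for one field of the driver's io_stats. */
	struct io_snap { unsigned long long read_bytes; };

	static void fold_delta(struct io_snap *bucket, struct io_snap *curr,
			       struct io_snap *last)
	{
		/* Accumulate only what happened since the last snapshot... */
		bucket->read_bytes += curr->read_bytes - last->read_bytes;
		/* ...then advance the snapshot for the next poll. */
		last->read_bytes = curr->read_bytes;
	}

	int main(void)
	{
		struct io_snap bucket = {0}, curr = {0}, last = {0};

		curr.read_bytes = 4096;		/* first poll */
		fold_delta(&bucket, &curr, &last);
		curr.read_bytes = 10240;	/* second poll */
		fold_delta(&bucket, &curr, &last);
		/* Deltas sum back to the running total: prints 10240. */
		printf("%llu\n", bucket.read_bytes);
		return 0;
	}

This same shape is what lets update_io_stats_uid_locked() recompute a single
UID without disturbing the totals of the other entries.
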
static int uid_io_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry;
@@ -289,7 +331,7 @@
rt_mutex_lock(&uid_lock);
- update_io_stats_locked();
+ update_io_stats_all_locked();
hash_for_each(hash_table, bkt, uid_entry, hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -364,7 +406,7 @@
return count;
}
- update_io_stats_locked();
+ update_io_stats_uid_locked(uid);
uid_entry->state = state;
@@ -402,7 +444,7 @@
uid_entry->utime += utime;
uid_entry->stime += stime;
- update_io_stats_locked();
+ update_io_stats_uid_locked(uid);
clean_uid_io_last_stats(uid_entry, task);
exit:
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38..9116551 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -19,6 +19,14 @@
This is an option for use by developers; most people should
say N here. This enables MMC core and driver debugging.
+config MMC_PERF_PROFILING
+ bool "MMC performance profiling"
+ depends on MMC != n
+ default n
+ help
+ If you say Y here, support will be added for collecting
+ performance numbers at the MMC Queue and Host layers.
+
if MMC
source "drivers/mmc/core/Kconfig"
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 6142ec1..91f2445 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,17 @@
If unsure, say Y here.
+config MMC_BLOCK_DEFERRED_RESUME
+ bool "Defer MMC layer resume until I/O is requested"
+ depends on MMC_BLOCK
+ default n
+ help
+ Say Y here to enable deferred MMC resume until I/O
+ is requested.
+
+ This will reduce overall resume latency and
+ save power when there is an SD card inserted but not being used.
+
config SDIO_UART
tristate "SDIO UART/GPS class support"
depends on TTY
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..d55107f 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
+obj-$(CONFIG_MMC_BLOCK_TEST) += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 817fcf8..d8e9599 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -30,15 +30,18 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/bitops.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
+#include <linux/ioprio.h>
#include <linux/idr.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
@@ -60,15 +63,33 @@
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_CMDQ_STOP_TIMEOUT_MS 100
#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
(rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER 0x01
#define PACKED_CMD_WR 0x02
+#define PACKED_TRIGGER_MAX_ELEMENTS 5000
+#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
+ do { \
+ if (stats->enabled) \
+ stats->pack_stop_reason[reason]++; \
+ } while (0)
+
+#define MAX_RETRIES 5
+#define PCKD_TRGR_INIT_MEAN_POTEN 17
+#define PCKD_TRGR_POTEN_LOWER_BOUND 5
+#define PCKD_TRGR_URGENT_PENALTY 2
+#define PCKD_TRGR_LOWER_BOUND 5
+#define PCKD_TRGR_PRECISION_MULTIPLIER 100
+
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+ struct mmc_queue_req *mqrq, struct mmc_queue *mq);
static DEFINE_MUTEX(block_mutex);
/*
@@ -103,6 +124,7 @@
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
+#define MMC_BLK_CMD_QUEUE (1 << 3) /* MMC command queue support */
unsigned int usage;
unsigned int read_only;
@@ -112,6 +134,8 @@
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_FLUSH BIT(4)
+#define MMC_BLK_PARTSWITCH BIT(5)
/*
* Only set in main mmc_blk_data associated
@@ -121,6 +145,8 @@
unsigned int part_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
+ struct device_attribute num_wr_reqs_to_start_packing;
+ struct device_attribute no_pack_for_random;
int area_type;
};
@@ -138,6 +164,8 @@
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+ struct mmc_blk_data *md, bool enable);
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
@@ -194,9 +222,13 @@
{
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- struct mmc_card *card = md->queue.card;
+ struct mmc_card *card;
int locked = 0;
+ if (!md)
+ return -EINVAL;
+
+ card = md->queue.card;
if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
locked = 2;
else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
@@ -224,6 +256,8 @@
return count;
md = mmc_blk_get(dev_to_disk(dev));
+ if (!md)
+ return -EINVAL;
card = md->queue.card;
mmc_get_card(card);
@@ -261,6 +295,9 @@
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ if (!md)
+ return -EINVAL;
+
ret = snprintf(buf, PAGE_SIZE, "%d\n",
get_disk_ro(dev_to_disk(dev)) ^
md->read_only);
@@ -275,6 +312,10 @@
char *end;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
unsigned long set = simple_strtoul(buf, &end, 0);
+
+ if (!md)
+ return -EINVAL;
+
if (end == buf) {
ret = -EINVAL;
goto out;
@@ -287,6 +328,119 @@
return ret;
}
+static ssize_t
+no_pack_for_random_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int ret;
+
+ if (!md)
+ return -EINVAL;
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+no_pack_for_random_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card;
+ int ret = count;
+
+ if (!md)
+ return -EINVAL;
+
+ card = md->queue.card;
+ if (!card) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ sscanf(buf, "%d", &value);
+
+ if (value < 0) {
+		pr_err("%s: value %d is not valid. old value remains = %d\n",
+ mmc_hostname(card->host), value,
+ md->queue.no_pack_for_random);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	md->queue.no_pack_for_random = value > 0;
+
+	pr_debug("%s: no_pack_for_random: new value = %d\n",
+ mmc_hostname(card->host),
+ md->queue.no_pack_for_random);
+
+exit:
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int num_wr_reqs_to_start_packing;
+ int ret;
+
+ if (!md)
+ return -EINVAL;
+ num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card;
+ int ret = count;
+
+ if (!md)
+ return -EINVAL;
+
+ card = md->queue.card;
+ if (!card) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ sscanf(buf, "%d", &value);
+
+ if (value >= 0) {
+ md->queue.num_wr_reqs_to_start_packing =
+ min_t(int, value, (int)card->ext_csd.max_packed_writes);
+
+		pr_debug("%s: trigger to pack: new value = %d\n",
+ mmc_hostname(card->host),
+ md->queue.num_wr_reqs_to_start_packing);
+ } else {
+		pr_err("%s: value %d is not valid. old value remains = %d\n",
+ mmc_hostname(card->host), value,
+ md->queue.num_wr_reqs_to_start_packing);
+ ret = -EINVAL;
+ }
+
+exit:
+ mmc_blk_put(md);
+ return ret;
+}
+
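
Both attributes are plain integer sysfs knobs on the disk device. As a hedged
illustration (the exact sysfs path depends on the disk name and is an
assumption here), a userspace snippet raising the packing trigger might look
like:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed path: the attribute hangs off the mmcblk disk
		 * device, typically under /sys/block/mmcblk0/ on these
		 * targets. */
		int fd = open("/sys/block/mmcblk0/num_wr_reqs_to_start_packing",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* The store handler clamps the value to
		 * ext_csd.max_packed_writes and rejects negatives. */
		if (write(fd, "17", 2) != 2)
			perror("write");
		close(fd);
		return 0;
	}
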
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
static int max_read_speed, max_write_speed, cache_size = 4;
@@ -680,11 +834,12 @@
{
int err;
- if (!mmc_can_sanitize(card)) {
- pr_warn("%s: %s - SANITIZE is not supported\n",
+ if (!mmc_can_sanitize(card) &&
+ (card->host->caps2 & MMC_CAP2_SANITIZE)) {
+ pr_warn("%s: %s - SANITIZE is not supported\n",
mmc_hostname(card->host), __func__);
- err = -EOPNOTSUPP;
- goto out;
+ err = -EOPNOTSUPP;
+ goto out;
}
pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
@@ -712,15 +867,10 @@
struct mmc_request mrq = {NULL};
struct scatterlist sg;
int err;
- int is_rpmb = false;
- u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
-
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
cmd.flags = idata->ic.flags;
@@ -763,6 +913,15 @@
mrq.cmd = &cmd;
+ if (mmc_card_doing_bkops(card)) {
+ err = mmc_stop_bkops(card);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "%s: stop_bkops failed %d\n", __func__, err);
+ return err;
+ }
+ }
+
err = mmc_blk_part_switch(card, md);
if (err)
return err;
@@ -773,13 +932,6 @@
return err;
}
- if (is_rpmb) {
- err = mmc_set_blockcount(card, data.blocks,
- idata->ic.write_flag & (1 << 31));
- if (err)
- return err;
- }
-
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
(cmd.opcode == MMC_SWITCH)) {
err = ioctl_do_sanitize(card);
@@ -813,7 +965,183 @@
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ return err;
+}
+
+struct mmc_blk_ioc_rpmb_data {
+ struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
+};
+
+static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
+ struct mmc_ioc_rpmb __user *user)
+{
+ struct mmc_blk_ioc_rpmb_data *idata;
+ int err, i;
+
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+ idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
+ if (IS_ERR(idata->data[i])) {
+ err = PTR_ERR(idata->data[i]);
+ goto copy_err;
+ }
+ }
+
+ return idata;
+
+copy_err:
+ while (--i >= 0) {
+ kfree(idata->data[i]->buf);
+ kfree(idata->data[i]);
+ }
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
+ struct mmc_ioc_rpmb __user *ic_ptr)
+{
+ struct mmc_blk_ioc_rpmb_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card = NULL;
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {NULL};
+ struct scatterlist sg;
+ int err = 0, i = 0;
+ u32 status = 0;
+
+ /* The caller must have CAP_SYS_RAWIO */
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ md = mmc_blk_get(bdev->bd_disk);
+ /* make sure this is a rpmb partition */
+ if ((!md) || (!(md->area_type & MMC_BLK_DATA_AREA_RPMB))) {
+ err = -EINVAL;
+ return err;
+ }
+
+ idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
+ if (IS_ERR(idata)) {
+ err = PTR_ERR(idata);
+ goto cmd_done;
+ }
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto idata_free;
+ }
+
+ mmc_get_card(card);
+
+ if (mmc_card_doing_bkops(card)) {
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, true);
+ if (err)
+ goto cmd_rel_host;
+ }
+ err = mmc_stop_bkops(card);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "%s: stop_bkops failed %d\n", __func__, err);
+ goto cmd_rel_host;
+ }
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, false);
+ if (err)
+ goto cmd_rel_host;
+ }
+ }
+
+ err = mmc_blk_part_switch(card, md);
+ if (err)
+ goto cmd_rel_host;
+
+ for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+ struct mmc_blk_ioc_data *curr_data;
+ struct mmc_ioc_cmd *curr_cmd;
+
+ curr_data = idata->data[i];
+ curr_cmd = &curr_data->ic;
+ if (!curr_cmd->opcode)
+ break;
+
+ cmd.opcode = curr_cmd->opcode;
+ cmd.arg = curr_cmd->arg;
+ cmd.flags = curr_cmd->flags;
+
+ if (curr_data->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = curr_cmd->blksz;
+ data.blocks = curr_cmd->blocks;
+
+ sg_init_one(data.sg, curr_data->buf,
+ curr_data->buf_bytes);
+
+ if (curr_cmd->write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /*
+ * Allow overriding the timeout_ns for empirical tuning.
+ */
+ if (curr_cmd->data_timeout_ns)
+ data.timeout_ns = curr_cmd->data_timeout_ns;
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
+ err = mmc_set_blockcount(card, data.blocks,
+ curr_cmd->write_flag & (1 << 31));
+ if (err)
+ goto cmd_rel_host;
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ err = cmd.error;
+ goto cmd_rel_host;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ err = data.error;
+ goto cmd_rel_host;
+ }
+
+ if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
+ sizeof(cmd.resp))) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+
+ if (!curr_cmd->write_flag) {
+ if (copy_to_user((void __user *)(unsigned long)
+ curr_cmd->data_ptr,
+ curr_data->buf,
+ curr_data->buf_bytes)) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+ }
+
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -825,6 +1153,20 @@
__func__, status, err);
}
+cmd_rel_host:
+ mmc_put_card(card);
+
+idata_free:
+ for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+ kfree(idata->data[i]->buf);
+ kfree(idata->data[i]);
+ }
+ kfree(idata);
+
+cmd_done:
+ mmc_blk_put(md);
+ if (card && card->cmdq_init)
+ wake_up(&card->host->cmdq_ctx.wait);
return err;
}
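
From userspace, the new MMC_IOC_RPMB_CMD path is driven with a struct
mmc_ioc_rpmb holding up to MMC_IOC_MAX_RPMB_CMD pre-built commands; the kernel
loop above stops at the first slot whose opcode is 0. A hedged sketch follows:
it assumes the msm uapi definitions in <linux/mmc/ioctl.h>, leaves out the
RPMB frame MAC/nonce handling a real caller such as mmc-utils performs, and
the caller needs CAP_SYS_RAWIO.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/mmc/ioctl.h>

	int main(void)
	{
		struct mmc_ioc_rpmb req;
		unsigned char frame[512] = {0};	/* one RPMB data frame */
		int fd, ret;

		fd = open("/dev/block/mmcblk0rpmb", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&req, 0, sizeof(req));
		/* Slot 0: CMD25 write of one 512-byte frame. A real caller
		 * would also set the response/command flags (R1/ADTC) the
		 * way mmc-utils does; they are omitted in this sketch. */
		req.cmds[0].opcode = 25;	/* MMC_WRITE_MULTIPLE_BLOCK */
		req.cmds[0].write_flag = 1;
		req.cmds[0].blksz = 512;
		req.cmds[0].blocks = 1;
		mmc_ioc_cmd_set_data(req.cmds[0], frame);

		ret = ioctl(fd, MMC_IOC_RPMB_CMD, &req);
		if (ret)
			perror("MMC_IOC_RPMB_CMD");
		close(fd);
		return ret ? 1 : 0;
	}
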
@@ -845,7 +1187,7 @@
return -EPERM;
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
- if (IS_ERR(idata))
+ if (IS_ERR_OR_NULL(idata))
return PTR_ERR(idata);
md = mmc_blk_get(bdev->bd_disk);
@@ -855,13 +1197,24 @@
}
card = md->queue.card;
- if (IS_ERR(card)) {
+ if (IS_ERR_OR_NULL(card)) {
err = PTR_ERR(card);
goto cmd_done;
}
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt_on_empty_queue(card->host);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_put_card(card);
+ goto cmd_done;
+ }
+ }
+
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
/* Always switch back to main area after RPMB access */
@@ -872,6 +1225,12 @@
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
cmd_done:
mmc_blk_put(md);
cmd_err:
@@ -963,6 +1322,9 @@
case MMC_IOC_CMD:
return mmc_blk_ioctl_cmd(bdev,
(struct mmc_ioc_cmd __user *)arg);
+ case MMC_IOC_RPMB_CMD:
+ return mmc_blk_ioctl_rpmb_cmd(bdev,
+ (struct mmc_ioc_rpmb __user *)arg);
case MMC_IOC_MULTI_CMD:
return mmc_blk_ioctl_multi_cmd(bdev,
(struct mmc_ioc_multi_cmd __user *)arg);
@@ -990,18 +1352,76 @@
#endif
};
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+ struct mmc_blk_data *md, bool enable)
+{
+ int ret = 0;
+ bool cmdq_mode = !!mmc_card_cmdq(card);
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+ if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
+ !card->ext_csd.cmdq_support ||
+ (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
+ (cmdq_mode == enable))
+ return 0;
+
+ if (enable) {
+ ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+ if (ret) {
+ pr_err("%s: failed (%d) to set block-size to %d\n",
+ __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
+ goto out;
+ }
+
+ } else {
+ if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
+ ret = mmc_cmdq_halt(host, true);
+ if (ret) {
+ pr_err("%s: halt: failed: %d\n",
+ mmc_hostname(host), ret);
+ goto out;
+ }
+ }
+ }
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CMDQ, enable,
+ card->ext_csd.generic_cmd6_time);
+ if (ret) {
+ pr_err("%s: cmdq mode %sable failed %d\n",
+ md->disk->disk_name, enable ? "en" : "dis", ret);
+ goto out;
+ }
+
+ if (enable)
+ mmc_card_set_cmdq(card);
+ else
+ mmc_card_clr_cmdq(card);
+out:
+ return ret;
+}
+
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
int ret;
struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
- if (main_md->part_curr == md->part_type)
+ if ((main_md->part_curr == md->part_type) &&
+ (card->part_curr == md->part_type))
return 0;
if (mmc_card_mmc(card)) {
u8 part_config = card->ext_csd.part_config;
+ if (md->part_type) {
+ /* disable CQ mode for non-user data partitions */
+ ret = mmc_blk_cmdq_switch(card, md, false);
+ if (ret)
+ return ret;
+ }
+
if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
mmc_retune_pause(card->host);
@@ -1012,12 +1432,16 @@
EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
if (ret) {
+ pr_err("%s: mmc_blk_part_switch failure, %d -> %d\n",
+ mmc_hostname(card->host), main_md->part_curr,
+ md->part_type);
if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
mmc_retune_unpause(card->host);
return ret;
}
card->ext_csd.part_config = part_config;
+ card->part_curr = md->part_type;
if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
mmc_retune_unpause(card->host);
@@ -1201,18 +1625,20 @@
switch (error) {
case -EILSEQ:
/* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
+ pr_err_ratelimited(
+ "%s: response CRC error sending %s command, card status %#x\n",
+ req->rq_disk->disk_name,
name, status);
return ERR_RETRY;
case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
+ pr_err_ratelimited(
+ "%s: timed out sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, name, status);
/* If the status cmd initially failed, retry the r/w cmd */
if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n",
+ pr_err_ratelimited("%s: status not valid, retrying timeout\n",
req->rq_disk->disk_name);
return ERR_RETRY;
}
@@ -1223,17 +1649,22 @@
* have corrected the state problem above.
*/
if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n",
+ pr_err_ratelimited(
+ "%s: command error, retrying timeout\n",
req->rq_disk->disk_name);
return ERR_RETRY;
}
/* Otherwise abort the command */
+ pr_err_ratelimited(
+ "%s: not retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_ABORT;
default:
/* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ pr_err_ratelimited(
+ "%s: unknown error %d sending read/write command, card status %#x\n",
req->rq_disk->disk_name, error, status);
return ERR_ABORT;
}
@@ -1372,8 +1803,15 @@
md->reset_done |= type;
err = mmc_hw_reset(host);
+ if (err && err != -EOPNOTSUPP) {
+ /* We failed to reset so we need to abort the request */
+ pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
+ __func__, err);
+ return -ENODEV;
+ }
+
/* Ensure we switch back to the correct partition */
- if (err != -EOPNOTSUPP) {
+ if (host->card) {
struct mmc_blk_data *main_md =
dev_get_drvdata(&host->card->dev);
int part_err;
@@ -1408,6 +1846,77 @@
return false;
}
+static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct mmc_cmdq_req *cmdq_req;
+ struct mmc_queue_req *active_mqrq;
+
+ BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+ set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+ active_mqrq = &mq->mqrq_cmdq[req->tag];
+ active_mqrq->req = req;
+
+ cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+ cmdq_req->cmdq_req_flags |= QBR;
+ cmdq_req->mrq.cmd = &cmdq_req->cmd;
+ cmdq_req->tag = req->tag;
+ return cmdq_req;
+}
+
+static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_cmdq_req *cmdq_req = NULL;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ if (!mmc_can_erase(card)) {
+ err = -EOPNOTSUPP;
+ blk_end_request(req, err, blk_rq_bytes(req));
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_discard(card))
+ arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
+ arg = MMC_TRIM_ARG;
+ else
+ arg = MMC_ERASE_ARG;
+
+ cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+ EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0, true, false);
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err)
+ goto clear_dcmd;
+ }
+ err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+clear_dcmd:
+ mmc_host_clk_hold(card->host);
+ blk_complete_request(req);
+out:
+ return err ? 1 : 0;
+}
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
@@ -1451,6 +1960,69 @@
return err ? 0 : 1;
}
+static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_cmdq_req *cmdq_req = NULL;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ if (!(mmc_can_secure_erase_trim(card))) {
+ err = -EOPNOTSUPP;
+ blk_end_request(req, err, blk_rq_bytes(req));
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
+
+ cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+ EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ 0, true, false);
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err)
+ goto clear_dcmd;
+ }
+
+ err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+ if (err)
+ goto clear_dcmd;
+
+ if (arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+ EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0, true, false);
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err)
+ goto clear_dcmd;
+ }
+
+ err = mmc_cmdq_erase(cmdq_req, card, from, nr,
+ MMC_SECURE_TRIM2_ARG);
+ }
+clear_dcmd:
+ mmc_host_clk_hold(card->host);
+ blk_complete_request(req);
+out:
+ return err ? 1 : 0;
+}
+
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
@@ -1524,9 +2096,45 @@
struct mmc_card *card = md->queue.card;
int ret = 0;
- ret = mmc_flush_cache(card);
- if (ret)
+ if (!req)
+ return 0;
+
+ if (req->cmd_flags & REQ_BARRIER) {
+ /*
+ * If eMMC cache flush policy is set to 1, then the device
+ * shall flush the requests in First-In-First-Out (FIFO) order.
+ * In this case, as per spec, the host must not send any cache
+ * barrier requests as they are redundant and add unnecessary
+ * overhead to both device and host.
+ */
+ if (card->ext_csd.cache_flush_policy & 1)
+ goto end_req;
+
+ /*
+ * In case barrier is not supported or enabled in the device,
+ * use flush as a fallback option.
+ */
+ ret = mmc_cache_barrier(card);
+ if (ret)
+ ret = mmc_flush_cache(card);
+ } else if (req_op(req) == REQ_OP_FLUSH) {
+ ret = mmc_flush_cache(card);
+ }
+ if (ret == -ENODEV) {
+		pr_err("%s: %s: restart mmc card\n",
+ req->rq_disk->disk_name, __func__);
+ if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
+			pr_err("%s: %s: failed to restart mmc\n",
+ req->rq_disk->disk_name, __func__);
+ else
+ mmc_blk_reset_success(md, MMC_BLK_FLUSH);
+ }
+
+ if (ret) {
+		pr_err("%s: %s: notify flush error to upper layers\n",
+ req->rq_disk->disk_name, __func__);
ret = -EIO;
+ }
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
else if (atomic_read(&mq->cache_size)) {
@@ -1545,6 +2153,7 @@
}
}
#endif
+end_req:
blk_end_request_all(req, ret);
return ret ? 0 : 1;
@@ -1591,6 +2200,17 @@
int need_retune = card->host->need_retune;
int ecc_err = 0, gen_err = 0;
+ if (card->host->sdr104_wa && mmc_card_sd(card) &&
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !card->sdr104_blocked &&
+ (brq->data.error == -EILSEQ ||
+ brq->data.error == -EIO ||
+ brq->data.error == -ETIMEDOUT ||
+ brq->cmd.error == -EILSEQ ||
+ brq->cmd.error == -EIO ||
+ brq->cmd.error == -ETIMEDOUT))
+ card->err_in_sdr104 = true;
+
/*
* sbc.error indicates a problem with the set block count
* command. No data will have been transferred.
@@ -1773,6 +2393,7 @@
brq->stop.arg = 0;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.fault_injected = false;
/*
* The block layer doesn't support all sector count
* restrictions, so we need to be prepared for too big
@@ -1896,6 +2517,7 @@
}
mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.mrq->req = mqrq->req;
mqrq->mmc_active.err_check = mmc_blk_err_check;
mmc_queue_bounce_pre(mqrq);
@@ -1917,6 +2539,178 @@
return nr_segs;
}
+/**
+ * mmc_blk_disable_wr_packing() - disables packing mode
+ * @mq: MMC queue.
+ *
+ */
+void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
+{
+ if (mq) {
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+ }
+}
+EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
+
+static int get_packed_trigger(int potential, struct mmc_card *card,
+ struct request *req, int curr_trigger)
+{
+ static int num_mean_elements = 1;
+ static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+ unsigned int trigger = curr_trigger;
+ unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
+
+ /* scale down the upper bound to 75% */
+ pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
+
+ /*
+ * since the most common calls for this function are with small
+ * potential write values and since we don't want these calls to affect
+ * the packed trigger, set a lower bound and ignore calls with
+ * potential lower than that bound
+ */
+ if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
+ return trigger;
+
+ /*
+ * this is to prevent integer overflow in the following calculation:
+ * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
+ */
+ if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
+ num_mean_elements = 1;
+ mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+ }
+
+ /*
+ * get next mean value based on previous mean value and current
+ * potential packed writes. Calculation is as follows:
+ * mean_pot[i+1] =
+ * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
+ */
+ mean_potential *= num_mean_elements;
+ /*
+ * add num_mean_elements so that the division of two integers doesn't
+ * lower mean_potential too much
+ */
+ if (potential > mean_potential)
+ mean_potential += num_mean_elements;
+ mean_potential += potential;
+ /* this is for gaining more precision when dividing two integers */
+ mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
+ /* this completes the mean calculation */
+ mean_potential /= ++num_mean_elements;
+ mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
+
+ /*
+	 * If the current potential packed-writes count is greater than the
+	 * mean potential, the heuristic is that the following workload will
+	 * contain many write requests, so we lower the packed trigger. In
+	 * the opposite case we raise the trigger in order to get fewer
+	 * packing events.
+ */
+ if (potential >= mean_potential)
+ trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
+ PCKD_TRGR_LOWER_BOUND : trigger - 1;
+ else
+ trigger = (trigger >= pckd_trgr_upper_bound) ?
+ pckd_trgr_upper_bound : trigger + 1;
+
+ /*
+ * an urgent read request indicates a packed list being interrupted
+ * by this read, therefore we aim for less packing, hence the trigger
+ * gets increased
+ */
+ if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
+ trigger += PCKD_TRGR_URGENT_PENALTY;
+
+ return trigger;
+}
+
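
A compact userspace rendering of get_packed_trigger()'s running-mean update
may make the fixed-point sequence easier to follow; the constants mirror the
defines above and the sample potentials are made up:

	#include <stdio.h>

	#define PCKD_TRGR_INIT_MEAN_POTEN	17
	#define PCKD_TRGR_PRECISION_MULTIPLIER	100

	int main(void)
	{
		unsigned long mean = PCKD_TRGR_INIT_MEAN_POTEN;
		int n = 1;			/* num_mean_elements */
		int samples[] = { 20, 6, 30 };	/* made-up potentials */
		int i;

		for (i = 0; i < 3; i++) {
			int potential = samples[i];

			/* Same update order as the driver code above. */
			mean *= n;
			if (potential > mean)	/* counter rounding-down */
				mean += n;
			mean += potential;
			mean *= PCKD_TRGR_PRECISION_MULTIPLIER;
			mean /= ++n;
			mean /= PCKD_TRGR_PRECISION_MULTIPLIER;
			printf("potential=%2d -> mean=%lu\n",
			       potential, mean);
		}
		return 0;	/* prints means 19, 14, 18 */
	}
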
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
+ int data_dir;
+
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+ return;
+
+ /* Support for the write packing on eMMC 4.5 or later */
+ if (mq->card->ext_csd.rev <= 5)
+ return;
+
+ /*
+	 * If packing control is not supported by the host, it must not
+	 * affect write packing; therefore write packing is enabled
+	 * unconditionally here.
+ */
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+ mq->wr_packing_enabled = true;
+ return;
+ }
+
+	if (!req || (req->cmd_flags & REQ_PREFLUSH)) {
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+ mq->num_wr_reqs_to_start_packing =
+ get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+ mq->card, req,
+ mq->num_wr_reqs_to_start_packing);
+ mq->num_of_potential_packed_wr_reqs = 0;
+ return;
+ }
+
+ data_dir = rq_data_dir(req);
+
+ if (data_dir == READ) {
+ mmc_blk_disable_wr_packing(mq);
+ mq->num_wr_reqs_to_start_packing =
+ get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+ mq->card, req,
+ mq->num_wr_reqs_to_start_packing);
+ mq->num_of_potential_packed_wr_reqs = 0;
+ mq->wr_packing_enabled = false;
+ return;
+ } else if (data_dir == WRITE) {
+ mq->num_of_potential_packed_wr_reqs++;
+ }
+
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+}
+
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+ if (!card)
+ return NULL;
+
+ return &card->wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+ int max_num_of_packed_reqs = 0;
+
+ if (!card || !card->wr_pack_stats.packing_events)
+ return;
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ spin_lock(&card->wr_pack_stats.lock);
+ memset(card->wr_pack_stats.packing_events, 0,
+ (max_num_of_packed_reqs + 1) *
+ sizeof(*card->wr_pack_stats.packing_events));
+ memset(&card->wr_pack_stats.pack_stop_reason, 0,
+ sizeof(card->wr_pack_stats.pack_stop_reason));
+ card->wr_pack_stats.enabled = true;
+ spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
@@ -1930,6 +2724,7 @@
bool put_back = true;
u8 max_packed_rw = 0;
u8 reqs = 0;
+ struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
/*
* We don't need to check packed for any further
@@ -1946,6 +2741,9 @@
if (!(md->flags & MMC_BLK_PACKED_CMD))
goto no_packed;
+ if (!mq->wr_packing_enabled)
+ goto no_packed;
+
if ((rq_data_dir(cur) == WRITE) &&
mmc_host_packed_wr(card->host))
max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1961,6 +2759,9 @@
!IS_ALIGNED(blk_rq_sectors(cur), 8))
goto no_packed;
+ if (cur->cmd_flags & REQ_FUA)
+ goto no_packed;
+
mmc_blk_clear_packed(mqrq);
max_blk_count = min(card->host->max_blk_count,
@@ -1977,6 +2778,7 @@
phys_segments += mmc_calc_packed_hdr_segs(q, card);
}
+ spin_lock(&stats->lock);
do {
if (reqs >= max_packed_rw - 1) {
put_back = false;
@@ -1987,34 +2789,65 @@
next = blk_fetch_request(q);
spin_unlock_irq(q->queue_lock);
if (!next) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
put_back = false;
break;
}
if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(next), 8))
+ !IS_ALIGNED(blk_rq_sectors(next), 8)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
break;
+ }
if (req_op(next) == REQ_OP_DISCARD ||
req_op(next) == REQ_OP_SECURE_ERASE ||
- req_op(next) == REQ_OP_FLUSH)
+ req_op(next) == REQ_OP_FLUSH) {
+ if (req_op(next) != REQ_OP_SECURE_ERASE)
+ MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
break;
+ }
- if (rq_data_dir(cur) != rq_data_dir(next))
+ if (next->cmd_flags & REQ_FUA) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
break;
+ }
+
+ if (rq_data_dir(cur) != rq_data_dir(next)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
+ break;
+ }
if (mmc_req_rel_wr(next) &&
- (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
break;
+ }
req_sectors += blk_rq_sectors(next);
- if (req_sectors > max_blk_count)
+ if (req_sectors > max_blk_count) {
+ if (stats->enabled)
+ stats->pack_stop_reason[EXCEEDS_SECTORS]++;
break;
+ }
phys_segments += next->nr_phys_segments;
- if (phys_segments > max_phys_segs)
+ if (phys_segments > max_phys_segs) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
break;
+ }
+ if (mq->no_pack_for_random) {
+ if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
+ blk_rq_pos(next)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
+				put_back = true;
+ break;
+ }
+ }
+
+ if (rq_data_dir(next) == WRITE)
+ mq->num_of_potential_packed_wr_reqs++;
list_add_tail(&next->queuelist, &mqrq->packed->list);
cur = next;
reqs++;
@@ -2026,6 +2859,15 @@
spin_unlock_irq(q->queue_lock);
}
+ if (stats->enabled) {
+ if (reqs + 1 <= card->ext_csd.max_packed_writes)
+ stats->packing_events[reqs + 1]++;
+ if (reqs + 1 == max_packed_rw)
+ MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+ }
+
+ spin_unlock(&stats->lock);
+
if (reqs > 0) {
list_add(&req->queuelist, &mqrq->packed->list);
mqrq->packed->nr_entries = ++reqs;
@@ -2103,6 +2945,7 @@
brq->data.blksz = 512;
brq->data.blocks = packed->blocks + hdr_blocks;
brq->data.flags = MMC_DATA_WRITE;
+ brq->data.fault_injected = false;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -2114,7 +2957,18 @@
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ /*
+	 * These hooks are intended for packed-command test usage; when the
+	 * test functions are not in use, the respective pointers are NULL.
+ */
+ if (mq->err_check_fn)
+ mqrq->mmc_active.err_check = mq->err_check_fn;
+ else
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ if (mq->packed_test_fn)
+ mq->packed_test_fn(mq->queue, mqrq);
mmc_queue_bounce_pre(mqrq);
}
@@ -2136,11 +2990,12 @@
*/
if (mmc_card_sd(card)) {
u32 blocks;
-
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- ret = blk_end_request(req, 0, blocks << 9);
- }
+ if (!brq->data.fault_injected) {
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1)
+ ret = blk_end_request(req, 0, blocks << 9);
+ } else
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
} else {
if (!mmc_packed_cmd(mq_rq->cmd_type))
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
@@ -2214,6 +3069,576 @@
mmc_blk_clear_packed(mq_rq);
}
+static int mmc_blk_cmdq_start_req(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req)
+{
+ struct mmc_request *mrq = &cmdq_req->mrq;
+
+ mrq->done = mmc_blk_cmdq_req_done;
+ return mmc_cmdq_start_req(host, cmdq_req);
+}
+
+/* prepare for non-data commands */
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+ struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+ struct request *req = mqrq->req;
+ struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req;
+
+ memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+ cmdq_req->mrq.data = NULL;
+ cmdq_req->cmd_flags = req->cmd_flags;
+ cmdq_req->mrq.req = mqrq->req;
+ req->special = mqrq;
+ cmdq_req->cmdq_req_flags |= DCMD;
+ cmdq_req->mrq.cmdq_req = cmdq_req;
+
+ return &mqrq->cmdq_req;
+}
+
+
+#define IS_RT_CLASS_REQ(x) \
+ (IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)
+
+static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
+ struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
+ bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
+ bool do_data_tag;
+ bool read_dir = (rq_data_dir(req) == READ);
+ bool prio = IS_RT_CLASS_REQ(req);
+ struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;
+
+ memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+ cmdq_rq->tag = req->tag;
+ if (read_dir) {
+ cmdq_rq->cmdq_req_flags |= DIR;
+ cmdq_rq->data.flags = MMC_DATA_READ;
+ } else {
+ cmdq_rq->data.flags = MMC_DATA_WRITE;
+ }
+ if (prio)
+ cmdq_rq->cmdq_req_flags |= PRIO;
+
+ if (do_rel_wr)
+ cmdq_rq->cmdq_req_flags |= REL_WR;
+
+ cmdq_rq->data.blocks = blk_rq_sectors(req);
+ cmdq_rq->blk_addr = blk_rq_pos(req);
+ cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;
+
+ mmc_set_data_timeout(&cmdq_rq->data, card);
+
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+ if (do_data_tag)
+ cmdq_rq->cmdq_req_flags |= DAT_TAG;
+ cmdq_rq->data.sg = mqrq->sg;
+ cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (cmdq_rq->data.blocks > card->host->max_blk_count)
+ cmdq_rq->data.blocks = card->host->max_blk_count;
+
+ if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = cmdq_rq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
+ }
+ cmdq_rq->data.sg_len = i;
+ }
+
+ mqrq->cmdq_req.cmd_flags = req->cmd_flags;
+ mqrq->cmdq_req.mrq.req = mqrq->req;
+ mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
+ mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
+ mqrq->req->special = mqrq;
+
+ pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
+ mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
+ mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
+ cmdq_rq, cmdq_rq->blk_addr,
+ (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);
+
+ return &mqrq->cmdq_req;
+}
+
+static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *active_mqrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+ struct mmc_cmdq_req *mc_rq;
+ u8 active_small_sector_read = 0;
+ int ret = 0;
+
+ mmc_deferred_scaling(host);
+ mmc_cmdq_clk_scaling_start_busy(host, true);
+
+ BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.data_active_reqs));
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+ active_mqrq = &mq->mqrq_cmdq[req->tag];
+ active_mqrq->req = req;
+
+ mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
+
+ if (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) {
+ unsigned int sectors = blk_rq_sectors(req);
+
+ if (((sectors > 0) && (sectors < 8))
+ && (rq_data_dir(req) == READ))
+ active_small_sector_read = 1;
+ }
+ ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
+ if (!ret && active_small_sector_read)
+ host->cmdq_ctx.active_small_sector_read_reqs++;
+ /*
+	 * When in SVS2 under a low-load scenario with many requests queued
+	 * for CMDQ, we must wait until the queue is empty before scaling
+	 * back up to Nominal, even on a sudden increase in load. This hurts
+	 * performance, since lots of I/O then executes at SVS2 frequency
+	 * while the queue is full. As SVS2 is a low-load use case, we can
+	 * serialize the requests rather than queueing them in parallel,
+	 * without impacting other use cases; this drains the queue faster
+	 * so we can scale up to Nominal frequency when needed.
+ */
+ if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
+ wait_event_interruptible(ctx->queue_empty_wq,
+ (!ctx->active_reqs));
+
+ return ret;
+}
+
+/*
+ * Issues a flush (dcmd) request
+ */
+int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req)
+{
+ int err;
+ struct mmc_queue_req *active_mqrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host;
+ struct mmc_cmdq_req *cmdq_req;
+ struct mmc_cmdq_context_info *ctx_info;
+
+ BUG_ON(!card);
+ host = card->host;
+ BUG_ON(!host);
+ BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+ ctx_info = &host->cmdq_ctx;
+
+ set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+ active_mqrq = &mq->mqrq_cmdq[req->tag];
+ active_mqrq->req = req;
+
+ cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+ cmdq_req->cmdq_req_flags |= QBR;
+ cmdq_req->mrq.cmd = &cmdq_req->cmd;
+ cmdq_req->tag = req->tag;
+
+ err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
+ if (err) {
+ pr_err("%s: failed (%d) preparing flush req\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+ err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
+ return err;
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
+
+static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
+{
+ int err = 0;
+
+ if (mmc_cmdq_halt(host, true)) {
+ pr_err("%s: halt failed\n", mmc_hostname(host));
+ goto reset;
+ }
+
+ if (clear_all)
+ mmc_cmdq_discard_queue(host, 0);
+reset:
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->disable(host, true);
+ mmc_host_clk_release(host);
+ err = mmc_cmdq_hw_reset(host);
+ if (err && err != -EOPNOTSUPP) {
+ pr_err("%s: failed to cmdq_hw_reset err = %d\n",
+ mmc_hostname(host), err);
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->enable(host);
+ mmc_host_clk_release(host);
+ mmc_cmdq_halt(host, false);
+ goto out;
+ }
+ /*
+ * CMDQ HW reset would have already made CQE
+ * in unhalted state, but reflect the same
+ * in software state of cmdq_ctx.
+ */
+ mmc_host_clr_halt(host);
+out:
+ return;
+}
+
+/**
+ * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
+ * @q: request_queue pointer.
+ * @tag: tag number of request to check.
+ *
+ * This function checks if the request with tag number "tag"
+ * is a DCMD request or not based on cmdq_req_flags set.
+ *
+ * Returns non-zero if it is a DCMD request, 0 if it is not, or
+ * -ENOENT if the tag cannot be resolved to a request.
+ */
+static int is_cmdq_dcmd_req(struct request_queue *q, int tag)
+{
+ struct request *req;
+ struct mmc_queue_req *mq_rq;
+ struct mmc_cmdq_req *cmdq_req;
+
+ req = blk_queue_find_tag(q, tag);
+ if (WARN_ON(!req))
+ goto out;
+ mq_rq = req->special;
+ if (WARN_ON(!mq_rq))
+ goto out;
+ cmdq_req = &(mq_rq->cmdq_req);
+ return (cmdq_req->cmdq_req_flags & DCMD);
+out:
+ return -ENOENT;
+}
+
+/**
+ * mmc_blk_cmdq_reset_all - Reset everything for CMDQ block request.
+ * @host: mmc_host pointer.
+ * @err: error for which reset is performed.
+ *
+ * This function implements reset_all functionality for
+ * cmdq. It resets the controller, power cycle the card,
+ * and invalidate all busy tags(requeue all request back to
+ * elevator).
+ */
+static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
+{
+ struct mmc_request *mrq = host->err_mrq;
+ struct mmc_card *card = host->card;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct request_queue *q;
+ int itag = 0;
+ int ret = 0;
+
+ if (WARN_ON(!mrq))
+ return;
+
+ q = mrq->req->q;
+ WARN_ON(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+ #ifdef CONFIG_MMC_CLKGATE
+ pr_debug("%s: %s: active_reqs = %lu, clk_requests = %d\n",
+ mmc_hostname(host), __func__,
+ ctx_info->active_reqs, host->clk_requests);
+ #endif
+
+ mmc_blk_cmdq_reset(host, false);
+
+ for_each_set_bit(itag, &ctx_info->active_reqs,
+ host->num_cq_slots) {
+ ret = is_cmdq_dcmd_req(q, itag);
+ if (WARN_ON(ret == -ENOENT))
+ continue;
+ if (!ret) {
+ WARN_ON(!test_and_clear_bit(itag,
+ &ctx_info->data_active_reqs));
+ mmc_cmdq_post_req(host, itag, err);
+ } else {
+ clear_bit(CMDQ_STATE_DCMD_ACTIVE,
+ &ctx_info->curr_state);
+ }
+ WARN_ON(!test_and_clear_bit(itag,
+ &ctx_info->active_reqs));
+ mmc_host_clk_release(host);
+ mmc_put_card(card);
+ }
+
+ spin_lock_irq(q->queue_lock);
+ blk_queue_invalidate_tags(q);
+ spin_unlock_irq(q->queue_lock);
+}
+
+static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq)
+{
+ int err;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+
+ mmc_get_card(card);
+ mmc_host_clk_hold(host);
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: halt: failed: %d\n", __func__, err);
+ goto out;
+ }
+
+ /* disable CQ mode in card */
+ if (mmc_card_cmdq(card)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CMDQ, 0,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: failed to switch card to legacy mode: %d\n",
+ __func__, err);
+ goto out;
+ }
+ mmc_card_clr_cmdq(card);
+ }
+ host->cmdq_ops->disable(host, false);
+ host->card->cmdq_init = false;
+out:
+ mmc_host_clk_release(host);
+ mmc_put_card(card);
+}
+
+static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mq_rq = req->special;
+ struct mmc_request *mrq;
+ struct mmc_cmdq_req *cmdq_req;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+
+ BUG_ON(!host);
+
+	/*
+	 * The mmc_queue_req will be present only if the request
+	 * has been issued to the LLD. The request could have been
+	 * fetched from the block layer queue but still be waiting
+	 * to be issued (e.g. clock scaling is waiting for an empty
+	 * cmdq queue). Reset the timer in such cases to give the
+	 * LLD more time.
+	 */
+ if (!mq_rq) {
+ pr_warn("%s: restart timer for tag: %d\n", __func__, req->tag);
+ return BLK_EH_RESET_TIMER;
+ }
+
+ mrq = &mq_rq->cmdq_req.mrq;
+ cmdq_req = &mq_rq->cmdq_req;
+
+ BUG_ON(!mrq || !cmdq_req);
+
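+	/*
+	 * Mark the timed-out transfer: the command for a DCMD,
+	 * the data segment otherwise.
+	 */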
+ if (cmdq_req->cmdq_req_flags & DCMD)
+ mrq->cmd->error = -ETIMEDOUT;
+ else
+ mrq->data->error = -ETIMEDOUT;
+
+ if (mrq->cmd && mrq->cmd->error) {
+ if (!(mrq->req->cmd_flags & REQ_PREFLUSH)) {
+ /*
+ * Notify completion for non flush commands like
+ * discard that wait for DCMD finish.
+ */
+ set_bit(CMDQ_STATE_REQ_TIMED_OUT,
+ &ctx_info->curr_state);
+ complete(&mrq->completion);
+ return BLK_EH_NOT_HANDLED;
+ }
+ }
+
+ if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state) ||
+ test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state))
+ return BLK_EH_NOT_HANDLED;
+
+ set_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+ return BLK_EH_HANDLED;
+}
+
+/*
+ * mmc_blk_cmdq_err: error handling for cmdq error requests.
+ * Must be called in the context of the errored-out request,
+ * with the host claimed and runtime PM acquired.
+ * This may be called with the CQ engine halted. Make sure to
+ * unhalt it after error recovery.
+ *
+ * TODO: Currently the cmdq error handler does reset_all in case
+ * of any error. Need to optimize error handling.
+ */
+static void mmc_blk_cmdq_err(struct mmc_queue *mq)
+{
+ struct mmc_host *host = mq->card->host;
+ struct mmc_request *mrq = host->err_mrq;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct request_queue *q;
+	int err = 0, ret;
+ u32 status = 0;
+
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->dumpstate(host);
+ mmc_host_clk_release(host);
+
+ if (WARN_ON(!mrq))
+ return;
+
+ q = mrq->req->q;
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+		pr_err("%s: halt: failed: %d\n", mmc_hostname(host), err);
+ goto reset;
+ }
+
+ /* RED error - Fatal: requires reset */
+ if (mrq->cmdq_req->resp_err) {
+ err = mrq->cmdq_req->resp_err;
+ if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
+ ret = get_card_status(host->card, &status, 0);
+ if (ret)
+ pr_err("%s: CMD13 failed with err %d\n",
+ mmc_hostname(host), ret);
+ }
+ pr_err("%s: Response error detected with device status 0x%08x\n",
+ mmc_hostname(host), status);
+ goto reset;
+ }
+
+	/*
+	 * In case of a software request time-out, err work is scheduled
+	 * only for the first errored-out request; all other requests in
+	 * flight are handled here.
+	 */
+ if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state)) {
+ err = -ETIMEDOUT;
+ } else if (mrq->data && mrq->data->error) {
+ err = mrq->data->error;
+ } else if (mrq->cmd && mrq->cmd->error) {
+ /* DCMD commands */
+ err = mrq->cmd->error;
+ }
+
+reset:
+ mmc_blk_cmdq_reset_all(host, err);
+ if (mrq->cmdq_req->resp_err)
+ mrq->cmdq_req->resp_err = false;
+ mmc_cmdq_halt(host, false);
+
+ host->err_mrq = NULL;
+ clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+ WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ wake_up(&ctx_info->wait);
+}
+
+/* invoked by block layer in softirq context */
+void mmc_blk_cmdq_complete_rq(struct request *rq)
+{
+ struct mmc_queue_req *mq_rq = rq->special;
+ struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
+ struct mmc_host *host = mrq->host;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
+ struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
+ int err = 0;
+ bool is_dcmd = false;
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+
+ if (err || cmdq_req->resp_err) {
+ pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
+ mmc_hostname(mrq->host), __func__, err,
+ cmdq_req->resp_err);
+ if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ pr_err("%s: CQ in error state, ending current req: %d\n",
+ __func__, err);
+ } else {
+ set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
+ BUG_ON(host->err_mrq != NULL);
+ host->err_mrq = mrq;
+ schedule_work(&mq->cmdq_err_work);
+ }
+ goto out;
+ }
+	/*
+	 * In case of error, CMDQ is expected to be in either the halted
+	 * or the disabled state, so it cannot receive completions for
+	 * other requests.
+	 */
+ BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+ /* clear pending request */
+ BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+ &ctx_info->active_reqs));
+ if (cmdq_req->cmdq_req_flags & DCMD)
+ is_dcmd = true;
+ else
+ BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+ &ctx_info->data_active_reqs));
+ if (!is_dcmd)
+ mmc_cmdq_post_req(host, cmdq_req->tag, err);
+ if (cmdq_req->cmdq_req_flags & DCMD) {
+ clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+ blk_end_request_all(rq, err);
+ goto out;
+ }
+
+ blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
+
+out:
+
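+	/*
+	 * Stop the clk-scaling busy accounting for this request. The
+	 * clock/card references are dropped only when the error handler
+	 * does not own the queue; in the error case reset_all drops them.
+	 */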
+ mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
+ if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ mmc_host_clk_release(host);
+ wake_up(&ctx_info->wait);
+ mmc_put_card(host->card);
+ }
+
+ if (!ctx_info->active_reqs)
+ wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq);
+
+ if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
+ complete(&mq->cmdq_shutdown_complete);
+
+ return;
+}
+
+/*
+ * Complete reqs from block layer softirq context
+ * Invoked in irq context
+ */
+void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
+{
+ struct request *req = mrq->req;
+
+ blk_complete_request(req);
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
@@ -2226,6 +3651,7 @@
struct mmc_async_req *areq;
const u8 packed_nr = 2;
u8 reqs = 0;
+ bool reset = false;
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
unsigned long waitfor = jiffies;
#endif
@@ -2261,7 +3687,7 @@
areq = mmc_start_req(card->host, areq, (int *) &status);
if (!areq) {
if (status == MMC_BLK_NEW_REQUEST)
- mq->flags |= MMC_QUEUE_NEW_REQUEST;
+ set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
return 0;
}
@@ -2271,6 +3697,26 @@
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
+ if (card->err_in_sdr104) {
+			/*
+			 * Data CRC/timeout errors will manifest as CMD/DATA
+			 * ERR, but we'd like to retry these too. No harm is
+			 * done if this fails multiple times; we reduce the
+			 * bus speed and retry the same request anyway.
+			 * If that fails too, we don't override this status.
+			 */
+ if (status == MMC_BLK_ABORT ||
+ status == MMC_BLK_CMD_ERR ||
+ status == MMC_BLK_DATA_ERR ||
+ status == MMC_BLK_RETRY)
+ /* reset on all of these errors and retry */
+ reset = true;
+
+ status = MMC_BLK_RETRY;
+ card->err_in_sdr104 = false;
+ }
+
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
@@ -2311,11 +3757,36 @@
break;
case MMC_BLK_RETRY:
retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
+ if (retry++ < MMC_BLK_MAX_RETRIES) {
break;
+ } else if (reset) {
+ reset = false;
+ /*
+ * If we exhaust all the retries due to
+				 * CRC/timeout errors in SDR104 mode with UHS SD
+ * cards, re-configure the card in SDR50
+ * bus-speed mode.
+ * All subsequent re-init of this card will be
+ * in SDR50 mode, unless it is removed and
+ * re-inserted. When new UHS SD cards are
+ * inserted, it may start at SDR104 mode if
+ * supported by the card.
+ */
+ pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ req->rq_disk->disk_name);
+ mmc_host_clear_sdr104(card->host);
+ mmc_suspend_clk_scaling(card->host);
+ mmc_blk_reset(md, card->host, type);
+ /* SDR104 mode is blocked from now on */
+ card->sdr104_blocked = true;
+				/* allow another full round of retries */
+ retry = 0;
+ break;
+ }
/* Fall through */
case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
+ if (!mmc_blk_reset(md, card->host, type) &&
+ (retry++ < (MMC_BLK_MAX_RETRIES + 1)))
break;
goto cmd_abort;
case MMC_BLK_DATA_ERR: {
@@ -2324,10 +3795,7 @@
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV ||
- mmc_packed_cmd(mq_rq->cmd_type))
- goto cmd_abort;
- /* Fall through */
+ goto cmd_abort;
}
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
@@ -2411,6 +3879,132 @@
return 0;
}
+static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+ u8 part_config = card->ext_csd.part_config;
+
+ if ((main_md->part_curr == md->part_type) &&
+ (card->part_curr == md->part_type))
+ return 0;
+
+ WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) &&
+ card->ext_csd.cmdq_support &&
+ (md->flags & MMC_BLK_CMD_QUEUE)));
+
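+	/*
+	 * Switch sequence: halt the CQ engine, take the card out of CMDQ
+	 * mode, switch the partition, then re-enable CMDQ and unhalt.
+	 */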
+ if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state))
+ WARN_ON(mmc_cmdq_halt(host, true));
+
+ /* disable CQ mode in card */
+ if (mmc_card_cmdq(card)) {
+ WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CMDQ, 0,
+ card->ext_csd.generic_cmd6_time));
+ mmc_card_clr_cmdq(card);
+ }
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= md->part_type;
+
+ WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG, part_config,
+ card->ext_csd.part_time));
+
+ card->ext_csd.part_config = part_config;
+ card->part_curr = md->part_type;
+
+ main_md->part_curr = md->part_type;
+
+ WARN_ON(mmc_blk_cmdq_switch(card, md, true));
+ WARN_ON(mmc_cmdq_halt(host, false));
+
+ return 0;
+}
+
+static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ int ret;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+
+ mmc_get_card(card);
+
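+	/*
+	 * If the queue is idle and the card is busy with BKOPS, halt the
+	 * CQ engine around stopping BKOPS before issuing the request.
+	 */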
+ if (!card->host->cmdq_ctx.active_reqs && mmc_card_doing_bkops(card)) {
+ ret = mmc_cmdq_halt(card->host, true);
+ if (ret)
+ goto out;
+ ret = mmc_stop_bkops(card);
+ if (ret) {
+ pr_err("%s: %s: mmc_stop_bkops failed %d\n",
+ md->disk->disk_name, __func__, ret);
+ goto out;
+ }
+ ret = mmc_cmdq_halt(card->host, false);
+ if (ret)
+ goto out;
+ }
+
+ ret = mmc_blk_cmdq_part_switch(card, md);
+ if (ret) {
+ pr_err("%s: %s: partition switch failed %d\n",
+ md->disk->disk_name, __func__, ret);
+ goto out;
+ }
+
+ if (req) {
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+ if ((req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD) &&
+ (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
+ ctx->active_small_sector_read_reqs) {
+ ret = wait_event_interruptible(ctx->queue_empty_wq,
+ !ctx->active_reqs);
+ if (ret) {
+ pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n",
+ mmc_hostname(host),
+ __func__, ret);
+ BUG_ON(1);
+ }
+ /* clear the counter now */
+ ctx->active_small_sector_read_reqs = 0;
+			/*
+			 * If there were small-sector (less than 8 sectors)
+			 * read operations in progress, we have to wait for
+			 * the outstanding requests to finish and also allow
+			 * at least 6 microseconds of delay before queuing
+			 * the DCMD request.
+			 */
+ udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);
+ }
+
+ if (req_op(req) == REQ_OP_DISCARD) {
+ ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
+ } else if (req_op(req) == REQ_OP_SECURE_ERASE) {
+ if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
+ } else if (req_op(req) == REQ_OP_FLUSH) {
+ ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
+ } else {
+ ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
+ }
+ }
+
+ return ret;
+
+out:
+ if (req)
+ blk_end_request_all(req, ret);
+ mmc_put_card(card);
+
+ return ret;
+}
+
int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
@@ -2418,14 +4012,33 @@
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
bool req_is_special = mmc_req_is_special(req);
+ int err;
- if (req && !mq->mqrq_prev->req)
+ if (req && !mq->mqrq_prev->req) {
/* claim host only for the first request */
mmc_get_card(card);
+ if (mmc_card_doing_bkops(host->card)) {
+ ret = mmc_stop_bkops(host->card);
+ if (ret)
+ goto out;
+ }
+ }
+
ret = mmc_blk_part_switch(card, md);
+
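+	/*
+	 * If the partition switch fails, attempt a reset so that later
+	 * requests may succeed; the current request is still failed
+	 * with -EIO below.
+	 */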
if (ret) {
+ err = mmc_blk_reset(md, card->host, MMC_BLK_PARTSWITCH);
+ if (!err) {
+ pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) succeeded.\n",
+ mmc_hostname(host));
+ mmc_blk_reset_success(md, MMC_BLK_PARTSWITCH);
+ } else
+ pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) failed.\n",
+ mmc_hostname(host));
+
if (req) {
blk_end_request_all(req, -EIO);
}
@@ -2433,7 +4046,9 @@
goto out;
}
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ mmc_blk_write_packing_control(mq, req);
+
+ clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
if (req && req_op(req) == REQ_OP_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
@@ -2443,8 +4058,12 @@
/* complete ongoing async transfer before issuing secure erase*/
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- ret = mmc_blk_issue_secdiscard_rq(mq, req);
- } else if (req && req_op(req) == REQ_OP_FLUSH) {
+ if (!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_issue_discard_rq(mq, req);
+ } else if ((req && req_op(req) == REQ_OP_FLUSH) ||
+ (cmd_flags & REQ_BARRIER)) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -2459,7 +4078,8 @@
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
+ if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
+ req_is_special)
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
@@ -2528,7 +4148,7 @@
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+ ret = mmc_init_queue(&md->queue, card, NULL, subname, area_type);
if (ret)
goto err_putdisk;
@@ -2584,7 +4204,16 @@
blk_queue_write_cache(md->queue.queue, true, true);
}
- if (mmc_card_mmc(card) &&
+ if (card->cmdq_init) {
+ md->flags |= MMC_BLK_CMD_QUEUE;
+ md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
+ md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
+ md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
+ md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out;
+ md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown;
+ }
+
+ if (mmc_card_mmc(card) && !card->cmdq_init &&
(area_type == MMC_BLK_DATA_AREA_MAIN) &&
(md->flags & MMC_BLK_CMD23) &&
card->ext_csd.packed_event_en) {
@@ -2697,6 +4326,10 @@
mmc_cleanup_queue(&md->queue);
if (md->flags & MMC_BLK_PACKED_CMD)
mmc_packed_clean(&md->queue);
+ if (md->flags & MMC_BLK_CMD_QUEUE)
+ mmc_cmdq_clean(&md->queue, card);
+ device_remove_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2784,8 +4417,37 @@
if (ret)
goto power_ro_lock_fail;
}
+
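+	/* Expose the write-packing tunables under the disk's sysfs node */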
+ md->num_wr_reqs_to_start_packing.show =
+ num_wr_reqs_to_start_packing_show;
+ md->num_wr_reqs_to_start_packing.store =
+ num_wr_reqs_to_start_packing_store;
+ sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+ md->num_wr_reqs_to_start_packing.attr.name =
+ "num_wr_reqs_to_start_packing";
+ md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
+ if (ret)
+ goto num_wr_reqs_to_start_packing_fail;
+
+ md->no_pack_for_random.show = no_pack_for_random_show;
+ md->no_pack_for_random.store = no_pack_for_random_store;
+ sysfs_attr_init(&md->no_pack_for_random.attr);
+ md->no_pack_for_random.attr.name = "no_pack_for_random";
+ md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->no_pack_for_random);
+ if (ret)
+ goto no_pack_for_random_fails;
+
return ret;
+no_pack_for_random_fails:
+ device_remove_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
+num_wr_reqs_to_start_packing_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
power_ro_lock_fail:
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
@@ -2833,6 +4495,8 @@
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY,
+ add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD),
/*
* Some MMC cards need longer data read timeout than indicated in CSD.
@@ -2843,6 +4507,20 @@
MMC_QUIRK_LONG_READ_TIME),
/*
+ * Some Samsung MMC cards need longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * Hynix eMMC cards need longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
* On these Samsung MoviNAND parts, performing secure erase or
* secure trim can result in unrecoverable corruption due to a
* firmware bug.
@@ -2873,6 +4551,10 @@
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /* Some INAND MCP devices advertise incorrect timeout values */
+ MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_INAND_DATA_TIMEOUT),
+
END_FIXUP
};
@@ -2912,7 +4594,8 @@
goto out;
}
- pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+ pm_runtime_use_autosuspend(&card->dev);
+ pm_runtime_set_autosuspend_delay(&card->dev, MMC_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(&card->dev);
/*
@@ -2948,23 +4631,40 @@
dev_set_drvdata(&card->dev, NULL);
}
-static int _mmc_blk_suspend(struct mmc_card *card)
+static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
{
struct mmc_blk_data *part_md;
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ int rc = 0;
if (md) {
- mmc_queue_suspend(&md->queue);
+ rc = mmc_queue_suspend(&md->queue, wait);
+ if (rc)
+ goto out;
list_for_each_entry(part_md, &md->part, part) {
- mmc_queue_suspend(&part_md->queue);
+ rc = mmc_queue_suspend(&part_md->queue, wait);
+ if (rc)
+ goto out_resume;
}
}
- return 0;
+ goto out;
+
+ out_resume:
+ mmc_queue_resume(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_resume(&part_md->queue);
+ }
+ out:
+ return rc;
}
static void mmc_blk_shutdown(struct mmc_card *card)
{
- _mmc_blk_suspend(card);
+ _mmc_blk_suspend(card, 1);
+
+ /* send power off notification */
+ if (mmc_card_mmc(card))
+ mmc_send_pon(card);
}
#ifdef CONFIG_PM_SLEEP
@@ -2972,7 +4672,7 @@
{
struct mmc_card *card = mmc_dev_to_card(dev);
- return _mmc_blk_suspend(card);
+ return _mmc_blk_suspend(card, 0);
}
static int mmc_blk_resume(struct device *dev)
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
new file mode 100644
index 0000000..967affa
--- /dev/null
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -0,0 +1,2038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* MMC block test */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/delay.h>
+#include <linux/test-iosched.h>
+#include "queue.h"
+
+#define MODULE_NAME "mmc_block_test"
+#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
+#define TEST_MAX_BIOS_PER_REQ 120
+#define CMD23_PACKED_BIT (1 << 30)
+#define LARGE_PRIME_1 1103515367
+#define LARGE_PRIME_2 35757
+#define PACKED_HDR_VER_MASK 0x000000FF
+#define PACKED_HDR_RW_MASK 0x0000FF00
+#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
+#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+
+enum is_random {
+ NON_RANDOM_TEST,
+ RANDOM_TEST,
+};
+
+enum mmc_block_test_testcases {
+ /* Start of send write packing test group */
+ SEND_WRITE_PACKING_MIN_TESTCASE,
+ TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
+ TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
+ TEST_STOP_DUE_TO_FLUSH,
+ TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
+ TEST_STOP_DUE_TO_EMPTY_QUEUE,
+ TEST_STOP_DUE_TO_MAX_REQ_NUM,
+ TEST_STOP_DUE_TO_THRESHOLD,
+ SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,
+
+ /* Start of err check test group */
+ ERR_CHECK_MIN_TESTCASE,
+ TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
+ TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
+ TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
+ TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
+ TEST_RET_PARTIAL_MAX_FAIL_IDX,
+ TEST_RET_RETRY,
+ TEST_RET_CMD_ERR,
+ TEST_RET_DATA_ERR,
+ ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,
+
+ /* Start of send invalid test group */
+ INVALID_CMD_MIN_TESTCASE,
+ TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
+ TEST_HDR_WRONG_WRITE_CODE,
+ TEST_HDR_INVALID_RW_CODE,
+ TEST_HDR_DIFFERENT_ADDRESSES,
+ TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
+ TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
+ TEST_HDR_CMD23_PACKED_BIT_SET,
+ TEST_CMD23_MAX_PACKED_WRITES,
+ TEST_CMD23_ZERO_PACKED_WRITES,
+ TEST_CMD23_PACKED_BIT_UNSET,
+ TEST_CMD23_REL_WR_BIT_SET,
+ TEST_CMD23_BITS_16TO29_SET,
+ TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+ INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+
+ /*
+ * Start of packing control test group.
+ * in these next testcases the abbreviation FB = followed by
+ */
+ PACKING_CONTROL_MIN_TESTCASE,
+ TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
+ PACKING_CONTROL_MIN_TESTCASE,
+ TEST_PACKING_EXP_N_OVER_TRIGGER,
+ TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
+ TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
+ TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
+ TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
+ TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
+ TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
+ TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
+ TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+ PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+};
+
+enum mmc_block_test_group {
+ TEST_NO_GROUP,
+ TEST_GENERAL_GROUP,
+ TEST_SEND_WRITE_PACKING_GROUP,
+ TEST_ERR_CHECK_GROUP,
+ TEST_SEND_INVALID_GROUP,
+ TEST_PACKING_CONTROL_GROUP,
+};
+
+struct mmc_block_test_debug {
+ struct dentry *send_write_packing_test;
+ struct dentry *err_check_test;
+ struct dentry *send_invalid_packed_test;
+ struct dentry *random_test_seed;
+ struct dentry *packing_control_test;
+};
+
+struct mmc_block_test_data {
+ /* The number of write requests that the test will issue */
+ int num_requests;
+ /* The expected write packing statistics for the current test */
+ struct mmc_wr_pack_stats exp_packed_stats;
+ /*
+ * A user-defined seed for random choices of number of bios written in
+ * a request, and of number of requests issued in a test
+ * This field is randomly updated after each use
+ */
+ unsigned int random_test_seed;
+ /* A retry counter used in err_check tests */
+ int err_check_counter;
+ /* Can be one of the values of enum test_group */
+ enum mmc_block_test_group test_group;
+ /*
+ * Indicates if the current testcase is running with random values of
+ * num_requests and num_bios (in each request)
+ */
+ int is_random;
+ /* Data structure for debugfs dentrys */
+ struct mmc_block_test_debug debug;
+ /*
+ * Data structure containing individual test information, including
+ * self-defined specific data
+ */
+ struct test_info test_info;
+ /* mmc block device test */
+ struct blk_dev_test_type bdt;
+};
+
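+/* Module-wide test state shared by all testcases */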
+static struct mmc_block_test_data *mbtd;
+
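+/*
+ * Dump the card's write packing statistics under the stats lock: how
+ * many times each packing depth was used, and why packing stopped.
+ */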
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+ int i;
+ int max_num_of_packed_reqs = 0;
+
+ if ((!card) || (!card->wr_pack_stats.packing_events))
+ return;
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ spin_lock(&card->wr_pack_stats.lock);
+
+ pr_info("%s: write packing statistics:\n",
+ mmc_hostname(card->host));
+
+	for (i = 1; i <= max_num_of_packed_reqs; ++i) {
+ if (card->wr_pack_stats.packing_events[i] != 0)
+ pr_info("%s: Packed %d reqs - %d times\n",
+ mmc_hostname(card->host), i,
+ card->wr_pack_stats.packing_events[i]);
+ }
+
+ pr_info("%s: stopped packing due to the following reasons:\n",
+ mmc_hostname(card->host));
+
+ if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+		pr_info("%s: %d times: exceeding max num of segments\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+ if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+ pr_info("%s: %d times: exceeding the max num of sectors\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+ if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+ pr_info("%s: %d times: wrong data direction\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+ if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+ pr_info("%s: %d times: flush or discard\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+ if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+ pr_info("%s: %d times: empty queue\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+ if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
+ pr_info("%s: %d times: rel write\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
+ if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
+ pr_info("%s: %d times: Threshold\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+ spin_unlock(&card->wr_pack_stats.lock);
+}
+
+/*
+ * A callback assigned to the packed_test_fn field.
+ * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
+ * Here we alter the packed header or CMD23 in order to send an invalid
+ * packed command to the card.
+ */
+static void test_invalid_packed_cmd(struct request_queue *q,
+ struct mmc_queue_req *mqrq)
+{
+ struct mmc_queue *mq = q->queuedata;
+ u32 *packed_cmd_hdr = mqrq->packed->cmd_hdr;
+ struct request *req = mqrq->req;
+ struct request *second_rq;
+ struct test_request *test_rq;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ int num_requests;
+ int max_packed_reqs;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return;
+ }
+
+ test_rq = (struct test_request *)req->elv.priv[0];
+ if (!test_rq) {
+ test_pr_err("%s: NULL test_rq", __func__);
+ return;
+ }
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ switch (mbtd->test_info.testcase) {
+ case TEST_HDR_INVALID_VERSION:
+ test_pr_info("%s: set invalid header version", __func__);
+ /* Put 0 in header version field (1 byte, offset 0 in header) */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
+ break;
+ case TEST_HDR_WRONG_WRITE_CODE:
+ test_pr_info("%s: wrong write code", __func__);
+ /* Set R/W field with R value (1 byte, offset 1 in header) */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
+ break;
+ case TEST_HDR_INVALID_RW_CODE:
+ test_pr_info("%s: invalid r/w code", __func__);
+ /* Set R/W field with invalid value */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
+ break;
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ test_pr_info("%s: different addresses", __func__);
+ second_rq = list_entry(req->queuelist.next, struct request,
+ queuelist);
+ test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
+ __func__, (long)req->__sector,
+ (long)second_rq->__sector);
+ /*
+ * Put start sector of second write request in the first write
+ * request's cmd25 argument in the packed header
+ */
+ packed_cmd_hdr[3] = second_rq->__sector;
+ break;
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+		test_pr_info("%s: request num smaller than actual", __func__);
+ num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+ >> 16;
+ /* num of entries is decremented by 1 */
+ num_requests = (num_requests - 1) << 16;
+ /*
+ * Set number of requests field in packed write header to be
+ * smaller than the actual number (1 byte, offset 2 in header)
+ */
+ packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+ ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+ break;
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+		test_pr_info("%s: request num larger than actual", __func__);
+ num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+ >> 16;
+ /* num of entries is incremented by 1 */
+ num_requests = (num_requests + 1) << 16;
+ /*
+ * Set number of requests field in packed write header to be
+ * larger than the actual number (1 byte, offset 2 in header).
+ */
+ packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+ ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+ break;
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+		test_pr_info("%s: header CMD23 packed bit set", __func__);
+ /*
+ * Set packed bit (bit 30) in cmd23 argument of first and second
+ * write requests in packed write header.
+ * These are located at bytes 2 and 4 in packed write header
+ */
+ packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
+ packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
+ break;
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ test_pr_info("%s: CMD23 request num > max_packed_reqs",
+ __func__);
+ /*
+ * Set the individual packed cmd23 request num to
+ * max_packed_reqs + 1
+ */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
+ break;
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ test_pr_info("%s: CMD23 request num = 0", __func__);
+ /* Set the individual packed cmd23 request num to zero */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED;
+ break;
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ test_pr_info("%s: CMD23 packed bit unset", __func__);
+ /*
+ * Set the individual packed cmd23 packed bit to 0,
+ * although there is a packed write request
+ */
+ brq->sbc.arg &= ~CMD23_PACKED_BIT;
+ break;
+ case TEST_CMD23_REL_WR_BIT_SET:
+ test_pr_info("%s: CMD23 REL WR bit set", __func__);
+ /* Set the individual packed cmd23 reliable write bit */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
+ break;
+ case TEST_CMD23_BITS_16TO29_SET:
+ test_pr_info("%s: CMD23 bits [16-29] set", __func__);
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ PACKED_HDR_BITS_16_TO_29_SET;
+ break;
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ test_pr_info("%s: CMD23 hdr not in block count", __func__);
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ ((rq_data_dir(req) == READ) ? 0 : mqrq->packed->blocks);
+ break;
+ default:
+ test_pr_err("%s: unexpected testcase %d",
+ __func__, mbtd->test_info.testcase);
+ break;
+ }
+}
+
+/*
+ * A callback assigned to the err_check_fn field of the mmc_request by the
+ * MMC/card/block layer.
+ * Called upon request completion by the MMC/core layer.
+ * Here we emulate an error return value from the card.
+ */
+static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request_queue *req_q = test_iosched_get_req_queue();
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ int ret = 0;
+
+ if (req_q)
+ mq = req_q->queuedata;
+ else {
+ test_pr_err("%s: NULL request_queue", __func__);
+ return 0;
+ }
+
+ if (!mq) {
+ test_pr_err("%s: %s: NULL mq", __func__,
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ if (!mq_rq) {
+ test_pr_err("%s: %s: NULL mq_rq", __func__,
+ mmc_hostname(card->host));
+ return 0;
+ }
+
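+	/* Emulate the card-reported completion status for this testcase */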
+ switch (mbtd->test_info.testcase) {
+ case TEST_RET_ABORT:
+ test_pr_info("%s: return abort", __func__);
+ ret = MMC_BLK_ABORT;
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ test_pr_info("%s: return partial followed by success",
+ __func__);
+ /*
+ * Since in this testcase num_requests is always >= 2,
+ * we can be sure that packed_fail_idx is always >= 1
+ */
+ mq_rq->packed->idx_failure = (mbtd->num_requests / 2);
+		test_pr_info("%s: packed_fail_idx = %d",
+			__func__, mq_rq->packed->idx_failure);
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ if (!mbtd->err_check_counter) {
+ test_pr_info("%s: return partial followed by abort",
+ __func__);
+ mbtd->err_check_counter++;
+ /*
+ * Since in this testcase num_requests is always >= 3,
+			 * packed_fail_idx is always >= 1.
+ */
+ mq_rq->packed->idx_failure = (mbtd->num_requests / 2);
+			test_pr_info("%s: packed_fail_idx = %d",
+				__func__, mq_rq->packed->idx_failure);
+ ret = MMC_BLK_PARTIAL;
+ break;
+ }
+ mbtd->err_check_counter = 0;
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_ABORT;
+ break;
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ test_pr_info("%s: return partial multiple until success",
+ __func__);
+ if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
+ mq->err_check_fn = NULL;
+ mbtd->err_check_counter = 0;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ }
+ mq_rq->packed->idx_failure = 1;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ test_pr_info("%s: return partial max fail_idx", __func__);
+ mq_rq->packed->idx_failure = max_packed_reqs - 1;
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_RETRY:
+ test_pr_info("%s: return retry", __func__);
+ ret = MMC_BLK_RETRY;
+ break;
+ case TEST_RET_CMD_ERR:
+ test_pr_info("%s: return cmd err", __func__);
+ ret = MMC_BLK_CMD_ERR;
+ break;
+ case TEST_RET_DATA_ERR:
+ test_pr_info("%s: return data err", __func__);
+ ret = MMC_BLK_DATA_ERR;
+ break;
+ default:
+ test_pr_err("%s: unexpected testcase %d",
+ __func__, mbtd->test_info.testcase);
+ }
+
+ return ret;
+}
+
+/*
+ * This is a specific implementation for the get_test_case_str_fn function
+ * pointer in the test_info data structure. Given a valid test_data instance,
+ * the function returns a string describing the test, based on the testcase.
+ */
+static char *get_test_case_str(struct test_data *td)
+{
+ if (!td) {
+ test_pr_err("%s: NULL td", __func__);
+ return NULL;
+ }
+
+ switch (td->test_info.testcase) {
+	case TEST_STOP_DUE_TO_FLUSH:
+		return "Test stop due to flush";
+	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+		return "Test stop due to flush after max-1 reqs";
+	case TEST_STOP_DUE_TO_READ:
+		return "Test stop due to read";
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ return "Test stop due to read after max-1 reqs";
+ case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+ return "Test stop due to empty queue";
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ return "Test stop due to max req num";
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ return "Test stop due to exceeding threshold";
+ case TEST_RET_ABORT:
+ return "Test err_check return abort";
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ return "Test err_check return partial followed by success";
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ return "Test err_check return partial followed by abort";
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ return "Test err_check return partial multiple until success";
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ return "Test err_check return partial max fail index";
+ case TEST_RET_RETRY:
+ return "Test err_check return retry";
+ case TEST_RET_CMD_ERR:
+ return "Test err_check return cmd error";
+ case TEST_RET_DATA_ERR:
+ return "Test err_check return data error";
+ case TEST_HDR_INVALID_VERSION:
+ return "Test invalid - wrong header version";
+ case TEST_HDR_WRONG_WRITE_CODE:
+ return "Test invalid - wrong write code";
+ case TEST_HDR_INVALID_RW_CODE:
+ return "Test invalid - wrong R/W code";
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ return "Test invalid - header different addresses";
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ return "Test invalid - header req num smaller than actual";
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ return "Test invalid - header req num larger than actual";
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ return "Test invalid - header cmd23 packed bit set";
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ return "Test invalid - cmd23 max packed writes";
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ return "Test invalid - cmd23 zero packed writes";
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ return "Test invalid - cmd23 packed bit unset";
+ case TEST_CMD23_REL_WR_BIT_SET:
+ return "Test invalid - cmd23 rel wr bit set";
+ case TEST_CMD23_BITS_16TO29_SET:
+ return "Test invalid - cmd23 bits [16-29] set";
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ return "Test invalid - cmd23 header block not in count";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER:
+ return "\nTest packing control - pack n";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ return "\nTest packing control - pack n followed by read";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ return "\nTest packing control - pack n followed by flush";
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ return "\nTest packing control - pack one followed by read";
+ case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+ return "\nTest packing control - pack threshold";
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ return "\nTest packing control - no packing";
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ return "\nTest packing control - no packing, trigger requests";
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ return "\nTest packing control - no pack, trigger-read-trigger";
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ return "\nTest packing control- no pack, trigger-flush-trigger";
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ return "\nTest packing control - mix: pack -> no pack -> pack";
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ return "\nTest packing control - mix: no pack->pack->no pack";
+ default:
+ return "Unknown testcase";
+ }
+
+ return NULL;
+}
+
+/*
+ * Compare individual testcase's statistics to the expected statistics:
+ * Compare stop reason and number of packing events
+ */
+static int check_wr_packing_statistics(struct test_data *td)
+{
+	struct mmc_wr_pack_stats *mmc_packed_stats;
+	struct mmc_queue *mq = td->req_q->queuedata;
+	int max_packed_reqs;
+	int i;
+	struct mmc_card *card;
+	struct mmc_wr_pack_stats expected_stats;
+	int *stop_reason;
+	int ret = 0;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	card = mq->card;
+
+ expected_stats = mbtd->exp_packed_stats;
+
+ mmc_packed_stats = mmc_blk_get_packed_statistics(card);
+ if (!mmc_packed_stats) {
+ test_pr_err("%s: NULL mmc_packed_stats", __func__);
+ return -EINVAL;
+ }
+
+ if (!mmc_packed_stats->packing_events) {
+ test_pr_err("%s: NULL packing_events", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&mmc_packed_stats->lock);
+
+ if (!mmc_packed_stats->enabled) {
+		test_pr_err("%s: write packing statistics are not enabled",
+ __func__);
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ stop_reason = mmc_packed_stats->pack_stop_reason;
+
+ for (i = 1; i <= max_packed_reqs; ++i) {
+ if (mmc_packed_stats->packing_events[i] !=
+ expected_stats.packing_events[i]) {
+ test_pr_err(
+ "%s: Wrong pack stats in index %d, got %d, expected %d",
+ __func__, i, mmc_packed_stats->packing_events[i],
+ expected_stats.packing_events[i]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
+ expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
+ __func__, stop_reason[EXCEEDS_SEGMENTS],
+ expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
+ expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
+ __func__, stop_reason[EXCEEDS_SECTORS],
+ expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
+ expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
+ __func__, stop_reason[WRONG_DATA_DIR],
+ expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
+ expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
+ __func__, stop_reason[FLUSH_OR_DISCARD],
+ expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
+ expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
+ __func__, stop_reason[EMPTY_QUEUE],
+ expected_stats.pack_stop_reason[EMPTY_QUEUE]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
+ expected_stats.pack_stop_reason[REL_WRITE]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
+ __func__, stop_reason[REL_WRITE],
+ expected_stats.pack_stop_reason[REL_WRITE]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+exit_err:
+ spin_unlock(&mmc_packed_stats->lock);
+ if (ret && mmc_packed_stats->enabled)
+ print_mmc_packing_stats(card);
+ return ret;
+cancel_round:
+ spin_unlock(&mmc_packed_stats->lock);
+ test_iosched_set_ignore_round(true);
+ return 0;
+}
+
+/*
+ * Pseudo-randomly choose a seed based on the last seed, and update it in
+ * seed_number. Then return seed_number modulo max_val, at least min_val.
+ */
+static unsigned int pseudo_random_seed(unsigned int *seed_number,
+ unsigned int min_val,
+ unsigned int max_val)
+{
+ int ret = 0;
+
+ if (!seed_number)
+ return 0;
+
+ *seed_number = ((unsigned int)(((unsigned long)*seed_number *
+ (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
+ ret = (unsigned int)((*seed_number) % max_val);
+
+ return (ret > min_val ? ret : min_val);
+}
+
+/*
+ * Given a pseudo-random seed, find a pseudo-random num_of_bios.
+ * Make sure the resulting transfer does not exceed TEST_MAX_SECTOR_RANGE.
+ */
+static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
+ unsigned int *num_of_bios)
+{
+ do {
+ *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
+ TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = 1;
+ } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
+}
+
+/* Add a single read request to the given td's request queue */
+static int prepare_request_add_read(struct test_data *td)
+{
+ int ret;
+ int start_sec;
+
+ if (td)
+ start_sec = td->start_sector;
+ else {
+ test_pr_err("%s: NULL td", __func__);
+ return 0;
+ }
+
+ test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
+ td->wr_rd_next_req_id);
+
+ ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
+ TEST_PATTERN_5A, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a read request", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Add a single flush request to the given td's request queue */
+static int prepare_request_add_flush(struct test_data *td)
+{
+ int ret;
+
+ if (!td) {
+ test_pr_err("%s: NULL td", __func__);
+ return 0;
+ }
+
+ test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
+ td->unique_next_req_id);
+ ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
+ 0, 0, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a flush request", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Add num_requests write requests to the given td's request queue.
+ * If random test mode is chosen we pseudo-randomly choose the number of bios
+ * for each write request; otherwise add between 1 and 5 bios per request.
+ */
+static int prepare_request_add_write_reqs(struct test_data *td,
+ int num_requests, int is_err_expected,
+ int is_random)
+{
+ int i;
+ unsigned int start_sec;
+ int num_bios;
+ int ret = 0;
+ unsigned int *bio_seed = &mbtd->random_test_seed;
+
+ if (td)
+ start_sec = td->start_sector;
+ else {
+ test_pr_err("%s: NULL td", __func__);
+ return ret;
+ }
+
+ test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+ num_requests, td->wr_rd_next_req_id);
+
+ for (i = 1; i <= num_requests; i++) {
+ start_sec = td->start_sector + 4096 * td->num_of_write_bios;
+ if (is_random)
+ pseudo_rnd_num_of_bios(bio_seed, &num_bios);
+ else
+ /*
+ * For the non-random case, give num_bios a value
+ * between 1 and 5, to keep a small number of BIOs
+ */
+			num_bios = (i % 5) + 1;
+
+ ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+ start_sec, num_bios, TEST_PATTERN_5A, NULL);
+
+ if (ret) {
+ test_pr_err("%s: failed to add a write request",
+ __func__);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for a generic packed commands
+ * testcase
+ */
+static int prepare_packed_requests(struct test_data *td, int is_err_expected,
+ int num_requests, int is_random)
+{
+ int ret = 0;
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ struct request_queue *req_q;
+
+ if (!td) {
+ pr_err("%s: NULL td", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ pr_err("%s: NULL request queue", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+	if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+ is_random);
+ if (ret)
+ return ret;
+
+ /* Avoid memory corruption in upcoming stats set */
+ if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
+ num_requests--;
+
+ memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+ if (num_requests <= max_packed_reqs)
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+
+ switch (td->test_info.testcase) {
+ case TEST_STOP_DUE_TO_FLUSH:
+ case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ return ret;
+
+ mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+ break;
+ case TEST_STOP_DUE_TO_READ:
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ return ret;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ break;
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.packing_events[1] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ break;
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+ break;
+ default:
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ }
+ mbtd->num_requests = num_requests;
+
+ return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for the packing control
+ * testcases
+ */
+static int prepare_packed_control_tests_requests(struct test_data *td,
+ int is_err_expected, int num_requests, int is_random)
+{
+ int ret = 0;
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ int temp_num_req = num_requests;
+ struct request_queue *req_q;
+ int test_packed_trigger;
+ int num_packed_reqs;
+
+ if (!td) {
+ test_pr_err("%s: NULL td\n", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ test_pr_err("%s: NULL request queue\n", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+ num_packed_reqs = num_requests - test_packed_trigger;
+
+ if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ if (td->test_info.testcase ==
+ TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
+ temp_num_req = num_requests;
+ num_requests = test_packed_trigger - 1;
+ }
+
+ /* Verify that the packing is disabled before starting the test */
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+
+ if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+ mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
+ mq->wr_packing_enabled = true;
+ num_requests = test_packed_trigger + 2;
+ }
+
+ ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+ is_random);
+ if (ret)
+ goto exit;
+
+ if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
+ num_requests = temp_num_req;
+
+ memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+
+ switch (td->test_info.testcase) {
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_packed_reqs,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_requests,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ break;
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_requests,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ default:
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ }
+ mbtd->num_requests = num_requests;
+
+exit:
+ return ret;
+}
+
+/*
+ * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
+ * In this testcase we have mixed error expectations from different
+ * write requests, hence the special prepare function.
+ */
+static int prepare_partial_followed_by_abort(struct test_data *td,
+ int num_requests)
+{
+ int i, start_address;
+ int is_err_expected = 0;
+ int ret = 0;
+ struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+ int max_packed_reqs;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ for (i = 1; i <= num_requests; i++) {
+ if (i > (num_requests / 2))
+ is_err_expected = 1;
+
+ start_address = td->start_sector + 4096 * td->num_of_write_bios;
+ ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+ start_address, (i % 5) + 1, TEST_PATTERN_5A,
+ NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a write request",
+ __func__);
+ return ret;
+ }
+ }
+
+ memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+
+ mbtd->num_requests = num_requests;
+
+ return ret;
+}
+
+/*
+ * Get number of write requests for current testcase. If random test mode was
+ * chosen, pseudo-randomly choose the number of requests; otherwise set it to
+ * two less than the maximum number of packed writes.
+ */
+static int get_num_requests(struct test_data *td)
+{
+	unsigned int *seed = &mbtd->random_test_seed;
+ struct request_queue *req_q;
+ struct mmc_queue *mq;
+ int max_num_requests;
+ int num_requests;
+ int min_num_requests = 2;
+ int is_random = mbtd->is_random;
+ int max_for_double;
+ int test_packed_trigger;
+
+ req_q = test_iosched_get_req_queue();
+ if (req_q)
+ mq = req_q->queuedata;
+ else {
+ test_pr_err("%s: NULL request queue", __func__);
+ return 0;
+ }
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+ num_requests = max_num_requests - 2;
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+
+ /*
+ * Here max_for_double is intended for packed control testcases
+	 * in which we issue many write requests. Its purpose is to prevent
+	 * exceeding the max number of req_queue requests.
+ */
+ max_for_double = max_num_requests - 10;
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ /* Don't expect packing, so issue up to trigger-1 reqs */
+ num_requests = test_packed_trigger - 1;
+
+ if (is_random) {
+ if (td->test_info.testcase ==
+ TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
+ /*
+ * Here we don't want num_requests to be less than 1
+ * as a consequence of division by 2.
+ */
+ min_num_requests = 3;
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ /* Don't expect packing, so issue up to trigger reqs */
+ max_num_requests = test_packed_trigger;
+
+ num_requests = pseudo_random_seed(seed, min_num_requests,
+ max_num_requests - 1);
+ }
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ num_requests -= test_packed_trigger;
+
+ if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
+ num_requests =
+ num_requests > max_for_double ? max_for_double : num_requests;
+
+ if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
+ num_requests += test_packed_trigger;
+
+ if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
+ num_requests = test_packed_trigger;
+
+ return num_requests;
+}
+
+/*
+ * An implementation for the prepare_test_fn pointer in the test_info
+ * data structure. According to the testcase we add the right number of requests
+ * and decide if an error is expected or not.
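+ * For example, TEST_STOP_DUE_TO_THRESHOLD issues max_num_requests + 1 write
+ * requests so that the packing threshold is crossed, while the TEST_RET_* and
+ * TEST_HDR_* cases issue num_requests writes with an error expected.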
+ */
+static int prepare_test(struct test_data *td)
+{
+	struct request_queue *req_q = test_iosched_get_req_queue();
+	struct mmc_queue *mq;
+	int max_num_requests;
+	int num_requests = 0;
+	int ret = 0;
+	int is_random = mbtd->is_random;
+	int test_packed_trigger;
+
+	if (!req_q || !req_q->queuedata) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+
+ if (is_random && mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ num_requests = get_num_requests(td);
+
+ if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
+ mq->packed_test_fn =
+ test_invalid_packed_cmd;
+
+ if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
+ mq->err_check_fn = test_err_check;
+
+ switch (td->test_info.testcase) {
+ case TEST_STOP_DUE_TO_FLUSH:
+ case TEST_STOP_DUE_TO_READ:
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ ret = prepare_packed_requests(td, 0, num_requests, is_random);
+ break;
+ case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ ret = prepare_packed_requests(td, 0, max_num_requests - 1,
+ is_random);
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ ret = prepare_partial_followed_by_abort(td, num_requests);
+ break;
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ ret = prepare_packed_requests(td, 0, max_num_requests,
+ is_random);
+ break;
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ ret = prepare_packed_requests(td, 0, max_num_requests + 1,
+ is_random);
+ break;
+ case TEST_RET_ABORT:
+ case TEST_RET_RETRY:
+ case TEST_RET_CMD_ERR:
+ case TEST_RET_DATA_ERR:
+ case TEST_HDR_INVALID_VERSION:
+ case TEST_HDR_WRONG_WRITE_CODE:
+ case TEST_HDR_INVALID_RW_CODE:
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ case TEST_CMD23_REL_WR_BIT_SET:
+ case TEST_CMD23_BITS_16TO29_SET:
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ ret = prepare_packed_requests(td, 1, num_requests, is_random);
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER:
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+ is_random);
+ break;
+ case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ max_num_requests, is_random);
+ break;
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ test_packed_trigger + 1,
+ is_random);
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+ is_random);
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ test_packed_trigger, is_random);
+ break;
+ default:
+		test_pr_err("%s: Invalid test case...", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int run_packed_test(struct test_data *td)
+{
+ struct mmc_queue *mq;
+ struct request_queue *req_q;
+
+ if (!td) {
+ pr_err("%s: NULL td", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ pr_err("%s: NULL request queue", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+ mmc_blk_init_packed_statistics(mq->card);
+
+ if (td->test_info.testcase != TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+		/*
+		 * Make sure packing is disabled before starting the
+		 * test
+		 */
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+ }
+
+	blk_run_queue(td->req_q);
+
+ return 0;
+}
+
+/*
+ * An implementation for the post_test_fn in the test_info data structure.
+ * In our case we just reset the function pointers in the mmc_queue so that
+ * the FS is able to dispatch its requests correctly after the test is
+ * finished.
+ */
+static int post_test(struct test_data *td)
+{
+ struct mmc_queue *mq;
+
+ if (!td)
+ return -EINVAL;
+
+ mq = td->req_q->queuedata;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ mq->packed_test_fn = NULL;
+ mq->err_check_fn = NULL;
+
+ return 0;
+}
+
+/*
+ * This function checks, based on the current test's test_group, that the
+ * packed commands capability and control are set right. In addition, we check
+ * if the card supports the packed command feature.
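+ * For example, TEST_PACKING_CONTROL_GROUP requires MMC_CAP2_PACKED_WR_CONTROL
+ * to be set in host->caps2, whereas the other test groups clear it.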
+ */
+static int validate_packed_commands_settings(void)
+{
+ struct request_queue *req_q;
+ struct mmc_queue *mq;
+ int max_num_requests;
+ struct mmc_host *host;
+
+ req_q = test_iosched_get_req_queue();
+ if (!req_q) {
+ test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
+ test_iosched_set_test_result(TEST_FAILED);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+ host = mq->card->host;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR)) {
+ test_pr_err("%s: Packed Write capability disabled, exit test",
+ __func__);
+ test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+ return -EINVAL;
+ }
+
+ if (max_num_requests == 0) {
+ test_pr_err(
+ "%s: no write packing support, ext_csd.max_packed_writes=%d",
+ __func__, mq->card->ext_csd.max_packed_writes);
+ test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+ return -EINVAL;
+ }
+
+ test_pr_info("%s: max number of packed requests supported is %d ",
+ __func__, max_num_requests);
+
+ switch (mbtd->test_group) {
+ case TEST_SEND_WRITE_PACKING_GROUP:
+ case TEST_ERR_CHECK_GROUP:
+ case TEST_SEND_INVALID_GROUP:
+ /* disable the packing control */
+ host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
+ break;
+ case TEST_PACKING_CONTROL_GROUP:
+ host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool message_repeat;
+static int test_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ message_repeat = 1;
+ return 0;
+}
+
+/* send_write_packing TEST */
+static ssize_t send_write_packing_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+
+ test_pr_info("%s: -- send_write_packing TEST --", __func__);
+
+	if (kstrtoint_from_user(buf, count, 10, &number))
+		number = -1;
+
+	if (number <= 0)
+		number = 1;
+
+ mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ if (mbtd->random_test_seed > 0)
+ test_pr_info("%s: Test seed: %d", __func__,
+ mbtd->random_test_seed);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.post_test_fn = post_test;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
+ j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
+
+static ssize_t send_write_packing_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nsend_write_packing_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Pack due to FLUSH message\n"
+ "- Pack due to FLUSH after threshold writes\n"
+ "- Pack due to READ message\n"
+ "- Pack due to READ after threshold writes\n"
+ "- Pack due to empty queue\n"
+ "- Pack due to threshold writes\n"
+ "- Pack due to one over threshold writes\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations send_write_packing_test_ops = {
+ .open = test_open,
+ .write = send_write_packing_test_write,
+ .read = send_write_packing_test_read,
+};
+
+/* err_check TEST */
+static ssize_t err_check_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+
+ test_pr_info("%s: -- err_check TEST --", __func__);
+
+	if (kstrtoint_from_user(buf, count, 10, &number))
+		number = -1;
+
+ if (number <= 0)
+ number = 1;
+
+ mbtd->test_group = TEST_ERR_CHECK_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ if (mbtd->random_test_seed > 0)
+ test_pr_info("%s: Test seed: %d", __func__,
+ mbtd->random_test_seed);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.post_test_fn = post_test;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = ERR_CHECK_MIN_TESTCASE;
+ j <= ERR_CHECK_MAX_TESTCASE ; j++) {
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
+
+static ssize_t err_check_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nerr_check_TEST\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Return ABORT\n"
+ "- Return PARTIAL followed by success\n"
+ "- Return PARTIAL followed by abort\n"
+ "- Return PARTIAL multiple times until success\n"
+ "- Return PARTIAL with fail index = threshold\n"
+ "- Return RETRY\n"
+ "- Return CMD_ERR\n"
+ "- Return DATA_ERR\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations err_check_test_ops = {
+ .open = test_open,
+ .write = err_check_test_write,
+ .read = err_check_test_read,
+};
+
+/* send_invalid_packed TEST */
+static ssize_t send_invalid_packed_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+ int num_of_failures = 0;
+
+ test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
+
+	if (kstrtoint_from_user(buf, count, 10, &number))
+		number = -1;
+
+ if (number <= 0)
+ number = 1;
+
+ mbtd->test_group = TEST_SEND_INVALID_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ if (mbtd->random_test_seed > 0)
+ test_pr_info("%s: Test seed: %d", __func__,
+ mbtd->random_test_seed);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.post_test_fn = post_test;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = INVALID_CMD_MIN_TESTCASE;
+ j <= INVALID_CMD_MAX_TESTCASE ; j++) {
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ num_of_failures++;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ num_of_failures++;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ if (num_of_failures > 0) {
+ test_iosched_set_test_result(TEST_FAILED);
+ test_pr_err(
+ "There were %d failures during the test, TEST FAILED",
+ num_of_failures);
+ }
+ return count;
+}
+
+static ssize_t send_invalid_packed_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nsend_invalid_packed_TEST\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Send an invalid header version\n"
+ "- Send the wrong write code\n"
+ "- Send an invalid R/W code\n"
+ "- Send wrong start address in header\n"
+ "- Send header with block_count smaller than actual\n"
+ "- Send header with block_count larger than actual\n"
+ "- Send header CMD23 packed bit set\n"
+ "- Send CMD23 with block count over threshold\n"
+ "- Send CMD23 with block_count equals zero\n"
+ "- Send CMD23 packed bit unset\n"
+ "- Send CMD23 reliable write bit set\n"
+ "- Send CMD23 bits [16-29] set\n"
+ "- Send CMD23 header block not in block_count\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations send_invalid_packed_test_ops = {
+ .open = test_open,
+ .write = send_invalid_packed_test_write,
+ .read = send_invalid_packed_test_read,
+};
+
+/* packing_control TEST */
+static ssize_t write_packing_control_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+	struct request_queue *req_q = test_iosched_get_req_queue();
+	struct mmc_queue *mq;
+	int max_num_requests;
+	int test_successful = 1;
+
+	test_pr_info("%s: -- write_packing_control TEST --", __func__);
+
+	if (!req_q || !req_q->queuedata) {
+		test_pr_err("%s: NULL mq", __func__);
+		return count;
+	}
+	mq = req_q->queuedata;
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+
+	if (kstrtoint_from_user(buf, count, 10, &number))
+		number = -1;
+
+	if (number <= 0)
+		number = 1;
+
+	test_pr_info("%s: max_num_requests = %d ", __func__,
+			max_num_requests);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+ mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = PACKING_CONTROL_MIN_TESTCASE;
+ j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
+
+ test_successful = 1;
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_successful = 0;
+ break;
+ }
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_successful = 0;
+ break;
+ }
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+
+ if (!test_successful)
+ break;
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
+
+static ssize_t write_packing_control_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nwrite_packing_control_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Packing expected - one over trigger\n"
+ "- Packing expected - N over trigger\n"
+ "- Packing expected - N over trigger followed by read\n"
+ "- Packing expected - N over trigger followed by flush\n"
+ "- Packing expected - threshold over trigger FB by flush\n"
+ "- Packing not expected - less than trigger\n"
+ "- Packing not expected - trigger requests\n"
+ "- Packing not expected - trigger, read, trigger\n"
+ "- Mixed state - packing -> no packing -> packing\n"
+ "- Mixed state - no packing -> packing -> no packing\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations write_packing_control_test_ops = {
+ .open = test_open,
+ .write = write_packing_control_test_write,
+ .read = write_packing_control_test_read,
+};
+
+static void mmc_block_test_debugfs_cleanup(void)
+{
+ debugfs_remove(mbtd->debug.random_test_seed);
+ debugfs_remove(mbtd->debug.send_write_packing_test);
+ debugfs_remove(mbtd->debug.err_check_test);
+ debugfs_remove(mbtd->debug.send_invalid_packed_test);
+ debugfs_remove(mbtd->debug.packing_control_test);
+}
+
+static int mmc_block_test_debugfs_init(void)
+{
+ struct dentry *utils_root, *tests_root;
+
+ utils_root = test_iosched_get_debugfs_utils_root();
+ tests_root = test_iosched_get_debugfs_tests_root();
+
+ if (!utils_root || !tests_root)
+ return -EINVAL;
+
+ mbtd->debug.random_test_seed = debugfs_create_u32(
+ "random_test_seed",
+ S_IRUGO | S_IWUGO,
+ utils_root,
+ &mbtd->random_test_seed);
+
+ if (!mbtd->debug.random_test_seed)
+ goto err_nomem;
+
+ mbtd->debug.send_write_packing_test =
+ debugfs_create_file("send_write_packing_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &send_write_packing_test_ops);
+
+ if (!mbtd->debug.send_write_packing_test)
+ goto err_nomem;
+
+ mbtd->debug.err_check_test =
+ debugfs_create_file("err_check_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &err_check_test_ops);
+
+ if (!mbtd->debug.err_check_test)
+ goto err_nomem;
+
+ mbtd->debug.send_invalid_packed_test =
+ debugfs_create_file("send_invalid_packed_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &send_invalid_packed_test_ops);
+
+ if (!mbtd->debug.send_invalid_packed_test)
+ goto err_nomem;
+
+ mbtd->debug.packing_control_test = debugfs_create_file(
+ "packing_control_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &write_packing_control_test_ops);
+
+ if (!mbtd->debug.packing_control_test)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ mmc_block_test_debugfs_cleanup();
+ return -ENOMEM;
+}
+
+static void mmc_block_test_probe(void)
+{
+ struct request_queue *q = test_iosched_get_req_queue();
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+
+ if (!q) {
+ test_pr_err("%s: NULL request queue", __func__);
+ return;
+ }
+
+ mq = q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	mbtd->exp_packed_stats.packing_events =
+		kzalloc((max_packed_reqs + 1) *
+			sizeof(*mbtd->exp_packed_stats.packing_events),
+			GFP_KERNEL);
+	if (!mbtd->exp_packed_stats.packing_events) {
+		test_pr_err("%s: failed to allocate packing_events", __func__);
+		return;
+	}
+
+	mmc_block_test_debugfs_init();
+}
+
+static void mmc_block_test_remove(void)
+{
+ mmc_block_test_debugfs_cleanup();
+}
+
+static int __init mmc_block_test_init(void)
+{
+	mbtd = kzalloc(sizeof(*mbtd), GFP_KERNEL);
+	if (!mbtd) {
+		test_pr_err("%s: failed to allocate mmc_block_test_data",
+			    __func__);
+		return -ENOMEM;
+	}
+
+ mbtd->bdt.init_fn = mmc_block_test_probe;
+ mbtd->bdt.exit_fn = mmc_block_test_remove;
+ INIT_LIST_HEAD(&mbtd->bdt.list);
+ test_iosched_register(&mbtd->bdt);
+
+ return 0;
+}
+
+static void __exit mmc_block_test_exit(void)
+{
+ test_iosched_unregister(&mbtd->bdt);
+ kfree(mbtd);
+}
+
+module_init(mmc_block_test_init);
+module_exit(mmc_block_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMC block test");
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index df382be..b9c8824 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -3115,7 +3115,8 @@
}
#ifdef CONFIG_HIGHMEM
- __free_pages(test->highmem, BUFFER_ORDER);
+ if (test->highmem)
+ __free_pages(test->highmem, BUFFER_ORDER);
#endif
kfree(test->buffer);
kfree(test);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 1810f76..ccfd225 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -16,6 +16,8 @@
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -27,6 +29,13 @@
#define MMC_QUEUE_BOUNCESZ 65536
/*
+ * The default number of requests to trigger write packing was determined
+ * by benchmarking: it keeps read latency as low as possible while
+ * maintaining high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
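+
+/*
+ * For example, a card advertising ext_csd.max_packed_writes = 8 ends up
+ * with num_wr_reqs_to_start_packing = min(8, DEFAULT_NUM_REQS_TO_START_PACK)
+ * = 8; see mmc_init_queue() below.
+ */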
+
+/*
* Prepare a MMC request. This just filters out odd stuff.
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -50,10 +59,98 @@
return BLKPREP_OK;
}
+static struct request *mmc_peek_request(struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+ mq->cmdq_req_peeked = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ if (!blk_queue_stopped(q))
+ mq->cmdq_req_peeked = blk_peek_request(q);
+ spin_unlock_irq(q->queue_lock);
+
+ return mq->cmdq_req_peeked;
+}
+
+static bool mmc_check_blk_queue_start_tag(struct request_queue *q,
+ struct request *req)
+{
+ int ret;
+
+ spin_lock_irq(q->queue_lock);
+ ret = blk_queue_start_tag(q, req);
+ spin_unlock_irq(q->queue_lock);
+
+ return !!ret;
+}
+
+static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
+ struct mmc_queue *mq)
+{
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+ struct request_queue *q = mq->queue;
+
+ /*
+ * Wait until all of the following conditions are true:
+ * 1. There is a request pending in the block layer queue
+ * to be processed.
+ * 2. If the peeked request is flush/discard then there shouldn't
+ * be any other direct command active.
+ * 3. cmdq state should be unhalted.
+ * 4. cmdq state shouldn't be in error state.
+ * 5. free tag available to process the new request.
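+	 *
+	 * The waiter is woken via ctx->wait, e.g. from
+	 * mmc_cmdq_dispatch_req() when the block layer has new work for
+	 * this queue.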
+ */
+ wait_event(ctx->wait, kthread_should_stop()
+ || (mmc_peek_request(mq) &&
+ !(((req_op(mq->cmdq_req_peeked) == REQ_OP_FLUSH) ||
+ (req_op(mq->cmdq_req_peeked) == REQ_OP_DISCARD))
+ && test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
+ && !(!host->card->part_curr && !mmc_card_suspended(host->card)
+ && mmc_host_halt(host))
+ && !(!host->card->part_curr && mmc_host_cq_disable(host) &&
+ !mmc_card_suspended(host->card))
+ && !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)
+ && !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked)));
+}
+
+static int mmc_cmdq_thread(void *d)
+{
+ struct mmc_queue *mq = d;
+ struct mmc_card *card = mq->card;
+
+ struct mmc_host *host = card->host;
+
+ current->flags |= PF_MEMALLOC;
+ if (card->host->wakeup_on_idle)
+ set_wake_up_idle(true);
+
+ while (1) {
+ int ret = 0;
+
+ mmc_cmdq_ready_wait(host, mq);
+ if (kthread_should_stop())
+ break;
+
+ ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+ /*
+ * Don't requeue if issue_fn fails, just bug on.
+ * We don't expect failure here and there is no recovery other
+ * than fixing the actual issue if there is any.
+ * Also we end the request if there is a partition switch error,
+ * so we should not requeue the request here.
+ */
+		BUG_ON(ret);
+ } /* loop */
+
+ return 0;
+}
+
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
struct sched_param scheduler_params = {0};
scheduler_params.sched_priority = 1;
@@ -61,6 +158,8 @@
sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
current->flags |= PF_MEMALLOC;
+ if (card->host->wakeup_on_idle)
+ set_wake_up_idle(true);
down(&mq->thread_sem);
do {
@@ -78,8 +177,8 @@
set_current_state(TASK_RUNNING);
mmc_blk_issue_rq(mq, req);
cond_resched();
- if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags)) {
+ clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
continue; /* fetch again */
}
@@ -111,6 +210,13 @@
return 0;
}
+static void mmc_cmdq_dispatch_req(struct request_queue *q)
+{
+ struct mmc_queue *mq = q->queuedata;
+
+ wake_up(&mq->card->host->cmdq_ctx.wait);
+}
+
/*
* Generic MMC request handler. This is called for any queue on a
* particular host. When the host is not busy, we look for a request
@@ -186,6 +292,32 @@
}
/**
+ * mmc_cmdq_setup_queue
+ * @mq: mmc queue
+ * @card: card to attach to this queue
+ *
+ * Set up the request queue for a CMDQ-capable MMC card
+ */
+void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ u64 limit = BLK_BOUNCE_HIGH;
+ struct mmc_host *host = card->host;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = *mmc_dev(host)->dma_mask;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count,
+ host->max_req_size / 512));
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+ blk_queue_max_segments(mq->queue, host->max_segs);
+}
+
+/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
@@ -195,7 +327,7 @@
* Initialise a MMC card request queue.
*/
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+ spinlock_t *lock, const char *subname, int area_type)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
@@ -207,6 +339,37 @@
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
+ if (card->ext_csd.cmdq_support &&
+ (area_type == MMC_BLK_DATA_AREA_MAIN)) {
+ mq->queue = blk_init_queue(mmc_cmdq_dispatch_req, lock);
+ if (!mq->queue)
+ return -ENOMEM;
+ mmc_cmdq_setup_queue(mq, card);
+ ret = mmc_cmdq_init(mq, card);
+ if (ret) {
+ pr_err("%s: %d: cmdq: unable to set-up\n",
+ mmc_hostname(card->host), ret);
+ blk_cleanup_queue(mq->queue);
+ } else {
+ sema_init(&mq->thread_sem, 1);
+ /* hook for pm qos cmdq init */
+ if (card->host->cmdq_ops->init)
+ card->host->cmdq_ops->init(card->host);
+ mq->queue->queuedata = mq;
+ mq->thread = kthread_run(mmc_cmdq_thread, mq,
+ "mmc-cmdqd/%d%s",
+ host->index,
+ subname ? subname : "");
+			if (IS_ERR(mq->thread)) {
+				ret = PTR_ERR(mq->thread);
+				pr_err("%s: %d: cmdq: failed to start mmc-cmdqd thread\n",
+					mmc_hostname(card->host), ret);
+			}
+
+ return ret;
+ }
+ }
+
mq->queue = blk_init_queue(mmc_request_fn, lock);
if (!mq->queue)
return -ENOMEM;
@@ -214,6 +377,9 @@
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
+ mq->num_wr_reqs_to_start_packing =
+ min_t(int, (int)card->ext_csd.max_packed_writes,
+ DEFAULT_NUM_REQS_TO_START_PACK);
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -279,24 +445,49 @@
#endif
if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+ unsigned int max_segs = host->max_segs;
+
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+retry:
+ blk_queue_max_segments(mq->queue, host->max_segs);
mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
- if (ret)
+ if (ret == -ENOMEM)
+ goto cur_sg_alloc_failed;
+ else if (ret)
goto cleanup_queue;
-
mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
- if (ret)
+ if (ret == -ENOMEM)
+ goto prev_sg_alloc_failed;
+ else if (ret)
goto cleanup_queue;
+
+ goto success;
+
+prev_sg_alloc_failed:
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+cur_sg_alloc_failed:
+ host->max_segs /= 2;
+ if (host->max_segs) {
+ goto retry;
+ } else {
+ host->max_segs = max_segs;
+ goto cleanup_queue;
+ }
}
+success:
sema_init(&mq->thread_sem, 1);
+ /* hook for pm qos legacy init */
+ if (card->host->ops->init)
+ card->host->ops->init(card->host);
+
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
@@ -411,28 +602,195 @@
mqrq_prev->packed = NULL;
}
+static void mmc_cmdq_softirq_done(struct request *rq)
+{
+ struct mmc_queue *mq = rq->q->queuedata;
+
+ mq->cmdq_complete_fn(rq);
+}
+
+static void mmc_cmdq_error_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ cmdq_err_work);
+
+ mq->cmdq_error_fn(mq);
+}
+
+enum blk_eh_timer_return mmc_cmdq_rq_timed_out(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ pr_err("%s: request with tag: %d flags: 0x%llx timed out\n",
+ mmc_hostname(mq->card->host), req->tag, req->cmd_flags);
+
+ return mq->cmdq_req_timed_out(req);
+}
+
+int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+ int i, ret = 0;
+ /* one slot is reserved for dcmd requests */
+ int q_depth = card->ext_csd.cmdq_depth - 1;
+
+ card->cmdq_init = false;
+ if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE)) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
+ init_waitqueue_head(&card->host->cmdq_ctx.wait);
+
+ mq->mqrq_cmdq = kzalloc(
+ sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+ if (!mq->mqrq_cmdq) {
+ pr_warn("%s: unable to allocate mqrq's for q_depth %d\n",
+ mmc_card_name(card), q_depth);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* sg is allocated for data request slots only */
+ for (i = 0; i < q_depth; i++) {
+ mq->mqrq_cmdq[i].sg = mmc_alloc_sg(card->host->max_segs, &ret);
+ if (ret) {
+ pr_warn("%s: unable to allocate cmdq sg of size %d\n",
+ mmc_card_name(card),
+ card->host->max_segs);
+ goto free_mqrq_sg;
+ }
+ }
+
+ ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
+ if (ret) {
+ pr_warn("%s: unable to allocate cmdq tags %d\n",
+ mmc_card_name(card), q_depth);
+ goto free_mqrq_sg;
+ }
+
+ blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done);
+ INIT_WORK(&mq->cmdq_err_work, mmc_cmdq_error_work);
+ init_completion(&mq->cmdq_shutdown_complete);
+ init_completion(&mq->cmdq_pending_req_done);
+
+ blk_queue_rq_timed_out(mq->queue, mmc_cmdq_rq_timed_out);
+ blk_queue_rq_timeout(mq->queue, 120 * HZ);
+ card->cmdq_init = true;
+
+ goto out;
+
+free_mqrq_sg:
+ for (i = 0; i < q_depth; i++)
+ kfree(mq->mqrq_cmdq[i].sg);
+ kfree(mq->mqrq_cmdq);
+ mq->mqrq_cmdq = NULL;
+out:
+ return ret;
+}
+
+void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card)
+{
+ int i;
+ int q_depth = card->ext_csd.cmdq_depth - 1;
+
+ blk_free_tags(mq->queue->queue_tags);
+ mq->queue->queue_tags = NULL;
+ blk_queue_free_tags(mq->queue);
+
+ for (i = 0; i < q_depth; i++)
+ kfree(mq->mqrq_cmdq[i].sg);
+ kfree(mq->mqrq_cmdq);
+ mq->mqrq_cmdq = NULL;
+}
+
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
+ * @wait: Wait till MMC request queue is empty
*
* Stop the block request queue, and wait for our thread to
* complete any outstanding requests. This ensures that we
* won't suspend while a request is being processed.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_queue_suspend(struct mmc_queue *mq, int wait)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ int rc = 0;
+ struct mmc_card *card = mq->card;
+ struct request *req;
- if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
- mq->flags |= MMC_QUEUE_SUSPENDED;
+ if (card->cmdq_init && blk_queue_tagged(q)) {
+ struct mmc_host *host = card->host;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))
+ goto out;
- down(&mq->thread_sem);
+ if (wait) {
+ /*
+ * After blk_stop_queue is called, wait for all
+ * active_reqs to complete.
+ * Then wait for cmdq thread to exit before calling
+ * cmdq shutdown to avoid race between issuing
+ * requests and shutdown of cmdq.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (host->cmdq_ctx.active_reqs)
+ wait_for_completion(
+ &mq->cmdq_shutdown_complete);
+ kthread_stop(mq->thread);
+ mq->cmdq_shutdown(mq);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ wake_up(&host->cmdq_ctx.wait);
+ req = blk_peek_request(q);
+ if (req || mq->cmdq_req_peeked ||
+ host->cmdq_ctx.active_reqs) {
+ clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
+ blk_start_queue(q);
+ rc = -EBUSY;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ goto out;
}
+
+ if (!(test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))) {
+ if (!wait) {
+ /* suspend/stop the queue in case of suspend */
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else {
+ /* shutdown the queue in case of shutdown/reboot */
+ blk_cleanup_queue(q);
+ }
+
+ rc = down_trylock(&mq->thread_sem);
+ if (rc && !wait) {
+ /*
+ * Failed to take the lock so better to abort the
+ * suspend because mmcqd thread is processing requests.
+ */
+ clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ rc = -EBUSY;
+ } else if (rc && wait) {
+ down(&mq->thread_sem);
+ rc = 0;
+ }
+ }
+out:
+ return rc;
}
/**
@@ -442,12 +800,13 @@
void mmc_queue_resume(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
unsigned long flags;
- if (mq->flags & MMC_QUEUE_SUSPENDED) {
- mq->flags &= ~MMC_QUEUE_SUSPENDED;
+ if (test_and_clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags)) {
- up(&mq->thread_sem);
+ if (!(card->cmdq_init && blk_queue_tagged(q)))
+ up(&mq->thread_sem);
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index fe58d31..bf7a95b 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -48,20 +48,40 @@
struct mmc_async_req mmc_active;
enum mmc_packed_type cmd_type;
struct mmc_packed *packed;
+ struct mmc_cmdq_req cmdq_req;
};
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
- unsigned int flags;
-#define MMC_QUEUE_SUSPENDED (1 << 0)
-#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+ unsigned long flags;
+#define MMC_QUEUE_SUSPENDED 0
+#define MMC_QUEUE_NEW_REQUEST 1
+ int (*issue_fn)(struct mmc_queue *, struct request *);
+ int (*cmdq_issue_fn)(struct mmc_queue *,
+ struct request *);
+ void (*cmdq_complete_fn)(struct request *);
+ void (*cmdq_error_fn)(struct mmc_queue *);
+ enum blk_eh_timer_return (*cmdq_req_timed_out)(struct request *);
void *data;
struct request_queue *queue;
struct mmc_queue_req mqrq[2];
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
+ struct mmc_queue_req *mqrq_cmdq;
+ bool wr_packing_enabled;
+ int num_of_potential_packed_wr_reqs;
+ int num_wr_reqs_to_start_packing;
+ bool no_pack_for_random;
+ struct work_struct cmdq_err_work;
+
+ struct completion cmdq_pending_req_done;
+ struct completion cmdq_shutdown_complete;
+ struct request *cmdq_req_peeked;
+ int (*err_check_fn)(struct mmc_card *, struct mmc_async_req *);
+ void (*packed_test_fn)(struct request_queue *, struct mmc_queue_req *);
+ void (*cmdq_shutdown)(struct mmc_queue *);
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
atomic_t max_write_speed;
atomic_t max_read_speed;
@@ -73,9 +93,9 @@
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
- const char *);
+ const char *, int);
extern void mmc_cleanup_queue(struct mmc_queue *);
-extern void mmc_queue_suspend(struct mmc_queue *);
+extern int mmc_queue_suspend(struct mmc_queue *, int);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
@@ -88,4 +108,9 @@
extern int mmc_access_rpmb(struct mmc_queue *);
+extern void print_mmc_packing_stats(struct mmc_card *card);
+
+extern int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card);
+extern void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card);
+
#endif
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 36d9d69..c0592ac 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -23,6 +23,17 @@
This driver can also be built as a module. If so, the module
will be called pwrseq_simple.
+config MMC_RING_BUFFER
+	bool "MMC ring buffer tracing"
+ depends on MMC
+ default n
+ help
+	  This enables ring buffer tracing of significant MMC driver
+	  events, providing a command history for debugging purposes.
+
+ If unsure, say N.
+
config MMC_EMBEDDED_SDIO
boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
help
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index f007151..dbd1f1b 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,4 @@
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_RING_BUFFER) += ring_buffer.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c64266f..1c28cf8 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -132,6 +132,16 @@
struct mmc_host *host = card->host;
int ret;
+ if (!drv) {
+ pr_debug("%s: %s: drv is NULL\n", dev_name(dev), __func__);
+ return;
+ }
+
+ if (!card) {
+ pr_debug("%s: %s: card is NULL\n", dev_name(dev), __func__);
+ return;
+ }
+
if (dev->driver && drv->shutdown)
drv->shutdown(card);
@@ -154,6 +164,8 @@
if (ret)
return ret;
+ if (mmc_bus_needs_resume(host))
+ return 0;
ret = host->bus_ops->suspend(host);
return ret;
}
@@ -164,11 +176,17 @@
struct mmc_host *host = card->host;
int ret;
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ goto skip_full_resume;
+ }
+
ret = host->bus_ops->resume(host);
if (ret)
pr_warn("%s: error %d during resume (card was removed?)\n",
mmc_hostname(host), ret);
+skip_full_resume:
ret = pm_generic_resume(dev);
return ret;
}
@@ -180,6 +198,9 @@
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
return host->bus_ops->runtime_suspend(host);
}
@@ -188,8 +209,12 @@
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
+ if (mmc_bus_needs_resume(host))
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+
return host->bus_ops->runtime_resume(host);
}
+
#endif /* !CONFIG_PM */
static const struct dev_pm_ops mmc_bus_pm_ops = {
@@ -273,6 +298,9 @@
card->dev.release = mmc_release_card;
card->dev.type = type;
+ spin_lock_init(&card->wr_pack_stats.lock);
+ spin_lock_init(&card->bkops.stats.lock);
+
return card;
}
@@ -350,6 +378,13 @@
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
+ if (mmc_card_sdio(card)) {
+ ret = device_init_wakeup(&card->dev, true);
+ if (ret)
+ pr_err("%s: %s: failed to init wakeup: %d\n",
+ mmc_hostname(card->host), __func__, ret);
+ }
+
device_enable_async_suspend(&card->dev);
ret = device_add(&card->dev);
@@ -383,6 +418,9 @@
of_node_put(card->dev.of_node);
}
+ kfree(card->wr_pack_stats.packing_events);
+ kfree(card->cached_ext_csd);
+
put_device(&card->dev);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index e7af954..1397d03 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
@@ -29,6 +30,8 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/jiffies.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -109,6 +112,7 @@
data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+ data->fault_injected = true;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
@@ -120,6 +124,763 @@
#endif /* CONFIG_FAIL_MMC_REQUEST */
+static bool mmc_is_data_request(struct mmc_request *mmc_request)
+{
+ switch (mmc_request->cmd->opcode) {
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_READ_MULTIPLE_BLOCK:
+ case MMC_WRITE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
+{
+ struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&clk_scaling->lock);
+
+ clk_scaling->start_busy = ktime_get();
+ clk_scaling->is_busy_started = true;
+
+ if (lock_needed)
+ spin_unlock_bh(&clk_scaling->lock);
+}
+
+static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
+{
+ struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&clk_scaling->lock);
+
+ if (!clk_scaling->is_busy_started) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ clk_scaling->total_busy_time_us +=
+ ktime_to_us(ktime_sub(ktime_get(),
+ clk_scaling->start_busy));
+ pr_debug("%s: accumulated busy time is %lu usec\n",
+ mmc_hostname(host), clk_scaling->total_busy_time_us);
+ clk_scaling->is_busy_started = false;
+
+out:
+ if (lock_needed)
+ spin_unlock_bh(&clk_scaling->lock);
+}
+
+/**
+ * mmc_cmdq_clk_scaling_start_busy() - start busy timer for data requests
+ * @host: pointer to mmc host structure
+ * @lock_needed: flag indication if locking is needed
+ *
+ * This function starts the busy timer in case it was not already started.
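+ * The timer is not started while a direct command is active, i.e. while
+ * CMDQ_STATE_DCMD_ACTIVE is set in the cmdq context state.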
+ */
+void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
+ bool lock_needed)
+{
+ if (!host->clk_scaling.enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&host->clk_scaling.lock);
+
+ if (!host->clk_scaling.is_busy_started &&
+ !test_bit(CMDQ_STATE_DCMD_ACTIVE,
+ &host->cmdq_ctx.curr_state)) {
+ host->clk_scaling.start_busy = ktime_get();
+ host->clk_scaling.is_busy_started = true;
+ }
+
+ if (lock_needed)
+ spin_unlock_bh(&host->clk_scaling.lock);
+}
+EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);
+
+/**
+ * mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
+ * @host: pointer to mmc host structure
+ * @lock_needed: flag indication if locking is needed
+ *
+ * This function stops the busy timer in case it is the last data request.
+ * In case the current request is not the last one, the busy time till
+ * now will be accumulated and the counter will be restarted.
+ */
+void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
+ bool lock_needed, bool is_cmdq_dcmd)
+{
+ if (!host->clk_scaling.enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&host->clk_scaling.lock);
+
+ /*
+	 * For CQ mode: when a DCMD request completes, restart the busy
+	 * timer if data requests are still pending
+ */
+ if (is_cmdq_dcmd) {
+ if (host->cmdq_ctx.data_active_reqs) {
+ host->clk_scaling.is_busy_started = true;
+ host->clk_scaling.start_busy = ktime_get();
+ }
+ goto out;
+ }
+
+ host->clk_scaling.total_busy_time_us +=
+ ktime_to_us(ktime_sub(ktime_get(),
+ host->clk_scaling.start_busy));
+
+ if (host->cmdq_ctx.data_active_reqs) {
+ host->clk_scaling.is_busy_started = true;
+ host->clk_scaling.start_busy = ktime_get();
+ } else {
+ host->clk_scaling.is_busy_started = false;
+ }
+out:
+ if (lock_needed)
+ spin_unlock_bh(&host->clk_scaling.lock);
+}
+EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);
+
+/**
+ * mmc_can_scale_clk() - Check clock scaling capability
+ * @host: pointer to mmc host structure
+ */
+bool mmc_can_scale_clk(struct mmc_host *host)
+{
+ if (!host) {
+ pr_err("bad host parameter\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ return host->caps2 & MMC_CAP2_CLK_SCALE;
+}
+EXPORT_SYMBOL(mmc_can_scale_clk);
+
+static int mmc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+ struct mmc_devfeq_clk_scaling *clk_scaling;
+
+ if (!host) {
+ pr_err("bad host parameter\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ return 0;
+
+ spin_lock_bh(&clk_scaling->lock);
+
+ /* accumulate the busy time of ongoing work */
+ memset(status, 0, sizeof(*status));
+ if (clk_scaling->is_busy_started) {
+ if (mmc_card_cmdq(host->card)) {
+			/*
+			 * The "busy-timer" will be restarted in case there
+			 * are pending data requests.
+			 */
+ mmc_cmdq_clk_scaling_stop_busy(host, false, false);
+ } else {
+ mmc_clk_scaling_stop_busy(host, false);
+ mmc_clk_scaling_start_busy(host, false);
+ }
+ }
+
+ status->busy_time = clk_scaling->total_busy_time_us;
+ status->total_time = ktime_to_us(ktime_sub(ktime_get(),
+ clk_scaling->measure_interval_start));
+ clk_scaling->total_busy_time_us = 0;
+ status->current_frequency = clk_scaling->curr_freq;
+ clk_scaling->measure_interval_start = ktime_get();
+
+ pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
+ mmc_hostname(host),
+ (status->busy_time*100)/status->total_time,
+ status->total_time, status->busy_time,
+ status->current_frequency);
+
+ spin_unlock_bh(&clk_scaling->lock);
+
+ return 0;
+}
+
+static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u32 status;
+
+ /*
+ * If the current partition type is RPMB, clock switching may not
+ * work properly as sending tuning command (CMD21) is illegal in
+ * this mode.
+ */
+ if (!card || (mmc_card_mmc(card) &&
+ (card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
+ mmc_card_doing_bkops(card))))
+ return false;
+
+ if (mmc_send_status(card, &status)) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ return false;
+ }
+
+ return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
+int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
+{
+ int err = 0;
+
+ err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
+ (!host->cmdq_ctx.active_reqs));
+ if (host->cmdq_ctx.active_reqs) {
+ pr_err("%s: %s: unexpected active requests (%lu)\n",
+ mmc_hostname(host), __func__,
+ host->cmdq_ctx.active_reqs);
+ return -EPERM;
+ }
+
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
+
+int mmc_clk_update_freq(struct mmc_host *host,
+ unsigned long freq, enum mmc_load state)
+{
+ int err = 0;
+ bool cmdq_mode;
+
+ if (!host) {
+ pr_err("bad host parameter\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mmc_host_clk_hold(host);
+ cmdq_mode = mmc_card_cmdq(host->card);
+
+ /* make sure the card supports the frequency we want */
+ if (unlikely(freq > host->card->clk_scaling_highest)) {
+ freq = host->card->clk_scaling_highest;
+ pr_warn("%s: %s: frequency was overridden to %lu\n",
+ mmc_hostname(host), __func__,
+ host->card->clk_scaling_highest);
+ }
+
+ if (unlikely(freq < host->card->clk_scaling_lowest)) {
+ freq = host->card->clk_scaling_lowest;
+ pr_warn("%s: %s: frequency was overridden to %lu\n",
+ mmc_hostname(host), __func__,
+ host->card->clk_scaling_lowest);
+ }
+
+ if (freq == host->clk_scaling.curr_freq)
+ goto out;
+
+ if (host->ops->notify_load) {
+ err = host->ops->notify_load(host, state);
+ if (err) {
+ pr_err("%s: %s: fail on notify_load\n",
+ mmc_hostname(host), __func__);
+ goto out;
+ }
+ }
+
+ if (cmdq_mode) {
+ err = mmc_cmdq_halt_on_empty_queue(host);
+ if (err) {
+ pr_err("%s: %s: failed halting queue (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto halt_failed;
+ }
+ }
+
+ if (!mmc_is_valid_state_for_clk_scaling(host)) {
+		pr_debug("%s: invalid state for clock scaling - skipping\n",
+ mmc_hostname(host));
+ goto invalid_state;
+ }
+
+ err = host->bus_ops->change_bus_speed(host, &freq);
+ if (!err)
+ host->clk_scaling.curr_freq = freq;
+ else
+ pr_err("%s: %s: failed (%d) at freq=%lu\n",
+ mmc_hostname(host), __func__, err, freq);
+
+invalid_state:
+ if (cmdq_mode) {
+ if (mmc_cmdq_halt(host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(host), __func__);
+ }
+
+halt_failed:
+ if (err) {
+ /* restore previous state */
+ if (host->ops->notify_load)
+ if (host->ops->notify_load(host,
+ host->clk_scaling.state))
+ pr_err("%s: %s: fail on notify_load restore\n",
+ mmc_hostname(host), __func__);
+ }
+out:
+ mmc_host_clk_release(host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_clk_update_freq);
+
+static int mmc_devfreq_set_target(struct device *dev,
+ unsigned long *freq, u32 devfreq_flags)
+{
+ struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+ struct mmc_devfeq_clk_scaling *clk_scaling;
+ int err = 0;
+ int abort;
+
+ if (!(host && freq)) {
+ pr_err("%s: unexpected host/freq parameter\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+ clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ goto out;
+
+ if (*freq == UINT_MAX)
+ *freq = clk_scaling->freq_table[1];
+ else
+ *freq = clk_scaling->freq_table[0];
+
+ pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
+ *freq, current->comm);
+
+ if ((clk_scaling->curr_freq == *freq) ||
+ clk_scaling->skip_clk_scale_freq_update)
+ goto out;
+
+ /* No need to scale the clocks if they are gated */
+ if (!host->ios.clock)
+ goto out;
+
+ spin_lock_bh(&clk_scaling->lock);
+ if (clk_scaling->clk_scaling_in_progress) {
+ pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
+ mmc_hostname(host));
+ spin_unlock_bh(&clk_scaling->lock);
+ goto out;
+ }
+ clk_scaling->need_freq_change = true;
+ clk_scaling->target_freq = *freq;
+ clk_scaling->state = *freq < clk_scaling->curr_freq ?
+ MMC_LOAD_LOW : MMC_LOAD_HIGH;
+ spin_unlock_bh(&clk_scaling->lock);
+
+ abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
+ if (abort)
+ goto out;
+
+ /*
+	 * Since we were able to claim the host, there is no need to
+	 * defer the frequency change; do it now.
+ */
+ clk_scaling->need_freq_change = false;
+
+ mmc_host_clk_hold(host);
+ err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
+ if (err && err != -EAGAIN)
+ pr_err("%s: clock scale to %lu failed with error %d\n",
+ mmc_hostname(host), *freq, err);
+ else
+ pr_debug("%s: clock change to %lu finished successfully (%s)\n",
+ mmc_hostname(host), *freq, current->comm);
+
+ mmc_host_clk_release(host);
+ mmc_release_host(host);
+out:
+ return err;
+}
+
+/**
+ * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
+ * @host: pointer to mmc host structure
+ *
+ * This function does clock scaling in case "need_freq_change" flag was set
+ * by the clock scaling logic.
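+ * The flag is set by mmc_devfreq_set_target() and cleared there only when
+ * the host could be claimed and the frequency changed immediately.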
+ */
+void mmc_deferred_scaling(struct mmc_host *host)
+{
+ unsigned long target_freq;
+ int err;
+
+ if (!host->clk_scaling.enable)
+ return;
+
+ spin_lock_bh(&host->clk_scaling.lock);
+
+ if (host->clk_scaling.clk_scaling_in_progress ||
+ !(host->clk_scaling.need_freq_change)) {
+ spin_unlock_bh(&host->clk_scaling.lock);
+ return;
+ }
+
+ atomic_inc(&host->clk_scaling.devfreq_abort);
+ target_freq = host->clk_scaling.target_freq;
+ host->clk_scaling.clk_scaling_in_progress = true;
+ host->clk_scaling.need_freq_change = false;
+ spin_unlock_bh(&host->clk_scaling.lock);
+ pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
+ mmc_hostname(host),
+ target_freq, current->comm);
+
+ err = mmc_clk_update_freq(host, target_freq,
+ host->clk_scaling.state);
+ if (err && err != -EAGAIN)
+ pr_err("%s: failed on deferred scale clocks (%d)\n",
+ mmc_hostname(host), err);
+ else
+ pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
+ mmc_hostname(host),
+ target_freq, current->comm);
+ host->clk_scaling.clk_scaling_in_progress = false;
+ atomic_dec(&host->clk_scaling.devfreq_abort);
+}
+EXPORT_SYMBOL(mmc_deferred_scaling);
+
+static int mmc_devfreq_create_freq_table(struct mmc_host *host)
+{
+ int i;
+ struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+ pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
+ mmc_hostname(host),
+ host->card->clk_scaling_lowest,
+ host->card->clk_scaling_highest);
+
+ if (!clk_scaling->freq_table) {
+ pr_debug("%s: no frequency table defined - setting default\n",
+ mmc_hostname(host));
+ clk_scaling->freq_table = kzalloc(
+ 2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
+ if (!clk_scaling->freq_table)
+ return -ENOMEM;
+ clk_scaling->freq_table[0] = host->card->clk_scaling_lowest;
+ clk_scaling->freq_table[1] = host->card->clk_scaling_highest;
+ clk_scaling->freq_table_sz = 2;
+ goto out;
+ }
+
+ if (host->card->clk_scaling_lowest >
+ clk_scaling->freq_table[0])
+ pr_debug("%s: frequency table undershot possible freq\n",
+ mmc_hostname(host));
+
+ for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+ if (clk_scaling->freq_table[i] <=
+ host->card->clk_scaling_highest)
+ continue;
+ clk_scaling->freq_table[i] =
+ host->card->clk_scaling_highest;
+ clk_scaling->freq_table_sz = i + 1;
+ pr_debug("%s: frequency table overshot possible freq (%d)\n",
+ mmc_hostname(host), clk_scaling->freq_table[i]);
+ break;
+ }
+
+out:
+	/*
+	 * devfreq requires an unsigned long freq_table while the
+	 * freq_table in clk_scaling is u32. Allocate a separate array
+	 * for it here and release it when clock scaling exits.
+	 */
+ clk_scaling->devfreq_profile.freq_table = kzalloc(
+ clk_scaling->freq_table_sz *
+ sizeof(*(clk_scaling->devfreq_profile.freq_table)),
+ GFP_KERNEL);
+ if (!clk_scaling->devfreq_profile.freq_table)
+ return -ENOMEM;
+ clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
+
+ for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+ clk_scaling->devfreq_profile.freq_table[i] =
+ clk_scaling->freq_table[i];
+ pr_debug("%s: freq[%d] = %u\n",
+ mmc_hostname(host), i, clk_scaling->freq_table[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * mmc_init_clk_scaling() - Initialize clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Initialize clock scaling for supported hosts. It is assumed that the caller
+ * ensures the clock is running at the maximum possible frequency before
+ * calling this function. struct devfreq_simple_ondemand_data is used to
+ * configure the governor.
+ */
+int mmc_init_clk_scaling(struct mmc_host *host)
+{
+ int err;
+
+ if (!host || !host->card) {
+ pr_err("%s: unexpected host/card parameters\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host) ||
+ !host->bus_ops->change_bus_speed) {
+ pr_debug("%s: clock scaling is not supported\n",
+ mmc_hostname(host));
+ return 0;
+ }
+
+	pr_debug("registering %s dev (%p) to devfreq\n",
+ mmc_hostname(host),
+ mmc_classdev(host));
+
+ if (host->clk_scaling.devfreq) {
+ pr_err("%s: dev is already registered for dev %p\n",
+ mmc_hostname(host),
+ mmc_dev(host));
+ return -EPERM;
+ }
+ spin_lock_init(&host->clk_scaling.lock);
+ atomic_set(&host->clk_scaling.devfreq_abort, 0);
+ host->clk_scaling.curr_freq = host->ios.clock;
+ host->clk_scaling.clk_scaling_in_progress = false;
+ host->clk_scaling.need_freq_change = false;
+ host->clk_scaling.is_busy_started = false;
+
+ host->clk_scaling.devfreq_profile.polling_ms =
+ host->clk_scaling.polling_delay_ms;
+ host->clk_scaling.devfreq_profile.get_dev_status =
+ mmc_devfreq_get_dev_status;
+ host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
+ host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;
+
+ host->clk_scaling.ondemand_gov_data.simple_scaling = true;
+ host->clk_scaling.ondemand_gov_data.upthreshold =
+ host->clk_scaling.upthreshold;
+ host->clk_scaling.ondemand_gov_data.downdifferential =
+ host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;
+
+ err = mmc_devfreq_create_freq_table(host);
+ if (err) {
+ pr_err("%s: fail to create devfreq frequency table\n",
+ mmc_hostname(host));
+ return err;
+ }
+
+ pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
+ mmc_hostname(host),
+ host->clk_scaling.ondemand_gov_data.upthreshold,
+ host->clk_scaling.ondemand_gov_data.downdifferential,
+ host->clk_scaling.devfreq_profile.polling_ms);
+ host->clk_scaling.devfreq = devfreq_add_device(
+ mmc_classdev(host),
+ &host->clk_scaling.devfreq_profile,
+ "simple_ondemand",
+ &host->clk_scaling.ondemand_gov_data);
+	if (IS_ERR_OR_NULL(host->clk_scaling.devfreq)) {
+		pr_err("%s: unable to register with devfreq\n",
+			mmc_hostname(host));
+		return -EPERM;
+	}
+
+ pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n",
+ mmc_hostname(host),
+ dev_name(mmc_classdev(host)),
+ mmc_classdev(host),
+ host->clk_scaling.devfreq,
+ host->ios.clock);
+
+ host->clk_scaling.enable = true;
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_init_clk_scaling);
+
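[Editor's sketch, not part of the patch] The assumed call site: the bus driver opts in via MMC_CAP2_CLK_SCALE and initializes scaling once the card is already running at its highest frequency, as the kernel-doc above requires. The function name is hypothetical.

	static int mmc_enable_clk_scaling_example(struct mmc_host *host)
	{
		int err;

		host->caps2 |= MMC_CAP2_CLK_SCALE;
		err = mmc_init_clk_scaling(host);	/* registers with devfreq */
		if (err)
			pr_err("%s: clk scaling init failed (%d)\n",
			       mmc_hostname(host), err);
		return err;
	}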
+/**
+ * mmc_suspend_clk_scaling() - suspend clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API suspends the devfreq feature for the given host.
+ * The statistics collected by mmc will be cleared.
+ * This function is intended to be called by the pm callbacks
+ * (e.g. runtime_suspend, suspend) of the mmc device.
+ */
+int mmc_suspend_clk_scaling(struct mmc_host *host)
+{
+ int err;
+
+ if (!host) {
+ WARN(1, "bad host parameter\n");
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
+ return 0;
+
+ if (!host->clk_scaling.devfreq) {
+ pr_err("%s: %s: no devfreq is assosiated with this device\n",
+ mmc_hostname(host), __func__);
+ return -EPERM;
+ }
+
+ atomic_inc(&host->clk_scaling.devfreq_abort);
+ wake_up(&host->wq);
+ err = devfreq_suspend_device(host->clk_scaling.devfreq);
+ if (err) {
+ pr_err("%s: %s: failed to suspend devfreq\n",
+ mmc_hostname(host), __func__);
+ return err;
+ }
+ host->clk_scaling.enable = false;
+
+ host->clk_scaling.total_busy_time_us = 0;
+
+ pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_suspend_clk_scaling);
+
+/**
+ * mmc_resume_clk_scaling() - resume clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API resumes the devfreq feature for the given host.
+ * This API is intended to be called by the pm callbacks
+ * (e.g. runtime_resume, resume) of the mmc device.
+ */
+int mmc_resume_clk_scaling(struct mmc_host *host)
+{
+ int err = 0;
+ u32 max_clk_idx = 0;
+ u32 devfreq_max_clk = 0;
+ u32 devfreq_min_clk = 0;
+
+ if (!host) {
+ WARN(1, "bad host parameter\n");
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host))
+ return 0;
+
+ if (!host->clk_scaling.devfreq) {
+ pr_err("%s: %s: no devfreq is assosiated with this device\n",
+ mmc_hostname(host), __func__);
+ return -EPERM;
+ }
+
+ atomic_set(&host->clk_scaling.devfreq_abort, 0);
+
+ max_clk_idx = host->clk_scaling.freq_table_sz - 1;
+ devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
+ devfreq_min_clk = host->clk_scaling.freq_table[0];
+
+ host->clk_scaling.curr_freq = devfreq_max_clk;
+ if (host->ios.clock < host->card->clk_scaling_highest)
+ host->clk_scaling.curr_freq = devfreq_min_clk;
+
+ host->clk_scaling.clk_scaling_in_progress = false;
+ host->clk_scaling.need_freq_change = false;
+
+ err = devfreq_resume_device(host->clk_scaling.devfreq);
+ if (err) {
+ pr_err("%s: %s: failed to resume devfreq (%d)\n",
+ mmc_hostname(host), __func__, err);
+ } else {
+ host->clk_scaling.enable = true;
+ pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_resume_clk_scaling);
+
+/**
+ * mmc_exit_clk_scaling() - Disable clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Disable clock scaling permanently.
+ */
+int mmc_exit_clk_scaling(struct mmc_host *host)
+{
+ int err;
+
+ if (!host) {
+ pr_err("%s: bad host parameter\n", __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host))
+ return 0;
+
+ if (!host->clk_scaling.devfreq) {
+ pr_err("%s: %s: no devfreq is assosiated with this device\n",
+ mmc_hostname(host), __func__);
+ return -EPERM;
+ }
+
+ err = mmc_suspend_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ err = devfreq_remove_device(host->clk_scaling.devfreq);
+ if (err) {
+ pr_err("%s: remove devfreq failed (%d)\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ kfree(host->clk_scaling.devfreq_profile.freq_table);
+
+ host->clk_scaling.devfreq = NULL;
+ atomic_set(&host->clk_scaling.devfreq_abort, 1);
+ pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_exit_clk_scaling);
+
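[Editor's sketch, not part of the patch] The intended pairing of the three APIs, shown with hypothetical pm callbacks; mmc_exit_clk_scaling() is reserved for teardown paths such as card removal.

	static int mmc_pm_suspend_example(struct mmc_host *host)
	{
		return mmc_suspend_clk_scaling(host);	/* stop devfreq polling */
	}

	static int mmc_pm_resume_example(struct mmc_host *host)
	{
		return mmc_resume_clk_scaling(host);	/* restart load polling */
	}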
static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
@@ -150,6 +911,12 @@
{
struct mmc_command *cmd = mrq->cmd;
int err = cmd->error;
+#ifdef CONFIG_MMC_PERF_PROFILING
+ ktime_t diff;
+#endif
+
+ if (host->clk_scaling.is_busy_started)
+ mmc_clk_scaling_stop_busy(host, true);
/* Flag re-tuning needed on CRC errors */
if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
@@ -198,6 +965,24 @@
cmd->resp[2], cmd->resp[3]);
if (mrq->data) {
+#ifdef CONFIG_MMC_PERF_PROFILING
+ if (host->perf_enable) {
+ diff = ktime_sub(ktime_get(), host->perf.start);
+ if (mrq->data->flags == MMC_DATA_READ) {
+ host->perf.rbytes_drv +=
+ mrq->data->bytes_xfered;
+ host->perf.rtime_drv =
+ ktime_add(host->perf.rtime_drv,
+ diff);
+ } else {
+ host->perf.wbytes_drv +=
+ mrq->data->bytes_xfered;
+ host->perf.wtime_drv =
+ ktime_add(host->perf.wtime_drv,
+ diff);
+ }
+ }
+#endif
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
@@ -341,33 +1126,194 @@
mrq->stop->error = 0;
mrq->stop->mrq = mrq;
}
+#ifdef CONFIG_MMC_PERF_PROFILING
+ if (host->perf_enable)
+ host->perf.start = ktime_get();
+#endif
}
mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
+
+ if (mmc_is_data_request(mrq)) {
+ mmc_deferred_scaling(host);
+ mmc_clk_scaling_start_busy(host, true);
+ }
+
__mmc_start_request(host, mrq);
return 0;
}
+static void mmc_start_cmdq_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ if (mrq->data) {
+ pr_debug("%s: blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
+ mmc_hostname(host), mrq->data->blksz,
+ mrq->data->blocks, mrq->data->flags,
+ mrq->data->timeout_ns / NSEC_PER_MSEC,
+ mrq->data->timeout_clks);
+
+ BUG_ON(mrq->data->blksz > host->max_blk_size);
+ BUG_ON(mrq->data->blocks > host->max_blk_count);
+ BUG_ON(mrq->data->blocks * mrq->data->blksz >
+ host->max_req_size);
+ mrq->data->error = 0;
+ mrq->data->mrq = mrq;
+ }
+
+ if (mrq->cmd) {
+ mrq->cmd->error = 0;
+ mrq->cmd->mrq = mrq;
+ }
+
+ mmc_host_clk_hold(host);
+ if (likely(host->cmdq_ops->request))
+ host->cmdq_ops->request(host, mrq);
+ else
+ pr_err("%s: %s: issue request failed\n", mmc_hostname(host),
+ __func__);
+}
+
/**
- * mmc_start_bkops - start BKOPS for supported cards
+ * mmc_blk_init_bkops_statistics - initialize bkops statistics
* @card: MMC card to start BKOPS
- * @form_exception: A flag to indicate if this function was
- * called due to an exception raised by the card
*
- * Start background operations whenever requested.
- * When the urgent BKOPS bit is set in a R1 command response
- * then background operations should be started immediately.
+ * Initialize and enable the bkops statistics
+ */
+void mmc_blk_init_bkops_statistics(struct mmc_card *card)
+{
+ int i;
+ struct mmc_bkops_stats *stats;
+
+ if (!card)
+ return;
+
+ stats = &card->bkops.stats;
+ spin_lock(&stats->lock);
+
+ stats->manual_start = 0;
+ stats->hpi = 0;
+ stats->auto_start = 0;
+ stats->auto_stop = 0;
+ for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; i++)
+ stats->level[i] = 0;
+ stats->enabled = true;
+
+ spin_unlock(&stats->lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
+
+static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->hpi++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->manual_start++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->auto_start++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->auto_stop++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
+ unsigned level)
+{
+ BUG_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->level[level]++;
+ spin_unlock_irq(&stats->lock);
+}
+
+/**
+ * mmc_set_auto_bkops - set auto BKOPS for supported cards
+ * @card: MMC card to configure
+ * @enable: enable/disable flag
+ *
+ * Configure the card to run automatic BKOPS.
+ * Should be called with the host claimed.
*/
-void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
+{
+ int ret = 0;
+ u8 bkops_en;
+
+ BUG_ON(!card);
+ enable = !!enable;
+
+ if (unlikely(!mmc_card_support_auto_bkops(card))) {
+ pr_err("%s: %s: card doesn't support auto bkops\n",
+ mmc_hostname(card->host), __func__);
+ return -EPERM;
+ }
+
+ if (enable) {
+ if (mmc_card_doing_auto_bkops(card))
+ goto out;
+ bkops_en = card->ext_csd.bkops_en | EXT_CSD_BKOPS_AUTO_EN;
+ } else {
+ if (!mmc_card_doing_auto_bkops(card))
+ goto out;
+ bkops_en = card->ext_csd.bkops_en & ~EXT_CSD_BKOPS_AUTO_EN;
+ }
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
+ bkops_en, 0);
+ if (ret) {
+ pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
+ mmc_hostname(card->host), __func__, enable, ret);
+ } else {
+ if (enable) {
+ mmc_card_set_auto_bkops(card);
+ mmc_update_bkops_auto_on(&card->bkops.stats);
+ } else {
+ mmc_card_clr_auto_bkops(card);
+ mmc_update_bkops_auto_off(&card->bkops.stats);
+ }
+ card->ext_csd.bkops_en = bkops_en;
+ pr_debug("%s: %s: bkops state %x\n",
+ mmc_hostname(card->host), __func__, bkops_en);
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(mmc_set_auto_bkops);
+
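[Editor's sketch, not part of the patch] A hypothetical call site enabling AUTO_EN once during card initialization, with the host claimed as the kernel-doc above requires:

	static void mmc_enable_auto_bkops_example(struct mmc_card *card)
	{
		mmc_claim_host(card->host);
		if (mmc_card_support_auto_bkops(card) &&
		    mmc_set_auto_bkops(card, true))
			pr_warn("%s: failed to enable auto bkops\n",
				mmc_hostname(card->host));
		mmc_release_host(card->host);
	}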
+/**
+ * mmc_check_bkops - check BKOPS for supported cards
+ * @card: MMC card to check BKOPS
+ *
+ * Read the BKOPS status in order to determine whether the
+ * card requires bkops to be started.
+ */
+void mmc_check_bkops(struct mmc_card *card)
{
int err;
- int timeout;
- bool use_busy_signal;
BUG_ON(!card);
- if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
+ if (mmc_card_doing_bkops(card))
return;
err = mmc_read_bkops_status(card);
@@ -377,47 +1323,50 @@
return;
}
- if (!card->ext_csd.raw_bkops_status)
+ card->bkops.needs_check = false;
+
+ mmc_update_bkops_level(&card->bkops.stats,
+ card->ext_csd.raw_bkops_status);
+
+ card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
+}
+EXPORT_SYMBOL(mmc_check_bkops);
+
+/**
+ * mmc_start_manual_bkops - start BKOPS for supported cards
+ * @card: MMC card to start BKOPS
+ *
+ * Send START_BKOPS to the card.
+ * The function should be called with the host claimed.
+ */
+void mmc_start_manual_bkops(struct mmc_card *card)
+{
+ int err;
+
+ BUG_ON(!card);
+
+ if (unlikely(!mmc_card_configured_manual_bkops(card)))
return;
- if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
- from_exception)
+ if (mmc_card_doing_bkops(card))
return;
- mmc_claim_host(card->host);
- if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
- timeout = MMC_BKOPS_MAX_TIMEOUT;
- use_busy_signal = true;
- } else {
- timeout = 0;
- use_busy_signal = false;
- }
-
mmc_retune_hold(card->host);
- err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout,
- use_busy_signal, true, false);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
+ 1, 0, false, true, false);
if (err) {
- pr_warn("%s: Error %d starting bkops\n",
- mmc_hostname(card->host), err);
- mmc_retune_release(card->host);
- goto out;
+ pr_err("%s: Error %d starting manual bkops\n",
+ mmc_hostname(card->host), err);
+ } else {
+ mmc_card_set_doing_bkops(card);
+ mmc_update_bkops_start(&card->bkops.stats);
+ card->bkops.needs_bkops = false;
}
- /*
- * For urgent bkops status (LEVEL_2 and more)
- * bkops executed synchronously, otherwise
- * the operation is in progress
- */
- if (!use_busy_signal)
- mmc_card_set_doing_bkops(card);
- else
- mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
+ mmc_retune_release(card->host);
}
-EXPORT_SYMBOL(mmc_start_bkops);
+EXPORT_SYMBOL(mmc_start_manual_bkops);
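[Editor's sketch, not part of the patch] The split check/start flow that replaces the old mmc_start_bkops(), shown from a hypothetical idle-time caller:

	static void mmc_bkops_idle_work_example(struct mmc_card *card)
	{
		mmc_claim_host(card->host);
		mmc_check_bkops(card);		/* samples raw_bkops_status */
		if (card->bkops.needs_bkops)
			mmc_start_manual_bkops(card);
		mmc_release_host(card->host);
	}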
/*
* mmc_wait_data_done() - done callback for data request
@@ -427,10 +1376,13 @@
*/
static void mmc_wait_data_done(struct mmc_request *mrq)
{
+ unsigned long flags;
struct mmc_context_info *context_info = &mrq->host->context_info;
+ spin_lock_irqsave(&context_info->lock, flags);
context_info->is_done_rcv = true;
wake_up_interruptible(&context_info->wait);
+ spin_unlock_irqrestore(&context_info->lock, flags);
}
static void mmc_wait_done(struct mmc_request *mrq)
@@ -520,6 +1472,7 @@
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
int err;
+ bool is_done_rcv = false;
unsigned long flags;
while (1) {
@@ -527,9 +1480,10 @@
(context_info->is_done_rcv ||
context_info->is_new_req));
spin_lock_irqsave(&context_info->lock, flags);
+ is_done_rcv = context_info->is_done_rcv;
context_info->is_waiting_last_req = false;
spin_unlock_irqrestore(&context_info->lock, flags);
- if (context_info->is_done_rcv) {
+ if (is_done_rcv) {
context_info->is_done_rcv = false;
context_info->is_new_req = false;
cmd = mrq->cmd;
@@ -564,20 +1518,20 @@
struct mmc_command *cmd;
while (1) {
- wait_for_completion(&mrq->completion);
+ wait_for_completion_io(&mrq->completion);
cmd = mrq->cmd;
/*
- * If host has timed out waiting for the sanitize
+ * If host has timed out waiting for the sanitize/bkops
* to complete, card might be still in programming state
* so let's try to bring the card out of programming
* state.
*/
- if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
+ if ((cmd->bkops_busy || cmd->sanitize_busy) && cmd->error == -ETIMEDOUT) {
if (!mmc_interrupt_hpi(host->card)) {
- pr_warn("%s: %s: Interrupted sanitize\n",
- mmc_hostname(host), __func__);
+ pr_warn("%s: %s: Interrupted sanitize/bkops\n",
+ mmc_hostname(host), __func__);
cmd->error = 0;
break;
} else {
@@ -664,6 +1618,134 @@
}
/**
+ * mmc_cmdq_discard_queue - discard task(s) queued in the device
+ * @host: host instance
+ * @tasks: mask of tasks to be discarded; a value of 0
+ *	removes all queued tasks
+ */
+int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
+{
+ return mmc_discard_queue(host, tasks);
+}
+EXPORT_SYMBOL(mmc_cmdq_discard_queue);
+
+/**
+ * mmc_cmdq_post_req - post process of a completed request
+ * @host: host instance
+ * @tag: the request tag.
+ * @err: non-zero on error, zero on success
+ */
+void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
+{
+ if (likely(host->cmdq_ops->post_req))
+ host->cmdq_ops->post_req(host, tag, err);
+}
+EXPORT_SYMBOL(mmc_cmdq_post_req);
+
+/**
+ * mmc_cmdq_halt - halt/un-halt the command queue engine
+ * @host: host instance
+ * @halt: true to halt, false to un-halt
+ *
+ * Host halts the command queue engine. It should complete
+ * the ongoing transfer and release the bus.
+ * All legacy commands can be sent upon successful
+ * completion of this function.
+ * Returns 0 on success, negative otherwise
+ */
+int mmc_cmdq_halt(struct mmc_host *host, bool halt)
+{
+ int err = 0;
+
+ if (mmc_host_cq_disable(host)) {
+ pr_debug("%s: %s: CQE is already disabled\n",
+ mmc_hostname(host), __func__);
+ return 0;
+ }
+
+ if ((halt && mmc_host_halt(host)) ||
+ (!halt && !mmc_host_halt(host))) {
+ pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(host),
+ __func__, halt ? "halted" : "un-halted");
+ return 0;
+ }
+
+ mmc_host_clk_hold(host);
+ if (host->cmdq_ops->halt) {
+ err = host->cmdq_ops->halt(host, halt);
+ if (!err && host->ops->notify_halt)
+ host->ops->notify_halt(host, halt);
+ if (!err && halt)
+ mmc_host_set_halt(host);
+ else if (!err && !halt) {
+ mmc_host_clr_halt(host);
+ wake_up(&host->cmdq_ctx.wait);
+ }
+ } else {
+ err = -ENOSYS;
+ }
+ mmc_host_clk_release(host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_halt);
+
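[Editor's sketch, not part of the patch] Legacy (non-CQ) commands may only be issued once the CQE is halted; a hypothetical wrapper mirroring the debugfs usage later in this patch:

	static int mmc_cmdq_legacy_cmd_example(struct mmc_host *host,
					       struct mmc_command *cmd)
	{
		int err = mmc_cmdq_halt(host, true);

		if (err)
			return err;
		err = mmc_wait_for_cmd(host, cmd, 0);
		if (mmc_cmdq_halt(host, false))	/* un-halt wakes cmdq waiters */
			pr_err("%s: cmdq unhalt failed\n", mmc_hostname(host));
		return err;
	}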
+int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
+{
+ struct mmc_request *mrq = &cmdq_req->mrq;
+
+ mrq->host = host;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ return -ENOMEDIUM;
+ }
+ mmc_start_cmdq_request(host, mrq);
+ return 0;
+}
+EXPORT_SYMBOL(mmc_cmdq_start_req);
+
+static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
+{
+ mmc_host_clk_release(mrq->host);
+ complete(&mrq->completion);
+}
+
+int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req)
+{
+ struct mmc_request *mrq = &cmdq_req->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ int err = 0;
+
+ init_completion(&mrq->completion);
+ mrq->done = mmc_cmdq_dcmd_req_done;
+ err = mmc_cmdq_start_req(host, cmdq_req);
+ if (err)
+ return err;
+
+ wait_for_completion_io(&mrq->completion);
+ if (cmd->error) {
+ pr_err("%s: DCMD %d failed with err %d\n",
+ mmc_hostname(host), cmd->opcode,
+ cmd->error);
+ err = cmd->error;
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->dumpstate(host);
+ mmc_host_clk_release(host);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
+
+int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
+{
+ return __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ 0, true, true);
+}
+EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
+
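[Editor's sketch, not part of the patch] A cache flush issued as a DCMD through the queue; the caller and its cmdq_req are hypothetical, and mmc_cmdq_prepare_flush() only fills in the CMD6 FLUSH_CACHE arguments.

	static int mmc_cmdq_flush_cache_example(struct mmc_card *card,
						struct mmc_cmdq_req *cmdq_req)
	{
		int err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);

		if (err)
			return err;
		return mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
	}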
+/**
* mmc_start_req - start a non-blocking request
* @host: MMC host to start command
* @areq: async request to start
@@ -713,7 +1795,7 @@
if (areq)
mmc_post_req(host, areq->mrq, -EINVAL);
- mmc_start_bkops(host->card, true);
+ mmc_check_bkops(host->card);
/* prepare the request again */
if (areq)
@@ -735,8 +1817,7 @@
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- /* Cancel a prepared request if it was not started. */
- if ((err || start_err) && areq)
+ if (err && areq)
mmc_post_req(host, areq->mrq, -EINVAL);
if (err)
@@ -764,6 +1845,10 @@
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(host))
+ mmc_resume_bus(host);
+#endif
__mmc_start_req(host, mrq);
if (!mrq->cap_cmd_during_tfr)
@@ -819,8 +1904,6 @@
}
err = mmc_send_hpi_cmd(card, &status);
- if (err)
- goto out;
prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
do {
@@ -828,8 +1911,13 @@
if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
break;
- if (time_after(jiffies, prg_wait))
- err = -ETIMEDOUT;
+ if (time_after(jiffies, prg_wait)) {
+ err = mmc_send_status(card, &status);
+ if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
+ err = -ETIMEDOUT;
+ else
+ break;
+ }
} while (!err);
out:
@@ -881,6 +1969,11 @@
int err = 0;
BUG_ON(!card);
+ if (unlikely(!mmc_card_configured_manual_bkops(card)))
+ goto out;
+ if (!mmc_card_doing_bkops(card))
+ goto out;
+
err = mmc_interrupt_hpi(card);
/*
@@ -889,10 +1982,11 @@
*/
if (!err || (err == -EINVAL)) {
mmc_card_clr_doing_bkops(card);
+ mmc_update_bkops_hpi(&card->bkops.stats);
mmc_retune_release(card->host);
err = 0;
}
-
+out:
return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
@@ -908,8 +2002,14 @@
if (err)
return err;
- card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
- card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
+ MMC_BKOPS_URGENCY_MASK;
+ card->ext_csd.raw_exception_status =
+ ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & (EXT_CSD_URGENT_BKOPS |
+ EXT_CSD_DYNCAP_NEEDED |
+ EXT_CSD_SYSPOOL_EXHAUSTED
+ | EXT_CSD_PACKED_FAILURE);
+
kfree(ext_csd);
return 0;
}
@@ -927,6 +2027,10 @@
{
unsigned int mult;
+ if (!card) {
+ WARN_ON(1);
+ return;
+ }
/*
* SDIO cards only define an upper 1 s limit on access.
*/
@@ -993,9 +2097,11 @@
* Address this by setting the read timeout to a "reasonably high"
* value. For the cards tested, 600ms has proven enough. If necessary,
* this value can be increased if other problematic cards require this.
+	 * Certain Hynix 5.x cards give read timeouts even with 300ms, so
+	 * increase it further to the maximum value (4s).
*/
if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
- data->timeout_ns = 600000000;
+ data->timeout_ns = 4000000000u;
data->timeout_clks = 0;
}
@@ -1014,6 +2120,11 @@
data->timeout_ns = 100000000; /* 100ms */
}
}
+ /* Increase the timeout values for some bad INAND MCP devices */
+ if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
+ data->timeout_ns = 4000000000u; /* 4s */
+ data->timeout_clks = 0;
+ }
}
EXPORT_SYMBOL(mmc_set_data_timeout);
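[Editor's sketch, not part of the patch] Devices needing the 4 s data timeout above would be tagged through the standard quirk table; the CID match below is a placeholder, not a real device list.

	static const struct mmc_fixup inand_timeout_fixups_example[] = {
		MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, CID_OEMID_ANY,
			  add_quirk_mmc, MMC_QUIRK_INAND_DATA_TIMEOUT),
		END_FIXUP
	};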
@@ -1064,6 +2175,7 @@
might_sleep();
add_wait_queue(&host->wq, &wait);
+
spin_lock_irqsave(&host->lock, flags);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1089,6 +2201,9 @@
if (pm)
pm_runtime_get_sync(mmc_dev(host));
+ if (host->ops->enable && !stop && host->claim_cnt == 1)
+ host->ops->enable(host);
+
return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
@@ -1106,6 +2221,9 @@
WARN_ON(!host->claimed);
+ if (host->ops->disable && host->claim_cnt == 1)
+ host->ops->disable(host);
+
spin_lock_irqsave(&host->lock, flags);
if (--host->claim_cnt) {
/* Release for nested claim */
@@ -1129,9 +2247,14 @@
{
pm_runtime_get_sync(&card->dev);
mmc_claim_host(card->host);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host))
+ mmc_resume_bus(card->host);
+#endif
}
EXPORT_SYMBOL(mmc_get_card);
+
/*
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
@@ -1148,7 +2271,7 @@
* Internal function that does the actual ios call to the host driver,
* optionally printing some debug output.
*/
-static inline void mmc_set_ios(struct mmc_host *host)
+void mmc_set_ios(struct mmc_host *host)
{
struct mmc_ios *ios = &host->ios;
@@ -1161,7 +2284,21 @@
if (ios->clock > 0)
mmc_set_ungated(host);
host->ops->set_ios(host, ios);
+ if (ios->old_rate != ios->clock) {
+ if (likely(ios->clk_ts)) {
+ char trace_info[80];
+ snprintf(trace_info, 80,
+ "%s: freq_KHz %d --> %d | t = %d",
+ mmc_hostname(host), ios->old_rate / 1000,
+ ios->clock / 1000, jiffies_to_msecs(
+ (long)jiffies - (long)ios->clk_ts));
+ trace_mmc_clk(trace_info);
+ }
+ ios->old_rate = ios->clock;
+ ios->clk_ts = jiffies;
+ }
}
+EXPORT_SYMBOL(mmc_set_ios);
/*
* Control chip select pin on a host.
@@ -1204,6 +2341,8 @@
{
unsigned long flags;
+ WARN_ON(!host->ios.clock);
+
spin_lock_irqsave(&host->clk_lock, flags);
host->clk_old = host->ios.clock;
host->ios.clock = 0;
@@ -1226,7 +2365,7 @@
* we just ignore the call.
*/
if (host->clk_old) {
- BUG_ON(host->ios.clock);
+ WARN_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
__mmc_set_clock(host, host->clk_old);
}
@@ -1249,6 +2388,10 @@
void mmc_set_ungated(struct mmc_host *host)
{
}
+
+void mmc_gate_clock(struct mmc_host *host)
+{
+}
#endif
int mmc_execute_tuning(struct mmc_card *card)
@@ -1309,9 +2452,10 @@
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
- else
+ else {
host->ios.chip_select = MMC_CS_DONTCARE;
- host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ }
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
host->ios.drv_type = 0;
@@ -1790,12 +2934,15 @@
pr_warn("%s: cannot verify signal voltage switch\n",
mmc_hostname(host));
- mmc_host_clk_hold(host);
-
cmd.opcode = SD_SWITCH_VOLTAGE;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ /*
+	 * Hold the clock reference so the clock doesn't get auto-gated
+	 * during this voltage switch sequence.
+ */
+ mmc_host_clk_hold(host);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto err_command;
@@ -1817,6 +2964,7 @@
* During a signal voltage level switch, the clock must be gated
* for 5 ms according to the SD spec
*/
+ host->card_clock_off = true;
clock = host->ios.clock;
host->ios.clock = 0;
mmc_set_ios(host);
@@ -1827,6 +2975,9 @@
* sent CMD11, so a power cycle is required anyway
*/
err = -EAGAIN;
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+ host->card_clock_off = false;
goto power_cycle;
}
@@ -1835,6 +2986,7 @@
host->ios.clock = clock;
mmc_set_ios(host);
+ host->card_clock_off = false;
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
@@ -2046,6 +3198,40 @@
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+ int err = 0;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead && host->card) {
+ mmc_power_up(host, host->card->ocr);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ if (mmc_card_cmdq(host->card)) {
+ err = mmc_cmdq_halt(host, false);
+ if (err)
+ pr_err("%s: %s: unhalt failed: %d\n",
+ mmc_hostname(host), __func__, err);
+ else
+ mmc_card_clr_suspended(host->card);
+ }
+ }
+
+ mmc_bus_put(host);
+ pr_debug("%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -2277,16 +3463,9 @@
return mmc_mmc_erase_timeout(card, arg, qty);
}
-static int mmc_do_erase(struct mmc_card *card, unsigned int from,
- unsigned int to, unsigned int arg)
+static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
{
- struct mmc_command cmd = {0};
- unsigned int qty = 0, busy_timeout = 0;
- bool use_r1b_resp = false;
- unsigned long timeout;
- int err;
-
- mmc_retune_hold(card->host);
+ u32 qty = 0;
/*
* qty is used to calculate the erase timeout which depends on how many
@@ -2312,12 +3491,120 @@
else
qty += ((to / card->erase_size) -
(from / card->erase_size)) + 1;
+ return qty;
+}
+
+static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
+{
+ struct mmc_command *cmd = cmdq_req->mrq.cmd;
+ int err;
+
+ memset(cmd, 0, sizeof(struct mmc_command));
+
+ cmd->opcode = opcode;
+ cmd->arg = arg;
+ if (cmd->opcode == MMC_ERASE) {
+ cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
+ } else {
+ cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ }
+
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err) {
+ pr_err("mmc_erase: group start error %d, status %#x\n",
+ err, cmd->resp[0]);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command *cmd = cmdq_req->mrq.cmd;
+ unsigned int qty = 0;
+ unsigned long timeout;
+ unsigned int fr, nr;
+ int err;
+
+ fr = from;
+ nr = to - from + 1;
+
+ qty = mmc_get_erase_qty(card, from, to);
if (!mmc_card_blockaddr(card)) {
from <<= 9;
to <<= 9;
}
+ err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
+ from, qty);
+ if (err)
+ goto out;
+
+ err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
+ to, qty);
+ if (err)
+ goto out;
+
+ err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
+ arg, qty);
+ if (err)
+ goto out;
+
+ timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+ do {
+ memset(cmd, 0, sizeof(struct mmc_command));
+ cmd->opcode = MMC_SEND_STATUS;
+ cmd->arg = card->rca << 16;
+ cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
+ /* Do not retry else we can't see errors */
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err || (cmd->resp[0] & 0xFDF92000)) {
+ pr_err("error %d requesting status %#x\n",
+ err, cmd->resp[0]);
+ err = -EIO;
+ goto out;
+ }
+		/*
+		 * Time out if the device never becomes ready for data and
+		 * never leaves the program state.
+		 */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s\n",
+ mmc_hostname(card->host), __func__);
+ err = -EIO;
+ goto out;
+ }
+ } while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
+out:
+ return err;
+}
+
+static int mmc_do_erase(struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command cmd = {0};
+ unsigned int qty = 0, busy_timeout = 0;
+ bool use_r1b_resp = false;
+ unsigned long timeout;
+ unsigned int fr, nr;
+ int err;
+
+ fr = from;
+ nr = to - from + 1;
+
+ qty = mmc_get_erase_qty(card, from, to);
+
+ if (!mmc_card_blockaddr(card)) {
+ from <<= 9;
+ to <<= 9;
+ }
+
+ mmc_retune_hold(card->host);
if (mmc_card_sd(card))
cmd.opcode = SD_ERASE_WR_BLK_START;
else
@@ -2464,20 +3751,9 @@
return nr_new;
}
-/**
- * mmc_erase - erase sectors.
- * @card: card to erase
- * @from: first sector to erase
- * @nr: number of sectors to erase
- * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
- *
- * Caller must claim host before calling this function.
- */
-int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
- unsigned int arg)
+int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
+ unsigned int nr, unsigned int arg)
{
- unsigned int rem, to = from + nr;
- int err;
if (!(card->host->caps & MMC_CAP_ERASE) ||
!(card->csd.cmdclass & CCC_ERASE))
@@ -2501,6 +3777,68 @@
if (from % card->erase_size || nr % card->erase_size)
return -EINVAL;
}
+ return 0;
+}
+
+int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+ int ret;
+
+ ret = mmc_erase_sanity_check(card, from, nr, arg);
+ if (ret)
+ return ret;
+
+ if (arg == MMC_ERASE_ARG) {
+ rem = from % card->erase_size;
+ if (rem) {
+ rem = card->erase_size - rem;
+ from += rem;
+ if (nr > rem)
+ nr -= rem;
+ else
+ return 0;
+ }
+ rem = nr % card->erase_size;
+ if (rem)
+ nr -= rem;
+ }
+
+ if (nr == 0)
+ return 0;
+
+ to = from + nr;
+
+ if (to <= from)
+ return -EINVAL;
+
+ /* 'from' and 'to' are inclusive */
+ to -= 1;
+
+ return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
+}
+EXPORT_SYMBOL(mmc_cmdq_erase);
+
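[Editor's sketch, not part of the patch] A worked numeric example of the MMC_ERASE_ARG head/tail trimming above, with hypothetical values:

	static void mmc_erase_align_example(void)
	{
		unsigned int erase_size = 1024, from = 1000, nr = 5000, rem;

		rem = from % erase_size;	/* 1000 */
		if (rem) {
			rem = erase_size - rem;	/* 24 unaligned head sectors */
			from += rem;		/* 1024 */
			nr -= rem;		/* 4976 */
		}
		nr -= nr % erase_size;		/* drop 880-sector tail -> 4096 */
		/* erased range: sectors 1024..5119 (from + nr - 1) */
	}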
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+ int ret;
+
+ ret = mmc_erase_sanity_check(card, from, nr, arg);
+ if (ret)
+ return ret;
if (arg == MMC_ERASE_ARG)
nr = mmc_align_erase_size(card, &from, &to, nr);
@@ -2524,10 +3862,10 @@
*/
rem = card->erase_size - (from % card->erase_size);
if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
- err = mmc_do_erase(card, from, from + rem - 1, arg);
+ ret = mmc_do_erase(card, from, from + rem - 1, arg);
from += rem;
- if ((err) || (to <= from))
- return err;
+ if ((ret) || (to <= from))
+ return ret;
}
return mmc_do_erase(card, from, to, arg);
@@ -2677,6 +4015,9 @@
struct mmc_host *host = card->host;
unsigned int max_discard, max_trim;
+	if (!host->max_busy_timeout ||
+	    (host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
+		return UINT_MAX;
+
/*
* Without erase_group_def set, MMC erase timeout depends on clock
* frequence which can change. In that case, the best choice is
@@ -2738,6 +4079,23 @@
mmc_host_clk_release(host);
}
+/*
+ * mmc_cmdq_hw_reset: helper API to reset the host controller and
+ * reinitialize the card.
+ * The caller must hold the host via mmc_claim_host().
+ */
+int mmc_cmdq_hw_reset(struct mmc_host *host)
+{
+ if (!host->bus_ops->power_restore)
+ return -EOPNOTSUPP;
+
+ mmc_power_cycle(host, host->ocr_avail);
+ mmc_select_voltage(host, host->card->ocr);
+ return host->bus_ops->power_restore(host);
+}
+EXPORT_SYMBOL(mmc_cmdq_hw_reset);
+
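[Editor's sketch, not part of the patch] A hypothetical error handler resetting the controller and reinitializing the card after an unrecoverable CQE error, holding the host as required:

	static int mmc_cmdq_err_recovery_example(struct mmc_host *host)
	{
		int err;

		mmc_claim_host(host);
		err = mmc_cmdq_hw_reset(host);
		if (err)
			pr_err("%s: cmdq hw reset failed (%d)\n",
			       mmc_hostname(host), err);
		mmc_release_host(host);
		return err;
	}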
int mmc_hw_reset(struct mmc_host *host)
{
int ret;
@@ -2832,6 +4190,10 @@
if (ret) {
mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
@@ -2878,12 +4240,16 @@
void mmc_rescan(struct work_struct *work)
{
+ unsigned long flags;
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
- int i;
- if (host->rescan_disable)
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->rescan_disable) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
/* If there is a non-removable card registered, only scan once */
if (!mmc_card_is_removable(host) && host->rescan_entered)
@@ -2934,13 +4300,7 @@
mmc_release_host(host);
goto out;
}
-
- for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
- break;
- if (freqs[i] <= host->f_min)
- break;
- }
+ mmc_rescan_try_freq(host, host->f_min);
mmc_release_host(host);
out:
@@ -2950,18 +4310,18 @@
void mmc_start_host(struct mmc_host *host)
{
+ mmc_claim_host(host);
host->f_init = max(freqs[0], host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
- mmc_claim_host(host);
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
mmc_power_up(host, host->ocr_avail);
- mmc_release_host(host);
mmc_gpiod_request_cd_irq(host);
+ mmc_release_host(host);
_mmc_detect_change(host, 0, false);
}
@@ -3044,7 +4404,9 @@
}
mmc_power_up(host, host->card->ocr);
+ mmc_claim_host(host);
ret = host->bus_ops->power_restore(host);
+ mmc_release_host(host);
mmc_bus_put(host);
@@ -3053,6 +4415,40 @@
EXPORT_SYMBOL(mmc_power_restore_host);
/*
+ * Add barrier request to the requests in cache
+ */
+int mmc_cache_barrier(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ if (!card->ext_csd.cache_ctrl ||
+ (card->quirks & MMC_QUIRK_CACHE_DISABLE))
+ goto out;
+
+ if (!mmc_card_mmc(card))
+ goto out;
+
+ if (!card->ext_csd.barrier_en)
+ return -ENOTSUPP;
+
+	/*
+	 * If a device receives the maximum supported number of barrier
+	 * requests, a barrier command is treated as a flush command.
+	 * Hence, it is better to use the flush timeout instead of a
+	 * generic CMD6 timeout.
+	 */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 0x2, 0);
+ if (err)
+ pr_err("%s: cache barrier error %d\n",
+ mmc_hostname(host), err);
+out:
+ return err;
+}
+EXPORT_SYMBOL(mmc_cache_barrier);
+
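[Editor's sketch, not part of the patch] FLUSH_CACHE (EXT_CSD byte 32) takes 1 for a full flush and 0x2 for a barrier, which is why mmc_cache_barrier() writes 0x2. A hypothetical fsync-path helper falling back to a full flush:

	static int mmc_blk_barrier_example(struct mmc_card *card)
	{
		if (card->ext_csd.barrier_en)
			return mmc_cache_barrier(card);
		return mmc_flush_cache(card);	/* card lacks barrier support */
	}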
+/*
* Flush the cache to the non-volatile storage.
*/
int mmc_flush_cache(struct mmc_card *card)
@@ -3061,12 +4457,23 @@
if (mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0) &&
- (card->ext_csd.cache_ctrl & 1)) {
+ (card->ext_csd.cache_ctrl & 1) &&
+ (!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1, 0);
- if (err)
+ if (err == -ETIMEDOUT) {
+ pr_err("%s: cache flush timeout\n",
+ mmc_hostname(card->host));
+ err = mmc_interrupt_hpi(card);
+ if (err) {
+ pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
+ mmc_hostname(card->host), err);
+ err = -ENODEV;
+ }
+ } else if (err) {
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
+ }
}
return err;
@@ -3119,6 +4526,10 @@
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
spin_unlock_irqrestore(&host->lock, flags);
_mmc_detect_change(host, 0, false);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index c975c7a..2adf42c 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -15,21 +15,6 @@
#define MMC_CMD_RETRIES 3
-struct mmc_bus_ops {
- void (*remove)(struct mmc_host *);
- void (*detect)(struct mmc_host *);
- int (*pre_suspend)(struct mmc_host *);
- int (*suspend)(struct mmc_host *);
- int (*resume)(struct mmc_host *);
- int (*runtime_suspend)(struct mmc_host *);
- int (*runtime_resume)(struct mmc_host *);
- int (*power_save)(struct mmc_host *);
- int (*power_restore)(struct mmc_host *);
- int (*alive)(struct mmc_host *);
- int (*shutdown)(struct mmc_host *);
- int (*reset)(struct mmc_host *);
-};
-
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
void mmc_detach_bus(struct mmc_host *host);
@@ -40,6 +25,8 @@
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+int mmc_clk_update_freq(struct mmc_host *host,
+ unsigned long freq, enum mmc_load state);
void mmc_gate_clock(struct mmc_host *host);
void mmc_ungate_clock(struct mmc_host *host);
void mmc_set_ungated(struct mmc_host *host);
@@ -62,6 +49,8 @@
if (ms < 1000 / HZ) {
cond_resched();
mdelay(ms);
+ } else if (ms < jiffies_to_msecs(2)) {
+ usleep_range(ms * 1000, (ms + 1) * 1000);
} else {
msleep(ms);
}
@@ -89,6 +78,12 @@
void mmc_init_context_info(struct mmc_host *host);
+extern bool mmc_can_scale_clk(struct mmc_host *host);
+extern int mmc_init_clk_scaling(struct mmc_host *host);
+extern int mmc_resume_clk_scaling(struct mmc_host *host);
+extern int mmc_exit_clk_scaling(struct mmc_host *host);
+extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
+
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index bf0f6ce..0d0d56f 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fault-inject.h>
+#include <linux/uaccess.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -31,6 +32,26 @@
#endif /* CONFIG_FAIL_MMC_REQUEST */
/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ring_buffer_show(struct seq_file *s, void *data)
+{
+ struct mmc_host *mmc = s->private;
+
+ mmc_dump_trace_buffer(mmc, s);
+ return 0;
+}
+
+static int mmc_ring_buffer_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_ring_buffer_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ring_buffer_fops = {
+ .open = mmc_ring_buffer_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int mmc_ios_show(struct seq_file *s, void *data)
{
static const char *vdd_str[] = {
@@ -234,6 +255,100 @@
DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
+#include <linux/delay.h>
+
+static int mmc_scale_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ *val = host->clk_scaling.curr_freq;
+
+ return 0;
+}
+
+static int mmc_scale_set(void *data, u64 val)
+{
+ int err = 0;
+ struct mmc_host *host = data;
+
+ mmc_claim_host(host);
+ mmc_host_clk_hold(host);
+
+ /* change frequency from sysfs manually */
+ err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
+ if (err == -EAGAIN)
+ err = 0;
+ else if (err)
+ pr_err("%s: clock scale to %llu failed with error %d\n",
+ mmc_hostname(host), val, err);
+ else
+ pr_debug("%s: clock change to %llu finished successfully (%s)\n",
+ mmc_hostname(host), val, current->comm);
+
+ mmc_host_clk_release(host);
+ mmc_release_host(host);
+
+ return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_scale_fops, mmc_scale_get, mmc_scale_set,
+ "%llu\n");
+
+static int mmc_max_clock_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = host->f_max;
+
+ return 0;
+}
+
+static int mmc_max_clock_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+ int err = -EINVAL;
+ unsigned long freq = val;
+ unsigned int old_freq;
+
+ if (!host || (val < host->f_min))
+ goto out;
+
+ mmc_claim_host(host);
+ if (host->bus_ops && host->bus_ops->change_bus_speed) {
+ old_freq = host->f_max;
+ host->f_max = freq;
+
+ err = host->bus_ops->change_bus_speed(host, &freq);
+
+ if (err)
+ host->f_max = old_freq;
+ }
+ mmc_release_host(host);
+out:
+ return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_max_clock_fops, mmc_max_clock_get,
+ mmc_max_clock_set, "%llu\n");
+
+static int mmc_force_err_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+ if (host && host->ops && host->ops->force_err_irq) {
+ mmc_host_clk_hold(host);
+ host->ops->force_err_irq(host, val);
+ mmc_host_clk_release(host);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_force_err_fops, NULL, mmc_force_err_set, "%llu\n");
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -256,6 +371,29 @@
&mmc_clock_fops))
goto err_node;
+ if (!debugfs_create_file("max_clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_max_clock_fops))
+ goto err_node;
+
+ if (!debugfs_create_file("scale", S_IRUSR | S_IWUSR, root, host,
+ &mmc_scale_fops))
+ goto err_node;
+
+ if (!debugfs_create_bool("skip_clk_scale_freq_update",
+ S_IRUSR | S_IWUSR, root,
+ &host->clk_scaling.skip_clk_scale_freq_update))
+ goto err_node;
+
+ if (!debugfs_create_bool("cmdq_task_history",
+ S_IRUSR | S_IWUSR, root,
+ &host->cmdq_thist_enabled))
+ goto err_node;
+
+#ifdef CONFIG_MMC_RING_BUFFER
+ if (!debugfs_create_file("ring_buffer", S_IRUSR,
+ root, host, &mmc_ring_buffer_fops))
+ goto err_node;
+#endif
#ifdef CONFIG_MMC_CLKGATE
if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
root, &host->clk_delay))
@@ -270,6 +408,10 @@
&host->fail_mmc_request)))
goto err_node;
#endif
+ if (!debugfs_create_file("force_error", S_IWUSR, root, host,
+ &mmc_force_err_fops))
+ goto err_node;
+
return;
err_node:
@@ -291,11 +433,26 @@
int ret;
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ ret = mmc_cmdq_halt_on_empty_queue(card->host);
+ if (ret) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host), __func__,
+ ret);
+ goto out;
+ }
+ }
ret = mmc_send_status(data, &status);
if (!ret)
*val = status;
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+out:
mmc_put_card(card);
return ret;
@@ -318,8 +475,18 @@
return -ENOMEM;
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt_on_empty_queue(card->host);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host), __func__,
+ err);
+ mmc_put_card(card);
+ goto out_free_halt;
+ }
+ }
+
err = mmc_get_ext_csd(card, &ext_csd);
- mmc_put_card(card);
if (err)
goto out_free;
@@ -329,10 +496,25 @@
BUG_ON(n != EXT_CSD_STR_LEN);
filp->private_data = buf;
+
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
+ mmc_put_card(card);
kfree(ext_csd);
return 0;
out_free:
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+ mmc_put_card(card);
+out_free_halt:
kfree(buf);
return err;
}
@@ -359,6 +541,275 @@
.llseek = default_llseek,
};
+static int mmc_wr_pack_stats_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_card *card = inode->i_private;
+
+ filp->private_data = card;
+ card->wr_pack_stats.print_in_read = 1;
+ return 0;
+}
+
+#define TEMP_BUF_SIZE 256
+static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ struct mmc_wr_pack_stats *pack_stats;
+ int i;
+ int max_num_of_packed_reqs = 0;
+ char *temp_buf;
+
+ if (!card)
+ return cnt;
+
+ if (!access_ok(VERIFY_WRITE, ubuf, cnt))
+ return cnt;
+
+ if (!card->wr_pack_stats.print_in_read)
+ return 0;
+
+ if (!card->wr_pack_stats.enabled) {
+ pr_info("%s: write packing statistics are disabled\n",
+ mmc_hostname(card->host));
+ goto exit;
+ }
+
+ pack_stats = &card->wr_pack_stats;
+
+ if (!pack_stats->packing_events) {
+ pr_info("%s: NULL packing_events\n", mmc_hostname(card->host));
+ goto exit;
+ }
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+ if (!temp_buf)
+ goto exit;
+
+ spin_lock(&pack_stats->lock);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
+ mmc_hostname(card->host));
+ strlcat(ubuf, temp_buf, cnt);
+
+ for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+ if (pack_stats->packing_events[i]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: Packed %d reqs - %d times\n",
+ mmc_hostname(card->host), i,
+ pack_stats->packing_events[i]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ }
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: stopped packing due to the following reasons:\n",
+ mmc_hostname(card->host));
+ strlcat(ubuf, temp_buf, cnt);
+
+ if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: exceed max num of segments\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: exceed max num of sectors\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: wrong data direction\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: flush or discard\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: empty queue\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[EMPTY_QUEUE]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[REL_WRITE]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: rel write\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[REL_WRITE]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[THRESHOLD]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: Threshold\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[THRESHOLD]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+
+ if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: Large sector alignment\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[RANDOM]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: random request\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[RANDOM]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[FUA]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: fua request\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[FUA]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+
+ spin_unlock(&pack_stats->lock);
+
+ kfree(temp_buf);
+
+ pr_info("%s", ubuf);
+
+exit:
+ if (card->wr_pack_stats.print_in_read == 1) {
+ card->wr_pack_stats.print_in_read = 0;
+ return strnlen(ubuf, cnt);
+ }
+
+ return 0;
+}
+
+static ssize_t mmc_wr_pack_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ int value;
+
+ if (!card)
+ return cnt;
+
+	if (kstrtoint_from_user(ubuf, cnt, 0, &value))
+		return -EINVAL;
+
+ if (value) {
+ mmc_blk_init_packed_statistics(card);
+ } else {
+ spin_lock(&card->wr_pack_stats.lock);
+ card->wr_pack_stats.enabled = false;
+ spin_unlock(&card->wr_pack_stats.lock);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations mmc_dbg_wr_pack_stats_fops = {
+ .open = mmc_wr_pack_stats_open,
+ .read = mmc_wr_pack_stats_read,
+ .write = mmc_wr_pack_stats_write,
+};
+
+static int mmc_bkops_stats_read(struct seq_file *file, void *data)
+{
+ struct mmc_card *card = file->private;
+ struct mmc_bkops_stats *stats;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ stats = &card->bkops.stats;
+
+ if (!stats->enabled) {
+ pr_info("%s: bkops statistics are disabled\n",
+ mmc_hostname(card->host));
+ goto exit;
+ }
+
+ spin_lock(&stats->lock);
+
+ seq_printf(file, "%s: bkops statistics:\n",
+ mmc_hostname(card->host));
+ seq_printf(file, "%s: BKOPS: sent START_BKOPS to device: %u\n",
+ mmc_hostname(card->host), stats->manual_start);
+ seq_printf(file, "%s: BKOPS: stopped due to HPI: %u\n",
+ mmc_hostname(card->host), stats->hpi);
+ seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 1: %u\n",
+ mmc_hostname(card->host), stats->auto_start);
+ seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 0: %u\n",
+ mmc_hostname(card->host), stats->auto_stop);
+
+ for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; ++i)
+ seq_printf(file, "%s: BKOPS: due to level %d: %u\n",
+ mmc_hostname(card->host), i, stats->level[i]);
+
+ spin_unlock(&stats->lock);
+
+exit:
+
+ return 0;
+}
+
+static ssize_t mmc_bkops_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mmc_card *card = filp->f_mapping->host->i_private;
+ int value;
+ struct mmc_bkops_stats *stats;
+ int err;
+
+ if (!card)
+ return cnt;
+
+ stats = &card->bkops.stats;
+
+ err = kstrtoint_from_user(ubuf, cnt, 0, &value);
+ if (err) {
+ pr_err("%s: %s: error parsing input from user (%d)\n",
+ mmc_hostname(card->host), __func__, err);
+ return err;
+ }
+ if (value) {
+ mmc_blk_init_bkops_statistics(card);
+ } else {
+ spin_lock(&stats->lock);
+ stats->enabled = false;
+ spin_unlock(&stats->lock);
+ }
+
+ return cnt;
+}
+
+static int mmc_bkops_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_bkops_stats_read, inode->i_private);
+}
+
+static const struct file_operations mmc_dbg_bkops_stats_fops = {
+ .open = mmc_bkops_stats_open,
+ .read = seq_read,
+ .write = mmc_bkops_stats_write,
+};
+
void mmc_add_card_debugfs(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -391,6 +842,19 @@
&mmc_dbg_ext_csd_fops))
goto err;
+ if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
+ (card->host->caps2 & MMC_CAP2_PACKED_WR))
+ if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
+ &mmc_dbg_wr_pack_stats_fops))
+ goto err;
+
+ if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) &&
+ (mmc_card_configured_auto_bkops(card) ||
+ mmc_card_configured_manual_bkops(card)))
+ if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
+ &mmc_dbg_bkops_stats_fops))
+ goto err;
+
return;
err:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f18105f..eb730fd 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -4,6 +4,7 @@
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
* Copyright (C) 2010 Linus Walleij
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -24,6 +25,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/ring_buffer.h>
+
#include <linux/mmc/slot-gpio.h>
#include "core.h"
@@ -31,6 +34,10 @@
#include "slot-gpio.h"
#include "pwrseq.h"
+#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35
+#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5
+#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100
+
static DEFINE_IDA(mmc_host_ida);
static DEFINE_SPINLOCK(mmc_host_lock);
@@ -164,6 +171,7 @@
if (host->clk_gated) {
spin_unlock_irqrestore(&host->clk_lock, flags);
mmc_ungate_clock(host);
+
spin_lock_irqsave(&host->clk_lock, flags);
pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
}
@@ -176,11 +184,19 @@
* mmc_host_may_gate_card - check if this card may be gated
* @card: card to check.
*/
-static bool mmc_host_may_gate_card(struct mmc_card *card)
+bool mmc_host_may_gate_card(struct mmc_card *card)
{
/* If there is no card we may gate it */
if (!card)
return true;
+
+	/*
+	 * An SDIO 3.0 card with asynchronous interrupt support allows its
+	 * clock to be gated, so check for that case.
+	 */
+ if (mmc_card_sdio(card) && card->cccr.async_intr_sup)
+ return true;
+
/*
* Don't gate SDIO cards! These need to be clocked at all times
* since they may be independent systems generating interrupts
@@ -296,6 +312,10 @@
{
}
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+ return false;
+}
#endif
void mmc_retune_enable(struct mmc_host *host)
@@ -645,6 +665,217 @@
EXPORT_SYMBOL(mmc_alloc_host);
+static ssize_t show_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host));
+}
+
+static ssize_t store_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || !host->card || kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mmc_get_card(host->card);
+
+ if (!value) {
+		/* turning off clock scaling */
+ mmc_exit_clk_scaling(host);
+ host->caps2 &= ~MMC_CAP2_CLK_SCALE;
+ host->clk_scaling.state = MMC_LOAD_HIGH;
+ /* Set to max. frequency when disabling */
+ mmc_clk_update_freq(host, host->card->clk_scaling_highest,
+ host->clk_scaling.state);
+	} else {
+		/* starting clock scaling; restart it if already running */
+ host->caps2 |= MMC_CAP2_CLK_SCALE;
+ if (host->clk_scaling.enable)
+ mmc_exit_clk_scaling(host);
+ mmc_init_clk_scaling(host);
+ }
+
+ mmc_put_card(host->card);
+
+ return count;
+}
+
+static ssize_t show_up_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
+}
+
+#define MAX_PERCENTAGE 100
+static ssize_t store_up_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+ return -EINVAL;
+
+ host->clk_scaling.upthreshold = value;
+
+ pr_debug("%s: clkscale_up_thresh set to %lu\n",
+ mmc_hostname(host), value);
+ return count;
+}
+
+static ssize_t show_down_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ host->clk_scaling.downthreshold);
+}
+
+static ssize_t store_down_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+ return -EINVAL;
+
+ host->clk_scaling.downthreshold = value;
+
+ pr_debug("%s: clkscale_down_thresh set to %lu\n",
+ mmc_hostname(host), value);
+ return count;
+}
+
+static ssize_t show_polling(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
+ host->clk_scaling.polling_delay_ms);
+}
+
+static ssize_t store_polling(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ host->clk_scaling.polling_delay_ms = value;
+
+ pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
+ mmc_hostname(host), value);
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+		show_enable, store_enable);
+static DEVICE_ATTR(polling_interval, S_IRUGO | S_IWUSR,
+		show_polling, store_polling);
+static DEVICE_ATTR(up_threshold, S_IRUGO | S_IWUSR,
+		show_up_threshold, store_up_threshold);
+static DEVICE_ATTR(down_threshold, S_IRUGO | S_IWUSR,
+		show_down_threshold, store_down_threshold);
+
+static struct attribute *clk_scaling_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_up_threshold.attr,
+ &dev_attr_down_threshold.attr,
+ &dev_attr_polling_interval.attr,
+ NULL,
+};
+
+static struct attribute_group clk_scaling_attr_grp = {
+ .name = "clk_scaling",
+ .attrs = clk_scaling_attrs,
+};
+
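Once registered, the group above hangs off the host's class device, so the knobs are reachable from userspace at a path like /sys/class/mmc_host/mmc0/clk_scaling/{enable,up_threshold,down_threshold,polling_interval} — the host name varies per system, so the path here is illustrative. A small userspace sketch that enables scaling and adjusts the up threshold:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Paths are assumptions for illustration; the actual host name varies. */
static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
	return 0;
}

int main(void)
{
	write_attr("/sys/class/mmc_host/mmc0/clk_scaling/enable", "1");
	write_attr("/sys/class/mmc_host/mmc0/clk_scaling/up_threshold", "80");
	return 0;
}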
+#ifdef CONFIG_MMC_PERF_PROFILING
+static ssize_t
+show_perf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ int64_t rtime_drv, wtime_drv;
+ unsigned long rbytes_drv, wbytes_drv, flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ rbytes_drv = host->perf.rbytes_drv;
+ wbytes_drv = host->perf.wbytes_drv;
+
+ rtime_drv = ktime_to_us(host->perf.rtime_drv);
+ wtime_drv = ktime_to_us(host->perf.wtime_drv);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "Write performance at driver level: "
+			"%lu bytes in %lld microseconds\n"
+			"Read performance at driver level: "
+			"%lu bytes in %lld microseconds\n",
+ wbytes_drv, wtime_drv,
+ rbytes_drv, rtime_drv);
+}
+
+static ssize_t
+set_perf(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ int64_t value;
+ unsigned long flags;
+
+	if (sscanf(buf, "%lld", &value) != 1)
+		return -EINVAL;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!value) {
+ memset(&host->perf, 0, sizeof(host->perf));
+ host->perf_enable = false;
+ } else {
+ host->perf_enable = true;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR,
+ show_perf, set_perf);
+
+#endif
+
+static struct attribute *dev_attrs[] = {
+#ifdef CONFIG_MMC_PERF_PROFILING
+ &dev_attr_perf.attr,
+#endif
+ NULL,
+};
+static struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
@@ -666,15 +897,31 @@
led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
+ host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
+ host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
+ host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
+ host->clk_scaling.skip_clk_scale_freq_update = false;
+
#ifdef CONFIG_DEBUG_FS
mmc_add_host_debugfs(host);
#endif
mmc_host_clk_sysfs_init(host);
+ mmc_trace_init(host);
+
+ err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+ if (err)
+ pr_err("%s: failed to create clk scale sysfs group with err %d\n",
+ __func__, err);
#ifdef CONFIG_BLOCK
mmc_latency_hist_sysfs_init(host);
#endif
+ err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp);
+ if (err)
+ pr_err("%s: failed to create sysfs group with err %d\n",
+ __func__, err);
+
mmc_start_host(host);
if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
mmc_register_pm_notifier(host);
@@ -706,6 +953,9 @@
mmc_latency_hist_sysfs_exit(host);
#endif
+	sysfs_remove_group(&host->class_dev.kobj, &dev_attr_grp);
+ sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 56e6355..a36bcbb 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -19,6 +19,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
+#include <linux/reboot.h>
+#include <trace/events/mmc.h>
#include "core.h"
#include "host.h"
@@ -72,6 +74,7 @@
__res & __mask; \
})
+static int mmc_switch_status(struct mmc_card *card, bool ignore_crc);
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -137,6 +140,19 @@
mmc_init_erase(card);
}
+static const struct mmc_fixup mmc_fixups[] = {
+
+ /* avoid HPI for specific cards */
+ MMC_FIXUP_EXT_CSD_REV("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+ add_quirk, MMC_QUIRK_BROKEN_HPI, MMC_V4_41),
+
+ /* Disable cache for specific cards */
+ MMC_FIXUP("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+ add_quirk_mmc, MMC_QUIRK_CACHE_DISABLE),
+
+ END_FIXUP
+};
+
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
@@ -388,9 +404,6 @@
*/
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
- /* fixup device after ext_csd revision field is updated */
- mmc_fixup_device(card, mmc_ext_csd_fixups);
-
card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
@@ -516,6 +529,25 @@
ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
}
+ /* check whether the eMMC card supports HPI */
+ if ((ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) &&
+ !(card->quirks & MMC_QUIRK_BROKEN_HPI)) {
+ card->ext_csd.hpi = 1;
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+ card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+ else
+ card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+ /*
+ * Indicate the maximum timeout to close
+ * a command interrupted by HPI
+ */
+ card->ext_csd.out_of_int_time =
+ ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+ pr_info("%s: Out-of-interrupt timeout is %d[ms]\n",
+ mmc_hostname(card->host),
+ card->ext_csd.out_of_int_time);
+ }
+
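For reference, the HPI block above keys off two bits of EXT_CSD_HPI_FEATURES (bit 0: HPI supported; bit 1: HPI issued via CMD12/STOP_TRANSMISSION rather than CMD13/SEND_STATUS) and scales EXT_CSD_OUT_OF_INTERRUPT_TIME, which is encoded in 10 ms units. A standalone decode of illustrative register values:

#include <stdio.h>

int main(void)
{
	unsigned char hpi_features = 0x03;	/* illustrative EXT_CSD value */
	unsigned char out_of_int   = 25;	/* encoded in 10 ms units */

	if (hpi_features & 0x1) {
		printf("HPI supported, via %s\n",
		       (hpi_features & 0x2) ? "CMD12 (STOP_TRANSMISSION)"
					    : "CMD13 (SEND_STATUS)");
		printf("out-of-interrupt timeout: %d ms\n", out_of_int * 10);
	}
	return 0;
}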
if (card->ext_csd.rev >= 5) {
/* Adjust production date as per JEDEC JESD84-B451 */
if (card->cid.year < 2010)
@@ -523,16 +555,16 @@
/* check whether the eMMC card supports BKOPS */
if (!mmc_card_broken_hpi(card) &&
- ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
+ card->ext_csd.hpi) {
card->ext_csd.bkops = 1;
- card->ext_csd.man_bkops_en =
- (ext_csd[EXT_CSD_BKOPS_EN] &
- EXT_CSD_MANUAL_BKOPS_MASK);
+ card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
- if (!card->ext_csd.man_bkops_en)
- pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
- mmc_hostname(card->host));
+ if (!card->ext_csd.bkops_en)
+ pr_info("%s: BKOPS_EN equals 0x%x\n",
+ mmc_hostname(card->host),
+ card->ext_csd.bkops_en);
}
/* check whether the eMMC card supports HPI */
@@ -555,6 +587,19 @@
card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
/*
+	 * Some eMMC vendors violate the eMMC 5.0 spec and set the
+	 * REL_WR_SEC_C register to 0x10 to indicate improved RPMB
+	 * throughput; this leads to failures when the TZ module
+	 * writes data to the RPMB partition. So check bit[4] of
+	 * EXT_CSD[166], and if it is not set, force REL_WR_SEC_C
+	 * to 0x1, ignoring the value of EXT_CSD[222].
+ */
+ if (!(card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR))
+ card->ext_csd.rel_sectors = 0x1;
+
+ /*
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
@@ -610,6 +655,46 @@
card->ext_csd.data_sector_size = 512;
}
+ if (card->ext_csd.rev >= 7) {
+	if (card->ext_csd.rev >= 7) {
+		/* Enhanced Strobe is defined from eMMC v5.1, whose ext_csd
+		 * rev should be 8, but some eMMC devices report rev 7 while
+		 * still supporting it, so handle Enhanced Strobe here.
+		 */
+ card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
+ card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT];
+ card->ext_csd.fw_version = ext_csd[EXT_CSD_FIRMWARE_VERSION];
+ pr_info("%s: eMMC FW version: 0x%02x\n",
+ mmc_hostname(card->host),
+ card->ext_csd.fw_version);
+ if (card->ext_csd.cmdq_support) {
+ /*
+ * Queue Depth = N + 1,
+ * see JEDEC JESD84-B51 section 7.4.19
+ */
+ card->ext_csd.cmdq_depth =
+ ext_csd[EXT_CSD_CMDQ_DEPTH] + 1;
+ pr_info("%s: CMDQ supported: depth: %d\n",
+ mmc_hostname(card->host),
+ card->ext_csd.cmdq_depth);
+ }
+ card->ext_csd.barrier_support =
+ ext_csd[EXT_CSD_BARRIER_SUPPORT];
+ card->ext_csd.cache_flush_policy =
+ ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
+ pr_info("%s: cache barrier support %d flush policy %d\n",
+ mmc_hostname(card->host),
+ card->ext_csd.barrier_support,
+ card->ext_csd.cache_flush_policy);
+ card->ext_csd.enhanced_rpmb_supported =
+ (card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
+ } else {
+ card->ext_csd.cmdq_support = 0;
+ card->ext_csd.cmdq_depth = 0;
+ card->ext_csd.barrier_support = 0;
+ card->ext_csd.cache_flush_policy = 0;
+ }
+
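The depth decode above follows JESD84-B51: the CMDQ_DEPTH byte stores N and the usable queue depth is N + 1, so a register value of 15 means 16 queueable tasks. In miniature:

#include <stdio.h>

int main(void)
{
	unsigned char ext_csd_cmdq_depth = 15;	/* register stores N */
	unsigned int depth = ext_csd_cmdq_depth + 1; /* usable depth = N + 1 */

	printf("CMDQ depth: %u\n", depth);	/* prints 16 */
	return 0;
}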
/* eMMC v5 or later */
if (card->ext_csd.rev >= 7) {
memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
@@ -630,6 +715,7 @@
static int mmc_read_ext_csd(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
u8 *ext_csd;
int err;
@@ -638,6 +724,9 @@
err = mmc_get_ext_csd(card, &ext_csd);
if (err) {
+ pr_err("%s: %s: mmc_get_ext_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
+
/* If the host or the card can't do the switch,
* fail more gracefully. */
if ((err != -EINVAL)
@@ -661,6 +750,7 @@
return err;
}
+ card->cached_ext_csd = ext_csd;
err = mmc_decode_ext_csd(card, ext_csd);
kfree(ext_csd);
return err;
@@ -762,6 +852,8 @@
card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
+ card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
@@ -817,6 +909,7 @@
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_enhanced_rpmb_supported.attr,
&dev_attr_rel_sectors.attr,
&dev_attr_ocr.attr,
&dev_attr_dsr.attr,
@@ -953,11 +1046,11 @@
*/
static int mmc_select_bus_width(struct mmc_card *card)
{
- static unsigned ext_csd_bits[] = {
+ static const unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
};
- static unsigned bus_widths[] = {
+ static const unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
};
@@ -1018,12 +1111,12 @@
}
/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
+static int mmc_switch_status(struct mmc_card *card, bool ignore_crc)
{
u32 status;
int err;
- err = mmc_send_status(card, &status);
+ err = __mmc_send_status(card, &status, ignore_crc);
if (err)
return err;
@@ -1043,7 +1136,7 @@
true, false, true);
if (!err) {
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
}
if (err)
@@ -1072,10 +1165,11 @@
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits,
- card->ext_csd.generic_cmd6_time);
+ card->ext_csd.generic_cmd6_time,
+ true, false, false);
if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width);
@@ -1118,8 +1212,10 @@
if (err)
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
- if (!err)
+ if (!err) {
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+ err = mmc_switch_status(card, false);
+ }
return err;
}
@@ -1134,9 +1230,28 @@
/*
* HS400 mode requires 8-bit bus width
*/
- if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
- host->ios.bus_width == MMC_BUS_WIDTH_8))
- return 0;
+ if (card->ext_csd.strobe_support) {
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->caps & MMC_CAP_8_BIT_DATA))
+ return 0;
+
+		/* For the Enhanced Strobe flow; for non Enhanced Strobe,
+		 * the signal voltage is not set here.
+		 */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_180);
+ if (err)
+ return err;
+ } else {
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8))
+ return 0;
+ }
/* Switch card to HS mode */
val = EXT_CSD_TIMING_HS;
@@ -1157,14 +1272,22 @@
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
+ val = EXT_CSD_DDR_BUS_WIDTH_8;
+ if (card->ext_csd.strobe_support) {
+ err = mmc_select_bus_width(card);
+ if (IS_ERR_VALUE((unsigned long)err))
+ return err;
+ val |= EXT_CSD_BUS_WIDTH_STROBE;
+ }
+
/* Switch card to DDR */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
- EXT_CSD_DDR_BUS_WIDTH_8,
+ val,
card->ext_csd.generic_cmd6_time);
if (err) {
pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
@@ -1189,7 +1312,28 @@
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
- err = mmc_switch_status(card);
+ if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) {
+ mmc_host_clk_hold(host);
+ err = host->ops->enhanced_strobe(host);
+ mmc_host_clk_release(host);
+ } else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) &&
+ host->ops->execute_tuning) {
+ mmc_host_clk_hold(host);
+ err = host->ops->execute_tuning(host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ mmc_host_clk_release(host);
+
+ if (err)
+ pr_warn("%s: tuning execution failed\n",
+ mmc_hostname(host));
+ }
+
+ /*
+	 * CMD13 should be sent only after host calibration for
+	 * enhanced_strobe or HS400 mode has completed; otherwise
+	 * CMD13 timeouts or CRC errors may be seen.
+ */
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1227,7 +1371,7 @@
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1240,7 +1384,7 @@
mmc_set_timing(host, MMC_TIMING_MMC_HS);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1255,7 +1399,7 @@
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1301,7 +1445,7 @@
mmc_set_clock(host, card->ext_csd.hs_max_dtr);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1338,7 +1482,7 @@
if (host->ops->hs400_enhanced_strobe)
host->ops->hs400_enhanced_strobe(host, &host->ios);
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
@@ -1411,7 +1555,12 @@
old_timing = host->ios.timing;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- err = mmc_switch_status(card);
+ /*
+	 * After switching to HS200, CRC errors might occur for
+	 * commands sent before tuning, so ignore CRC errors
+	 * for CMD13.
+ */
+ err = mmc_switch_status(card, true);
/*
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
@@ -1431,6 +1580,17 @@
return err;
}
+static int mmc_reboot_notify(struct notifier_block *notify_block,
+ unsigned long event, void *unused)
+{
+ struct mmc_card *card = container_of(
+ notify_block, struct mmc_card, reboot_notify);
+
+ card->pon_type = (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
+
+ return NOTIFY_OK;
+}
+
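The notifier above only records which power-off notification to send later: a plain restart maps to the short PON, anything else (halt, power-off) to the long PON, which mmc_send_pon() further down acts on. A reduced sketch of the mapping (SYS_RESTART's value and the PON flags are stand-ins for the kernel's own constants):

#include <stdio.h>

#define SYS_RESTART  0x0001	/* stand-in for <linux/reboot.h> */
#define MMC_LONG_PON 0x1	/* stand-ins for the driver's PON flags */
#define MMC_SHRT_PON 0x2

static unsigned int pon_for_event(unsigned long event)
{
	return (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
}

int main(void)
{
	printf("restart  -> %#x (short)\n", pon_for_event(SYS_RESTART));
	printf("poweroff -> %#x (long)\n", pon_for_event(0x4242)); /* any other event */
	return 0;
}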
/*
* Activate High Speed, HS200 or HS400ES mode if supported.
*/
@@ -1441,7 +1601,10 @@
if (!mmc_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ /* For Enhance Strobe HS400 flow */
+ if (card->ext_csd.strobe_support &&
+ card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ card->host->caps & MMC_CAP_8_BIT_DATA)
err = mmc_select_hs400es(card);
else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
err = mmc_select_hs200(card);
@@ -1474,12 +1637,242 @@
*/
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
host->ios.bus_width == MMC_BUS_WIDTH_8)
- if (host->ops->prepare_hs400_tuning)
- host->ops->prepare_hs400_tuning(host, &host->ios);
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
return mmc_execute_tuning(card);
}
+static int mmc_select_cmdq(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int ret = 0;
+
+ if (!host->cmdq_ops) {
+ pr_err("%s: host controller doesn't support CMDQ\n",
+ mmc_hostname(host));
+ return 0;
+ }
+
+ ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+ if (ret)
+ goto out;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (ret)
+ goto out;
+
+ mmc_card_set_cmdq(card);
+ mmc_host_clk_hold(card->host);
+ ret = host->cmdq_ops->enable(card->host);
+ if (ret) {
+ mmc_host_clk_release(card->host);
+ pr_err("%s: failed (%d) enabling CMDQ on host\n",
+ mmc_hostname(host), ret);
+ mmc_card_clr_cmdq(card);
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 0,
+ card->ext_csd.generic_cmd6_time);
+ goto out;
+ }
+
+ mmc_host_clk_release(card->host);
+ pr_info_once("%s: CMDQ enabled on card\n", mmc_hostname(host));
+out:
+ return ret;
+}
+
+static int mmc_select_hs_ddr52(struct mmc_host *host)
+{
+ int err;
+
+ mmc_select_hs(host->card);
+ err = mmc_select_bus_width(host->card);
+ if (err < 0) {
+ pr_err("%s: %s: select_bus_width failed(%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ err = mmc_select_hs_ddr(host->card);
+ mmc_set_clock(host, MMC_HIGH_52_MAX_DTR);
+
+ return err;
+}
+
+/*
+ * Scale down from HS400 to HS in order to allow frequency change.
+ * This is needed for cards that don't support changing the frequency in HS400.
+ */
+static int mmc_scale_low(struct mmc_host *host, unsigned long freq)
+{
+ int err = 0;
+
+ mmc_set_timing(host, MMC_TIMING_LEGACY);
+ mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+
+ if (host->clk_scaling.lower_bus_speed_mode &
+ MMC_SCALING_LOWER_DDR52_MODE) {
+ err = mmc_select_hs_ddr52(host);
+ if (err)
+ pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
+ mmc_hostname(host), __func__, err);
+ else
+ return err;
+ }
+
+ err = mmc_select_hs(host->card);
+ if (err) {
+ pr_err("%s: %s: scaling low: failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ err = mmc_select_bus_width(host->card);
+ if (err < 0) {
+ pr_err("%s: %s: select_bus_width failed(%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ mmc_set_clock(host, freq);
+
+ return 0;
+}
+
+/*
+ * Scale up from HS to HS200/HS400
+ */
+static int mmc_scale_high(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (mmc_card_ddr52(host->card)) {
+ mmc_set_timing(host, MMC_TIMING_LEGACY);
+ mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+ }
+
+ if (!host->card->ext_csd.strobe_support) {
+ if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) {
+ pr_err("%s: %s: card does not support HS200\n",
+ mmc_hostname(host), __func__);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ err = mmc_select_hs200(host->card);
+ if (err) {
+ pr_err("%s: %s: selecting HS200 failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ mmc_set_bus_speed(host->card);
+
+ err = mmc_hs200_tuning(host->card);
+ if (err) {
+ pr_err("%s: %s: hs200 tuning failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400)) {
+ pr_debug("%s: card does not support HS400\n",
+ mmc_hostname(host));
+ return 0;
+ }
+ }
+
+ err = mmc_select_hs400(host->card);
+ if (err) {
+ pr_err("%s: %s: select hs400 failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ return err;
+}
+
+static int mmc_set_clock_bus_speed(struct mmc_card *card, unsigned long freq)
+{
+ int err = 0;
+
+ if (freq == MMC_HS200_MAX_DTR)
+ err = mmc_scale_high(card->host);
+ else
+ err = mmc_scale_low(card->host, freq);
+
+ return err;
+}
+
+static inline unsigned long mmc_ddr_freq_accommodation(unsigned long freq)
+{
+ if (freq == MMC_HIGH_DDR_MAX_DTR)
+ return freq;
+
+ return freq/2;
+}
+
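The halving above reflects that DDR52 transfers data on both clock edges, so a requested data rate needs only half the bus clock; the one exception is a request for exactly the DDR ceiling, which is passed through. Worked through numerically (MMC_HIGH_DDR_MAX_DTR assumed to be 52 MHz, its usual value):

#include <stdio.h>

#define MMC_HIGH_DDR_MAX_DTR 52000000UL	/* assumed 52 MHz, per eMMC DDR52 */

static unsigned long ddr_freq_accommodation(unsigned long freq)
{
	if (freq == MMC_HIGH_DDR_MAX_DTR)
		return freq;
	return freq / 2;	/* DDR clocks data on both edges */
}

int main(void)
{
	printf("%lu\n", ddr_freq_accommodation(100000000UL));	   /* 50000000 */
	printf("%lu\n", ddr_freq_accommodation(MMC_HIGH_DDR_MAX_DTR)); /* 52000000 */
	return 0;
}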
+/**
+ * mmc_change_bus_speed() - Change MMC card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the MMC card bus frequency at runtime after the card is
+ * initialized. Callers are expected to make sure of the card's
+ * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
+ *
+ * If the requested frequency is greater than the maximum supported by
+ * the card, *freq is changed to the card's maximum. If it is less than
+ * the minimum supported by the host, *freq is changed to the host's
+ * minimum. The host is assumed to be claimed when calling this function.
+ */
+static int mmc_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+ int err = 0;
+ struct mmc_card *card;
+ unsigned long actual_freq;
+
+ card = host->card;
+
+ if (!card || !freq) {
+ err = -EINVAL;
+ goto out;
+ }
+ actual_freq = *freq;
+
+ WARN_ON(!host->claimed);
+
+ /*
+	 * Scaling HS400 up or down needs special handling; for other
+	 * timings we can simply change the clock frequency.
+ */
+ if (mmc_card_hs400(card) ||
+ (!mmc_card_hs200(host->card) && *freq == MMC_HS200_MAX_DTR)) {
+ err = mmc_set_clock_bus_speed(card, *freq);
+ if (err) {
+			pr_err("%s: %s: failed (%d) to set bus and clock speed (freq=%lu)\n",
+ mmc_hostname(host), __func__, err, *freq);
+ goto out;
+ }
+ } else if (mmc_card_hs200(host->card)) {
+ mmc_set_clock(host, *freq);
+ err = mmc_hs200_tuning(host->card);
+ if (err) {
+ pr_warn("%s: %s: tuning execution failed %d\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_set_clock(host, host->clk_scaling.curr_freq);
+ }
+ } else {
+ if (mmc_card_ddr52(host->card))
+ actual_freq = mmc_ddr_freq_accommodation(*freq);
+ mmc_set_clock(host, actual_freq);
+ }
+
+out:
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -1508,20 +1901,27 @@
* respond.
* mmc_go_idle is needed for eMMC that are asleep
*/
+reinit:
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_send_op_cond() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
+ }
/*
* For SPI, enable CRC as appropriate.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_spi_set_crc() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
+ }
}
/*
@@ -1531,12 +1931,17 @@
err = mmc_send_cid(host, cid);
else
err = mmc_all_send_cid(host, cid);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_send_cid() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
+ }
if (oldcard) {
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
err = -ENOENT;
+ pr_err("%s: %s: CID memcmp failed %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
}
@@ -1548,6 +1953,8 @@
card = mmc_alloc_card(host, &mmc_type);
if (IS_ERR(card)) {
err = PTR_ERR(card);
+ pr_err("%s: %s: no memory to allocate for card %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
}
@@ -1555,6 +1962,8 @@
card->type = MMC_TYPE_MMC;
card->rca = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ host->card = card;
+ card->reboot_notify.notifier_call = mmc_reboot_notify;
}
/*
@@ -1568,8 +1977,11 @@
*/
if (!mmc_host_is_spi(host)) {
err = mmc_set_relative_addr(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_set_relative_addr() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
@@ -1579,15 +1991,24 @@
* Fetch CSD from card.
*/
err = mmc_send_csd(card, card->raw_csd);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_send_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
err = mmc_decode_csd(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_decode_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
err = mmc_decode_cid(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_decode_cid() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
}
/*
@@ -1602,15 +2023,21 @@
*/
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_select_card() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
}
if (!oldcard) {
/* Read extended CSD. */
err = mmc_read_ext_csd(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_read_ext_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
/*
* If doing byte addressing, check if required to do sector
@@ -1623,6 +2050,9 @@
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
+
+ if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
+ mmc_card_set_blockaddr(card);
}
/*
@@ -1635,8 +2065,11 @@
EXT_CSD_ERASE_GROUP_DEF, 1,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for ERASE_GRP_DEF fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (err) {
err = 0;
@@ -1666,8 +2099,13 @@
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
card->ext_csd.part_config,
card->ext_csd.part_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for PART_CONFIG fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
+ card->part_curr = card->ext_csd.part_config &
+ EXT_CSD_PART_CONFIG_ACC_MASK;
}
/*
@@ -1678,8 +2116,11 @@
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for POWER_ON PON fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
/*
* The err can be -EBADMSG or 0,
@@ -1693,8 +2134,11 @@
* Select timing interface
*/
err = mmc_select_timing(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_select_timing() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (mmc_card_hs200(card)) {
err = mmc_hs200_tuning(card);
@@ -1714,6 +2158,16 @@
}
}
+ card->clk_scaling_lowest = host->f_min;
+	if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) ||
+	    (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200))
+		card->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
+	else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) ||
+	    (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
+ card->clk_scaling_highest = card->ext_csd.hs_max_dtr;
+ else
+ card->clk_scaling_highest = card->csd.max_dtr;
+
/*
* Choose the power class with selected bus interface
*/
@@ -1726,8 +2180,11 @@
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HPI_MGMT, 1,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for HPI_MGMT fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
@@ -1739,28 +2196,72 @@
/*
* If cache size is higher than 0, this indicates
* the existence of cache and it can be turned on.
+ * If HPI is not supported then cache shouldn't be enabled.
*/
if (!mmc_card_broken_hpi(card) &&
card->ext_csd.cache_size > 0) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, 1,
- card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
- goto free_card;
+ if (card->ext_csd.hpi_en &&
+ (!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
+ mmc_hostname(host), __func__, err);
+ goto free_card;
+ }
- /*
- * Only if no error, cache is turned on successfully.
- */
- if (err) {
- pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
- mmc_hostname(card->host), err);
- card->ext_csd.cache_ctrl = 0;
- err = 0;
+ /*
+ * Only if no error, cache is turned on successfully.
+ */
+ if (err) {
+ pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
+ /* enable cache barrier if supported by the device */
+ if (card->ext_csd.cache_ctrl &&
+ card->ext_csd.barrier_support) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BARRIER_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for BARRIER_CTRL fails %d\n",
+ mmc_hostname(host), __func__,
+ err);
+ goto free_card;
+ }
+ if (err) {
+ pr_warn("%s: Barrier is supported but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.barrier_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.barrier_en = 1;
+ }
+ }
} else {
- card->ext_csd.cache_ctrl = 1;
+ /*
+			 * The MMC standard doesn't define the card's default
+			 * value for EXT_CSD_CACHE_CTRL, so vendors may ship
+			 * cards with the cache enabled by default. It is
+			 * therefore best to disable the cache explicitly
+			 * when we want to avoid it.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 0,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: %s: fail on CACHE_CTRL OFF %d\n",
+ mmc_hostname(host), __func__, err);
+ goto free_card;
+ }
}
}
-
/*
* The mandatory minimum values are defined for packed command.
* read: 5, write: 3
@@ -1772,8 +2273,11 @@
EXT_CSD_EXP_EVENTS_CTRL,
EXT_CSD_PACKED_EVENT_EN,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for EXP_EVENTS_CTRL fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (err) {
pr_warn("%s: Enabling packed event failed\n",
mmc_hostname(card->host));
@@ -1782,42 +2286,125 @@
} else {
card->ext_csd.packed_event_en = 1;
}
+
}
- if (!oldcard)
- host->card = card;
+ if (!oldcard) {
+ if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+ (card->ext_csd.max_packed_writes > 0)) {
+ /*
+			 * We would like to keep the statistics at an index
+			 * that equals the number of packed requests
+ * (1 to max_packed_writes)
+ */
+ card->wr_pack_stats.packing_events = kzalloc(
+ (card->ext_csd.max_packed_writes + 1) *
+ sizeof(*card->wr_pack_stats.packing_events),
+ GFP_KERNEL);
+ if (!card->wr_pack_stats.packing_events) {
+ pr_err("%s: %s: no memory for packing events\n",
+ mmc_hostname(host), __func__);
+ goto free_card;
+ }
+ }
+ }
+
+ /*
+ * Start auto bkops, if supported.
+ *
+ * Note: This leaves the possibility of having both manual and
+ * auto bkops running in parallel. The runtime implementation
+	 * will allow this, but ignore bkops exceptions on the premise
+ * that auto bkops will eventually kick in and the device will
+ * handle bkops without START_BKOPS from the host.
+ */
+ if (mmc_card_support_auto_bkops(card)) {
+ /*
+ * Ignore the return value of setting auto bkops.
+ * If it failed, will run in backward compatible mode.
+ */
+ (void)mmc_set_auto_bkops(card, true);
+ }
+
+ if (card->ext_csd.cmdq_support && (card->host->caps2 &
+ MMC_CAP2_CMD_QUEUE)) {
+ err = mmc_select_cmdq(card);
+ if (err) {
+ pr_err("%s: selecting CMDQ mode: failed: %d\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cmdq_support = 0;
+ oldcard = card;
+ goto reinit;
+ }
+ }
return 0;
free_card:
- if (!oldcard)
+ if (!oldcard) {
+ host->card = NULL;
mmc_remove_card(card);
+ }
err:
return err;
}
-static int mmc_can_sleep(struct mmc_card *card)
+static int mmc_can_sleepawake(struct mmc_host *host)
{
- return (card && card->ext_csd.rev >= 3);
+ return host && (host->caps2 & MMC_CAP2_SLEEP_AWAKE) && host->card &&
+ (host->card->ext_csd.rev >= 3);
}
-static int mmc_sleep(struct mmc_host *host)
+static int mmc_sleepawake(struct mmc_host *host, bool sleep)
{
struct mmc_command cmd = {0};
struct mmc_card *card = host->card;
- unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ unsigned int timeout_ms;
int err;
+ if (!card) {
+ pr_err("%s: %s: invalid card\n", mmc_hostname(host), __func__);
+ return -EINVAL;
+ }
+
+ timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ if (card->ext_csd.rev >= 3 &&
+ card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ u8 part_config = card->ext_csd.part_config;
+
+ /*
+		 * If the last access before suspend was an RPMB access,
+		 * switch to the default part config so that the sleep
+		 * command (CMD5) and deselect (CMD7) can be sent to the card.
+ */
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG,
+ part_config,
+ card->ext_csd.part_time);
+ if (err) {
+ pr_err("%s: %s: failed to switch to default part config %x\n",
+ mmc_hostname(host), __func__, part_config);
+ return err;
+ }
+ card->ext_csd.part_config = part_config;
+ card->part_curr = card->ext_csd.part_config &
+ EXT_CSD_PART_CONFIG_ACC_MASK;
+ }
+
/* Re-tuning can't be done once the card is deselected */
mmc_retune_hold(host);
- err = mmc_deselect_cards(host);
- if (err)
- goto out_release;
+ if (sleep) {
+ err = mmc_deselect_cards(host);
+ if (err)
+ goto out_release;
+ }
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
- cmd.arg |= 1 << 15;
+ if (sleep)
+ cmd.arg |= 1 << 15;
/*
* If the max_busy_timeout of the host is specified, validate it against
@@ -1845,6 +2432,9 @@
if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
mmc_delay(timeout_ms);
+ if (!sleep)
+ err = mmc_select_card(card);
+
out_release:
mmc_retune_release(host);
return err;
@@ -1879,6 +2469,27 @@
return err;
}
+int mmc_send_pon(struct mmc_card *card)
+{
+ int err = 0;
+ struct mmc_host *host = card->host;
+
+ if (!mmc_can_poweroff_notify(card))
+ goto out;
+
+ mmc_get_card(card);
+ if (card->pon_type & MMC_LONG_PON)
+ err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
+ else if (card->pon_type & MMC_SHRT_PON)
+ err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
+ if (err)
+		pr_warn("%s: error %d sending PON type %u\n",
+ mmc_hostname(host), err, card->pon_type);
+ mmc_put_card(card);
+out:
+ return err;
+}
+
/*
* Host is being removed. Free up the current card.
*/
@@ -1887,8 +2498,14 @@
BUG_ON(!host);
BUG_ON(!host->card);
+ unregister_reboot_notifier(&host->card->reboot_notify);
+
+ mmc_exit_clk_scaling(host);
mmc_remove_card(host->card);
+
+ mmc_claim_host(host);
host->card = NULL;
+ mmc_release_host(host);
}
/*
@@ -1928,20 +2545,101 @@
}
}
+static int mmc_cache_card_ext_csd(struct mmc_host *host)
+{
+ int err;
+ u8 *ext_csd;
+ struct mmc_card *card = host->card;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err || !ext_csd) {
+ pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* only cache read/write fields that the sw changes */
+ card->ext_csd.raw_ext_csd_cmdq = ext_csd[EXT_CSD_CMDQ];
+ card->ext_csd.raw_ext_csd_cache_ctrl = ext_csd[EXT_CSD_CACHE_CTRL];
+ card->ext_csd.raw_ext_csd_bus_width = ext_csd[EXT_CSD_BUS_WIDTH];
+ card->ext_csd.raw_ext_csd_hs_timing = ext_csd[EXT_CSD_HS_TIMING];
+
+ kfree(ext_csd);
+
+ return 0;
+}
+
+static int mmc_test_awake_ext_csd(struct mmc_host *host)
+{
+ int err;
+ u8 *ext_csd;
+ struct mmc_card *card = host->card;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err || !ext_csd) {
+ pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* only compare read/write fields that the sw changes */
+ pr_debug("%s: %s: type(cached:current) cmdq(%d:%d) cache_ctrl(%d:%d) bus_width (%d:%d) timing(%d:%d)\n",
+ mmc_hostname(host), __func__,
+ card->ext_csd.raw_ext_csd_cmdq,
+ ext_csd[EXT_CSD_CMDQ],
+ card->ext_csd.raw_ext_csd_cache_ctrl,
+ ext_csd[EXT_CSD_CACHE_CTRL],
+ card->ext_csd.raw_ext_csd_bus_width,
+ ext_csd[EXT_CSD_BUS_WIDTH],
+ card->ext_csd.raw_ext_csd_hs_timing,
+ ext_csd[EXT_CSD_HS_TIMING]);
+
+ err = !((card->ext_csd.raw_ext_csd_cmdq ==
+ ext_csd[EXT_CSD_CMDQ]) &&
+ (card->ext_csd.raw_ext_csd_cache_ctrl ==
+ ext_csd[EXT_CSD_CACHE_CTRL]) &&
+ (card->ext_csd.raw_ext_csd_bus_width ==
+ ext_csd[EXT_CSD_BUS_WIDTH]) &&
+ (card->ext_csd.raw_ext_csd_hs_timing ==
+ ext_csd[EXT_CSD_HS_TIMING]));
+
+ kfree(ext_csd);
+
+ return err;
+}
+
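The comparison above reduces to field-wise equality of the four software-written EXT_CSD bytes cached at suspend; any mismatch (non-zero return) means the card likely lost power and a full re-init is needed. The same check as a standalone helper:

#include <stdio.h>

struct csd_fields {	/* the four sw-written bytes compared above */
	unsigned char cmdq, cache_ctrl, bus_width, hs_timing;
};

/* Returns non-zero (mismatch) when the card likely went through power loss. */
static int awake_test(const struct csd_fields *cached,
		      const struct csd_fields *now)
{
	return !(cached->cmdq == now->cmdq &&
		 cached->cache_ctrl == now->cache_ctrl &&
		 cached->bus_width == now->bus_width &&
		 cached->hs_timing == now->hs_timing);
}

int main(void)
{
	struct csd_fields a = {1, 1, 2, 3}, b = {1, 1, 2, 3};

	printf("%d\n", awake_test(&a, &b));	/* 0: safe to partial-init */
	return 0;
}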
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
- unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
- EXT_CSD_POWER_OFF_LONG;
BUG_ON(!host);
BUG_ON(!host->card);
+ err = mmc_suspend_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
goto out;
+ if (host->card->cmdq_init) {
+ BUG_ON(host->cmdq_ctx.active_reqs);
+
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: halt: failed: %d\n", __func__, err);
+ goto out;
+ }
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->disable(host, true);
+ mmc_host_clk_release(host);
+ }
+
if (mmc_card_doing_bkops(host->card)) {
err = mmc_stop_bkops(host->card);
if (err)
@@ -1952,36 +2650,117 @@
if (err)
goto out;
- if (mmc_can_poweroff_notify(host->card) &&
- ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
- err = mmc_poweroff_notify(host->card, notify_type);
- else if (mmc_can_sleep(host->card))
- err = mmc_sleep(host);
- else if (!mmc_host_is_spi(host))
+ if (mmc_can_sleepawake(host)) {
+ /*
+		 * When caching host->ios into cached_ios we need to
+		 * make sure the clocks are not gated; otherwise
+		 * cached_ios.clock would be 0.
+ */
+ mmc_host_clk_hold(host);
+ memcpy(&host->cached_ios, &host->ios, sizeof(host->cached_ios));
+ mmc_cache_card_ext_csd(host);
+ err = mmc_sleepawake(host, true);
+ mmc_host_clk_release(host);
+ } else if (!mmc_host_is_spi(host)) {
err = mmc_deselect_cards(host);
+ }
if (!err) {
mmc_power_off(host);
mmc_card_set_suspended(host->card);
}
out:
+	/* Kick the CMDQ thread to process any requests that came in while suspending */
+ if (host->card->cmdq_init)
+ wake_up(&host->cmdq_ctx.wait);
+
mmc_release_host(host);
return err;
}
+static int mmc_partial_init(struct mmc_host *host)
+{
+ int err = 0;
+ struct mmc_card *card = host->card;
+
+ pr_debug("%s: %s: starting partial init\n",
+ mmc_hostname(host), __func__);
+
+ mmc_set_bus_width(host, host->cached_ios.bus_width);
+ mmc_set_timing(host, host->cached_ios.timing);
+ mmc_set_clock(host, host->cached_ios.clock);
+ mmc_set_bus_mode(host, host->cached_ios.bus_mode);
+
+ mmc_host_clk_hold(host);
+
+ if (mmc_card_hs400(card)) {
+ if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
+ err = host->ops->enhanced_strobe(host);
+ } else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
+ err = host->ops->execute_tuning(host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ if (err)
+ pr_warn("%s: %s: tuning execution failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ }
+
+ /*
+	 * The ext_csd is read to make sure the card did not go through
+	 * a power failure during the sleep period.
+	 * A subset of the W/E_P and W/C_P registers is tested; if their
+	 * values differ from the values cached during suspend, we conclude
+	 * that a power failure occurred and run the full initialization
+	 * sequence.
+	 * In addition, the full init sequence transfers the ext_csd before
+	 * moving to CMDQ mode, which has the side effect of configuring
+	 * SDHCI registers that must be set up before entering CMDQ mode.
+	 * The same registers need to be configured for partial init.
+ */
+ err = mmc_test_awake_ext_csd(host);
+ if (err) {
+ pr_debug("%s: %s: fail on ext_csd read (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
+ pr_debug("%s: %s: reading and comparing ext_csd successful\n",
+ mmc_hostname(host), __func__);
+
+ if (card->ext_csd.cmdq_support && (card->host->caps2 &
+ MMC_CAP2_CMD_QUEUE)) {
+ err = mmc_select_cmdq(card);
+ if (err) {
+ pr_warn("%s: %s: enabling CMDQ mode failed (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ }
+ }
+out:
+ mmc_host_clk_release(host);
+
+ pr_debug("%s: %s: done partial init (%d)\n",
+ mmc_hostname(host), __func__, err);
+
+ return err;
+}
+
/*
* Suspend callback
*/
static int mmc_suspend(struct mmc_host *host)
{
int err;
+ ktime_t start = ktime_get();
+ MMC_TRACE(host, "%s: Enter\n", __func__);
err = _mmc_suspend(host, true);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
+ trace_mmc_suspend(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1991,43 +2770,61 @@
*/
static int _mmc_resume(struct mmc_host *host)
{
- int err = 0;
+ int err = -ENOSYS;
+ int retries;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- if (!mmc_card_suspended(host->card))
+ if (!mmc_card_suspended(host->card)) {
+ mmc_release_host(host);
goto out;
+ }
mmc_power_up(host, host->card->ocr);
- err = mmc_init_card(host, host->card->ocr, host->card);
+ retries = 3;
+ while (retries) {
+ if (mmc_can_sleepawake(host)) {
+ err = mmc_sleepawake(host, false);
+ if (!err)
+ err = mmc_partial_init(host);
+ if (err)
+				pr_err("%s: %s: awake failed (%d), falling back to full init\n",
+ mmc_hostname(host), __func__, err);
+ }
+
+ if (err)
+ err = mmc_init_card(host, host->card->ocr, host->card);
+
+ if (err) {
+ pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
+ mmc_hostname(host), err, retries);
+ retries--;
+ mmc_power_off(host);
+ usleep_range(5000, 5500);
+ mmc_power_up(host, host->card->ocr);
+ mmc_select_voltage(host, host->card->ocr);
+ continue;
+ }
+ break;
+ }
+ if (!err && mmc_card_cmdq(host->card)) {
+ err = mmc_cmdq_halt(host, false);
+ if (err)
+ pr_err("%s: un-halt: failed: %d\n", __func__, err);
+ }
mmc_card_clr_suspended(host->card);
-out:
mmc_release_host(host);
- return err;
-}
-/*
- * Shutdown callback
- */
-static int mmc_shutdown(struct mmc_host *host)
-{
- int err = 0;
+ err = mmc_resume_clk_scaling(host);
+ if (err)
+ pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
- /*
- * In a specific case for poweroff notify, we need to resume the card
- * before we can shutdown it properly.
- */
- if (mmc_can_poweroff_notify(host->card) &&
- !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
- err = _mmc_resume(host);
-
- if (!err)
- err = _mmc_suspend(host, false);
-
+out:
return err;
}
@@ -2036,25 +2833,97 @@
*/
static int mmc_resume(struct mmc_host *host)
{
+ int err = 0;
+
+ MMC_TRACE(host, "%s: Enter\n", __func__);
pm_runtime_enable(&host->card->dev);
+
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return 0;
}
+#define MAX_DEFER_SUSPEND_COUNTER 20
+static bool mmc_process_bkops(struct mmc_host *host)
+{
+ int err = 0;
+ bool is_running = false;
+ u32 status;
+
+ mmc_claim_host(host);
+ if (mmc_card_cmdq(host->card)) {
+ BUG_ON(host->cmdq_ctx.active_reqs);
+
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: halt: failed: %d\n", __func__, err);
+ goto unhalt;
+ }
+ }
+
+ if (mmc_card_doing_bkops(host->card)) {
+ /* check that manual bkops finished */
+ err = mmc_send_status(host->card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", __func__);
+ goto unhalt;
+ }
+ if (R1_CURRENT_STATE(status) != R1_STATE_PRG) {
+ mmc_card_clr_doing_bkops(host->card);
+ goto unhalt;
+ }
+ } else {
+ mmc_check_bkops(host->card);
+ }
+
+ if (host->card->bkops.needs_bkops &&
+ !mmc_card_support_auto_bkops(host->card))
+ mmc_start_manual_bkops(host->card);
+
+unhalt:
+ if (mmc_card_cmdq(host->card)) {
+ err = mmc_cmdq_halt(host, false);
+ if (err)
+ pr_err("%s: unhalt: failed: %d\n", __func__, err);
+ }
+ mmc_release_host(host);
+
+ if (host->card->bkops.needs_bkops ||
+ mmc_card_doing_bkops(host->card)) {
+ if (host->card->bkops.retry_counter++ <
+ MAX_DEFER_SUSPEND_COUNTER) {
+ host->card->bkops.needs_check = true;
+ is_running = true;
+ } else {
+ host->card->bkops.retry_counter = 0;
+ }
+ }
+ return is_running;
+}
+
/*
* Callback for runtime_suspend.
*/
static int mmc_runtime_suspend(struct mmc_host *host)
{
int err;
+ ktime_t start = ktime_get();
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
+ if (mmc_process_bkops(host)) {
+ pm_runtime_mark_last_busy(&host->card->dev);
+		pr_debug("%s: deferred, need bkops\n", __func__);
+ return -EBUSY;
+ }
+
err = _mmc_suspend(host, true);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
+ trace_mmc_runtime_suspend(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
return err;
}
@@ -2064,13 +2933,17 @@
static int mmc_runtime_resume(struct mmc_host *host)
{
int err;
+ ktime_t start = ktime_get();
err = _mmc_resume(host);
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
mmc_hostname(host), err);
- return 0;
+ trace_mmc_runtime_resume(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+
+ return err;
}
int mmc_can_reset(struct mmc_card *card)
@@ -2118,7 +2991,7 @@
.runtime_suspend = mmc_runtime_suspend,
.runtime_resume = mmc_runtime_resume,
.alive = mmc_alive,
- .shutdown = mmc_shutdown,
+ .change_bus_speed = mmc_change_bus_speed,
.reset = mmc_reset,
};
@@ -2177,6 +3050,14 @@
goto remove_card;
mmc_claim_host(host);
+ err = mmc_init_clk_scaling(host);
+ if (err) {
+ mmc_release_host(host);
+ goto remove_card;
+ }
+
+ register_reboot_notifier(&host->card->reboot_notify);
+
return 0;
remove_card:
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index ad6e979..16f7c58 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -54,7 +54,7 @@
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
-static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
+int __mmc_send_status(struct mmc_card *card, u32 *status,
bool ignore_crc)
{
int err;
@@ -456,6 +456,45 @@
}
/**
+ * mmc_prepare_switch - helper; prepare to modify EXT_CSD register
+ * @cmd: the command structure to be filled in
+ * @set: cmd set values
+ * @index: EXT_CSD register index
+ * @value: value to program into EXT_CSD register
+ * @tout_ms: timeout (ms) for operation performed by register write,
+ * timeout of zero implies maximum possible timeout
+ * @use_busy_signal: use the busy signal as response type
+ *
+ * Helper to prepare to modify EXT_CSD register for selected card.
+ */
+
+static inline void mmc_prepare_switch(struct mmc_command *cmd, u8 index,
+ u8 value, u8 set, unsigned int tout_ms,
+ bool use_busy_signal)
+{
+ cmd->opcode = MMC_SWITCH;
+ cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (index << 16) |
+ (value << 8) |
+ set;
+ cmd->flags = MMC_CMD_AC;
+ cmd->busy_timeout = tout_ms;
+ if (use_busy_signal)
+ cmd->flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ else
+ cmd->flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+}
+
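The argument packed by mmc_prepare_switch() follows the standard CMD6 (SWITCH) layout: access mode in bits [25:24] (write-byte = 0x3), EXT_CSD byte index in [23:16], the value in [15:8], and the command set in [2:0]. A standalone encoding for HS_TIMING (EXT_CSD index 185 per JEDEC) set to 1:

#include <stdio.h>

#define MMC_SWITCH_MODE_WRITE_BYTE 0x03	/* standard CMD6 write-byte access */

static unsigned int cmd6_arg(unsigned char index, unsigned char value,
			     unsigned char set)
{
	return (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	       (index << 16) | (value << 8) | set;
}

int main(void)
{
	/* HS_TIMING (index 185) = 1, cmd set 0 */
	printf("0x%08x\n", cmd6_arg(185, 1, 0));	/* 0x03b90100 */
	return 0;
}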
+int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, bool use_busy_signal,
+ bool ignore_timeout)
+{
+ mmc_prepare_switch(cmd, index, value, set, timeout_ms, use_busy_signal);
+ return 0;
+}
+EXPORT_SYMBOL(__mmc_switch_cmdq_mode);
+
+/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
* @set: cmd set values
@@ -479,6 +518,7 @@
unsigned long timeout;
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
+ int retries = 5;
bool expired = false;
bool busy = false;
@@ -494,12 +534,8 @@
(timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
- cmd.opcode = MMC_SWITCH;
- cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
- (index << 16) |
- (value << 8) |
- set;
- cmd.flags = MMC_CMD_AC;
+ mmc_prepare_switch(&cmd, index, value, set, timeout_ms,
+ use_r1b_resp);
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
/*
@@ -513,6 +549,8 @@
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
+ else if (index == EXT_CSD_BKOPS_START)
+ cmd.bkops_busy = true;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
@@ -570,10 +608,17 @@
/* Timeout if the device never leaves the program state. */
if (expired &&
(R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
- pr_err("%s: Card stuck in programming state! %s\n",
- mmc_hostname(host), __func__);
- err = -ETIMEDOUT;
- goto out;
+ pr_err("%s: Card stuck in programming state! %s, timeout:%ums, retries:%d\n",
+ mmc_hostname(host), __func__,
+ timeout_ms, retries);
+ if (retries)
+ timeout = jiffies +
+ msecs_to_jiffies(timeout_ms);
+ else {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ retries--;
}
} while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);
@@ -717,7 +762,10 @@
data.sg = &sg;
data.sg_len = 1;
+ data.timeout_ns = 1000000;
+ data.timeout_clks = 0;
mmc_set_data_timeout(&data, card);
+
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
@@ -765,7 +813,7 @@
unsigned int opcode;
int err;
- if (!card->ext_csd.hpi) {
+ if (!card->ext_csd.hpi_en) {
pr_warn("%s: Card didn't support HPI command\n",
mmc_hostname(card->host));
return -EINVAL;
@@ -782,7 +830,7 @@
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
- pr_warn("%s: error %d interrupting operation. "
+ pr_debug("%s: error %d interrupting operation. "
"HPI command response %#x\n", mmc_hostname(card->host),
err, cmd.resp[0]);
return err;
@@ -797,3 +845,21 @@
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
+
+int mmc_discard_queue(struct mmc_host *host, u32 tasks)
+{
+ struct mmc_command cmd = {0};
+
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ if (tasks) {
+ cmd.arg = DISCARD_TASK;
+ cmd.arg |= (tasks << 16);
+ } else {
+ cmd.arg = DISCARD_QUEUE;
+ }
+
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL(mmc_discard_queue);
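For the discard command above, a non-zero task bitmap selects per-task discard with the map placed in the upper halfword of the argument, while zero discards the whole queue. DISCARD_TASK and DISCARD_QUEUE come from the driver's headers and are not shown in this hunk, so stand-in values are used in this sketch of the argument construction:

#include <stdio.h>

#define DISCARD_TASK  1U	/* stand-ins: real values live in the headers */
#define DISCARD_QUEUE 2U

static unsigned int cmdq_discard_arg(unsigned int tasks)
{
	if (tasks)
		return DISCARD_TASK | (tasks << 16);	/* per-task bitmap */
	return DISCARD_QUEUE;				/* flush everything */
}

int main(void)
{
	printf("0x%08x\n", cmdq_discard_arg(0x5));	/* tasks 0 and 2 */
	printf("0x%08x\n", cmdq_discard_arg(0));	/* whole queue */
	return 0;
}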
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index f1b8e81..ad1058c 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -27,10 +27,12 @@
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_discard_queue(struct mmc_host *host, u32 tasks);
int mmc_switch_status_error(struct mmc_host *host, u32 status);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, bool use_busy_signal, bool send_status,
bool ignore_crc);
-
+int __mmc_send_status(struct mmc_card *card, u32 *status,
+ bool ignore_crc);
#endif
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 4e65ea5..ba44cfd 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -35,6 +35,51 @@
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
#endif
+#ifndef SDIO_VENDOR_ID_MSM
+#define SDIO_VENDOR_ID_MSM 0x0070
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_WCN1314
+#define SDIO_DEVICE_ID_MSM_WCN1314 0x2881
+#endif
+
+#ifndef SDIO_VENDOR_ID_MSM_QCA
+#define SDIO_VENDOR_ID_MSM_QCA 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_1 0x300
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_2 0x301
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_1 0x400
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_2 0x401
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA6574
+#define SDIO_VENDOR_ID_QCA6574 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA6574
+#define SDIO_DEVICE_ID_QCA6574 0x50a
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA9377
+#define SDIO_VENDOR_ID_QCA9377 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA9377
+#define SDIO_DEVICE_ID_QCA9377 0x701
+#endif
+
+
/*
* This hook just adds a quirk for all sdio devices
*/
@@ -54,6 +99,21 @@
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM, SDIO_DEVICE_ID_MSM_WCN1314,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_1,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_2,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_1,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_2,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
@@ -66,6 +126,11 @@
SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+ SDIO_FIXUP(SDIO_VENDOR_ID_QCA6574, SDIO_DEVICE_ID_QCA6574,
+ add_quirk, MMC_QUIRK_QCA6574_SETTINGS),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_QCA9377, SDIO_DEVICE_ID_QCA9377,
+ add_quirk, MMC_QUIRK_QCA9377_SETTINGS),
END_FIXUP
};
@@ -86,6 +151,8 @@
(f->name == CID_NAME_ANY ||
!strncmp(f->name, card->cid.prod_name,
sizeof(card->cid.prod_name))) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
(f->cis_vendor == card->cis.vendor ||
f->cis_vendor == (u16) SDIO_ANY_ID) &&
(f->cis_device == card->cis.device ||
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 60542b2..7112f9f 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -27,6 +27,12 @@
#include "sd.h"
#include "sd_ops.h"
+#define UHS_SDR104_MIN_DTR (100 * 1000 * 1000)
+#define UHS_DDR50_MIN_DTR (50 * 1000 * 1000)
+#define UHS_SDR50_MIN_DTR (50 * 1000 * 1000)
+#define UHS_SDR25_MIN_DTR (25 * 1000 * 1000)
+#define UHS_SDR12_MIN_DTR (12.5 * 1000 * 1000)
+
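With these thresholds, the bus-speed selection below only picks a UHS mode when the host's maximum clock actually exceeds the mode's minimum useful rate, so a host capped at 50 MHz no longer claims SDR104. The cascade, reduced to its arithmetic:

#include <stdio.h>

#define UHS_SDR104_MIN_DTR 100000000UL
#define UHS_SDR50_MIN_DTR   50000000UL

int main(void)
{
	unsigned long host_f_max = 50000000UL;	/* host capped at 50 MHz */

	if (host_f_max > UHS_SDR104_MIN_DTR)
		printf("SDR104\n");
	else if (host_f_max > UHS_SDR50_MIN_DTR)
		printf("SDR50\n");
	else
		printf("fall back to SDR25/SDR12\n");	/* printed here */
	return 0;
}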
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -368,9 +374,9 @@
goto out;
if ((status[16] & 0xF) != 1) {
- pr_warn("%s: Problem switching card into high-speed mode!\n",
- mmc_hostname(card->host));
- err = 0;
+ pr_warn("%s: Problem switching card into high-speed mode!, status:%x\n",
+ mmc_hostname(card->host), (status[16] & 0xF));
+ err = -EBUSY;
} else {
err = 1;
}
@@ -424,24 +430,28 @@
}
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
- card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
- } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
- card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104) &&
+ (card->host->f_max > UHS_SDR104_MIN_DTR)) {
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
- SD_MODE_UHS_SDR50)) {
- card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ SD_MODE_UHS_SDR50) &&
+ (card->host->f_max > UHS_SDR50_MIN_DTR)) {
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
+ (card->host->f_max > UHS_DDR50_MIN_DTR)) {
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
- card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25) &&
+ (card->host->f_max > UHS_SDR25_MIN_DTR)) {
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
- card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
}
@@ -479,15 +489,17 @@
if (err)
return err;
- if ((status[16] & 0xF) != card->sd_bus_speed)
- pr_warn("%s: Problem setting bus speed mode!\n",
- mmc_hostname(card->host));
- else {
+ if ((status[16] & 0xF) != card->sd_bus_speed) {
+ pr_warn("%s: Problem setting bus speed mode(%u)! max_dtr:%u, timing:%u, status:%x\n",
+ mmc_hostname(card->host), card->sd_bus_speed,
+ card->sw_caps.uhs_max_dtr, timing, (status[16] & 0xF));
+ err = -EBUSY;
+ } else {
mmc_set_timing(card->host, timing);
mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
}
- return 0;
+ return err;
}
/* Get host's max current setting at its current voltage */
@@ -579,6 +591,64 @@
return 0;
}
+/**
+ * mmc_sd_change_bus_speed() - Change SD card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the SD card bus frequency at runtime after the card is
+ * initialized. Callers are expected to make sure of the card's
+ * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
+ *
+ * If the requested frequency is greater than the maximum supported by
+ * the card, *freq is changed to the card's maximum; if it is less than
+ * the minimum supported by the host, *freq is changed to the host's minimum.
+ */
+static int mmc_sd_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+ int err = 0;
+ struct mmc_card *card;
+
+ mmc_claim_host(host);
+ /*
+ * Assign card pointer after claiming host to avoid race
+ * conditions that may arise during removal of the card.
+ */
+ card = host->card;
+
+ /* sanity checks */
+ if (!card || !freq) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mmc_set_clock(host, (unsigned int) (*freq));
+
+ if (!mmc_host_is_spi(card->host) && mmc_card_uhs(card)
+ && card->host->ops->execute_tuning) {
+ /*
+ * We probe the host driver for tuning at any frequency;
+ * it is the host driver's responsibility to perform
+ * actual tuning only when required.
+ */
+ mmc_host_clk_hold(card->host);
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+ mmc_host_clk_release(card->host);
+
+ if (err) {
+ pr_warn("%s: %s: tuning execution failed %d. Restoring to previous clock %lu\n",
+ mmc_hostname(card->host), __func__, err,
+ host->clk_scaling.curr_freq);
+ mmc_set_clock(host, host->clk_scaling.curr_freq);
+ }
+ }
+
+out:
+ mmc_release_host(host);
+ return err;
+}
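
A hypothetical caller of the new hook, clamping the request to the scaling bounds recorded at card init before dereferencing the bus op (a sketch only; direct bus_ops access stands in here for the core's clock-scaling plumbing):

/* Hypothetical: request a new SD bus clock from clock-scaling code. */
static int sd_request_bus_speed(struct mmc_host *host, unsigned long hz)
{
	unsigned long freq = hz;

	if (host->card) {
		freq = min(freq, (unsigned long)host->card->clk_scaling_highest);
		freq = max(freq, (unsigned long)host->card->clk_scaling_lowest);
	}

	if (!host->bus_ops || !host->bus_ops->change_bus_speed)
		return -EOPNOTSUPP;

	return host->bus_ops->change_bus_speed(host, &freq);
}
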
+
/*
* UHS-I specific initialization procedure
*/
@@ -935,7 +1005,10 @@
{
unsigned max_dtr = (unsigned int)-1;
- if (mmc_card_hs(card)) {
+ if (mmc_card_uhs(card)) {
+ if (max_dtr > card->sw_caps.uhs_max_dtr)
+ max_dtr = card->sw_caps.uhs_max_dtr;
+ } else if (mmc_card_hs(card)) {
if (max_dtr > card->sw_caps.hs_max_dtr)
max_dtr = card->sw_caps.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
@@ -997,6 +1070,7 @@
err = mmc_send_relative_addr(host, &card->rca);
if (err)
goto free_card;
+ host->card = card;
}
if (!oldcard) {
@@ -1060,12 +1134,16 @@
}
}
- host->card = card;
+ card->clk_scaling_highest = mmc_sd_get_max_clock(card);
+ card->clk_scaling_lowest = host->f_min;
+
return 0;
free_card:
- if (!oldcard)
+ if (!oldcard) {
+ host->card = NULL;
mmc_remove_card(card);
+ }
return err;
}
@@ -1078,8 +1156,12 @@
BUG_ON(!host);
BUG_ON(!host->card);
+ mmc_exit_clk_scaling(host);
mmc_remove_card(host->card);
+
+ mmc_claim_host(host);
host->card = NULL;
+ mmc_release_host(host);
}
/*
@@ -1121,6 +1203,7 @@
if (!retries) {
printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
__func__, mmc_hostname(host), err);
+ err = _mmc_detect_card_removed(host);
}
#else
err = _mmc_detect_card_removed(host);
@@ -1145,6 +1228,13 @@
BUG_ON(!host);
BUG_ON(!host->card);
+ err = mmc_suspend_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
@@ -1170,11 +1260,13 @@
{
int err;
+ MMC_TRACE(host, "%s: Enter\n", __func__);
err = _mmc_sd_suspend(host);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1207,8 +1299,11 @@
if (err) {
printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
mmc_hostname(host), err, retries);
- mdelay(5);
retries--;
+ mmc_power_off(host);
+ usleep_range(5000, 5500);
+ mmc_power_up(host, host->card->ocr);
+ mmc_select_voltage(host, host->card->ocr);
continue;
}
break;
@@ -1218,6 +1313,15 @@
#endif
mmc_card_clr_suspended(host->card);
+ if (host->card->sdr104_blocked)
+ goto out;
+ err = mmc_resume_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
+
out:
mmc_release_host(host);
return err;
@@ -1228,7 +1332,12 @@
*/
static int mmc_sd_resume(struct mmc_host *host)
{
+ int err = 0;
+
+ MMC_TRACE(host, "%s: Enter\n", __func__);
pm_runtime_enable(&host->card->dev);
+
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return 0;
}
@@ -1279,7 +1388,7 @@
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
.alive = mmc_sd_alive,
- .shutdown = mmc_sd_suspend,
+ .change_bus_speed = mmc_sd_change_bus_speed,
.reset = mmc_sd_reset,
};
@@ -1335,6 +1444,10 @@
err = mmc_sd_init_card(host, rocr, NULL);
if (err) {
retries--;
+ mmc_power_off(host);
+ usleep_range(5000, 5500);
+ mmc_power_up(host, rocr);
+ mmc_select_voltage(host, rocr);
continue;
}
break;
@@ -1357,6 +1470,13 @@
goto remove_card;
mmc_claim_host(host);
+
+ err = mmc_init_clk_scaling(host);
+ if (err) {
+ mmc_release_host(host);
+ goto remove_card;
+ }
+
return 0;
remove_card:
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 8e10bdc..9ebe730 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -186,6 +186,23 @@
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_INTERRUPT_EXTENSION, 0, &data);
+ if (ret)
+ goto out;
+ if (data & SDIO_SUPPORT_ASYNC_INTR) {
+ if (card->host->caps2 &
+ MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) {
+ data |= SDIO_ENABLE_ASYNC_INTR;
+ ret = mmc_io_rw_direct(card, 1, 0,
+ SDIO_CCCR_INTERRUPT_EXTENSION,
+ data, NULL);
+ if (ret)
+ goto out;
+ card->cccr.async_intr_sup = 1;
+ }
+ }
}
/* if no uhs mode ensure we check for high speed */
@@ -204,12 +221,60 @@
return ret;
}
+static void sdio_enable_vendor_specific_settings(struct mmc_card *card)
+{
+ int ret;
+ u8 settings;
+
+ if (mmc_enable_qca6574_settings(card) ||
+ mmc_enable_qca9377_settings(card)) {
+ ret = mmc_io_rw_direct(card, 1, 0, 0xF2, 0x0F, NULL);
+ if (ret) {
+ pr_crit("%s: failed to write to fn 0xf2 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0, 0xF1, 0, &settings);
+ if (ret) {
+ pr_crit("%s: failed to read fn 0xf1 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ settings |= 0x80;
+ ret = mmc_io_rw_direct(card, 1, 0, 0xF1, settings, NULL);
+ if (ret) {
+ pr_crit("%s: failed to write to fn 0xf1 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0, 0xF0, 0, &settings);
+ if (ret) {
+ pr_crit("%s: failed to read fn 0xf0 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ settings |= 0x20;
+ ret = mmc_io_rw_direct(card, 1, 0, 0xF0, settings, NULL);
+ if (ret) {
+ pr_crit("%s: failed to write to fn 0xf0 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+ }
+out:
+ return;
+}
+
static int sdio_enable_wide(struct mmc_card *card)
{
int ret;
u8 ctrl;
- if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -225,7 +290,10 @@
 /* set the widest supported bus width (8-bit if available, else 4-bit) */
ctrl &= ~SDIO_BUS_WIDTH_MASK;
- ctrl |= SDIO_BUS_WIDTH_4BIT;
+ if (card->host->caps & MMC_CAP_8_BIT_DATA)
+ ctrl |= SDIO_BUS_WIDTH_8BIT;
+ else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+ ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
if (ret)
@@ -266,7 +334,7 @@
int ret;
u8 ctrl;
- if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -276,10 +344,10 @@
if (ret)
return ret;
- if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+ if (!(ctrl & (SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT)))
return 0;
- ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+ ctrl &= ~(SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT);
ctrl |= SDIO_BUS_ASYNC_INT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -496,6 +564,9 @@
if (err)
return err;
+ /* Vendor specific settings based on card quirks */
+ sdio_enable_vendor_specific_settings(card);
+
speed &= ~SDIO_SPEED_BSS_MASK;
speed |= bus_speed;
err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
@@ -622,8 +693,11 @@
/*
* Call the optional HC's init_card function to handle quirks.
*/
- if (host->ops->init_card)
+ if (host->ops->init_card) {
+ mmc_host_clk_hold(host);
host->ops->init_card(host, card);
+ mmc_host_clk_release(host);
+ }
/*
* If the host and card support UHS-I mode request the card
@@ -790,7 +864,12 @@
* Switch to wider bus (if supported).
*/
err = sdio_enable_4bit_bus(card);
- if (err)
+ if (err > 0) {
+ if (card->host->caps & MMC_CAP_8_BIT_DATA)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
+ else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ } else if (err)
goto remove;
}
finish:
@@ -917,6 +996,7 @@
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
+ MMC_TRACE(host, "%s: Enter\n", __func__);
mmc_claim_host(host);
if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
@@ -924,13 +1004,15 @@
if (!mmc_card_keep_power(host)) {
mmc_power_off(host);
+ } else if (host->ios.clock) {
+ mmc_gate_clock(host);
} else if (host->retune_period) {
mmc_retune_timer_stop(host);
mmc_retune_needed(host);
}
mmc_release_host(host);
-
+ MMC_TRACE(host, "%s: Exit\n", __func__);
return 0;
}
@@ -941,6 +1023,7 @@
BUG_ON(!host);
BUG_ON(!host->card);
+ MMC_TRACE(host, "%s: Enter\n", __func__);
/* Basic card reinitialization. */
mmc_claim_host(host);
@@ -973,6 +1056,13 @@
} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
+ if (err > 0) {
+ if (host->caps & MMC_CAP_8_BIT_DATA)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
+ else if (host->caps & MMC_CAP_4_BIT_DATA)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
}
if (!err && host->sdio_irqs) {
@@ -988,6 +1078,8 @@
mmc_release_host(host);
host->pm_flags &= ~MMC_PM_KEEP_POWER;
+ host->pm_flags &= ~MMC_PM_WAKE_SDIO_IRQ;
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1222,40 +1314,6 @@
int sdio_reset_comm(struct mmc_card *card)
{
- struct mmc_host *host = card->host;
- u32 ocr;
- u32 rocr;
- int err;
-
- printk("%s():\n", __func__);
- mmc_claim_host(host);
-
- mmc_retune_disable(host);
-
- mmc_go_idle(host);
-
- mmc_set_clock(host, host->f_min);
-
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (err)
- goto err;
-
- rocr = mmc_select_voltage(host, ocr);
- if (!rocr) {
- err = -EINVAL;
- goto err;
- }
-
- err = mmc_sdio_init_card(host, rocr, card, 0);
- if (err)
- goto err;
-
- mmc_release_host(host);
- return 0;
-err:
- printk("%s: Error resetting SDIO communications (%d)\n",
- mmc_hostname(host), err);
- mmc_release_host(host);
- return err;
+ return mmc_power_restore_host(card->host);
}
EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index dcb3dee..5d7f198 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -55,7 +55,7 @@
for (i = 0; i < nr_strings; i++) {
buffer[i] = string;
- strcpy(string, buf);
+ strlcpy(string, buf, strlen(buf) + 1);
string += strlen(string) + 1;
buf += strlen(buf) + 1;
}
@@ -276,8 +276,16 @@
break;
/* null entries have no link field or data */
- if (tpl_code == 0x00)
- continue;
+ if (tpl_code == 0x00) {
+ if (card->cis.vendor == 0x70 &&
+ (card->cis.device == 0x2460 ||
+ card->cis.device == 0x0460 ||
+ card->cis.device == 0x23F1 ||
+ card->cis.device == 0x23F0))
+ break;
+ else
+ continue;
+ }
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
if (ret)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 09cc67d..95589d1 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -93,7 +93,9 @@
{
mmc_claim_host(host);
host->sdio_irq_pending = true;
+ mmc_host_clk_hold(host);
process_sdio_pending_irqs(host);
+ mmc_host_clk_release(host);
mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);
@@ -104,6 +106,7 @@
struct sched_param param = { .sched_priority = 1 };
unsigned long period, idle_period;
int ret;
+ bool ws;
 sched_setscheduler(current, SCHED_FIFO, &param);
@@ -137,6 +140,17 @@
ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
if (ret)
break;
+ ws = false;
+ /*
+ * Prevent suspend if it has started by the time this thread is
+ * scheduled; 100 msec (approximate) should be enough for the
+ * system to resume and attend to the card's request.
+ */
+ if ((host->dev_status == DEV_SUSPENDING) ||
+ (host->dev_status == DEV_SUSPENDED)) {
+ pm_wakeup_event(&host->card->dev, 100);
+ ws = true;
+ }
ret = process_sdio_pending_irqs(host);
host->sdio_irq_pending = false;
mmc_release_host(host);
@@ -173,6 +187,12 @@
host->ops->enable_sdio_irq(host, 1);
mmc_host_clk_release(host);
}
+ /*
+ * Function drivers will have processed the event from the card
+ * unless we are suspended, hence release the wake source.
+ */
+ if (ws && (host->dev_status == DEV_RESUMED))
+ pm_relax(&host->card->dev);
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5274f50..515abb2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -396,18 +396,39 @@
If unsure, say N.
config MMC_SDHCI_MSM
- tristate "Qualcomm SDHCI Controller Support"
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ tristate "Qualcomm Technologies, Inc. SDHCI Controller Support"
+ depends on ARCH_QCOM || ARCH_MSM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in Qualcomm SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in Qualcomm Technologies, Inc. SOCs. The controller
+ supports SD/MMC/SDIO devices.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_MSM_ICE
+ bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core"
+ depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE
+ help
+ This selects the QTI specific additions to support Inline Crypto
+ Engine (ICE). ICE accelerates the crypto operations and maintains
+ the high SDHCI performance.
+
+ Select this if you have ICE supported for SDHCI on QTI chipset.
+ If unsure, say N.
+
+config MMC_MSM
+ tristate "Qualcomm SDCC Controller Support"
+ depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
+ help
+ This provides support for the SD/MMC cell found in the
+ MSM and QSD SOCs from Qualcomm. The controller also has
+ support for SDIO devices.
+
config MMC_MXC
tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
depends on ARCH_MXC || PPC_MPC512x
@@ -764,6 +785,19 @@
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQ_HCI
+ tristate "Command Queue Support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+ support present in host controllers of Qualcomm Technologies, Inc.,
+ amongst others.
+ This controller supports eMMC devices with command queue support.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index e2bdaaf..64defc5 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -71,9 +71,11 @@
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
-obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
+obj-$(CONFIG_MMC_SDHCI_MSM_ICE) += sdhci-msm-ice.o
+obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
+obj-$(CONFIG_MMC_CQ_HCI) += cmdq_hci.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
new file mode 100644
index 0000000..77c5ca3
--- /dev/null
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -0,0 +1,1197 @@
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/blkdev.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+#include "cmdq_hci.h"
+#include "sdhci.h"
+#include "sdhci-msm.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+/* 10 sec */
+#define HALT_TIMEOUT_MS 10000
+
+static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
+static int cmdq_halt(struct mmc_host *mmc, bool halt);
+
+#ifdef CONFIG_PM_RUNTIME
+static int cmdq_runtime_pm_get(struct cmdq_host *host)
+{
+ return pm_runtime_get_sync(host->mmc->parent);
+}
+static int cmdq_runtime_pm_put(struct cmdq_host *host)
+{
+ pm_runtime_mark_last_busy(host->mmc->parent);
+ return pm_runtime_put_autosuspend(host->mmc->parent);
+}
+#else
+static inline int cmdq_runtime_pm_get(struct cmdq_host *host)
+{
+ return 0;
+}
+static inline int cmdq_runtime_pm_put(struct cmdq_host *host)
+{
+ return 0;
+}
+#endif
+static inline struct mmc_request *get_req_by_tag(struct cmdq_host *cq_host,
+ unsigned int tag)
+{
+ return cq_host->mrq_slot[tag];
+}
+
+static inline u8 *get_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cmdq_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
+ if (tag == DCMD_SLOT) {
+ *link_temp = VALID(0) | ACT(0) | END(1);
+ return;
+ }
+
+ *link_temp = VALID(1) | ACT(0x6) | END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
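
Concretely, the attribute bytes written above evaluate as follows (derived from the VALID/END/ACT macros in cmdq_hci.h):

  data slot (tag != DCMD_SLOT): VALID(1) | ACT(0x6) | END(0)
                                = 0x01 | (0x6 << 3) = 0x31, followed at
                                offset 4 by the DMA address of that slot's
                                transfer-descriptor list;
  DCMD slot (tag == DCMD_SLOT): VALID(0) | ACT(0) | END(1) = 0x02, a
                                terminating link with nothing chained.
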
+
+static void cmdq_set_halt_irq(struct cmdq_host *cq_host, bool enable)
+{
+ u32 ier;
+
+ ier = cmdq_readl(cq_host, CQISTE);
+ if (enable) {
+ cmdq_writel(cq_host, ier | HALT, CQISTE);
+ cmdq_writel(cq_host, ier | HALT, CQISGE);
+ } else {
+ cmdq_writel(cq_host, ier & ~HALT, CQISTE);
+ cmdq_writel(cq_host, ier & ~HALT, CQISGE);
+ }
+}
+
+static void cmdq_clear_set_irqs(struct cmdq_host *cq_host, u32 clear, u32 set)
+{
+ u32 ier;
+
+ ier = cmdq_readl(cq_host, CQISTE);
+ ier &= ~clear;
+ ier |= set;
+ cmdq_writel(cq_host, ier, CQISTE);
+ cmdq_writel(cq_host, ier, CQISGE);
+ /* ensure the writes are done */
+ mb();
+}
+
+
+#define DRV_NAME "cmdq-host"
+
+static void cmdq_dump_task_history(struct cmdq_host *cq_host)
+{
+ int i;
+
+ if (likely(!cq_host->mmc->cmdq_thist_enabled))
+ return;
+
+ if (!cq_host->thist) {
+ pr_err("%s: %s: CMDQ task history buffer not allocated\n",
+ mmc_hostname(cq_host->mmc), __func__);
+ return;
+ }
+
+ pr_err("---- Circular Task History ----\n");
+ pr_err(DRV_NAME ": Last entry index: %d", cq_host->thist_idx - 1);
+
+ for (i = 0; i < cq_host->num_slots; i++) {
+ pr_err(DRV_NAME ": [%02d]%s Task: 0x%08x | Args: 0x%08x\n", i,
+ (cq_host->thist[i].is_dcmd) ? "DCMD" : "DATA",
+ lower_32_bits(cq_host->thist[i].task),
+ upper_32_bits(cq_host->thist[i].task));
+ }
+ pr_err("-------------------------\n");
+}
+
+static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ dma_addr_t desc_dma;
+ int tag = 0;
+ unsigned long data_active_reqs =
+ mmc->cmdq_ctx.data_active_reqs;
+ unsigned long desc_size =
+ (cq_host->mmc->max_segs * cq_host->trans_desc_len);
+
+ for_each_set_bit(tag, &data_active_reqs, cq_host->num_slots) {
+ desc_dma = get_trans_desc_dma(cq_host, tag);
+ pr_err("%s: %s: tag = %d, trans_dma(phys) = %pad, trans_desc(virt) = 0x%p\n",
+ mmc_hostname(mmc), __func__, tag,
+ &desc_dma, get_trans_desc(cq_host, tag));
+ print_hex_dump(KERN_ERR, "cmdq-adma:", DUMP_PREFIX_ADDRESS,
+ 32, 8, get_trans_desc(cq_host, tag),
+ (desc_size), false);
+ }
+}
+
+static void cmdq_dumpregs(struct cmdq_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+
+ MMC_TRACE(mmc,
+ "%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
+ __func__, cmdq_readl(cq_host, CQCTL), cmdq_readl(cq_host, CQIS),
+ cmdq_readl(cq_host, CQISTE), cmdq_readl(cq_host, CQISGE),
+ cmdq_readl(cq_host, CQTDBR), cmdq_readl(cq_host, CQTCN),
+ cmdq_readl(cq_host, CQDQS), cmdq_readl(cq_host, CQDPT),
+ cmdq_readl(cq_host, CQTERRI), cmdq_readl(cq_host, CQCRI),
+ cmdq_readl(cq_host, CQCRA), cmdq_readl(cq_host, CQCRDCT));
+ pr_err(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
+ mmc_hostname(mmc));
+
+ pr_err(DRV_NAME ": Caps: 0x%08x | Version: 0x%08x\n",
+ cmdq_readl(cq_host, CQCAP),
+ cmdq_readl(cq_host, CQVER));
+ pr_err(DRV_NAME ": Queing config: 0x%08x | Queue Ctrl: 0x%08x\n",
+ cmdq_readl(cq_host, CQCFG),
+ cmdq_readl(cq_host, CQCTL));
+ pr_err(DRV_NAME ": Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cmdq_readl(cq_host, CQIS),
+ cmdq_readl(cq_host, CQISTE));
+ pr_err(DRV_NAME ": Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cmdq_readl(cq_host, CQISGE),
+ cmdq_readl(cq_host, CQIC));
+ pr_err(DRV_NAME ": TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cmdq_readl(cq_host, CQTDLBA),
+ cmdq_readl(cq_host, CQTDLBAU));
+ pr_err(DRV_NAME ": Doorbell: 0x%08x | Comp Notif: 0x%08x\n",
+ cmdq_readl(cq_host, CQTDBR),
+ cmdq_readl(cq_host, CQTCN));
+ pr_err(DRV_NAME ": Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cmdq_readl(cq_host, CQDQS),
+ cmdq_readl(cq_host, CQDPT));
+ pr_err(DRV_NAME ": Task clr: 0x%08x | Send stat 1: 0x%08x\n",
+ cmdq_readl(cq_host, CQTCLR),
+ cmdq_readl(cq_host, CQSSC1));
+ pr_err(DRV_NAME ": Send stat 2: 0x%08x | DCMD resp: 0x%08x\n",
+ cmdq_readl(cq_host, CQSSC2),
+ cmdq_readl(cq_host, CQCRDCT));
+ pr_err(DRV_NAME ": Resp err mask: 0x%08x | Task err: 0x%08x\n",
+ cmdq_readl(cq_host, CQRMEM),
+ cmdq_readl(cq_host, CQTERRI));
+ pr_err(DRV_NAME ": Resp idx 0x%08x | Resp arg: 0x%08x\n",
+ cmdq_readl(cq_host, CQCRI),
+ cmdq_readl(cq_host, CQCRA));
+ pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
+ cmdq_readl(cq_host, CQ_VENDOR_CFG));
+ pr_err(DRV_NAME ": ===========================================\n");
+
+ cmdq_dump_task_history(cq_host);
+ if (cq_host->ops->dump_vendor_regs)
+ cq_host->ops->dump_vendor_regs(mmc);
+}
+
+/**
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc |  |->|----------|
+ * |----------|  |  |trans desc|
+ * |link desc-|->|  |----------|
+ * |----------|        .
+ *      .              .
+ *  no. of slots     max-segs
+ *      .            |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark & point the
+ * link desc to the transfer desc table on a per slot basis.
+ */
+static int cmdq_host_alloc_tdl(struct cmdq_host *cq_host)
+{
+
+ size_t desc_size;
+ size_t data_size;
+ int i = 0;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CMDQ_TASK_DESC_SZ_128) {
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) |
+ CQ_TASK_DESC_SZ, CQCFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+ * The transfer descriptor may be 96 bits long instead of 128 bits,
+ * which means ADMA would expect the next valid descriptor at the
+ * 96th or 128th bit.
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CMDQ_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_info("%s: desc_size: %d data_sz: %d slot-sz: %d\n", __func__,
+ (int)desc_size, (int)data_size, cq_host->slot_sz);
+
+ /*
+ * allocate a dma-mapped chunk of memory for the descriptors
+ * allocate a dma-mapped chunk of memory for link descriptors
+ * setup each link-desc memory offset per slot-number to
+ * the descriptor table.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ cq_host->thist = devm_kzalloc(mmc_dev(cq_host->mmc),
+ (sizeof(*cq_host->thist) *
+ cq_host->num_slots),
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_info("desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long) cq_host->trans_desc_dma_base);
+
+ for (; i < (cq_host->num_slots); i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
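
A worked sizing example for the allocations above (inputs assumed purely for illustration): a 64-bit DMA host without the short-descriptor quirk, 64-bit task descriptors, NUM_SLOTS = 32 and max_segs = 128 gives:

  task_desc_len  = 8
  link_desc_len  = 16
  slot_sz        = 8 + 16 = 24
  desc_size      = 24 * 32 = 768 bytes (task + link list)
  trans_desc_len = 16
  data_size      = 16 * 128 * (32 - 1) = 63488 bytes

num_slots - 1 appears in data_size because the DCMD slot never carries a data transfer.
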
+
+static int cmdq_enable(struct mmc_host *mmc)
+{
+ int err = 0;
+ u32 cqcfg;
+ bool dcmd_enable;
+ struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
+
+ if (!cq_host || !mmc->card || !mmc_card_cmdq(mmc->card)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (cq_host->enabled)
+ goto out;
+
+ cmdq_runtime_pm_get(cq_host);
+ cqcfg = cmdq_readl(cq_host, CQCFG);
+ if (cqcfg & 0x1) {
+ pr_info("%s: %s: cq_host is already enabled\n",
+ mmc_hostname(mmc), __func__);
+ WARN_ON(1);
+ goto pm_ref_count;
+ }
+
+ if (cq_host->quirks & CMDQ_QUIRK_NO_DCMD)
+ dcmd_enable = false;
+ else
+ dcmd_enable = true;
+
+ cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
+ (dcmd_enable ? CQ_DCMD : 0));
+
+ cmdq_writel(cq_host, cqcfg, CQCFG);
+ /* enable CQ_HOST */
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
+ CQCFG);
+
+ if (!cq_host->desc_base ||
+ !cq_host->trans_desc_base) {
+ err = cmdq_host_alloc_tdl(cq_host);
+ if (err)
+ goto pm_ref_count;
+ }
+
+ cmdq_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), CQTDLBA);
+ cmdq_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), CQTDLBAU);
+
+ /*
+ * disable all vendor interrupts
+ * enable CMDQ interrupts
+ * enable the vendor error interrupts
+ */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+
+ cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);
+
+ /* cq_host would use this rca to address the card */
+ cmdq_writel(cq_host, mmc->card->rca, CQSSC2);
+
+ /* send QSR at lesser intervals than the default */
+ cmdq_writel(cq_host, SEND_QSR_INTERVAL, CQSSC1);
+
+ /* enable bkops exception indication */
+ if (mmc_card_configured_manual_bkops(mmc->card) &&
+ !mmc_card_configured_auto_bkops(mmc->card))
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQRMEM) | CQ_EXCEPTION,
+ CQRMEM);
+
+ /* ensure the writes are done before enabling CQE */
+ mb();
+
+ cq_host->enabled = true;
+ mmc_host_clr_cq_disable(mmc);
+
+ if (cq_host->ops->set_transfer_params)
+ cq_host->ops->set_transfer_params(mmc);
+
+ if (cq_host->ops->set_block_size)
+ cq_host->ops->set_block_size(cq_host->mmc);
+
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+
+ if (cq_host->ops->clear_set_dumpregs)
+ cq_host->ops->clear_set_dumpregs(mmc, 1);
+
+ if (cq_host->ops->enhanced_strobe_mask)
+ cq_host->ops->enhanced_strobe_mask(mmc, true);
+
+pm_ref_count:
+ cmdq_runtime_pm_put(cq_host);
+out:
+ MMC_TRACE(mmc, "%s: CQ enabled err: %d\n", __func__, err);
+ return err;
+}
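
For reference, with 128-bit task descriptors and DCMD enabled the CQCFG value assembled above is CQ_TASK_DESC_SZ | CQ_DCMD = 0x00001100, which becomes 0x00001101 once CQ_ENABLE is OR-ed in.
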
+
+static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+ if (soft) {
+ cmdq_writel(cq_host, cmdq_readl(
+ cq_host, CQCFG) & ~(CQ_ENABLE),
+ CQCFG);
+ }
+ if (cq_host->ops->enhanced_strobe_mask)
+ cq_host->ops->enhanced_strobe_mask(mmc, false);
+
+ cq_host->enabled = false;
+ mmc_host_set_cq_disable(mmc);
+ MMC_TRACE(mmc, "%s: CQ disabled\n", __func__);
+}
+
+static void cmdq_disable(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+ cmdq_runtime_pm_get(cq_host);
+ cmdq_disable_nosync(mmc, soft);
+ cmdq_runtime_pm_put(cq_host);
+}
+
+static void cmdq_reset(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ unsigned int cqcfg;
+ unsigned int tdlba;
+ unsigned int tdlbau;
+ unsigned int rca;
+ int ret;
+
+ cmdq_runtime_pm_get(cq_host);
+ cqcfg = cmdq_readl(cq_host, CQCFG);
+ tdlba = cmdq_readl(cq_host, CQTDLBA);
+ tdlbau = cmdq_readl(cq_host, CQTDLBAU);
+ rca = cmdq_readl(cq_host, CQSSC2);
+
+ cmdq_disable(mmc, true);
+
+ if (cq_host->ops->reset) {
+ ret = cq_host->ops->reset(mmc);
+ if (ret) {
+ pr_crit("%s: reset CMDQ controller: failed\n",
+ mmc_hostname(mmc));
+ BUG();
+ }
+ }
+
+ cmdq_writel(cq_host, tdlba, CQTDLBA);
+ cmdq_writel(cq_host, tdlbau, CQTDLBAU);
+
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+
+ cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);
+
+ /* cq_host would use this rca to address the card */
+ cmdq_writel(cq_host, rca, CQSSC2);
+
+ /* ensure the writes are done before enabling CQE */
+ mb();
+
+ cmdq_writel(cq_host, cqcfg, CQCFG);
+ cmdq_runtime_pm_put(cq_host);
+ cq_host->enabled = true;
+ mmc_host_clr_cq_disable(mmc);
+}
+
+static void cmdq_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr, bool qbr)
+{
+ struct mmc_cmdq_req *cmdq_req = mrq->cmdq_req;
+ u32 req_flags = cmdq_req->cmdq_req_flags;
+
+ pr_debug("%s: %s: data-tag: 0x%08x - dir: %d - prio: %d - cnt: 0x%08x - addr: 0x%llx\n",
+ mmc_hostname(mrq->host), __func__,
+ !!(req_flags & DAT_TAG), !!(req_flags & DIR),
+ !!(req_flags & PRIO), cmdq_req->data.blocks,
+ (u64)mrq->cmdq_req->blk_addr);
+
+ *data = VALID(1) |
+ END(1) |
+ INT(intr) |
+ ACT(0x5) |
+ FORCED_PROG(!!(req_flags & FORCED_PRG)) |
+ CONTEXT(mrq->cmdq_req->ctx_id) |
+ DATA_TAG(!!(req_flags & DAT_TAG)) |
+ DATA_DIR(!!(req_flags & DIR)) |
+ PRIORITY(!!(req_flags & PRIO)) |
+ QBAR(qbr) |
+ REL_WRITE(!!(req_flags & REL_WR)) |
+ BLK_COUNT(mrq->cmdq_req->data.blocks) |
+ BLK_ADDR((u64)mrq->cmdq_req->blk_addr);
+
+ MMC_TRACE(mrq->host,
+ "%s: Task: 0x%08x | Args: 0x%08x | cnt: 0x%08x\n", __func__,
+ lower_32_bits(*data),
+ upper_32_bits(*data),
+ mrq->cmdq_req->data.blocks);
+}
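
As a worked example (request parameters assumed for illustration), an 8-block transfer at block address 0x1000 with the DIR flag set, interrupt enabled and no QBR encodes as:

  *data = VALID(1) | END(1) | INT(1) | ACT(0x5) | DATA_DIR(1)
        | BLK_COUNT(8) | BLK_ADDR(0x1000)
        = 0x1 | 0x2 | 0x4 | 0x28 | 0x1000 | 0x80000 | (0x1000ULL << 32)

so the trace above would report Task: 0x0008102f | Args: 0x00001000.
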
+
+static int cmdq_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cmdq_set_tran_desc(u8 *desc, dma_addr_t addr, int len,
+ bool end, bool is_dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (VALID(1) |
+ END(end ? 1 : 0) |
+ INT(0) |
+ ACT(0x4) |
+ DAT_LENGTH(len));
+
+ if (is_dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
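
For instance (segment length assumed for illustration), the final 4 KiB segment of a scatterlist gets the attribute word:

  *attr = VALID(1) | END(1) | INT(0) | ACT(0x4) | DAT_LENGTH(4096)
        = 0x1 | 0x2 | 0x20 | (0x1000 << 16) = 0x10000023

followed at byte offset 4 by the segment's DMA address, 64 or 32 bits wide depending on cq_host->dma64.
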
+
+static int cmdq_prep_tran_desc(struct mmc_request *mrq,
+ struct cmdq_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cmdq_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+ memset(desc, 0, cq_host->trans_desc_len * cq_host->mmc->max_segs);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cmdq_set_tran_desc(desc, addr, len, end, cq_host->dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ pr_debug("%s: req: 0x%p tag: %d calc_trans_des: 0x%p sg-cnt: %d\n",
+ __func__, mrq->req, tag, desc, sg_count);
+
+ return 0;
+}
+
+static void cmdq_log_task_desc_history(struct cmdq_host *cq_host, u64 task,
+ bool is_dcmd)
+{
+ if (likely(!cq_host->mmc->cmdq_thist_enabled))
+ return;
+
+ if (!cq_host->thist) {
+ pr_err("%s: %s: CMDQ task history buffer not allocated\n",
+ mmc_hostname(cq_host->mmc), __func__);
+ return;
+ }
+
+ if (cq_host->thist_idx >= cq_host->num_slots)
+ cq_host->thist_idx = 0;
+
+ cq_host->thist[cq_host->thist_idx].is_dcmd = is_dcmd;
+ memcpy(&cq_host->thist[cq_host->thist_idx++].task,
+ &task, cq_host->task_desc_len);
+}
+
+static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_BUSY) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (VALID(1) |
+ END(1) |
+ INT(1) |
+ QBAR(1) |
+ ACT(0x5) |
+ CMD_INDEX(mrq->cmd->opcode) |
+ CMD_TIMING(timing) | RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("cmdq: dcmd: cmd: %d timing: %d resp: %d\n",
+ mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+ cmdq_log_task_desc_history(cq_host, *task_desc, true);
+ MMC_TRACE(mrq->host,
+ "%s: DCMD: Task: 0x%08x | Args: 0x%08x\n",
+ __func__,
+ lower_32_bits(*task_desc),
+ upper_32_bits(*task_desc));
+}
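
As a concrete encoding (opcode assumed for illustration), a DCMD carrying CMD6 (SWITCH, an R1b command, so resp_type = 0x3 and timing = 0x0 per the logic above) yields:

  data = VALID(1) | END(1) | INT(1) | QBAR(1) | ACT(0x5)
       | CMD_INDEX(6) | CMD_TIMING(0) | RESP_TYPE(0x3)
       = 0x7 | 0x4000 | 0x28 | 0x60000 | 0x1800000 = 0x0186402f

with the command argument placed at desc + 4.
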
+
+static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ sdhci_msm_pm_qos_cpu_vote(host,
+ msm_host->pdata->pm_qos_data.cmdq_latency, mrq->req->cpu);
+}
+
+static void cmdq_pm_qos_unvote(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ /* use async as we're inside an atomic context (soft-irq) */
+ sdhci_msm_pm_qos_cpu_unvote(host, mrq->req->cpu, true);
+}
+
+static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ u32 tag = mrq->cmdq_req->tag;
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!cq_host->enabled) {
+ pr_err("%s: CMDQ host not enabled yet !!!\n",
+ mmc_hostname(mmc));
+ err = -EINVAL;
+ goto out;
+ }
+
+ cmdq_runtime_pm_get(cq_host);
+
+ if (mrq->cmdq_req->cmdq_req_flags & DCMD) {
+ cmdq_prep_dcmd_desc(mmc, mrq);
+ cq_host->mrq_slot[DCMD_SLOT] = mrq;
+ /* DCMD's are always issued on a fixed slot */
+ tag = DCMD_SLOT;
+ goto ring_doorbell;
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+
+ cmdq_prep_task_desc(mrq, &data, 1,
+ (mrq->cmdq_req->cmdq_req_flags & QBR));
+ *task_desc = cpu_to_le64(data);
+ cmdq_log_task_desc_history(cq_host, *task_desc, false);
+
+ err = cmdq_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: %s: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), __func__, err);
+ goto out;
+ }
+
+ cq_host->mrq_slot[tag] = mrq;
+
+ /* PM QoS */
+ sdhci_msm_pm_qos_irq_vote(host);
+ cmdq_pm_qos_vote(host, mrq);
+ring_doorbell:
+ /* Ensure the task descriptor list is flushed before ringing doorbell */
+ wmb();
+ if (cmdq_readl(cq_host, CQTDBR) & (1 << tag)) {
+ cmdq_dumpregs(cq_host);
+ BUG_ON(1);
+ }
+ MMC_TRACE(mmc, "%s: tag: %d\n", __func__, tag);
+ cmdq_writel(cq_host, 1 << tag, CQTDBR);
+ /* Commit the doorbell write immediately */
+ wmb();
+
+out:
+ return err;
+}
+
+static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
+{
+ struct mmc_request *mrq;
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+ mrq = get_req_by_tag(cq_host, tag);
+ if (tag == cq_host->dcmd_slot)
+ mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
+
+ if (mrq->cmdq_req->cmdq_req_flags & DCMD)
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
+ CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
+
+ cmdq_runtime_pm_put(cq_host);
+ mrq->done(mrq);
+}
+
+irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ unsigned long err_info = 0;
+ struct mmc_request *mrq;
+ int ret;
+ u32 dbr_set = 0;
+
+ status = cmdq_readl(cq_host, CQIS);
+
+ if (!status && !err)
+ return IRQ_NONE;
+ MMC_TRACE(mmc, "%s: CQIS: 0x%x err: %d\n",
+ __func__, status, err);
+
+ if (err || (status & CQIS_RED)) {
+ err_info = cmdq_readl(cq_host, CQTERRI);
+ pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
+ mmc_hostname(mmc), err, status, err_info);
+
+ /*
+ * The CQE must be halted in the interrupt context itself on
+ * error; otherwise it may proceed to send commands to the
+ * device even while the CQE/card is in an error state.
+ * CMDQ error handling will make sure it is unhalted after all
+ * the errors have been handled.
+ */
+ ret = cmdq_halt_poll(mmc, true);
+ if (ret)
+ pr_err("%s: %s: halt failed ret=%d\n",
+ mmc_hostname(mmc), __func__, ret);
+
+ /*
+ * Clear CQIS after halting in case of error. If CQIS were
+ * cleared before halting, the CQ would continue issuing
+ * commands for the remaining requests whose doorbell is rung,
+ * overwriting the Resp Arg register. So the CQ must be halted
+ * first and CQIS cleared afterwards.
+ */
+ cmdq_writel(cq_host, status, CQIS);
+
+ cmdq_dumpregs(cq_host);
+
+ if (!err_info) {
+ /*
+ * For some errors (e.g. ADMA) the HW sometimes cannot
+ * provide CQTERRI info. The HW workaround for recovering
+ * from such a scenario is to halt/disable the CQE and do
+ * reset_all. Since there is no way to know which tag
+ * caused the error, pick the first bit set in the
+ * doorbell and proceed with an error on that tag.
+ */
+ dbr_set = cmdq_readl(cq_host, CQTDBR);
+ if (!dbr_set) {
+ pr_err("%s: spurious/force error interrupt\n",
+ mmc_hostname(mmc));
+ cmdq_halt_poll(mmc, false);
+ mmc_host_clr_halt(mmc);
+ return IRQ_HANDLED;
+ }
+
+ tag = ffs(dbr_set) - 1;
+ pr_err("%s: error tag selected: tag = %lu\n",
+ mmc_hostname(mmc), tag);
+ mrq = get_req_by_tag(cq_host, tag);
+ if (mrq->data)
+ mrq->data->error = err;
+ else
+ mrq->cmd->error = err;
+ /*
+ * Get ADMA descriptor memory in case of ADMA
+ * error for debug.
+ */
+ if (err == -EIO)
+ cmdq_dump_adma_mem(cq_host);
+ goto skip_cqterri;
+ }
+
+ if (err_info & CQ_RMEFV) {
+ tag = GET_CMD_ERR_TAG(err_info);
+ pr_err("%s: CMD err tag: %lu\n", __func__, tag);
+
+ mrq = get_req_by_tag(cq_host, tag);
+ /* CMD44/45/46/47 will not have a valid cmd */
+ if (mrq->cmd)
+ mrq->cmd->error = err;
+ else
+ mrq->data->error = err;
+ } else {
+ tag = GET_DAT_ERR_TAG(err_info);
+ pr_err("%s: Dat err tag: %lu\n", __func__, tag);
+ mrq = get_req_by_tag(cq_host, tag);
+ mrq->data->error = err;
+ }
+
+skip_cqterri:
+ /*
+ * If the CQE halt fails, disable the CQE from
+ * processing any further requests.
+ */
+ if (ret) {
+ cmdq_disable_nosync(mmc, true);
+ /*
+ * Enable legacy interrupts as CQE halt has failed.
+ * This is needed to send legacy commands like status
+ * cmd as part of error handling work.
+ */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
+ }
+
+ /*
+ * CQE detected a response error from device
+ * In most cases, this would require a reset.
+ */
+ if (status & CQIS_RED) {
+ /*
+ * will check if the RED error is due to a bkops
+ * exception once the queue is empty
+ */
+ BUG_ON(!mmc->card);
+ if (mmc_card_configured_manual_bkops(mmc->card) ||
+ mmc_card_configured_auto_bkops(mmc->card))
+ mmc->card->bkops.needs_check = true;
+
+ mrq->cmdq_req->resp_err = true;
+ pr_err("%s: Response error (0x%08x) from card !!!",
+ mmc_hostname(mmc), cmdq_readl(cq_host, CQCRA));
+
+ } else {
+ mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
+ mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
+ }
+
+ cmdq_finish_data(mmc, tag);
+ } else {
+ cmdq_writel(cq_host, status, CQIS);
+ }
+
+ if (status & CQIS_TCC) {
+ /* read CQTCN and complete the request */
+ comp_status = cmdq_readl(cq_host, CQTCN);
+ if (!comp_status)
+ goto out;
+ /*
+ * The CQTCN must be cleared before notifying req completion
+ * to upper layers to avoid missing completion notification
+ * of new requests with the same tag.
+ */
+ cmdq_writel(cq_host, comp_status, CQTCN);
+ /*
+ * A write memory barrier would normally be needed to
+ * guarantee that CQTCN is cleared before the next doorbell
+ * for the same tag is set, but the barrier before the
+ * doorbell write already achieves this, so none is needed
+ * here.
+ */
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: completing tag -> %lu\n",
+ mmc_hostname(mmc), tag);
+ MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
+ __func__, tag);
+ cmdq_finish_data(mmc, tag);
+ }
+ }
+
+ if (status & CQIS_HAC) {
+ if (cq_host->ops->post_cqe_halt)
+ cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
+ /* halt is completed, wakeup waiting thread */
+ complete(&cq_host->halt_comp);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cmdq_irq);
+
+/* cmdq_halt_poll - halt/unhalt the CQE using the polling method.
+ * @mmc: struct mmc_host
+ * @halt: bool halt
+ * This is used mainly from interrupt context to halt or unhalt
+ * the CQE engine.
+ */
+static int cmdq_halt_poll(struct mmc_host *mmc, bool halt)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int retries = 100;
+
+ if (!halt) {
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
+ CQCTL);
+ return 0;
+ }
+
+ cmdq_set_halt_irq(cq_host, false);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT, CQCTL);
+ while (retries) {
+ if (!(cmdq_readl(cq_host, CQCTL) & HALT)) {
+ udelay(5);
+ retries--;
+ continue;
+ } else {
+ if (cq_host->ops->post_cqe_halt)
+ cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc,
+ false);
+ mmc_host_set_halt(mmc);
+ break;
+ }
+ }
+ cmdq_set_halt_irq(cq_host, true);
+ return retries ? 0 : -ETIMEDOUT;
+}
+
+/* May sleep */
+static int cmdq_halt(struct mmc_host *mmc, bool halt)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ u32 ret = 0;
+ u32 config = 0;
+ int retries = 3;
+
+ cmdq_runtime_pm_get(cq_host);
+ if (halt) {
+ while (retries) {
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT,
+ CQCTL);
+ ret = wait_for_completion_timeout(&cq_host->halt_comp,
+ msecs_to_jiffies(HALT_TIMEOUT_MS));
+ if (!ret) {
+ pr_warn("%s: %s: HAC int timeout\n",
+ mmc_hostname(mmc), __func__);
+ if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
+ /*
+ * Don't retry if the CQE is halted but the
+ * irq did not trigger within the timeout.
+ * Since we are returning an error, un-halt
+ * the CQE. As the irq has not fired yet,
+ * there is no need to set other params.
+ */
+ retries = 0;
+ config = cmdq_readl(cq_host, CQCTL);
+ config &= ~HALT;
+ cmdq_writel(cq_host, config, CQCTL);
+ } else {
+ pr_warn("%s: %s: retryng halt (%d)\n",
+ mmc_hostname(mmc), __func__,
+ retries);
+ retries--;
+ continue;
+ }
+ } else {
+ MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
+ __func__, retries);
+ break;
+ }
+ }
+ ret = retries ? 0 : -ETIMEDOUT;
+ } else {
+ if (cq_host->ops->set_transfer_params)
+ cq_host->ops->set_transfer_params(mmc);
+ if (cq_host->ops->set_block_size)
+ cq_host->ops->set_block_size(mmc);
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+ MMC_TRACE(mmc, "%s: unhalt done\n", __func__);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
+ CQCTL);
+ }
+ cmdq_runtime_pm_put(cq_host);
+ return ret;
+}
+
+static void cmdq_post_req(struct mmc_host *mmc, int tag, int err)
+{
+ struct cmdq_host *cq_host;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ struct sdhci_host *sdhci_host = mmc_priv(mmc);
+
+ if (WARN_ON(!mmc))
+ return;
+
+ cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ mrq = get_req_by_tag(cq_host, tag);
+ data = mrq->data;
+
+ if (data) {
+ data->error = err;
+ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (err)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = blk_rq_bytes(mrq->req);
+
+ /* we're in atomic context (soft-irq) so unvote async. */
+ sdhci_msm_pm_qos_irq_unvote(sdhci_host, true);
+ cmdq_pm_qos_unvote(sdhci_host, mrq);
+ }
+}
+
+static void cmdq_dumpstate(struct mmc_host *mmc)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ cmdq_runtime_pm_get(cq_host);
+ cmdq_dumpregs(cq_host);
+ cmdq_runtime_pm_put(cq_host);
+}
+
+static int cmdq_late_init(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ /*
+ * TODO: This should basically move to something like "sdhci-cmdq-msm"
+ * for an msm-specific implementation.
+ */
+ sdhci_msm_pm_qos_irq_init(host);
+
+ if (msm_host->pdata->pm_qos_data.cmdq_valid)
+ sdhci_msm_pm_qos_cpu_init(host,
+ msm_host->pdata->pm_qos_data.cmdq_latency);
+ return 0;
+}
+
+static const struct mmc_cmdq_host_ops cmdq_host_ops = {
+ .init = cmdq_late_init,
+ .enable = cmdq_enable,
+ .disable = cmdq_disable,
+ .request = cmdq_request,
+ .post_req = cmdq_post_req,
+ .halt = cmdq_halt,
+ .reset = cmdq_reset,
+ .dumpstate = cmdq_dumpstate,
+};
+
+struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev)
+{
+ struct cmdq_host *cq_host;
+ struct resource *cmdq_memres = NULL;
+
+ /* check and setup CMDQ interface */
+ cmdq_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cmdq_mem");
+ if (!cmdq_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = kzalloc(sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ dev_err(&pdev->dev, "failed to allocate memory for CMDQ\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cmdq_memres->start,
+ resource_size(cmdq_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cmdq regs\n");
+ kfree(cq_host);
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cmdq_pltfm_init);
+
+int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err = 0;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cmdq_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cmdq_ops = &cmdq_host_ops;
+ mmc->num_cq_slots = NUM_SLOTS;
+ mmc->dcmd_cq_slot = DCMD_SLOT;
+
+ cq_host->mrq_slot = kzalloc(sizeof(cq_host->mrq_slot) *
+ cq_host->num_slots, GFP_KERNEL);
+ if (!cq_host->mrq_slot)
+ return -ENOMEM;
+
+ init_completion(&cq_host->halt_comp);
+ return err;
+}
+EXPORT_SYMBOL(cmdq_init);
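
Taken together, a host driver wires these two entry points up at probe time roughly as follows (a sketch; everything outside cmdq_pltfm_init() and cmdq_init() is hypothetical):

/* Hypothetical probe-time CMDQ setup in an SDHCI platform driver. */
static int my_sdhci_setup_cmdq(struct platform_device *pdev,
			       struct mmc_host *mmc, bool dma64)
{
	struct cmdq_host *cq_host;

	cq_host = cmdq_pltfm_init(pdev);	/* maps the "cmdq_mem" resource */
	if (IS_ERR(cq_host))
		return PTR_ERR(cq_host);

	/* installs cmdq_host_ops and allocates the per-tag mrq_slot array */
	return cmdq_init(cq_host, mmc, dma64);
}
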
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
new file mode 100644
index 0000000..5347b3ab
--- /dev/null
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQ_HCI_H
+#define LINUX_MMC_CQ_HCI_H
+#include <linux/mmc/core.h>
+
+/* registers */
+/* version */
+#define CQVER 0x00
+/* capabilities */
+#define CQCAP 0x04
+/* configuration */
+#define CQCFG 0x08
+#define CQ_DCMD 0x00001000
+#define CQ_TASK_DESC_SZ 0x00000100
+#define CQ_ENABLE 0x00000001
+
+/* control */
+#define CQCTL 0x0C
+#define CLEAR_ALL_TASKS 0x00000100
+#define HALT 0x00000001
+
+/* interrupt status */
+#define CQIS 0x10
+#define CQIS_HAC (1 << 0)
+#define CQIS_TCC (1 << 1)
+#define CQIS_RED (1 << 2)
+#define CQIS_TCL (1 << 3)
+
+/* interrupt status enable */
+#define CQISTE 0x14
+
+/* interrupt signal enable */
+#define CQISGE 0x18
+
+/* interrupt coalescing */
+#define CQIC 0x1C
+#define CQIC_ENABLE (1 << 31)
+#define CQIC_RESET (1 << 16)
+#define CQIC_ICCTHWEN (1 << 15)
+#define CQIC_ICCTH(x) ((x & 0x1F) << 8)
+#define CQIC_ICTOVALWEN (1 << 7)
+#define CQIC_ICTOVAL(x) (x & 0x7F)
+
+/* task list base address */
+#define CQTDLBA 0x20
+
+/* task list base address upper */
+#define CQTDLBAU 0x24
+
+/* door-bell */
+#define CQTDBR 0x28
+
+/* task completion notification */
+#define CQTCN 0x2C
+
+/* device queue status */
+#define CQDQS 0x30
+
+/* device pending tasks */
+#define CQDPT 0x34
+
+/* task clear */
+#define CQTCLR 0x38
+
+/* send status config 1 */
+#define CQSSC1 0x40
+/*
+ * Value n means CQE would send CMD13 during the transfer of data block
+ * BLOCK_CNT-n
+ */
+#define SEND_QSR_INTERVAL 0x70001
+
+/* send status config 2 */
+#define CQSSC2 0x44
+
+/* response for dcmd */
+#define CQCRDCT 0x48
+
+/* response mode error mask */
+#define CQRMEM 0x50
+#define CQ_EXCEPTION (1 << 6)
+
+/* task error info */
+#define CQTERRI 0x54
+
+/* CQTERRI bit fields */
+#define CQ_RMECI 0x1F
+#define CQ_RMETI (0x1F << 8)
+#define CQ_RMEFV (1 << 15)
+#define CQ_DTECI (0x3F << 16)
+#define CQ_DTETI (0x1F << 24)
+#define CQ_DTEFV (1 << 31)
+
+#define GET_CMD_ERR_TAG(__r__) ((__r__ & CQ_RMETI) >> 8)
+#define GET_DAT_ERR_TAG(__r__) ((__r__ & CQ_DTETI) >> 24)
+
+/* command response index */
+#define CQCRI 0x58
+
+/* command response argument */
+#define CQCRA 0x5C
+
+#define CQ_INT_ALL 0xF
+#define CQIC_DEFAULT_ICCTH 31
+#define CQIC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define VALID(x) ((x & 1) << 0)
+#define END(x) ((x & 1) << 1)
+#define INT(x) ((x & 1) << 2)
+#define ACT(x) ((x & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define FORCED_PROG(x) ((x & 1) << 6)
+#define CONTEXT(x) ((x & 0xF) << 7)
+#define DATA_TAG(x) ((x & 1) << 11)
+#define DATA_DIR(x) ((x & 1) << 12)
+#define PRIORITY(x) ((x & 1) << 13)
+#define QBAR(x) ((x & 1) << 14)
+#define REL_WRITE(x) ((x & 1) << 15)
+#define BLK_COUNT(x) ((x & 0xFFFF) << 16)
+#define BLK_ADDR(x) ((x & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CMD_INDEX(x) ((x & 0x3F) << 16)
+#define CMD_TIMING(x) ((x & 1) << 22)
+#define RESP_TYPE(x) ((x & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define DAT_LENGTH(x) ((x & 0xFFFF) << 16)
+#define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32)
+#define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0)
+
+#define CQ_VENDOR_CFG 0x100
+#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
+
+struct task_history {
+ u64 task;
+ bool is_dcmd;
+};
+
+struct cmdq_host {
+ const struct cmdq_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CMDQ_TASK_DESC_SZ_128 0x1
+
+ u32 quirks;
+#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+#define CMDQ_QUIRK_NO_DCMD 0x2
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+
+ u8 *desc_base;
+
+ /* total descriptor size */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQCFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct task_history *thist;
+ u8 thist_idx;
+
+ struct completion halt_comp;
+ struct mmc_request **mrq_slot;
+ void *private;
+};
+
+struct cmdq_host_ops {
+ void (*set_transfer_params)(struct mmc_host *mmc);
+ void (*set_data_timeout)(struct mmc_host *mmc, u32 val);
+ void (*clear_set_irqs)(struct mmc_host *mmc, bool clear);
+ void (*set_block_size)(struct mmc_host *mmc);
+ void (*dump_vendor_regs)(struct mmc_host *mmc);
+ void (*write_l)(struct cmdq_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cmdq_host *host, int reg);
+ void (*clear_set_dumpregs)(struct mmc_host *mmc, bool set);
+ void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
+ int (*reset)(struct mmc_host *mmc);
+ void (*post_cqe_halt)(struct mmc_host *mmc);
+};
+
+static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops && host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cmdq_readl(struct cmdq_host *host, int reg)
+{
+ if (unlikely(host->ops && host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
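
The accessors above give vendor hooks first claim on every register access; a hypothetical override could, for example, promote doorbell writes to strongly-ordered I/O while leaving everything else relaxed:

/* Hypothetical vendor write hook: order doorbell writes, relax the rest. */
static void my_cmdq_write_l(struct cmdq_host *host, u32 val, int reg)
{
	if (reg == CQTDBR)
		writel(val, host->mmio + reg);		/* ordered write */
	else
		writel_relaxed(val, host->mmio + reg);
}
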
+
+extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
+extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
+ bool dma64);
+extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
+#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 7123ef9..445fc47 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -830,6 +830,7 @@
switch (uhs) {
case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_DDR50:
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
new file mode 100644
index 0000000..ba6e51c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdhci-msm-ice.h"
+
+static void sdhci_msm_ice_success_cb(void *host_ctrl,
+ enum ice_event_completion evt)
+{
+ struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
+
+ if ((msm_host->ice.state == SDHCI_MSM_ICE_STATE_DISABLED &&
+ evt == ICE_INIT_COMPLETION) || (msm_host->ice.state ==
+ SDHCI_MSM_ICE_STATE_SUSPENDED && evt == ICE_RESUME_COMPLETION))
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+
+ complete(&msm_host->ice.async_done);
+}
+
+static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error)
+{
+ struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
+
+ dev_err(&msm_host->pdev->dev, "%s: Error in ice operation 0x%x",
+ __func__, error);
+
+ if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE)
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
+
+ complete(&msm_host->ice.async_done);
+}
+
+static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev)
+{
+ struct device_node *node;
+ struct platform_device *ice_pdev = NULL;
+
+ node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
+ if (!node) {
+ dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
+ __func__);
+ goto out;
+ }
+ ice_pdev = qcom_ice_get_pdevice(node);
+out:
+ return ice_pdev;
+}
+
+static
+struct qcom_ice_variant_ops *sdhci_msm_ice_get_vops(struct device *dev)
+{
+ struct qcom_ice_variant_ops *ice_vops = NULL;
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
+ if (!node) {
+ dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
+ __func__);
+ goto out;
+ }
+ ice_vops = qcom_ice_get_variant_ops(node);
+ of_node_put(node);
+out:
+ return ice_vops;
+}
+
+static
+void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u32 config = 0;
+ u32 ice_cap = 0;
+
+ /*
+ * Enable the cryptographic support inside SDHC.
+ * This is a global config which needs to be enabled
+ * all the time.
+	 * Only when it is enabled will the ICE_HCI capability
+	 * be reflected in the CQCAP register.
+ */
+ config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+ if (enable)
+ config &= ~DISABLE_CRYPTO;
+ else
+ config |= DISABLE_CRYPTO;
+ writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+ /*
+	 * The CQCAP register is in a different register space from the
+	 * ICE global-enable register above, so an mb() is required to
+	 * ensure the above write completes before CQCAP is read.
+ */
+ mb();
+
+ /*
+ * Check if ICE HCI capability support is present
+ * If present, enable it.
+ */
+ ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES);
+ if (ice_cap & ICE_HCI_SUPPORT) {
+ config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG);
+
+ if (enable)
+ config |= CRYPTO_GENERAL_ENABLE;
+ else
+ config &= ~CRYPTO_GENERAL_ENABLE;
+ writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG);
+ }
+}
+
+int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+ struct device *sdhc_dev;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (!msm_host || !msm_host->pdev) {
+ pr_err("%s: invalid msm_host %p or msm_host->pdev\n",
+ __func__, msm_host);
+ return -EINVAL;
+ }
+
+ sdhc_dev = &msm_host->pdev->dev;
+ msm_host->ice.vops = sdhci_msm_ice_get_vops(sdhc_dev);
+ msm_host->ice.pdev = sdhci_msm_ice_get_pdevice(sdhc_dev);
+
+ if (msm_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
+ dev_err(sdhc_dev, "%s: ICE device not probed yet\n",
+ __func__);
+ msm_host->ice.pdev = NULL;
+ msm_host->ice.vops = NULL;
+ return -EPROBE_DEFER;
+ }
+
+ if (!msm_host->ice.pdev) {
+ dev_dbg(sdhc_dev, "%s: invalid platform device\n", __func__);
+ msm_host->ice.vops = NULL;
+ return -ENODEV;
+ }
+ if (!msm_host->ice.vops) {
+ dev_dbg(sdhc_dev, "%s: invalid ice vops\n", __func__);
+ msm_host->ice.pdev = NULL;
+ return -ENODEV;
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
+ return 0;
+}
+
+static
+int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host)
+{
+ struct resource *ice_memres = NULL;
+ struct platform_device *pdev = msm_host->pdev;
+ int err = 0;
+
+ if (!msm_host->ice_hci_support)
+ goto out;
+ /*
+ * ICE HCI registers are present in cmdq register space.
+ * So map the cmdq mem for accessing ICE HCI registers.
+ */
+ ice_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cmdq_mem");
+ if (!ice_memres) {
+ dev_err(&pdev->dev, "Failed to get iomem resource for ice\n");
+ err = -EINVAL;
+ goto out;
+ }
+ msm_host->cryptoio = devm_ioremap(&pdev->dev,
+ ice_memres->start,
+ resource_size(ice_memres));
+ if (!msm_host->cryptoio) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ err = -ENOMEM;
+ }
+out:
+ return err;
+}
+
+int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ init_completion(&msm_host->ice.async_done);
+	if (msm_host->ice.vops->init) {
+ err = msm_host->ice.vops->init(msm_host->ice.pdev,
+ msm_host,
+ sdhci_msm_ice_success_cb,
+ sdhci_msm_ice_error_cb);
+ if (err) {
+ pr_err("%s: ice init err %d\n",
+ mmc_hostname(host->mmc), err);
+ return err;
+ }
+ }
+
+ if (!wait_for_completion_timeout(&msm_host->ice.async_done,
+ msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
+ pr_err("%s: ice init timedout after %d ms\n",
+ mmc_hostname(host->mmc),
+ SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
+ sdhci_msm_ice_print_regs(host);
+ return -ETIMEDOUT;
+ }
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ struct ice_data_setting ice_set;
+ sector_t lba = 0;
+ unsigned int ctrl_info_val = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ BUG_ON(!mrq);
+ memset(&ice_set, 0, sizeof(struct ice_data_setting));
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ if (msm_host->ice.vops->config) {
+ err = msm_host->ice.vops->config(msm_host->ice.pdev,
+ req, &ice_set);
+ if (err) {
+ pr_err("%s: ice config failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return err;
+ }
+ }
+ /* if writing data command */
+ if (rq_data_dir(req) == WRITE)
+ bypass = ice_set.encr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ /* if reading data command */
+ else if (rq_data_dir(req) == READ)
+ bypass = ice_set.decr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ pr_debug("%s: %s: slot %d encr_bypass %d bypass %d decr_bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, ice_set.encr_bypass, bypass,
+ ice_set.decr_bypass,
+ ice_set.crypto_data.key_index);
+ }
+
+ /* Configure ICE index */
+ ctrl_info_val =
+ (ice_set.crypto_data.key_index &
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
+ << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
+
+ /* Configure data unit size of transfer request */
+ ctrl_info_val |=
+ (SDHCI_MSM_ICE_TR_DATA_UNIT_512_B &
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU)
+ << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU;
+
+ /* Configure ICE bypass mode */
+ ctrl_info_val |=
+ (bypass & MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS)
+ << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS;
+
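+	/*
+	 * Given the masks/offsets in sdhci-msm-ice.h, CTRL_INFO_3 packs:
+	 * bit 0 = bypass, bits [5:1] = key index, bits [8:6] = data unit
+	 * size. CTRL_INFO_1/2 carry the low/high 32 bits of the start LBA.
+	 */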
+ writel_relaxed((lba & 0xFFFFFFFF),
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n + 16 * slot);
+ writel_relaxed(((lba >> 32) & 0xFFFFFFFF),
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
+ writel_relaxed(ctrl_info_val,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+
+ /* Ensure ICE registers are configured before issuing SDHCI request */
+ mb();
+ return 0;
+}
+
+int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state before reset %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ init_completion(&msm_host->ice.async_done);
+
+ if (msm_host->ice.vops->reset) {
+ err = msm_host->ice.vops->reset(msm_host->ice.pdev);
+ if (err) {
+ pr_err("%s: ice reset failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return err;
+ }
+ }
+
+ if (!wait_for_completion_timeout(&msm_host->ice.async_done,
+ msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
+ pr_err("%s: ice reset timedout after %d ms\n",
+ mmc_hostname(host->mmc),
+ SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
+ sdhci_msm_ice_print_regs(host);
+ return -ETIMEDOUT;
+ }
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state after reset %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.state !=
+ SDHCI_MSM_ICE_STATE_SUSPENDED) {
+ pr_err("%s: ice is in invalid state before resume %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ init_completion(&msm_host->ice.async_done);
+
+ if (msm_host->ice.vops->resume) {
+ err = msm_host->ice.vops->resume(msm_host->ice.pdev);
+ if (err) {
+ pr_err("%s: ice resume failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return err;
+ }
+ }
+
+ if (!wait_for_completion_timeout(&msm_host->ice.async_done,
+ msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
+ pr_err("%s: ice resume timedout after %d ms\n",
+ mmc_hostname(host->mmc),
+ SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
+ sdhci_msm_ice_print_regs(host);
+ return -ETIMEDOUT;
+ }
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state after resume %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.state !=
+ SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state before resume %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (msm_host->ice.vops->suspend) {
+ err = msm_host->ice.vops->suspend(msm_host->ice.pdev);
+ if (err) {
+ pr_err("%s: ice suspend failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return -EINVAL;
+ }
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_SUSPENDED;
+ return 0;
+}
+
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int stat = -EINVAL;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (msm_host->ice.vops->status) {
+ *ice_status = 0;
+ stat = msm_host->ice.vops->status(msm_host->ice.pdev);
+ if (stat < 0) {
+ pr_err("%s: ice get sts failed %d\n",
+ mmc_hostname(host->mmc), stat);
+ return -EINVAL;
+ }
+ *ice_status = stat;
+ }
+ return 0;
+}
+
+void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (msm_host->ice.vops->debug)
+ msm_host->ice.vops->debug(msm_host->ice.pdev);
+}
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
new file mode 100644
index 0000000..88ef0e2
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_MSM_ICE_H__
+#define __SDHCI_MSM_ICE_H__
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <crypto/ice.h>
+
+#include "sdhci-msm.h"
+
+#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto"
+/* Timeout waiting for ICE initialization, which requires TZ access */
+#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS 500
+
+/*
+ * SDHCI host controller ICE registers. There are 32 instances
+ * (n = 0..31) of each of these registers.
+ */
+#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS 32
+
+#define CORE_VENDOR_SPEC_ICE_CTRL 0x300
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n 0x304
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n 0x308
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n 0x30C
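+
+/*
+ * Each of the 32 key slots has its own INFO_{1,2,3} triplet: slot s is
+ * addressed at CORE_VENDOR_SPEC_ICE_CTRL_INFO_x_n + 16 * s (see
+ * sdhci_msm_ice_cfg()).
+ */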
+
+/* ICE 3.0 registers added in the cmdq register space */
+#define ICE_CQ_CAPABILITIES 0x04
+#define ICE_HCI_SUPPORT (1 << 28)
+#define ICE_CQ_CONFIG 0x08
+#define CRYPTO_GENERAL_ENABLE (1 << 1)
+
+/* ICE 3.0 registers added in the HC register space */
+#define HC_VENDOR_SPECIFIC_FUNC4 0x260
+#define DISABLE_CRYPTO (1 << 15)
+#define HC_VENDOR_SPECIFIC_ICE_CTRL 0x800
+#define ICE_SW_RST_EN (1 << 0)
+
+/* SDHCI MSM ICE CTRL Info register offset */
+enum {
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0,
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1,
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x6,
+};
+
+/* SDHCI MSM ICE CTRL Info register masks */
+enum {
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0x1,
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1F,
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x7,
+};
+
+/* SDHCI MSM ICE encryption/decryption bypass state */
+enum {
+ SDHCI_MSM_ICE_DISABLE_BYPASS = 0,
+ SDHCI_MSM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* SDHCI MSM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum {
+ SDHCI_MSM_ICE_TR_DATA_UNIT_512_B = 0,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB = 1,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB = 2,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB = 3,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB = 4,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB = 5,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB = 6,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB = 7,
+};
+
+/* SDHCI MSM ICE internal state */
+enum {
+ SDHCI_MSM_ICE_STATE_DISABLED = 0,
+ SDHCI_MSM_ICE_STATE_ACTIVE = 1,
+ SDHCI_MSM_ICE_STATE_SUSPENDED = 2,
+};
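+
+/*
+ * State flow (see sdhci-msm-ice.c): DISABLED -> ACTIVE on successful init,
+ * ACTIVE -> SUSPENDED on suspend, SUSPENDED -> ACTIVE on resume; the error
+ * callback drops an ACTIVE engine back to DISABLED.
+ */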
+
+/* crypto context fields in cmdq data command task descriptor */
+#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0)
+#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32)
+#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47)
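+
+/*
+ * Illustrative only (not part of this patch): a data command task
+ * descriptor would carry, e.g., DATA_UNIT_NUM(lba) |
+ * CRYPTO_CONFIG_INDEX(slot) | CRYPTO_ENABLE(1) in these fields.
+ */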
+
+#ifdef CONFIG_MMC_SDHCI_MSM_ICE
+int sdhci_msm_ice_get_dev(struct sdhci_host *host);
+int sdhci_msm_ice_init(struct sdhci_host *host);
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot);
+int sdhci_msm_ice_reset(struct sdhci_host *host);
+int sdhci_msm_ice_resume(struct sdhci_host *host);
+int sdhci_msm_ice_suspend(struct sdhci_host *host);
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status);
+void sdhci_msm_ice_print_regs(struct sdhci_host *host);
+#else
+inline int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (msm_host) {
+ msm_host->ice.pdev = NULL;
+ msm_host->ice.vops = NULL;
+ }
+ return -ENODEV;
+}
+inline int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+ return 0;
+}
+inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot)
+{
+ return 0;
+}
+inline int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+ return 0;
+}
+inline int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+ return 0;
+}
+inline int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+ return 0;
+}
+inline int sdhci_msm_ice_get_status(struct sdhci_host *host,
+ int *ice_status)
+{
+ return 0;
+}
+inline void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+ return;
+}
+#endif /* CONFIG_MMC_SDHCI_MSM_ICE */
+#endif /* __SDHCI_MSM_ICE_H__ */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 90ed2e1..fe62b69 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1,7 +1,8 @@
/*
- * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
+ * driver source file
*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,112 +16,500 @@
*/
#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_runtime.h>
+#include <trace/events/mmc.h>
-#include "sdhci-pltfm.h"
+#include "sdhci-msm.h"
+#include "cmdq_hci.h"
-#define CORE_MCI_VERSION 0x50
+#define QOS_REMOVE_DELAY_MS 10
+#define CORE_POWER 0x0
+#define CORE_SW_RST (1 << 7)
+
+#define SDHCI_VER_100 0x2B
+
+#define CORE_VERSION_STEP_MASK 0x0000FFFF
+#define CORE_VERSION_MINOR_MASK 0x0FFF0000
+#define CORE_VERSION_MINOR_SHIFT 16
+#define CORE_VERSION_MAJOR_MASK 0xF0000000
#define CORE_VERSION_MAJOR_SHIFT 28
-#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
-#define CORE_VERSION_MINOR_MASK 0xff
+#define CORE_VERSION_TARGET_MASK 0x000000FF
+#define SDHCI_MSM_VER_420 0x49
+
+#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
+
#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
-#define CORE_POWER 0x0
-#define CORE_SW_RST BIT(7)
+#define FF_CLK_SW_RST_DIS (1 << 13)
-#define CORE_PWRCTL_STATUS 0xdc
-#define CORE_PWRCTL_MASK 0xe0
-#define CORE_PWRCTL_CLEAR 0xe4
-#define CORE_PWRCTL_CTL 0xe8
-#define CORE_PWRCTL_BUS_OFF BIT(0)
-#define CORE_PWRCTL_BUS_ON BIT(1)
-#define CORE_PWRCTL_IO_LOW BIT(2)
-#define CORE_PWRCTL_IO_HIGH BIT(3)
-#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
-#define CORE_PWRCTL_IO_SUCCESS BIT(2)
-#define REQ_BUS_OFF BIT(0)
-#define REQ_BUS_ON BIT(1)
-#define REQ_IO_LOW BIT(2)
-#define REQ_IO_HIGH BIT(3)
-#define INT_MASK 0xf
+#define CORE_PWRCTL_BUS_OFF 0x01
+#define CORE_PWRCTL_BUS_ON (1 << 1)
+#define CORE_PWRCTL_IO_LOW (1 << 2)
+#define CORE_PWRCTL_IO_HIGH (1 << 3)
+
+#define CORE_PWRCTL_BUS_SUCCESS 0x01
+#define CORE_PWRCTL_BUS_FAIL (1 << 1)
+#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
+#define CORE_PWRCTL_IO_FAIL (1 << 3)
+
+#define INT_MASK 0xF
#define MAX_PHASES 16
-#define CORE_DLL_LOCK BIT(7)
-#define CORE_DLL_EN BIT(16)
-#define CORE_CDR_EN BIT(17)
-#define CORE_CK_OUT_EN BIT(18)
-#define CORE_CDR_EXT_EN BIT(19)
-#define CORE_DLL_PDN BIT(29)
-#define CORE_DLL_RST BIT(30)
-#define CORE_DLL_CONFIG 0x100
-#define CORE_DLL_STATUS 0x108
-#define CORE_VENDOR_SPEC 0x10c
-#define CORE_CLK_PWRSAVE BIT(1)
+#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
+#define CORE_DLL_EN (1 << 16)
+#define CORE_CDR_EN (1 << 17)
+#define CORE_CK_OUT_EN (1 << 18)
+#define CORE_CDR_EXT_EN (1 << 19)
+#define CORE_DLL_PDN (1 << 29)
+#define CORE_DLL_RST (1 << 30)
-#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
+#define CORE_DLL_LOCK (1 << 7)
+#define CORE_DDR_DLL_LOCK (1 << 11)
-#define CDR_SELEXT_SHIFT 20
-#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
-#define CMUX_SHIFT_PHASE_SHIFT 24
-#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
+#define CORE_CLK_PWRSAVE (1 << 1)
+#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
+#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
+#define CORE_HC_MCLK_SEL_MASK (3 << 8)
+#define CORE_HC_AUTO_CMD21_EN (1 << 6)
+#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
+#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
+#define CORE_HC_SELECT_IN_EN (1 << 18)
+#define CORE_HC_SELECT_IN_HS400 (6 << 19)
+#define CORE_HC_SELECT_IN_MASK (7 << 19)
+#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
-struct sdhci_msm_host {
- struct platform_device *pdev;
- void __iomem *core_mem; /* MSM SDCC mapped address */
- int pwr_irq; /* power irq */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
- struct clk *bus_clk; /* SDHC bus voter clock */
- struct mmc_host *mmc;
+#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
+#define HC_SW_RST_REQ (1 << 21)
+#define CORE_ONE_MID_EN (1 << 25)
+
+#define CORE_8_BIT_SUPPORT (1 << 18)
+#define CORE_3_3V_SUPPORT (1 << 24)
+#define CORE_3_0V_SUPPORT (1 << 25)
+#define CORE_1_8V_SUPPORT (1 << 26)
+#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
+
+#define CORE_CSR_CDC_CTLR_CFG0 0x130
+#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
+#define CORE_HW_AUTOCAL_ENA (1 << 17)
+
+#define CORE_CSR_CDC_CTLR_CFG1 0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
+#define CORE_TIMER_ENA (1 << 16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
+#define CORE_CDC_OFFSET_CFG 0x14C
+#define CORE_CSR_CDC_DELAY_CFG 0x150
+#define CORE_CDC_SLAVE_DDA_CFG 0x160
+#define CORE_CSR_CDC_STATUS0 0x164
+#define CORE_CALIBRATION_DONE (1 << 0)
+
+#define CORE_CDC_ERROR_CODE_MASK 0x7000000
+
+#define CQ_CMD_DBG_RAM 0x110
+#define CQ_CMD_DBG_RAM_WA 0x150
+#define CQ_CMD_DBG_RAM_OL 0x154
+
+#define CORE_CSR_CDC_GEN_CFG 0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
+#define CORE_CDC_SWITCH_RC_EN (1 << 1)
+
+#define CORE_CDC_T4_DLY_SEL (1 << 0)
+#define CORE_CMDIN_RCLK_EN (1 << 1)
+#define CORE_START_CDC_TRAFFIC (1 << 6)
+
+#define CORE_PWRSAVE_DLL (1 << 3)
+#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
+
+#define CORE_DDR_CAL_EN (1 << 0)
+#define CORE_FLL_CYCLE_CNT (1 << 18)
+#define CORE_DLL_CLOCK_DISABLE (1 << 21)
+
+#define DDR_CONFIG_POR_VAL 0x80040853
+#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
+#define DDR_CONFIG_PRG_RCLK_DLY 115
+#define DDR_CONFIG_2_POR_VAL 0x80040873
+
+/* 512 descriptors */
+#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
+#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
+
+#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
+#define TCXO_FREQ 19200000
+
+#define INVALID_TUNING_PHASE -1
+#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+
+#define NUM_TUNING_PHASES 16
+#define MAX_DRV_TYPES_SUPPORTED_HS200 4
+#define MSM_AUTOSUSPEND_DELAY_MS 100
+
+struct sdhci_msm_offset {
+ u32 CORE_MCI_DATA_CNT;
+ u32 CORE_MCI_STATUS;
+ u32 CORE_MCI_FIFO_CNT;
+ u32 CORE_MCI_VERSION;
+ u32 CORE_GENERICS;
+ u32 CORE_TESTBUS_CONFIG;
+ u32 CORE_TESTBUS_SEL2_BIT;
+ u32 CORE_TESTBUS_ENA;
+ u32 CORE_TESTBUS_SEL2;
+ u32 CORE_PWRCTL_STATUS;
+ u32 CORE_PWRCTL_MASK;
+ u32 CORE_PWRCTL_CLEAR;
+ u32 CORE_PWRCTL_CTL;
+ u32 CORE_SDCC_DEBUG_REG;
+ u32 CORE_DLL_CONFIG;
+ u32 CORE_DLL_STATUS;
+ u32 CORE_VENDOR_SPEC;
+ u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
+ u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
+ u32 CORE_VENDOR_SPEC_FUNC2;
+ u32 CORE_VENDOR_SPEC_CAPABILITIES0;
+ u32 CORE_DDR_200_CFG;
+ u32 CORE_VENDOR_SPEC3;
+ u32 CORE_DLL_CONFIG_2;
+ u32 CORE_DDR_CONFIG;
+ u32 CORE_DDR_CONFIG_2;
};
-/* Platform specific tuning */
-static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
-{
- u32 wait_cnt = 50;
- u8 ck_out_en;
- struct mmc_host *mmc = host->mmc;
+struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
+ .CORE_MCI_DATA_CNT = 0x35C,
+ .CORE_MCI_STATUS = 0x324,
+ .CORE_MCI_FIFO_CNT = 0x308,
+ .CORE_MCI_VERSION = 0x318,
+ .CORE_GENERICS = 0x320,
+ .CORE_TESTBUS_CONFIG = 0x32C,
+ .CORE_TESTBUS_SEL2_BIT = 3,
+ .CORE_TESTBUS_ENA = (1 << 31),
+ .CORE_TESTBUS_SEL2 = (1 << 3),
+ .CORE_PWRCTL_STATUS = 0x240,
+ .CORE_PWRCTL_MASK = 0x244,
+ .CORE_PWRCTL_CLEAR = 0x248,
+ .CORE_PWRCTL_CTL = 0x24C,
+ .CORE_SDCC_DEBUG_REG = 0x358,
+ .CORE_DLL_CONFIG = 0x200,
+ .CORE_DLL_STATUS = 0x208,
+ .CORE_VENDOR_SPEC = 0x20C,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
+ .CORE_VENDOR_SPEC_FUNC2 = 0x210,
+ .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
+ .CORE_DDR_200_CFG = 0x224,
+ .CORE_VENDOR_SPEC3 = 0x250,
+ .CORE_DLL_CONFIG_2 = 0x254,
+ .CORE_DDR_CONFIG = 0x258,
+ .CORE_DDR_CONFIG_2 = 0x25C,
+};
- /* Poll for CK_OUT_EN bit. max. poll time = 50us */
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
+ .CORE_MCI_DATA_CNT = 0x30,
+ .CORE_MCI_STATUS = 0x34,
+ .CORE_MCI_FIFO_CNT = 0x44,
+ .CORE_MCI_VERSION = 0x050,
+ .CORE_GENERICS = 0x70,
+ .CORE_TESTBUS_CONFIG = 0x0CC,
+ .CORE_TESTBUS_SEL2_BIT = 4,
+ .CORE_TESTBUS_ENA = (1 << 3),
+ .CORE_TESTBUS_SEL2 = (1 << 4),
+ .CORE_PWRCTL_STATUS = 0xDC,
+ .CORE_PWRCTL_MASK = 0xE0,
+ .CORE_PWRCTL_CLEAR = 0xE4,
+ .CORE_PWRCTL_CTL = 0xE8,
+ .CORE_SDCC_DEBUG_REG = 0x124,
+ .CORE_DLL_CONFIG = 0x100,
+ .CORE_DLL_STATUS = 0x108,
+ .CORE_VENDOR_SPEC = 0x10C,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
+ .CORE_VENDOR_SPEC_FUNC2 = 0x110,
+ .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
+ .CORE_DDR_200_CFG = 0x184,
+ .CORE_VENDOR_SPEC3 = 0x1B0,
+ .CORE_DLL_CONFIG_2 = 0x1B4,
+ .CORE_DDR_CONFIG = 0x1B8,
+ .CORE_DDR_CONFIG_2 = 0x1BC,
+};
+
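+/*
+ * Register accessors: when the legacy MCI block is removed, the core
+ * registers live in the HC space (host->ioaddr); otherwise they are in
+ * the separate core_mem region.
+ */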
+u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ return readb_relaxed(base_addr + offset);
+}
+
+u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ return readl_relaxed(base_addr + offset);
+}
+
+void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ writeb_relaxed(val, base_addr + offset);
+}
+
+void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ writel_relaxed(val, base_addr + offset);
+}
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
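+/*
+ * Standard CMD19/CMD21 tuning patterns: 64 bytes for a 4-bit bus,
+ * 128 bytes for an 8-bit bus.
+ */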
+static const u32 tuning_block_64[] = {
+ 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
+ 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
+ 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
+ 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
+};
+
+static const u32 tuning_block_128[] = {
+ 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
+ 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
+ 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
+ 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
+ 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
+ 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
+ 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
+ 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
+};
+
+/* global to hold each slot instance for debug */
+static struct sdhci_msm_host *sdhci_slot[2];
+
+static int disable_slots;
+/* root can write, others read */
+module_param(disable_slots, int, S_IRUGO|S_IWUSR);
+
+static bool nocmdq;
+module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
+
+enum vdd_io_level {
+ /* set vdd_io_data->low_vol_level */
+ VDD_IO_LOW,
+ /* set vdd_io_data->high_vol_level */
+ VDD_IO_HIGH,
+ /*
+	 * set to whatever is passed in voltage_level (the third
+	 * argument) of sdhci_msm_set_vdd_io_vol().
+ */
+ VDD_IO_SET_LEVEL,
+};
+
+/* MSM platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
+ u8 poll)
+{
+ int rc = 0;
+ u32 wait_cnt = 50;
+ u8 ck_out_en = 0;
+ struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ /* poll for CK_OUT_EN bit. max. poll time = 50us */
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
while (ck_out_en != poll) {
if (--wait_cnt == 0) {
- dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
- mmc_hostname(mmc), poll);
- return -ETIMEDOUT;
+ pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
+ mmc_hostname(mmc), __func__, poll);
+ rc = -ETIMEDOUT;
+ goto out;
}
udelay(1);
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
}
+out:
+ return rc;
+}
- return 0;
+/*
+ * Enable CDR to track changes of DAT lines and adjust sampling
+ * point according to voltage/temperature variations
+ */
+static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
+{
+ int rc = 0;
+ u32 config;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ config |= CORE_CDR_EN;
+ config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ rc = msm_dll_poll_ck_out_en(host, 0);
+ if (rc)
+ goto err;
+
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+ rc = msm_dll_poll_ck_out_en(host, 1);
+ if (rc)
+ goto err;
+ goto out;
+err:
+ pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
+out:
+ return rc;
+}
+
+static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u32 tmp;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &tmp)) {
+ spin_lock_irqsave(&host->lock, flags);
+ msm_host->en_auto_cmd21 = !!tmp;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
+static ssize_t show_auto_cmd21(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
+}
+
+/* MSM auto-tuning handler */
+static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
+ bool enable,
+ u32 type)
+{
+ int rc = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 val = 0;
+
+ if (!msm_host->en_auto_cmd21)
+ return 0;
+
+ if (type == MMC_SEND_TUNING_BLOCK_HS200)
+ val = CORE_HC_AUTO_CMD21_EN;
+ else
+ return 0;
+
+ if (enable) {
+ rc = msm_enable_cdr_cm_sdc4_dll(host);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) | val,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+ } else {
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & ~val,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+ }
+ return rc;
}
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
- int rc;
- static const u8 grey_coded_phase_table[] = {
- 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
- 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
- };
+ int rc = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+ 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
+ 0x8};
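+	/*
+	 * 4-bit Gray code: consecutive phases change a single bit of the
+	 * CDR_SELEXT field (e.g. phase 3 -> 0x2, phase 4 -> 0x6).
+	 */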
unsigned long flags;
u32 config;
struct mmc_host *mmc = host->mmc;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
spin_lock_irqsave(&host->lock, flags);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
rc = msm_dll_poll_ck_out_en(host, 0);
@@ -131,31 +520,36 @@
* Write the selected DLL clock output phase (0 ... 15)
* to CDR_SELEXT bit field of DLL_CONFIG register.
*/
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
- config &= ~CDR_SELEXT_MASK;
- config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~(0xF << 20))
+ | (grey_coded_phase_table[phase] << 20)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
rc = msm_dll_poll_ck_out_en(host, 1);
if (rc)
goto err_out;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config |= CORE_CDR_EN;
config &= ~CORE_CDR_EXT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
goto out;
err_out:
- dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
- mmc_hostname(mmc), phase);
+ pr_err("%s: %s: Failed to set DLL phase: %d\n",
+ mmc_hostname(mmc), __func__, phase);
out:
spin_unlock_irqrestore(&host->lock, flags);
+ pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
return rc;
}
@@ -163,26 +557,27 @@
 * Find out the greatest range of consecutive selected
* DLL clock output phases that can be used as sampling
* setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
* Select the 3/4 of the range and configure the DLL with the
* selected DLL clock output phase.
*/
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
- u8 *phase_table, u8 total_phases)
+ u8 *phase_table, u8 total_phases)
{
int ret;
u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
- u8 phases_per_row[MAX_PHASES] = { 0 };
+ u8 phases_per_row[MAX_PHASES] = {0};
int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
bool phase_0_found = false, phase_15_found = false;
struct mmc_host *mmc = host->mmc;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
if (!total_phases || (total_phases > MAX_PHASES)) {
- dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
- mmc_hostname(mmc), total_phases);
+ pr_err("%s: %s: invalid argument: total_phases=%d\n",
+ mmc_hostname(mmc), __func__, total_phases);
return -EINVAL;
}
@@ -240,7 +635,7 @@
i = phases_15;
for (cnt = 0; cnt < phases_0; cnt++) {
ranges[phase_15_raw_index][i] =
- ranges[phase_0_raw_index][cnt];
+ ranges[phase_0_raw_index][cnt];
if (++i >= MAX_PHASES)
break;
}
@@ -256,24 +651,29 @@
}
}
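+	/*
+	 * Pick the phase at the 3/4 point of the longest passing window;
+	 * e.g. a 12-phase window gives i = 9 - 1 = 8, i.e. its 9th phase.
+	 */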
- i = (curr_max * 3) / 4;
+ i = ((curr_max * 3) / 4);
if (i)
i--;
- ret = ranges[selected_row_index][i];
+ ret = (int)ranges[selected_row_index][i];
if (ret >= MAX_PHASES) {
ret = -EINVAL;
- dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
- mmc_hostname(mmc), ret);
+ pr_err("%s: %s: invalid phase selected=%d\n",
+ mmc_hostname(mmc), __func__, ret);
}
+ pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
return ret;
}
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
- u32 mclk_freq = 0, config;
+ u32 mclk_freq = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
/* Program the MCLK value to MCLK_FREQ bit field */
if (host->clock <= 112000000)
@@ -293,117 +693,622 @@
else if (host->clock <= 200000000)
mclk_freq = 7;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
- config &= ~CMUX_SHIFT_PHASE_MASK;
- config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
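+	/* MCLK_FREQ occupies DLL_CONFIG bits [26:24], hence the (7 << 24) field */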
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~(7 << 24)) | (mclk_freq << 24)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}
-/* Initialize the DLL (Programmable Delay Line) */
+/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
struct mmc_host *mmc = host->mmc;
- int wait_cnt = 50;
+ int rc = 0;
unsigned long flags;
+ u32 wait_cnt;
+ bool prev_pwrsave, curr_pwrsave;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
spin_lock_irqsave(&host->lock, flags);
-
+ prev_pwrsave = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+ curr_pwrsave = prev_pwrsave;
/*
* Make sure that clock is always enabled when DLL
* tuning is in progress. Keeping PWRSAVE ON may
- * turn off the clock.
+ * turn off the clock. So let's disable the PWRSAVE
+ * here and re-enable it once tuning is completed.
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+ if (prev_pwrsave) {
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ curr_pwrsave = false;
+ }
+
+ if (msm_host->use_updated_dll_reset) {
+ /* Disable the DLL clock */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~CORE_CK_OUT_EN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2);
+ }
/* Write 1 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
msm_cm_dll_set_freq(host);
+ if (msm_host->use_updated_dll_reset) {
+ u32 mclk_freq = 0;
+
+ if ((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & CORE_FLL_CYCLE_CNT))
+ mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
+ else
+ mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
+
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & ~(0xFF << 10)) | (mclk_freq << 10)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+ /* wait for 5us before enabling DLL clock */
+ udelay(5);
+ }
+
/* Write 0 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+ if (msm_host->use_updated_dll_reset) {
+ msm_cm_dll_set_freq(host);
+ /* Enable the DLL clock */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2);
+ }
/* Set DLL_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Set CK_OUT_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ wait_cnt = 50;
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
- while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
- CORE_DLL_LOCK)) {
+ while (!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
 		/* max. wait for 50us for the LOCK bit to be set */
if (--wait_cnt == 0) {
- dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
- mmc_hostname(mmc));
- spin_unlock_irqrestore(&host->lock, flags);
- return -ETIMEDOUT;
+ pr_err("%s: %s: DLL failed to LOCK\n",
+ mmc_hostname(mmc), __func__);
+ rc = -ETIMEDOUT;
+ goto out;
}
+ /* wait for 1us before polling again */
udelay(1);
}
+out:
+ /* Restore the correct PWRSAVE state */
+ if (prev_pwrsave ^ curr_pwrsave) {
+ u32 reg = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+
+ if (prev_pwrsave)
+ reg |= CORE_CLK_PWRSAVE;
+ else
+ reg &= ~CORE_CLK_PWRSAVE;
+
+ writel_relaxed(reg, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ }
+
spin_unlock_irqrestore(&host->lock, flags);
- return 0;
+ pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
+ return rc;
}
-static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
+ u32 calib_done;
+ int ret = 0;
+ int cdc_err = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ & ~CORE_CDC_T4_DLY_SEL),
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+ /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+ & ~CORE_CDC_SWITCH_BYPASS_OFF),
+ host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+ | CORE_CDC_SWITCH_RC_EN),
+ host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ & ~CORE_START_CDC_TRAFFIC),
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+ /*
+ * Perform CDC Register Initialization Sequence
+ *
+ * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
+ * CORE_CSR_CDC_CTLR_CFG1 0x3011111
+ * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
+ * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
+ * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
+ * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
+	 * CORE_CSR_CDC_DELAY_CFG 0x4E2
+ * CORE_CDC_OFFSET_CFG 0x0
+ * CORE_CDC_SLAVE_DDA_CFG 0x16334
+ */
+
+ writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+ writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+ writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+ writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+ writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+ writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+ writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+ /* CDC HW Calibration */
+
+ /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+ | CORE_SW_TRIG_FULL_CALIB),
+ host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+ & ~CORE_SW_TRIG_FULL_CALIB),
+ host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+ | CORE_HW_AUTOCAL_ENA),
+ host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
+ host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+ mb();
+
+ /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
+ ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+ calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CDC Calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
+ cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+ & CORE_CDC_ERROR_CODE_MASK;
+ if (cdc_err) {
+ pr_err("%s: %s: CDC Error Code %d\n",
+ mmc_hostname(host->mmc), __func__, cdc_err);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ | CORE_START_CDC_TRAFFIC),
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+out:
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 dll_status, ddr_config;
+ int ret = 0;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Reprogramming the value in case it might have been modified by
+ * bootloaders.
+ */
+ if (msm_host->rclk_delay_fix) {
+ writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
+ msm_host_offset->CORE_DDR_CONFIG_2);
+ } else {
+ ddr_config = DDR_CONFIG_POR_VAL &
+ ~DDR_CONFIG_PRG_RCLK_DLY_MASK;
+ ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
+ writel_relaxed(ddr_config, host->ioaddr +
+ msm_host_offset->CORE_DDR_CONFIG);
+ }
+
+ if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ | CORE_CMDIN_RCLK_EN), host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG);
+
+ /* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ | CORE_DDR_CAL_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+
+ /* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
+ ret = readl_poll_timeout(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS,
+ dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ /*
+	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
+	 * When MCLK is gated off, it is not gated for less than 0.5us,
+	 * and MCLK must be switched on for at least 1us before DATA
+	 * starts coming. Controllers with the 14lpp tech DLL cannot
+	 * guarantee this requirement, so PWRSAVE_DLL should not be
+	 * turned on for host controllers using this DLL.
+ */
+ if (!msm_host->use_14lpp_dll)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ | CORE_PWRSAVE_DLL), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+ mb();
+out:
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct mmc_host *mmc = host->mmc;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
+ pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+ mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ if (msm_host->calibration_done ||
+ !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+ return 0;
+ }
+
+ /*
+ * Reset the tuning block.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ if (!ret)
+ msm_host->calibration_done = true;
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+
+ /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ if (msm_host->use_cdclp533)
+ /* Calibrate CDCLP533 DLL HW */
+ ret = sdhci_msm_cdclp533_calibration(host);
+ else
+ /* Calibrate CM_DLL_SDC4 HW */
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
+ u8 drv_type)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+ struct mmc_host *mmc = host->mmc;
+ u8 val = ((drv_type << 4) | 2);
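+	/*
+	 * HS_TIMING byte: driver strength in bits [7:4], timing value
+	 * (2 = HS200) in bits [3:0].
+	 */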
+
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_HS_TIMING << 16) |
+ (val << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
+ /* 1 sec */
+ cmd.busy_timeout = 1000 * 1000;
+
+ memset(cmd.resp, 0, sizeof(cmd.resp));
+ cmd.retries = 3;
+
+ mrq.cmd = &cmd;
+ cmd.data = NULL;
+
+ mmc_wait_for_req(mmc, &mrq);
+ pr_debug("%s: %s: set card drive type to %d\n",
+ mmc_hostname(mmc), __func__,
+ drv_type);
+}
+
+int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ unsigned long flags;
int tuning_seq_cnt = 3;
- u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+ u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
+ const u32 *tuning_block_pattern = tuning_block_64;
+ int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
int rc;
struct mmc_host *mmc = host->mmc;
- struct mmc_ios ios = host->mmc->ios;
+ struct mmc_ios ios = host->mmc->ios;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u8 drv_type = 0;
+ bool drv_type_changed = false;
+ struct mmc_card *card = host->mmc->card;
+ int sts_retry;
/*
* Tuning is required for SDR104, HS200 and HS400 cards and
* if clock frequency is greater than 100MHz in these modes.
*/
- if (host->clock <= 100 * 1000 * 1000 ||
- !((ios.timing == MMC_TIMING_MMC_HS200) ||
- (ios.timing == MMC_TIMING_UHS_SDR104)))
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !((ios.timing == MMC_TIMING_MMC_HS400) ||
+ (ios.timing == MMC_TIMING_MMC_HS200) ||
+ (ios.timing == MMC_TIMING_UHS_SDR104)))
return 0;
+ /*
+	 * Don't allow re-tuning triggered by CRC errors observed on
+	 * commands sent during the tuning sequence itself.
+ */
+ if (msm_host->tuning_in_progress)
+ return 0;
+ msm_host->tuning_in_progress = true;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
+
+	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
+ if (msm_host->tuning_done && !msm_host->calibration_done &&
+ (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+ rc = sdhci_msm_hs400_dll_calibration(host);
+ spin_lock_irqsave(&host->lock, flags);
+ if (!rc)
+ msm_host->calibration_done = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
+ (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
+ tuning_block_pattern = tuning_block_128;
+ size = sizeof(tuning_block_128);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ data_buf = kmalloc(size, GFP_KERNEL);
+ if (!data_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
retry:
- /* First of all reset the tuning block */
+ tuned_phase_cnt = 0;
+
+ /* first of all reset the tuning block */
rc = msm_init_cm_dll(host);
if (rc)
- return rc;
+ goto kfree;
phase = 0;
do {
- /* Set the phase in delay line hw block */
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {
+ .cmd = &cmd,
+ .data = &data
+ };
+ struct scatterlist sg;
+ struct mmc_command sts_cmd = {0};
+
+ /* set the phase in delay line hw block */
rc = msm_config_cm_dll_phase(host, phase);
if (rc)
- return rc;
+ goto kfree;
- rc = mmc_send_tuning(mmc, opcode, NULL);
- if (!rc) {
- /* Tuning is successful at this tuning point */
- tuned_phases[tuned_phase_cnt++] = phase;
- dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
- mmc_hostname(mmc), phase);
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = size;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, size);
+ memset(data_buf, 0, size);
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (card && (cmd.error || data.error)) {
+ sts_cmd.opcode = MMC_SEND_STATUS;
+ sts_cmd.arg = card->rca << 16;
+ sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ sts_retry = 5;
+ while (sts_retry) {
+ mmc_wait_for_cmd(mmc, &sts_cmd, 0);
+
+ if (sts_cmd.error ||
+ (R1_CURRENT_STATE(sts_cmd.resp[0])
+ != R1_STATE_TRAN)) {
+ sts_retry--;
+ /*
+ * wait for at least 146 MCLK cycles for
+ * the card to move to TRANS state. As
+ * the MCLK would be min 200MHz for
+					 * tuning, we need at most a 0.73us
+					 * delay. To be on the safe side, a
+					 * 1ms delay is given.
+ */
+ usleep_range(1000, 1200);
+ pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
+ mmc_hostname(mmc), phase,
+ sts_cmd.error, sts_cmd.resp[0]);
+ continue;
+ }
+ break;
+			}
}
- } while (++phase < ARRAY_SIZE(tuned_phases));
+
+ if (!cmd.error && !data.error &&
+ !memcmp(data_buf, tuning_block_pattern, size)) {
+ /* tuning is successful at this tuning point */
+ tuned_phases[tuned_phase_cnt++] = phase;
+ pr_debug("%s: %s: found *** good *** phase = %d\n",
+ mmc_hostname(mmc), __func__, phase);
+ } else {
+ pr_debug("%s: %s: found ## bad ## phase = %d\n",
+ mmc_hostname(mmc), __func__, phase);
+ }
+ } while (++phase < 16);
+
+ if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
+ card && mmc_card_mmc(card)) {
+ /*
+		 * If all phases pass then it's a problem. So change the card's
+ * drive type to a different value, if supported and repeat
+ * tuning until at least one phase fails. Then set the original
+ * drive type back.
+ *
+ * If all the phases still pass after trying all possible
+ * drive types, then one of those 16 phases will be picked.
+ * This is no different from what was going on before the
+ * modification to change drive type and retune.
+ */
+ pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
+ tuned_phase_cnt);
+
+		/* set drive type to another value; the default setting is 0x0 */
+ while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
+ pr_debug("%s: trying different drive strength (%d)\n",
+ mmc_hostname(mmc), drv_type);
+ if (card->ext_csd.raw_driver_strength &
+ (1 << drv_type)) {
+ sdhci_msm_set_mmc_drv_type(host, opcode,
+ drv_type);
+ if (!drv_type_changed)
+ drv_type_changed = true;
+ goto retry;
+ }
+ }
+ }
+
+ /* reset drive type to default (50 ohm) if changed */
+ if (drv_type_changed)
+ sdhci_msm_set_mmc_drv_type(host, opcode, 0);
if (tuned_phase_cnt) {
rc = msm_find_most_appropriate_phase(host, tuned_phases,
- tuned_phase_cnt);
+ tuned_phase_cnt);
if (rc < 0)
- return rc;
+ goto kfree;
else
- phase = rc;
+ phase = (u8)rc;
/*
* Finally set the selected phase in delay
		 * line hw block.
*/
rc = msm_config_cm_dll_phase(host, phase);
if (rc)
- return rc;
- dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
- mmc_hostname(mmc), phase);
+ goto kfree;
+ msm_host->saved_tuning_phase = phase;
+ pr_debug("%s: %s: finally setting the tuning phase to %d\n",
+ mmc_hostname(mmc), __func__, phase);
} else {
if (--tuning_seq_cnt)
goto retry;
- /* Tuning failed */
- dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
- mmc_hostname(mmc));
+ /* tuning failed */
+ pr_err("%s: %s: no tuning point found\n",
+ mmc_hostname(mmc), __func__);
rc = -EIO;
}
+kfree:
+ kfree(data_buf);
+out:
+ spin_lock_irqsave(&host->lock, flags);
+ if (!rc)
+ msm_host->tuning_done = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+ msm_host->tuning_in_progress = false;
+ pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
return rc;
}
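+
+/*
+ * Illustrative sketch, not the driver's verbatim algorithm: the loop
+ * above marks each of the 16 DLL phases good or bad, and
+ * msm_find_most_appropriate_phase() reduces that set to one phase. A
+ * common strategy, assumed here for illustration, is to pick the middle
+ * of the longest contiguous run of good phases, treating the phases as
+ * a ring (phase 15 wraps to phase 0):
+ *
+ *	static u8 middle_of_longest_run(const bool good[16])
+ *	{
+ *		int i, len = 0, best_len = 0, best_end = 0;
+ *
+ *		for (i = 0; i < 32; i++) {	// two passes handle wrap
+ *			if (good[i % 16]) {
+ *				if (++len > best_len) {
+ *					best_len = len;
+ *					best_end = i;
+ *				}
+ *			} else {
+ *				len = 0;
+ *			}
+ *		}
+ *		if (best_len > 16)	// all 16 phases passed
+ *			best_len = 16;
+ *		return (best_end - best_len / 2) % 16;
+ *	}
+ *
+ * e.g. good phases {14, 15, 0, 1, 2} form one run of five across the
+ * wrap and the sketch returns phase 0, the centre of that window.
+ */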
-static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
- unsigned int uhs)
+static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
{
- struct mmc_host *mmc = host->mmc;
- u16 ctrl_2;
+ struct sdhci_msm_gpio_data *curr;
+ int i, ret = 0;
- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
- /* Select Bus Speed Mode for host */
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- switch (uhs) {
- case MMC_TIMING_UHS_SDR12:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- break;
- case MMC_TIMING_UHS_SDR25:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
- break;
- case MMC_TIMING_UHS_SDR50:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
- break;
- case MMC_TIMING_MMC_HS200:
- case MMC_TIMING_UHS_SDR104:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
- break;
- case MMC_TIMING_UHS_DDR50:
- case MMC_TIMING_MMC_DDR52:
- ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
- break;
+ curr = pdata->pin_data->gpio_data;
+ for (i = 0; i < curr->size; i++) {
+ if (!gpio_is_valid(curr->gpio[i].no)) {
+ ret = -EINVAL;
+ pr_err("%s: Invalid gpio = %d\n", __func__,
+ curr->gpio[i].no);
+ goto free_gpios;
+ }
+ if (enable) {
+ ret = gpio_request(curr->gpio[i].no,
+ curr->gpio[i].name);
+ if (ret) {
+ pr_err("%s: gpio_request(%d, %s) failed %d\n",
+ __func__, curr->gpio[i].no,
+ curr->gpio[i].name, ret);
+ goto free_gpios;
+ }
+ curr->gpio[i].is_enabled = true;
+ } else {
+ gpio_free(curr->gpio[i].no);
+ curr->gpio[i].is_enabled = false;
+ }
}
+ return ret;
- /*
- * When clock frequency is less than 100MHz, the feedback clock must be
- * provided and DLL must not be used so that tuning can be skipped. To
- * provide feedback clock, the mode selection can be any value less
- * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
- */
- if (host->clock <= 100000000 &&
- (uhs == MMC_TIMING_MMC_HS400 ||
- uhs == MMC_TIMING_MMC_HS200 ||
- uhs == MMC_TIMING_UHS_SDR104))
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-
- dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
- mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
- sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+free_gpios:
+ for (i--; i >= 0; i--) {
+ gpio_free(curr->gpio[i].no);
+ curr->gpio[i].is_enabled = false;
+ }
+ return ret;
}
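+
+/*
+ * The error path above follows the usual acquire-all-or-release-all
+ * idiom: on the first gpio_request() failure every GPIO claimed so far
+ * is freed in reverse order, so callers never see a half-claimed pin
+ * set. A minimal sketch of the pattern with hypothetical acquire() and
+ * release() helpers:
+ *
+ *	for (i = 0; i < n; i++) {
+ *		ret = acquire(i);
+ *		if (ret)
+ *			goto undo;
+ *	}
+ *	return 0;
+ * undo:
+ *	while (--i >= 0)
+ *		release(i);
+ *	return ret;
+ */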
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
+ bool enable)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- u32 irq_status, irq_ack = 0;
+ int ret = 0;
- irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
- irq_status &= INT_MASK;
+ if (enable)
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_active);
+ else
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_sleep);
- writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ if (ret < 0)
+ pr_err("%s state for pinctrl failed with %d\n",
+ enable ? "Enabling" : "Disabling", ret);
- if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
- irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
- if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
- irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ return ret;
+}
+
+static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+ int ret = 0;
+
+ if (pdata->pin_cfg_sts == enable) {
+ return 0;
+ } else if (pdata->pctrl_data) {
+ ret = sdhci_msm_setup_pinctrl(pdata, enable);
+ goto out;
+ } else if (!pdata->pin_data) {
+ return 0;
+ }
+
+ if (pdata->pin_data->is_gpio)
+ ret = sdhci_msm_setup_gpio(pdata, enable);
+out:
+ if (!ret)
+ pdata->pin_cfg_sts = enable;
+
+ return ret;
+}
+
+static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
+ u32 **out, int *len, u32 size)
+{
+ int ret = 0;
+ struct device_node *np = dev->of_node;
+ size_t sz;
+ u32 *arr = NULL;
+
+ if (!of_get_property(np, prop_name, len)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ sz = *len = *len / sizeof(*arr);
+	if (sz == 0 || (size > 0 && (sz > size))) {
+ dev_err(dev, "%s invalid size\n", prop_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
+ if (!arr) {
+ dev_err(dev, "%s failed allocating memory\n", prop_name);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32_array(np, prop_name, arr, sz);
+ if (ret < 0) {
+ dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
+ goto out;
+ }
+ *out = arr;
+out:
+ if (ret)
+ *len = 0;
+ return ret;
+}
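+
+/*
+ * sdhci_msm_dt_get_array() reads an entire u32 array property in one
+ * go, e.g. the supported clock rates parsed further below. A
+ * hypothetical devicetree fragment it would accept (values purely
+ * illustrative):
+ *
+ *	qcom,clk-rates = <400000 20000000 25000000
+ *			  50000000 100000000 200000000>;
+ *
+ * With size == 0 any element count is accepted; a non-zero size acts
+ * as an upper bound on the number of elements.
+ */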
+
+#define MAX_PROP_SIZE 32
+static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
+ struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
+{
+ int len, ret = 0;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ struct sdhci_msm_reg_data *vreg;
+ struct device_node *np = dev->of_node;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "No vreg data found for %s\n", vreg_name);
+ return ret;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ vreg->name = vreg_name;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-always-on", vreg_name);
+ if (of_get_property(np, prop_name, NULL))
+ vreg->is_always_on = true;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-lpm-sup", vreg_name);
+ if (of_get_property(np, prop_name, NULL))
+ vreg->lpm_sup = true;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-voltage-level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->low_vol_level = be32_to_cpup(&prop[0]);
+ vreg->high_vol_level = be32_to_cpup(&prop[1]);
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-current-level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->lpm_uA = be32_to_cpup(&prop[0]);
+ vreg->hpm_uA = be32_to_cpup(&prop[1]);
+ }
+
+ *vreg_data = vreg;
+ dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+ vreg->name, vreg->is_always_on ? "always_on," : "",
+ vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+ vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+
+ return ret;
+}
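+
+/*
+ * A hypothetical devicetree fragment matching the properties parsed
+ * above for a regulator named "vdd" (phandle and values illustrative
+ * only):
+ *
+ *	vdd-supply = <&pm8994_l20>;
+ *	qcom,vdd-always-on;
+ *	qcom,vdd-lpm-sup;
+ *	qcom,vdd-voltage-level = <2950000 2950000>;	// <low high> uV
+ *	qcom,vdd-current-level = <200 570000>;		// <lpm hpm> uA
+ */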
+
+static int sdhci_msm_parse_pinctrl_info(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ struct sdhci_pinctrl_data *pctrl_data;
+ struct pinctrl *pctrl;
+ int ret = 0;
+
+ /* Try to obtain pinctrl handle */
+ pctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pctrl)) {
+ ret = PTR_ERR(pctrl);
+ goto out;
+ }
+ pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
+ if (!pctrl_data) {
+ dev_err(dev, "No memory for sdhci_pinctrl_data\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ pctrl_data->pctrl = pctrl;
+ /* Look-up and keep the states handy to be used later */
+ pctrl_data->pins_active = pinctrl_lookup_state(
+ pctrl_data->pctrl, "active");
+ if (IS_ERR(pctrl_data->pins_active)) {
+ ret = PTR_ERR(pctrl_data->pins_active);
+ dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
+ goto out;
+ }
+ pctrl_data->pins_sleep = pinctrl_lookup_state(
+ pctrl_data->pctrl, "sleep");
+ if (IS_ERR(pctrl_data->pins_sleep)) {
+ ret = PTR_ERR(pctrl_data->pins_sleep);
+ dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
+ goto out;
+ }
+ pdata->pctrl_data = pctrl_data;
+out:
+ return ret;
+}
+
+#define GPIO_NAME_MAX_LEN 32
+static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ int ret = 0, cnt, i;
+ struct sdhci_msm_pin_data *pin_data;
+ struct device_node *np = dev->of_node;
+
+ ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
+ if (!ret) {
+ goto out;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
+ goto out;
+ } else {
+ dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
+ ret);
+ ret = 0;
+ }
+ pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data) {
+ dev_err(dev, "No memory for pin_data\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnt = of_gpio_count(np);
+ if (cnt > 0) {
+ pin_data->gpio_data = devm_kzalloc(dev,
+ sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
+ if (!pin_data->gpio_data) {
+ dev_err(dev, "No memory for gpio_data\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ pin_data->gpio_data->size = cnt;
+ pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
+ sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
+
+ if (!pin_data->gpio_data->gpio) {
+ dev_err(dev, "No memory for gpio\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ const char *name = NULL;
+ char result[GPIO_NAME_MAX_LEN];
+ pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
+ of_property_read_string_index(np,
+ "qcom,gpio-names", i, &name);
+
+ snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
+ dev_name(dev), name ? name : "?");
+			/*
+			 * Duplicate into device-managed memory: "result"
+			 * is a stack buffer whose address would dangle
+			 * once this loop iteration ends.
+			 */
+			pin_data->gpio_data->gpio[i].name =
+				devm_kstrdup(dev, result, GFP_KERNEL);
+ dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
+ pin_data->gpio_data->gpio[i].name,
+ pin_data->gpio_data->gpio[i].no);
+ }
+ }
+ pdata->pin_data = pin_data;
+out:
+ if (ret)
+ dev_err(dev, "%s failed with err %d\n", __func__, ret);
+ return ret;
+}
+
+#ifdef CONFIG_SMP
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
+{
+ pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
+}
+#else
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
+#endif
+
+static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ const char *str;
+ u32 cpu;
+ int ret = 0;
+ int i;
+
+ pdata->pm_qos_data.irq_valid = false;
+ pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
+ if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
+ !strcmp(str, "affine_irq")) {
+ parse_affine_irq(pdata);
+ }
+
+ /* must specify cpu for "affine_cores" type */
+ if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
+ pdata->pm_qos_data.irq_cpu = -1;
+ ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
+ if (ret) {
+ dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
+ ret);
+ goto out;
+ }
+		if (cpu >= num_possible_cpus()) {
+ dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
+ __func__, cpu, num_possible_cpus());
+ ret = -EINVAL;
+ goto out;
+ }
+ pdata->pm_qos_data.irq_cpu = cpu;
+ }
+
+ if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
+ SDHCI_POWER_POLICY_NUM) {
+ dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
+ __func__, SDHCI_POWER_POLICY_NUM);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
+ of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
+ &pdata->pm_qos_data.irq_latency.latency[i]);
+
+ pdata->pm_qos_data.irq_valid = true;
+out:
+ return ret;
+}
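+
+/*
+ * A hypothetical devicetree fragment for the IRQ PM QoS properties
+ * parsed above, shown assuming two power policies so that
+ * "qcom,pm-qos-irq-latency" carries SDHCI_POWER_POLICY_NUM == 2
+ * values (one latency, in usec, per policy; all values illustrative):
+ *
+ *	qcom,pm-qos-irq-type = "affine_cores";
+ *	qcom,pm-qos-irq-cpu = <0>;
+ *	qcom,pm-qos-irq-latency = <70 700>;
+ */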
+
+static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ u32 mask;
+ int nr_groups;
+ int ret;
+ int i;
+
+ /* Read cpu group mapping */
+ nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
+ if (nr_groups <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
+ pdata->pm_qos_data.cpu_group_map.mask =
+ kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
+ if (!pdata->pm_qos_data.cpu_group_map.mask) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nr_groups; i++) {
+ of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
+ i, &mask);
+
+ pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
+ if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
+ cpu_possible_mask)) {
+ dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
+ __func__, mask, i);
+ ret = -EINVAL;
+ goto free_res;
+ }
+ }
+ return 0;
+
+free_res:
+ kfree(pdata->pm_qos_data.cpu_group_map.mask);
+out:
+ return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
+ int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
+{
+ struct device_node *np = dev->of_node;
+ struct sdhci_msm_pm_qos_latency *values;
+ int ret;
+ int i;
+ int group;
+ int cfg;
+
+ ret = of_property_count_u32_elems(np, name);
+ if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
+ dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
+ __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
+ ret);
+ return -EINVAL;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
+ GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
+ group = i / SDHCI_POWER_POLICY_NUM;
+ cfg = i % SDHCI_POWER_POLICY_NUM;
+ of_property_read_u32_index(np, name, i,
+ &(values[group].latency[cfg]));
+ }
+
+ *latency = values;
+ return 0;
+}
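+
+/*
+ * Worked example of the flattened layout decoded above, assuming
+ * SDHCI_POWER_POLICY_NUM == 2 and nr_groups == 2: the property then
+ * carries 2 * 2 = 4 values,
+ *
+ *	<g0c0 g0c1 g1c0 g1c1>
+ *
+ * and element i lands in values[i / 2].latency[i % 2], i.e. the cpu
+ * group is the major index and the power-policy configuration the
+ * minor one.
+ */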
+
+static void sdhci_msm_pm_qos_parse(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
+ dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
+ __func__);
+
+ if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
+ pdata->pm_qos_data.cmdq_valid =
+ !sdhci_msm_pm_qos_parse_latency(dev,
+ "qcom,pm-qos-cmdq-latency-us",
+ pdata->pm_qos_data.cpu_group_map.nr_groups,
+ &pdata->pm_qos_data.cmdq_latency);
+ pdata->pm_qos_data.legacy_valid =
+ !sdhci_msm_pm_qos_parse_latency(dev,
+ "qcom,pm-qos-legacy-latency-us",
+ pdata->pm_qos_data.cpu_group_map.nr_groups,
+ &pdata->pm_qos_data.latency);
+ if (!pdata->pm_qos_data.cmdq_valid &&
+ !pdata->pm_qos_data.legacy_valid) {
+ /* clean-up previously allocated arrays */
+ kfree(pdata->pm_qos_data.latency);
+ kfree(pdata->pm_qos_data.cmdq_latency);
+ dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
+ __func__);
+ }
+ } else {
+ dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
+ __func__);
+ }
+}
+
+/* Parse platform data */
+static
+struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
+ struct sdhci_msm_host *msm_host)
+{
+ struct sdhci_msm_pltfm_data *pdata = NULL;
+ struct device_node *np = dev->of_node;
+ u32 bus_width = 0;
+ int len, i;
+ int clk_table_len;
+ u32 *clk_table = NULL;
+ enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+ const char *lower_bus_speed = NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "failed to allocate memory for platform data\n");
+ goto out;
+ }
+
+ pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
+ pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
+ of_property_read_u32(np, "qcom,bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
+ else if (bus_width == 4)
+ pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
+ else {
+ dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
+ pdata->mmc_bus_width = 0;
+ }
+
+ if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
+ &msm_host->mmc->clk_scaling.freq_table,
+ &msm_host->mmc->clk_scaling.freq_table_sz, 0))
+ pr_debug("%s: no clock scaling frequencies were supplied\n",
+ dev_name(dev));
+ else if (!msm_host->mmc->clk_scaling.freq_table ||
+ !msm_host->mmc->clk_scaling.freq_table_sz)
+ dev_err(dev, "bad dts clock scaling frequencies\n");
/*
- * The driver has to acknowledge the interrupt, switch voltages and
- * report back if it succeded or not to this register. The voltage
- * switches are handled by the sdhci core, so just report success.
+	 * A few hosts can support DDR52 mode at the same lower
+	 * system voltage corner as high-speed mode. In such cases,
+	 * it is always better to run them in DDR mode, which
+	 * improves performance without any power impact.
*/
- writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+ if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
+ &lower_bus_speed)) {
+ if (!strcmp(lower_bus_speed, "DDR52"))
+ msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
+ MMC_SCALING_LOWER_DDR52_MODE;
+ }
+
+ if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
+ &clk_table, &clk_table_len, 0)) {
+ dev_err(dev, "failed parsing supported clock rates\n");
+ goto out;
+ }
+ if (!clk_table || !clk_table_len) {
+ dev_err(dev, "Invalid clock table\n");
+ goto out;
+ }
+ pdata->sup_clk_table = clk_table;
+ pdata->sup_clk_cnt = clk_table_len;
+
+	pdata->vreg_data = devm_kzalloc(dev,
+			sizeof(struct sdhci_msm_slot_reg_data),
+			GFP_KERNEL);
+ if (!pdata->vreg_data) {
+ dev_err(dev, "failed to allocate memory for vreg data\n");
+ goto out;
+ }
+
+ if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
+ "vdd")) {
+ dev_err(dev, "failed parsing vdd data\n");
+ goto out;
+ }
+ if (sdhci_msm_dt_parse_vreg_info(dev,
+ &pdata->vreg_data->vdd_io_data,
+ "vdd-io")) {
+ dev_err(dev, "failed parsing vdd-io data\n");
+ goto out;
+ }
+
+ if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
+ dev_err(dev, "failed parsing gpio data\n");
+ goto out;
+ }
+
+ len = of_property_count_strings(np, "qcom,bus-speed-mode");
+
+ for (i = 0; i < len; i++) {
+ const char *name = NULL;
+
+ of_property_read_string_index(np,
+ "qcom,bus-speed-mode", i, &name);
+ if (!name)
+ continue;
+
+ if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
+ pdata->caps2 |= MMC_CAP2_HS400_1_8V;
+ else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
+ pdata->caps2 |= MMC_CAP2_HS400_1_2V;
+ else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+ pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+ else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
+ pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
+ pdata->caps |= MMC_CAP_1_8V_DDR
+ | MMC_CAP_UHS_DDR50;
+ else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
+ pdata->caps |= MMC_CAP_1_2V_DDR
+ | MMC_CAP_UHS_DDR50;
+ }
+
+ if (of_get_property(np, "qcom,nonremovable", NULL))
+ pdata->nonremovable = true;
+
+ if (of_get_property(np, "qcom,nonhotplug", NULL))
+ pdata->nonhotplug = true;
+
+ pdata->largeaddressbus =
+ of_property_read_bool(np, "qcom,large-address-bus");
+
+ if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
+ msm_host->mmc->wakeup_on_idle = true;
+
+ sdhci_msm_pm_qos_parse(dev, pdata);
+
+ if (of_get_property(np, "qcom,core_3_0v_support", NULL))
+ pdata->core_3_0v_support = true;
+
+ pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+
+ return pdata;
+out:
+ return NULL;
+}
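+
+/*
+ * Pulling the pieces together, a hypothetical (abridged) devicetree
+ * node consumed by sdhci_msm_populate_pdata() could look like the
+ * following; all names and values are illustrative, not taken from a
+ * real board file:
+ *
+ *	sdhc_1: sdhci@f9824900 {
+ *		cd-gpios = <&msmgpio 62 GPIO_ACTIVE_LOW>;
+ *		qcom,bus-width = <8>;
+ *		qcom,clk-rates = <400000 200000000>;
+ *		qcom,bus-speed-mode = "HS400_1p8v", "DDR_1p8v";
+ *		qcom,nonremovable;
+ *		vdd-supply = <&pm8994_l20>;
+ *		vdd-io-supply = <&pm8994_s4>;
+ *	};
+ */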
+
+/* Returns required bandwidth in Bytes per Sec */
+static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
+ struct mmc_ios *ios)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ unsigned int bw;
+
+ bw = msm_host->clk_rate;
+ /*
+ * For DDR mode, SDCC controller clock will be at
+	 * For DDR mode, the SDCC controller clock runs at double
+	 * the rate of the actual clock that goes to the card.
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ bw /= 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ bw /= 8;
+
+ return bw;
+}
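+
+/*
+ * Worked example for the estimate above: with the controller clock at
+ * 200 MHz, an 8-bit bus is assumed to move one byte per cycle, giving
+ * bw = 200000000 B/s; a 4-bit bus halves that (100000000 B/s) and a
+ * 1-bit bus divides it by eight (25000000 B/s). No extra factor is
+ * needed for DDR timings because the controller clock itself already
+ * runs at twice the card clock there.
+ */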
+
+static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
+ unsigned int bw)
+{
+ unsigned int *table = host->pdata->voting_data->bw_vecs;
+ unsigned int size = host->pdata->voting_data->bw_vecs_size;
+ int i;
+
+ if (host->msm_bus_vote.is_max_bw_needed && bw)
+ return host->msm_bus_vote.max_bw_vote;
+
+ for (i = 0; i < size; i++) {
+ if (bw <= table[i])
+ break;
+ }
+
+ if (i && (i == size))
+ i--;
+
+ return i;
+}
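+
+/*
+ * Worked example of the lookup above with a hypothetical
+ * bw_vecs[] = {0, 25000000, 50000000, 200000000} (Bytes/s):
+ *
+ *	bw = 0         -> i = 0 (first entry >= bw)
+ *	bw = 30000000  -> i = 2 (50000000 is the first entry >= bw)
+ *	bw = 400000000 -> i = 3 (past the end, clamped to the last slot)
+ *
+ * The returned index doubles as the msm_bus vote level passed to
+ * msm_bus_scale_client_update_request().
+ */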
+
+/*
+ * This function must be called with host lock acquired.
+ * Caller of this function should also ensure that msm bus client
+ * handle is not null.
+ */
+static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
+ int vote,
+ unsigned long *flags)
+{
+ struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
+ int rc = 0;
+
+ BUG_ON(!flags);
+
+ if (vote != msm_host->msm_bus_vote.curr_vote) {
+ spin_unlock_irqrestore(&host->lock, *flags);
+ rc = msm_bus_scale_client_update_request(
+ msm_host->msm_bus_vote.client_handle, vote);
+ spin_lock_irqsave(&host->lock, *flags);
+ if (rc) {
+ pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
+ mmc_hostname(host->mmc),
+ msm_host->msm_bus_vote.client_handle, vote, rc);
+ goto out;
+ }
+ msm_host->msm_bus_vote.curr_vote = vote;
+ }
+out:
+ return rc;
+}
+
+/*
+ * Internal work item that drops the msm bus bandwidth vote to 0.
+ */
+static void sdhci_msm_bus_work(struct work_struct *work)
+{
+ struct sdhci_msm_host *msm_host;
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ msm_host = container_of(work, struct sdhci_msm_host,
+ msm_bus_vote.vote_work.work);
+ host = platform_get_drvdata(msm_host->pdev);
+
+ if (!msm_host->msm_bus_vote.client_handle)
+ return;
+
+ spin_lock_irqsave(&host->lock, flags);
+ /* don't vote for 0 bandwidth if any request is in progress */
+ if (!host->mrq) {
+ sdhci_msm_bus_set_vote(msm_host,
+ msm_host->msm_bus_vote.min_bw_vote, &flags);
+ } else
+		pr_warn("%s: %s: transfer in progress, skipping bus vote to 0 bandwidth\n",
+ mmc_hostname(host->mmc), __func__);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * This function cancels any scheduled delayed work and sets the bus
+ * vote based on bw (bandwidth) argument.
+ */
+static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
+ unsigned int bw)
+{
+ int vote;
+ unsigned long flags;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
+ spin_lock_irqsave(&host->lock, flags);
+ vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
+ sdhci_msm_bus_set_vote(msm_host, vote, &flags);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
+
+/* This function queues a work which will set the bandwidth requirement to 0 */
+static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
+{
+ unsigned long flags;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (msm_host->msm_bus_vote.min_bw_vote !=
+ msm_host->msm_bus_vote.curr_vote)
+ queue_delayed_work(system_wq,
+ &msm_host->msm_bus_vote.vote_work,
+ msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
+ struct platform_device *pdev)
+{
+ int rc = 0;
+ struct msm_bus_scale_pdata *bus_pdata;
+
+ struct sdhci_msm_bus_voting_data *data;
+ struct device *dev = &pdev->dev;
+
+ data = devm_kzalloc(dev,
+ sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ data->bus_pdata = msm_bus_cl_get_pdata(pdev);
+ if (data->bus_pdata) {
+ rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
+ &data->bw_vecs, &data->bw_vecs_size, 0);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: Failed to get bus-bw-vectors-bps\n",
+ __func__);
+ goto out;
+ }
+ host->pdata->voting_data = data;
+ }
+ if (host->pdata->voting_data &&
+ host->pdata->voting_data->bus_pdata &&
+ host->pdata->voting_data->bw_vecs &&
+ host->pdata->voting_data->bw_vecs_size) {
+
+ bus_pdata = host->pdata->voting_data->bus_pdata;
+ host->msm_bus_vote.client_handle =
+ msm_bus_scale_register_client(bus_pdata);
+ if (!host->msm_bus_vote.client_handle) {
+			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
+ rc = -EFAULT;
+ goto out;
+ }
+ /* cache the vote index for minimum and maximum bandwidth */
+ host->msm_bus_vote.min_bw_vote =
+ sdhci_msm_bus_get_vote_for_bw(host, 0);
+ host->msm_bus_vote.max_bw_vote =
+ sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
+ } else {
+ devm_kfree(dev, data);
+ }
+
+out:
+ return rc;
+}
+
+static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
+{
+ if (host->msm_bus_vote.client_handle)
+ msm_bus_scale_unregister_client(
+ host->msm_bus_vote.client_handle);
+}
+
+static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct mmc_ios *ios = &host->mmc->ios;
+ unsigned int bw;
+
+ if (!msm_host->msm_bus_vote.client_handle)
+ return;
+
+ bw = sdhci_get_bw_required(host, ios);
+ if (enable) {
+ sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
+ } else {
+ /*
+ * If clock gating is enabled, then remove the vote
+ * immediately because clocks will be disabled only
+ * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
+ * additional delay is required to remove the bus vote.
+ */
+#ifdef CONFIG_MMC_CLKGATE
+ if (host->mmc->clkgate_delay)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ else
+#endif
+ sdhci_msm_bus_queue_work(host);
+ }
+}
+
+/* Regulator utility functions */
+static int sdhci_msm_vreg_init_reg(struct device *dev,
+ struct sdhci_msm_reg_data *vreg)
+{
+ int ret = 0;
+
+	/* check if regulator is already initialized */
+ if (vreg->reg)
+ goto out;
+
+ /* Get the regulator handle */
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+
+ if (regulator_count_voltages(vreg->reg) > 0) {
+ vreg->set_voltage_sup = true;
+ /* sanity check */
+ if (!vreg->high_vol_level || !vreg->hpm_uA) {
+ pr_err("%s: %s invalid constraints specified\n",
+ __func__, vreg->name);
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
+{
+ if (vreg->reg)
+ devm_regulator_put(vreg->reg);
+}
+
+static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
+ *vreg, int uA_load)
+{
+ int ret = 0;
+
+ /*
+ * regulators that do not support regulator_set_voltage also
+ * do not support regulator_set_optimum_mode
+ */
+ if (vreg->set_voltage_sup) {
+ ret = regulator_set_load(vreg->reg, uA_load);
+ if (ret < 0)
+ pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
+ __func__, vreg->name, uA_load, ret);
+ else
+ /*
+			 * regulator_set_load() can return a non-zero
+			 * value even on success.
+ */
+ ret = 0;
+ }
+ return ret;
+}
+
+static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
+ int min_uV, int max_uV)
+{
+ int ret = 0;
+ if (vreg->set_voltage_sup) {
+ ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+ if (ret) {
+			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d, max_uV=%d, ret=%d\n",
+ __func__, vreg->name, min_uV, max_uV, ret);
+ }
+ }
+
+ return ret;
+}
+
+static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
+{
+ int ret = 0;
+
+ /* Put regulator in HPM (high power mode) */
+ ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
+ if (ret < 0)
+ return ret;
+
+ if (!vreg->is_enabled) {
+ /* Set voltage level */
+ ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
+ vreg->high_vol_level);
+ if (ret)
+ return ret;
+ }
+ ret = regulator_enable(vreg->reg);
+ if (ret) {
+ pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
+ __func__, vreg->name, ret);
+ return ret;
+ }
+ vreg->is_enabled = true;
+ return ret;
+}
+
+static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
+{
+ int ret = 0;
+
+ /* Never disable regulator marked as always_on */
+ if (vreg->is_enabled && !vreg->is_always_on) {
+ ret = regulator_disable(vreg->reg);
+ if (ret) {
+ pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+ vreg->is_enabled = false;
+
+ ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
+ if (ret < 0)
+ goto out;
+
+ /* Set min. voltage level to 0 */
+ ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
+ if (ret)
+ goto out;
+ } else if (vreg->is_enabled && vreg->is_always_on) {
+ if (vreg->lpm_sup) {
+ /* Put always_on regulator in LPM (low power mode) */
+ ret = sdhci_msm_vreg_set_optimum_mode(vreg,
+ vreg->lpm_uA);
+ if (ret < 0)
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
+ bool enable, bool is_init)
+{
+ int ret = 0, i;
+ struct sdhci_msm_slot_reg_data *curr_slot;
+ struct sdhci_msm_reg_data *vreg_table[2];
+
+ curr_slot = pdata->vreg_data;
+ if (!curr_slot) {
+		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
+ __func__);
+ goto out;
+ }
+
+ vreg_table[0] = curr_slot->vdd_data;
+ vreg_table[1] = curr_slot->vdd_io_data;
+
+ for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
+ if (vreg_table[i]) {
+ if (enable)
+ ret = sdhci_msm_vreg_enable(vreg_table[i]);
+ else
+ ret = sdhci_msm_vreg_disable(vreg_table[i]);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+/*
+ * Reset vregs by ensuring they are off during probe. The enable
+ * call is needed to balance the subsequent disable.
+ */
+static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
+{
+ int ret;
+
+	ret = sdhci_msm_setup_vreg(pdata, true, true);
+ if (ret)
+ return ret;
+	ret = sdhci_msm_setup_vreg(pdata, false, true);
+ return ret;
+}
+
+/* This init function should be called only once for each SDHC slot */
+static int sdhci_msm_vreg_init(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata,
+ bool is_init)
+{
+ int ret = 0;
+ struct sdhci_msm_slot_reg_data *curr_slot;
+ struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
+
+ curr_slot = pdata->vreg_data;
+ if (!curr_slot)
+ goto out;
+
+ curr_vdd_reg = curr_slot->vdd_data;
+ curr_vdd_io_reg = curr_slot->vdd_io_data;
+
+ if (!is_init)
+ /* Deregister all regulators from regulator framework */
+ goto vdd_io_reg_deinit;
+
+ /*
+ * Get the regulator handle from voltage regulator framework
+ * and then try to set the voltage level for the regulator
+ */
+ if (curr_vdd_reg) {
+ ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
+ if (ret)
+ goto out;
+ }
+ if (curr_vdd_io_reg) {
+ ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
+ if (ret)
+ goto vdd_reg_deinit;
+ }
+ ret = sdhci_msm_vreg_reset(pdata);
+ if (ret)
+ dev_err(dev, "vreg reset failed (%d)\n", ret);
+ goto out;
+
+vdd_io_reg_deinit:
+ if (curr_vdd_io_reg)
+ sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
+vdd_reg_deinit:
+ if (curr_vdd_reg)
+ sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
+out:
+ return ret;
+}
+
+
+static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
+ enum vdd_io_level level,
+ unsigned int voltage_level)
+{
+ int ret = 0;
+ int set_level;
+ struct sdhci_msm_reg_data *vdd_io_reg;
+
+ if (!pdata->vreg_data)
+ return ret;
+
+ vdd_io_reg = pdata->vreg_data->vdd_io_data;
+ if (vdd_io_reg && vdd_io_reg->is_enabled) {
+ switch (level) {
+ case VDD_IO_LOW:
+ set_level = vdd_io_reg->low_vol_level;
+ break;
+ case VDD_IO_HIGH:
+ set_level = vdd_io_reg->high_vol_level;
+ break;
+ case VDD_IO_SET_LEVEL:
+ set_level = voltage_level;
+ break;
+ default:
+			pr_err("%s: invalid argument level = %d\n",
+ __func__, level);
+ ret = -EINVAL;
+ return ret;
+ }
+ ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
+ set_level);
+ }
+ return ret;
+}
+
+/*
+ * Acquire spin-lock host->lock before calling this function
+ */
+static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
+ bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (enable && !msm_host->is_sdiowakeup_enabled)
+ enable_irq(msm_host->pdata->sdiowakeup_irq);
+ else if (!enable && msm_host->is_sdiowakeup_enabled)
+ disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
+ else
+ dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
+ __func__, enable, msm_host->is_sdiowakeup_enabled);
+ msm_host->is_sdiowakeup_enabled = enable;
+}
+
+static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ unsigned long flags;
+
+ pr_debug("%s: irq (%d) received\n", __func__, irq);
+
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+ spin_unlock_irqrestore(&host->lock, flags);
+ msm_host->sdio_pending_processing = true;
+
+ return IRQ_HANDLED;
+}
+
+void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_MASK),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL));
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u8 irq_status = 0;
+ u8 irq_ack = 0;
+ int ret = 0;
+ int pwr_state = 0, io_level = 0;
+ unsigned long flags;
+ int retry = 10;
- sdhci_msm_voltage_switch(host);
+ irq_status = sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS);
+
+ pr_debug("%s: Received IRQ(%d), status=0x%x\n",
+ mmc_hostname(msm_host->mmc), irq, irq_status);
+
+ /* Clear the interrupt */
+ sdhci_msm_writeb_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+
+ /*
+ * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within a 1KB region. Hence, any update to
+ * core_mem address space would require an mb() to ensure this gets
+ * completed before its next update to registers within hc_mem.
+ */
+ mb();
+ /*
+ * There is a rare HW scenario where the first clear pulse could be
+	 * lost when the actual reset and the clear/read of the status
+	 * register happen at the same time. Hence, retry up to 10 times
+	 * to make sure the status register is cleared. Otherwise, a
+	 * spurious power IRQ would result, leading to system instability.
+ */
+ while (irq_status & sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
+ mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ BUG_ON(1);
+ }
+ sdhci_msm_writeb_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+ if (likely(retry < 10))
+ pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
+ mmc_hostname(host->mmc), irq_status, retry);
+
+	/* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
+ if (!ret) {
+ ret = sdhci_msm_setup_pins(msm_host->pdata, true);
+ ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+ VDD_IO_HIGH, 0);
+ }
+ if (ret)
+ irq_ack |= CORE_PWRCTL_BUS_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
+ if (!ret) {
+ ret = sdhci_msm_setup_pins(msm_host->pdata, false);
+ ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+ VDD_IO_LOW, 0);
+ }
+ if (ret)
+ irq_ack |= CORE_PWRCTL_BUS_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ /* Switch voltage Low */
+ ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
+ if (ret)
+ irq_ack |= CORE_PWRCTL_IO_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+ io_level = REQ_IO_LOW;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ /* Switch voltage High */
+ ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
+ if (ret)
+ irq_ack |= CORE_PWRCTL_IO_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+ io_level = REQ_IO_HIGH;
+ }
+
+ /* ACK status to the core */
+ sdhci_msm_writeb_relaxed(irq_ack, host,
+ msm_host_offset->CORE_PWRCTL_CTL);
+ /*
+ * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within a 1KB region. Hence, any update to
+ * core_mem address space would require an mb() to ensure this gets
+ * completed before its next update to registers within hc_mem.
+ */
+ mb();
+
+ if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) &
+ ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ else if ((io_level & REQ_IO_LOW) ||
+ (msm_host->caps_0 & CORE_1_8V_SUPPORT))
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) |
+ CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ mb();
+
+ pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
+ spin_lock_irqsave(&host->lock, flags);
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+ complete(&msm_host->pwr_irq_completion);
+ spin_unlock_irqrestore(&host->lock, flags);
return IRQ_HANDLED;
}
-static const struct of_device_id sdhci_msm_dt_match[] = {
- { .compatible = "qcom,sdhci-msm-v4" },
- {},
-};
+static ssize_t
+show_polling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ int poll;
+ unsigned long flags;
-MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+ spin_lock_irqsave(&host->lock, flags);
+ poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
+ spin_unlock_irqrestore(&host->lock, flags);
-static const struct sdhci_ops sdhci_msm_ops = {
- .platform_execute_tuning = sdhci_msm_execute_tuning,
- .reset = sdhci_reset,
- .set_clock = sdhci_set_clock,
- .set_bus_width = sdhci_set_bus_width,
+ return snprintf(buf, PAGE_SIZE, "%d\n", poll);
+}
+
+static ssize_t
+store_polling(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+	u32 value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (value) {
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ mmc_detect_change(host->mmc, 0);
+ } else {
+ host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
+static ssize_t
+show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ msm_host->msm_bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u32 value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ msm_host->msm_bus_vote.is_max_bw_needed = !!value;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ unsigned long flags;
+ bool done = false;
+ u32 io_sig_sts;
+
+ spin_lock_irqsave(&host->lock, flags);
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+ io_sig_sts = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_GENERICS);
+
+ /*
+ * The IRQ for request type IO High/Low will be generated when -
+ * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
+	 * 2. If 1 is true and there is a state change in the 1.8V enable
+ * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
+ * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
+ * layer tries to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of controller power up sequence. Hence, check
+ * for host->pwr to handle a case where IO voltage high request is
+ * issued even before controller power up.
+ */
+ if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
+ if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
+ ((req_type & REQ_IO_HIGH) && !host->pwr)) {
+ pr_debug("%s: do not wait for power IRQ that never comes\n",
+ mmc_hostname(host->mmc));
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+ }
+
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /*
+	 * This is needed here to handle a case where IRQ gets
+ * triggered even before this function is called so that
+ * x->done counter of completion gets reset. Otherwise,
+ * next call to wait_for_completion returns immediately
+ * without actually waiting for the IRQ to be handled.
+ */
+ if (done)
+ init_completion(&msm_host->pwr_irq_completion);
+ else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
+ mmc_hostname(host->mmc), req_type);
+
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
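+
+/*
+ * Timeline sketch (illustrative) of the race the init_completion()
+ * above guards against:
+ *
+ *	CPU0 (check_power_status)         CPU1 (pwr IRQ handler)
+ *	                                  complete(&pwr_irq_completion)
+ *	state already matches -> done
+ *	init_completion()  // re-arm: otherwise the stale done count
+ *	                   // would satisfy the *next* wait instantly
+ */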
+
+static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ if (enable) {
+ config |= CORE_CDR_EN;
+ config &= ~CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ } else {
+ config &= ~CORE_CDR_EN;
+ config |= CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ }
+}
+
+static unsigned int sdhci_msm_max_segs(void)
+{
+ return SDHCI_MSM_MAX_SEGMENTS;
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return msm_host->pdata->sup_clk_table[0];
+}
+
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int max_clk_index = msm_host->pdata->sup_clk_cnt;
+
+ return msm_host->pdata->sup_clk_table[max_clk_index - 1];
+}
+
+static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
+ u32 req_clk)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned int sel_clk = -1;
+ unsigned char cnt;
+
+ if (req_clk < sdhci_msm_get_min_clock(host)) {
+ sel_clk = sdhci_msm_get_min_clock(host);
+ return sel_clk;
+ }
+
+ for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
+ if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
+ break;
+ } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
+ sel_clk = msm_host->pdata->sup_clk_table[cnt];
+ break;
+ } else {
+ sel_clk = msm_host->pdata->sup_clk_table[cnt];
+ }
+ }
+ return sel_clk;
+}
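+
+/*
+ * Worked example of the rate selection above with a hypothetical
+ * sup_clk_table[] = {400000, 25000000, 50000000, 200000000}:
+ *
+ *	req_clk = 100000    -> 400000    (below minimum, rounded up to it)
+ *	req_clk = 52000000  -> 50000000  (largest supported rate <= req)
+ *	req_clk = 200000000 -> 200000000 (exact match)
+ */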
+
+static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int rc = 0;
+
+ if (atomic_read(&msm_host->controller_clock))
+ return 0;
+
+ sdhci_msm_bus_voting(host, 1);
+
+ if (!IS_ERR(msm_host->pclk)) {
+ rc = clk_prepare_enable(msm_host->pclk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the pclk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto remove_vote;
+ }
+ }
+
+ rc = clk_prepare_enable(msm_host->clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_pclk;
+ }
+
+ atomic_set(&msm_host->controller_clock, 1);
+ pr_debug("%s: %s: enabled controller clock\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+
+disable_pclk:
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+remove_vote:
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+ return rc;
+}
+
+static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (atomic_read(&msm_host->controller_clock)) {
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
+ sdhci_msm_bus_voting(host, 0);
+ atomic_set(&msm_host->controller_clock, 0);
+ pr_debug("%s: %s: disabled controller clock\n",
+ mmc_hostname(host->mmc), __func__);
+ }
+}
+
+static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int rc = 0;
+
+ if (enable && !atomic_read(&msm_host->clks_on)) {
+ pr_debug("%s: request to enable clocks\n",
+ mmc_hostname(host->mmc));
+
+ /*
+ * The bus-width or the clock rate might have changed
+		 * after controller clocks are enabled; update the bus
+		 * vote in that case.
+ */
+ if (atomic_read(&msm_host->controller_clock))
+ sdhci_msm_bus_voting(host, 1);
+
+ rc = sdhci_msm_enable_controller_clock(host);
+ if (rc)
+ goto remove_vote;
+
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
+ rc = clk_prepare_enable(msm_host->bus_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_controller_clk;
+ }
+ }
+ if (!IS_ERR(msm_host->ff_clk)) {
+ rc = clk_prepare_enable(msm_host->ff_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_bus_clk;
+ }
+ }
+ if (!IS_ERR(msm_host->sleep_clk)) {
+ rc = clk_prepare_enable(msm_host->sleep_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_ff_clk;
+ }
+ }
+ mb();
+
+ } else if (!enable && atomic_read(&msm_host->clks_on)) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ mb();
+ /*
+ * During 1.8V signal switching the clock source must
+ * still be ON as it requires accessing SDHC
+		 * registers (SDHCI Host Control2 register bit 3 must
+ * be written and polled after stopping the SDCLK).
+ */
+ if (host->mmc->card_clock_off)
+ return 0;
+ pr_debug("%s: request to disable clocks\n",
+ mmc_hostname(host->mmc));
+ if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
+ clk_disable_unprepare(msm_host->sleep_clk);
+ if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+ clk_disable_unprepare(msm_host->ff_clk);
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+
+ atomic_set(&msm_host->controller_clock, 0);
+ sdhci_msm_bus_voting(host, 0);
+ }
+ atomic_set(&msm_host->clks_on, enable);
+ goto out;
+disable_ff_clk:
+ if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+ clk_disable_unprepare(msm_host->ff_clk);
+disable_bus_clk:
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+disable_controller_clk:
+ if (!IS_ERR_OR_NULL(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR_OR_NULL(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ atomic_set(&msm_host->controller_clock, 0);
+remove_vote:
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+ return rc;
+}
+
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int rc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ struct mmc_card *card = host->mmc->card;
+ struct mmc_ios curr_ios = host->mmc->ios;
+ u32 sup_clock, ddr_clock, dll_lock;
+ bool curr_pwrsave;
+
+ if (!clock) {
+ /*
+ * disable pwrsave to ensure clock is not auto-gated until
+ * the rate is >400KHz (initialization complete).
+ */
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) &
+ ~CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ sdhci_msm_prepare_clocks(host, false);
+ host->clock = clock;
+ goto out;
+ }
+
+ rc = sdhci_msm_prepare_clocks(host, true);
+ if (rc)
+ goto out;
+
+ curr_pwrsave = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+ if ((clock > 400000) &&
+ !curr_pwrsave && card && mmc_host_may_gate_card(card))
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ | CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ /*
+ * Disable pwrsave for a newly added card if doesn't allow clock
+	 * Disable pwrsave for a newly added card if it doesn't allow
+	 * clock gating.
+ else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+
+ sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
+ if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
+ (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
+ (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
+ /*
+		 * The SDHC requires its internal clock frequency to be double
+		 * the actual clock that will be set for DDR mode. The
+		 * controller uses the faster clock (100/400MHz) for some of
+		 * its parts and sends the actual required clock (50/200MHz)
+		 * to the card.
+ */
+ ddr_clock = clock * 2;
+ sup_clock = sdhci_msm_get_sup_clk_rate(host,
+ ddr_clock);
+ }
+
+ /*
+ * In general all timing modes are controlled via UHS mode select in
+	 * Host Control2 register. The eMMC-specific HS200/HS400 modes
+	 * don't have entries of their own here, hence we use these values.
+ *
+ * HS200 - SDR104 (Since they both are equivalent in functionality)
+ * HS400 - This involves multiple configurations
+ * Initially SDR104 - when tuning is required as HS200
+ * Then when switching to DDR @ 400MHz (HS400) we use
+ * the vendor specific HC_SELECT_IN to control the mode.
+ *
+ * In addition to controlling the modes we also need to select the
+ * correct input clock for DLL depending on the mode.
+ *
+ * HS400 - divided clock (free running MCLK/2)
+ * All other modes - default (free running MCLK)
+ */
+ if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+ /* Select the divided clock (free running MCLK/2) */
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_MCLK_SEL_MASK)
+ | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ /*
+ * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+ * register
+ */
+ if ((msm_host->tuning_done ||
+ (card && mmc_card_strobe(card) &&
+ msm_host->enhanced_strobe)) &&
+ !msm_host->calibration_done) {
+ /*
+ * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+ * field in VENDOR_SPEC_FUNC
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr + \
+ msm_host_offset->CORE_VENDOR_SPEC)
+ | CORE_HC_SELECT_IN_HS400
+ | CORE_HC_SELECT_IN_EN), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ }
+ if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
+ /*
+ * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
+ * CORE_DLL_STATUS to be set. This should get set
+			 * within 15 us at 200 MHz.
+ */
+ rc = readl_poll_timeout(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS,
+ dll_lock, (dll_lock & (CORE_DLL_LOCK |
+ CORE_DDR_DLL_LOCK)), 10, 1000);
+ if (rc == -ETIMEDOUT)
+ pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ dll_lock);
+ }
+ } else {
+ if (!msm_host->use_cdclp533)
+ /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ & ~CORE_PWRSAVE_DLL), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+
+ /* Select the default clock (free running MCLK) */
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_MCLK_SEL_MASK)
+ | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+
+ /*
+ * Disable HC_SELECT_IN to be able to use the UHS mode select
+ * configuration from Host Control2 register for all other
+ * modes.
+ *
+ * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+ * in VENDOR_SPEC_FUNC
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_SELECT_IN_EN
+ & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ }
+ mb();
+
+ if (sup_clock != msm_host->clk_rate) {
+ pr_debug("%s: %s: setting clk rate to %u\n",
+ mmc_hostname(host->mmc), __func__, sup_clock);
+ rc = clk_set_rate(msm_host->clk, sup_clock);
+ if (rc) {
+ pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
+ mmc_hostname(host->mmc), __func__,
+ sup_clock, rc);
+ goto out;
+ }
+ msm_host->clk_rate = sup_clock;
+ host->clock = clock;
+ /*
+ * Update the bus vote in case of frequency change due to
+ * clock scaling.
+ */
+ sdhci_msm_bus_voting(host, 1);
+ }
+out:
+ sdhci_set_clock(host, clock);
+}
+
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((uhs == MMC_TIMING_MMC_HS400) ||
+ (uhs == MMC_TIMING_MMC_HS200) ||
+ (uhs == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (uhs == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (uhs == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (uhs == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((uhs == MMC_TIMING_UHS_DDR50) ||
+ (uhs == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ /*
+	 * When clock frequency is less than 100MHz, the feedback clock must be
+ * provided and DLL must not be used so that tuning can be skipped. To
+ * provide feedback clock, the mode selection can be any value less
+ * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
+ */
+ if (host->clock <= CORE_FREQ_100MHZ) {
+ if ((uhs == MMC_TIMING_MMC_HS400) ||
+ (uhs == MMC_TIMING_MMC_HS200) ||
+ (uhs == MMC_TIMING_UHS_SDR104))
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+ /*
+ * Make sure DLL is disabled when not required
+ *
+ * Write 1 to DLL_RST bit of DLL_CONFIG register
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_DLL_RST), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_DLL_PDN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ mb();
+
+ /*
+ * The DLL needs to be restored and CDCLP533 recalibrated
+ * when the clock frequency is set back to 400MHz.
+ */
+ msm_host->calibration_done = false;
+ }
+
+ pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
+ mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+}
+
+#define MAX_TEST_BUS 60
+#define DRV_NAME "cmdq-host"
+static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
+{
+ int i = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ struct cmdq_host *cq_host = host->cq_host;
+
+ u32 version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
+ u16 minor = version & CORE_VERSION_TARGET_MASK;
+	/* register offsets changed starting with version 4.2.0 */
+ int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+
+ pr_err("---- Debug RAM dump ----\n");
+ pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
+ cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
+ cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
+
+	for (i = 0; i < 16; i++)
+		pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
+			cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
+ pr_err("-------------------------\n");
+}
+
+void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ int tbsel, tbsel2;
+ int i, index = 0;
+ u32 test_bus_val = 0;
+ u32 debug_reg[MAX_TEST_BUS] = {0};
+
+ pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+ if (host->cq_host)
+ sdhci_msm_cmdq_dump_debug_ram(host);
+
+ MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_DATA_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_FIFO_CNT));
+ pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_DATA_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_FIFO_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_STATUS));
+ pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION));
+ pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
+ pr_info("Vndr func2: 0x%08x\n",
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
+
+ /*
+ * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
+ * of CORE_TESTBUS_CONFIG register.
+ *
+ * To select test bus 0 to 7 use tbsel and to select any test bus
+	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
+ * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
+ * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
+ */
+ for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
+ for (tbsel = 0; tbsel < 8; tbsel++) {
+ if (index >= MAX_TEST_BUS)
+ break;
+ test_bus_val =
+ (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
+ tbsel | msm_host_offset->CORE_TESTBUS_ENA;
+ sdhci_msm_writel_relaxed(test_bus_val, host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ debug_reg[index++] = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_SDCC_DEBUG_REG);
+ }
+ }
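+	/* Print the sampled test bus values, four per line */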
+ for (i = 0; i < MAX_TEST_BUS; i = i + 4)
+ pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, i + 3, debug_reg[i], debug_reg[i+1],
+ debug_reg[i+2], debug_reg[i+3]);
+}
+
+/*
+ * sdhci_msm_enhanced_strobe_mask :-
+ * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
+ * SW should write 3 to
+ * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
+ * The default reset value of this register is 2.
+ */
+static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (!msm_host->enhanced_strobe ||
+ !mmc_card_strobe(msm_host->mmc->card)) {
+ pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ if (set) {
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ | CORE_CMDEN_HS400_INPUT_MASK_CNT),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+ } else {
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+ }
+}
+
+static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
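+	/* Enable or disable the vendor test bus used by the register dump */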
+ if (set) {
+ sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
+ host, msm_host_offset->CORE_TESTBUS_CONFIG);
+ } else {
+ u32 value;
+
+ value = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
+ sdhci_msm_writel_relaxed(value, host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ }
+}
+
+void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
+{
+ u32 vendor_func2;
+ unsigned long timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ vendor_func2 = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+
+ if (enable) {
+ writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ timeout = 10000;
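+		/* Poll up to ~100 ms (10000 * 10 us) for SW_RST_REQ to clear */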
+ while (readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
+ if (timeout == 0) {
+ pr_info("%s: Applying wait idle disable workaround\n",
+ mmc_hostname(host->mmc));
+ /*
+ * Apply the reset workaround to not wait for
+ * pending data transfers on AXI before
+ * resetting the controller. This could be
+ * risky if the transfers were stuck on the
+ * AXI bus.
+ */
+ vendor_func2 = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ writel_relaxed(vendor_func2 |
+ HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ host->reset_wa_t = ktime_get();
+ return;
+ }
+ timeout--;
+ udelay(10);
+ }
+		pr_info("%s: SW_RST_REQ cleared successfully\n",
+				mmc_hostname(host->mmc));
+ } else {
+ writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ }
+}
+
+static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
+{
+ struct sdhci_msm_pm_qos_irq *pm_qos_irq =
+ container_of(work, struct sdhci_msm_pm_qos_irq,
+ unvote_work.work);
+
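+	/* A new vote arrived while this work was pending; keep the request */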
+ if (atomic_read(&pm_qos_irq->counter))
+ return;
+
+ pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
+}
+
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_latency *latency =
+ &msm_host->pdata->pm_qos_data.irq_latency;
+ int counter;
+
+ if (!msm_host->pm_qos_irq.enabled)
+ return;
+
+ counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
+ /* Make sure to update the voting in case power policy has changed */
+ if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
+ && counter > 1)
+ return;
+
+ cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+ msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
+ pm_qos_update_request(&msm_host->pm_qos_irq.req,
+ msm_host->pm_qos_irq.latency);
+}
+
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int counter;
+
+ if (!msm_host->pm_qos_irq.enabled)
+ return;
+
+ if (atomic_read(&msm_host->pm_qos_irq.counter)) {
+ counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
+ } else {
+ WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
+ return;
+ }
+
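+	/* Votes are still outstanding; leave the latency request in place */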
+ if (counter)
+ return;
+
+ if (async) {
+ schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
+ msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+ return;
+ }
+
+ msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos_irq.req,
+ msm_host->pm_qos_irq.latency);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
+
+ return snprintf(buf, PAGE_SIZE,
+ "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
+ irq->enabled, atomic_read(&irq->counter), irq->latency);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ uint32_t value;
+ bool enable;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &value);
+ if (ret)
+ goto out;
+ enable = !!value;
+
+ if (enable == msm_host->pm_qos_irq.enabled)
+ goto out;
+
+ msm_host->pm_qos_irq.enabled = enable;
+ if (!enable) {
+ cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+ atomic_set(&msm_host->pm_qos_irq.counter, 0);
+ msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos_irq.req,
+ msm_host->pm_qos_irq.latency);
+ }
+
+out:
+ return count;
+}
+
+#ifdef CONFIG_SMP
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+ struct sdhci_host *host)
+{
+ msm_host->pm_qos_irq.req.irq = host->irq;
+}
+#else
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+ struct sdhci_host *host) { }
+#endif
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_latency *irq_latency;
+ int ret;
+
+ if (!msm_host->pdata->pm_qos_data.irq_valid)
+ return;
+
+ /* Initialize only once as this gets called per partition */
+ if (msm_host->pm_qos_irq.enabled)
+ return;
+
+ atomic_set(&msm_host->pm_qos_irq.counter, 0);
+ msm_host->pm_qos_irq.req.type =
+ msm_host->pdata->pm_qos_data.irq_req_type;
+ if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
+ (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
+ set_affine_irq(msm_host, host);
+ else
+ cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
+ cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
+
+ INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
+ sdhci_msm_pm_qos_irq_unvote_work);
+ /* For initialization phase, set the performance latency */
+ irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
+ msm_host->pm_qos_irq.latency =
+ irq_latency->latency[SDHCI_PERFORMANCE_MODE];
+ pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
+ msm_host->pm_qos_irq.latency);
+ msm_host->pm_qos_irq.enabled = true;
+
+ /* sysfs */
+ msm_host->pm_qos_irq.enable_attr.show =
+ sdhci_msm_pm_qos_irq_enable_show;
+ msm_host->pm_qos_irq.enable_attr.store =
+ sdhci_msm_pm_qos_irq_enable_store;
+ sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
+ msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
+ msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_irq.enable_attr);
+ if (ret)
+ pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
+ __func__, ret);
+
+ msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
+ msm_host->pm_qos_irq.status_attr.store = NULL;
+ sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
+ msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
+ msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_irq.status_attr);
+ if (ret)
+ pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
+ __func__, ret);
+}
+
+static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_group *group;
+ int i;
+ int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+ int offset = 0;
+
+ for (i = 0; i < nr_groups; i++) {
+ group = &msm_host->pm_qos[i];
+		offset += snprintf(&buf[offset], PAGE_SIZE - offset,
+ "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
+ i, group->req.cpus_affine.bits[0],
+ msm_host->pm_qos_group_enable,
+ atomic_read(&group->counter),
+ group->latency);
+ }
+
+ return offset;
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ msm_host->pm_qos_group_enable ? "enabled" : "disabled");
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+ uint32_t value;
+ bool enable;
+ int ret;
+ int i;
+
+ ret = kstrtou32(buf, 0, &value);
+ if (ret)
+ goto out;
+ enable = !!value;
+
+ if (enable == msm_host->pm_qos_group_enable)
+ goto out;
+
+ msm_host->pm_qos_group_enable = enable;
+ if (!enable) {
+ for (i = 0; i < nr_groups; i++) {
+ cancel_delayed_work_sync(
+ &msm_host->pm_qos[i].unvote_work);
+ atomic_set(&msm_host->pm_qos[i].counter, 0);
+ msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos[i].req,
+ msm_host->pm_qos[i].latency);
+ }
+ }
+
+out:
+ return count;
+}
+
+static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
+{
+ int i;
+ struct sdhci_msm_cpu_group_map *map =
+ &msm_host->pdata->pm_qos_data.cpu_group_map;
+
+ if (cpu < 0)
+ goto not_found;
+
+ for (i = 0; i < map->nr_groups; i++)
+ if (cpumask_test_cpu(cpu, &map->mask[i]))
+ return i;
+
+not_found:
+ return -EINVAL;
+}
+
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency, int cpu)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+ struct sdhci_msm_pm_qos_group *pm_qos_group;
+ int counter;
+
+ if (!msm_host->pm_qos_group_enable || group < 0)
+ return;
+
+ pm_qos_group = &msm_host->pm_qos[group];
+ counter = atomic_inc_return(&pm_qos_group->counter);
+
+ /* Make sure to update the voting in case power policy has changed */
+ if (pm_qos_group->latency == latency->latency[host->power_policy]
+ && counter > 1)
+ return;
+
+ cancel_delayed_work_sync(&pm_qos_group->unvote_work);
+
+ pm_qos_group->latency = latency->latency[host->power_policy];
+ pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
+}
+
+static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
+{
+ struct sdhci_msm_pm_qos_group *group =
+ container_of(work, struct sdhci_msm_pm_qos_group,
+ unvote_work.work);
+
+ if (atomic_read(&group->counter))
+ return;
+
+ group->latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&group->req, group->latency);
+}
+
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+
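+	/* Bail out if group voting is off, the cpu is unmapped or votes remain */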
+ if (!msm_host->pm_qos_group_enable || group < 0 ||
+ atomic_dec_return(&msm_host->pm_qos[group].counter))
+ return false;
+
+ if (async) {
+ schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
+ msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+ return true;
+ }
+
+ msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos[group].req,
+ msm_host->pm_qos[group].latency);
+ return true;
+}
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+ struct sdhci_msm_pm_qos_group *group;
+ int i;
+ int ret;
+
+ if (msm_host->pm_qos_group_enable)
+ return;
+
+ msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
+ GFP_KERNEL);
+ if (!msm_host->pm_qos)
+ return;
+
+ for (i = 0; i < nr_groups; i++) {
+ group = &msm_host->pm_qos[i];
+ INIT_DELAYED_WORK(&group->unvote_work,
+ sdhci_msm_pm_qos_cpu_unvote_work);
+ atomic_set(&group->counter, 0);
+ group->req.type = PM_QOS_REQ_AFFINE_CORES;
+ cpumask_copy(&group->req.cpus_affine,
+ &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
+ /* For initialization phase, set the performance mode latency */
+ group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
+ pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
+ group->latency);
+ pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
+ __func__, i,
+ group->req.cpus_affine.bits[0],
+ group->latency,
+ &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
+ }
+ msm_host->pm_qos_prev_cpu = -1;
+ msm_host->pm_qos_group_enable = true;
+
+ /* sysfs */
+ msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
+ msm_host->pm_qos_group_status_attr.store = NULL;
+ sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
+ msm_host->pm_qos_group_status_attr.attr.name =
+ "pm_qos_cpu_groups_status";
+ msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_group_status_attr);
+ if (ret)
+ dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
+ __func__, ret);
+ msm_host->pm_qos_group_enable_attr.show =
+ sdhci_msm_pm_qos_group_enable_show;
+ msm_host->pm_qos_group_enable_attr.store =
+ sdhci_msm_pm_qos_group_enable_store;
+ sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
+ msm_host->pm_qos_group_enable_attr.attr.name =
+ "pm_qos_cpu_groups_enable";
+ msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_group_enable_attr);
+ if (ret)
+ dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
+ __func__, ret);
+}
+
+static void sdhci_msm_pre_req(struct sdhci_host *host,
+ struct mmc_request *mmc_req)
+{
+ int cpu;
+ int group;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int prev_group = sdhci_msm_get_cpu_group(msm_host,
+ msm_host->pm_qos_prev_cpu);
+
+ sdhci_msm_pm_qos_irq_vote(host);
+
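+	/* Sample the CPU issuing this request; no need to stay pinned to it */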
+ cpu = get_cpu();
+ put_cpu();
+ group = sdhci_msm_get_cpu_group(msm_host, cpu);
+ if (group < 0)
+ return;
+
+ if (group != prev_group && prev_group >= 0) {
+ sdhci_msm_pm_qos_cpu_unvote(host,
+ msm_host->pm_qos_prev_cpu, false);
+ prev_group = -1; /* make sure to vote for new group */
+ }
+
+ if (prev_group < 0) {
+ sdhci_msm_pm_qos_cpu_vote(host,
+ msm_host->pdata->pm_qos_data.latency, cpu);
+ msm_host->pm_qos_prev_cpu = cpu;
+ }
+}
+
+static void sdhci_msm_post_req(struct sdhci_host *host,
+ struct mmc_request *mmc_req)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ sdhci_msm_pm_qos_irq_unvote(host, false);
+
+ if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
+ msm_host->pm_qos_prev_cpu = -1;
+}
+
+static void sdhci_msm_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ sdhci_msm_pm_qos_irq_init(host);
+
+ if (msm_host->pdata->pm_qos_data.legacy_valid)
+ sdhci_msm_pm_qos_cpu_init(host,
+ msm_host->pdata->pm_qos_data.latency);
+}
+
+static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
+ u32 max_curr = 0;
+
+ if (curr_slot && curr_slot->vdd_data)
+ max_curr = curr_slot->vdd_data->hpm_uA;
+
+ return max_curr;
+}
+
+static struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
- .voltage_switch = sdhci_msm_voltage_switch,
+ .check_power_status = sdhci_msm_check_power_status,
+ .platform_execute_tuning = sdhci_msm_execute_tuning,
+ .enhanced_strobe = sdhci_msm_enhanced_strobe,
+ .toggle_cdr = sdhci_msm_toggle_cdr,
+ .get_max_segments = sdhci_msm_max_segs,
+ .set_clock = sdhci_msm_set_clock,
+ .get_min_clock = sdhci_msm_get_min_clock,
+ .get_max_clock = sdhci_msm_get_max_clock,
+ .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
+ .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
+ .enable_controller_clock = sdhci_msm_enable_controller_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
+ .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
+ .reset_workaround = sdhci_msm_reset_workaround,
+ .init = sdhci_msm_init,
+ .pre_req = sdhci_msm_pre_req,
+ .post_req = sdhci_msm_post_req,
+ .get_current_limit = sdhci_msm_get_current_limit,
};
-static const struct sdhci_pltfm_data sdhci_msm_pdata = {
- .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_NO_CARD_NO_RESET |
- SDHCI_QUIRK_SINGLE_POWER_WRITE,
- .ops = &sdhci_msm_ops,
-};
+static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
+ struct sdhci_host *host)
+{
+ u32 version, caps = 0;
+ u16 minor;
+ u8 major;
+ u32 val;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
+ major = (version & CORE_VERSION_MAJOR_MASK) >>
+ CORE_VERSION_MAJOR_SHIFT;
+ minor = version & CORE_VERSION_TARGET_MASK;
+
+ caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+
+ /*
+ * Starting with SDCC 5 controller (core major version = 1)
+ * controller won't advertise 3.0v, 1.8v and 8-bit features
+ * except for some targets.
+ */
+ if (major >= 1 && minor != 0x11 && minor != 0x12) {
+ struct sdhci_msm_reg_data *vdd_io_reg;
+ /*
+ * Enable 1.8V support capability on controllers that
+ * support dual voltage
+ */
+ vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
+ if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
+ caps |= CORE_3_0V_SUPPORT;
+ if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
+ caps |= CORE_1_8V_SUPPORT;
+ if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
+ caps |= CORE_8_BIT_SUPPORT;
+ }
+
+ /*
+	 * Enable one MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
+	 * on 8992 (minor 0x3e) as a reset workaround for the data stuck issue.
+ */
+ if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
+ host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
+ val = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ writel_relaxed((val | CORE_ONE_MID_EN),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ }
+ /*
+ * SDCC 5 controller with major version 1, minor version 0x34 and later
+ * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
+ */
+ if ((major == 1) && (minor < 0x34))
+ msm_host->use_cdclp533 = true;
+
+ /*
+ * SDCC 5 controller with major version 1, minor version 0x42 and later
+ * will require additional steps when resetting DLL.
+ * It also supports HS400 enhanced strobe mode.
+ */
+ if ((major == 1) && (minor >= 0x42)) {
+ msm_host->use_updated_dll_reset = true;
+ msm_host->enhanced_strobe = true;
+ }
+
+ /*
+ * SDCC 5 controller with major version 1 and minor version 0x42,
+	 * 0x46 and 0x49 currently uses the 14lpp tech DLL, whose internal
+	 * gating cannot guarantee the MCLK timing requirements, i.e.
+	 * when MCLK is gated off, it is not gated for less than 0.5us,
+	 * and MCLK must be switched on for at least 1us before DATA
+	 * starts coming in.
+ */
+ if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
+ (minor == 0x49)))
+ msm_host->use_14lpp_dll = true;
+
+	/* Fake 3.0V support for SDIO devices which require such voltage */
+ if (msm_host->pdata->core_3_0v_support) {
+ caps |= CORE_3_0V_SUPPORT;
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ SDHCI_CAPABILITIES) | caps), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ }
+
+ if ((major == 1) && (minor >= 0x49))
+ msm_host->rclk_delay_fix = true;
+ /*
+	 * Mask 64-bit support on controllers with a 32-bit address bus so that
+	 * the smaller descriptor size is used, reducing memory consumption.
+ */
+ if (!msm_host->pdata->largeaddressbus)
+ caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
+
+ writel_relaxed(caps, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ /* keep track of the value in SDHCI_CAPABILITIES */
+ msm_host->caps_0 = caps;
+
+ if ((major == 1) && (minor >= 0x6b))
+ msm_host->ice_hci_support = true;
+}
+
+#ifdef CONFIG_MMC_CQ_HCI
+static void sdhci_msm_cmdq_init(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (nocmdq) {
+ dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
+ return;
+ }
+
+ host->cq_host = cmdq_pltfm_init(pdev);
+ if (IS_ERR(host->cq_host)) {
+ dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
+ PTR_ERR(host->cq_host));
+ host->cq_host = NULL;
+ } else {
+ msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
+ }
+}
+#else
+static void sdhci_msm_cmdq_init(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+
+}
+#endif
+
+static bool sdhci_msm_is_bootdevice(struct device *dev)
+{
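+	/*
+	 * Compare this device's name against the androidboot.bootdevice=
+	 * cmdline argument so that probing can be skipped for controllers
+	 * that are not the boot device.
+	 */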
+ if (strnstr(saved_command_line, "androidboot.bootdevice=",
+ strlen(saved_command_line))) {
+ char search_string[50];
+
+ snprintf(search_string, ARRAY_SIZE(search_string),
+ "androidboot.bootdevice=%s", dev_name(dev));
+ if (strnstr(saved_command_line, search_string,
+ strlen(saved_command_line)))
+ return true;
+ else
+ return false;
+ }
+
+ /*
+	 * If the "androidboot.bootdevice=" argument is not present, return
+	 * true since we cannot tell what the boot device is anyway.
+ */
+ return true;
+}
static int sdhci_msm_probe(struct platform_device *pdev)
{
+ const struct sdhci_msm_offset *msm_host_offset;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
- struct resource *core_memres;
- int ret;
- u16 host_version, core_minor;
- u32 core_version, caps;
- u8 core_major;
+ struct resource *core_memres = NULL;
+ int ret = 0, dead = 0;
+ u16 host_version;
+ u32 irq_status, irq_ctl;
+ struct resource *tlmm_memres = NULL;
+ void __iomem *tlmm_mem;
+ unsigned long flags;
- host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
- if (IS_ERR(host))
- return PTR_ERR(host);
+ pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
+ GFP_KERNEL);
+ if (!msm_host) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
+ msm_host->mci_removed = true;
+ msm_host->offset = &sdhci_msm_offset_mci_removed;
+ } else {
+ msm_host->mci_removed = false;
+ msm_host->offset = &sdhci_msm_offset_mci_present;
+ }
+ msm_host_offset = msm_host->offset;
+ msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
+ host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto out;
+ }
pltfm_host = sdhci_priv(host);
- msm_host = sdhci_pltfm_priv(pltfm_host);
+ pltfm_host->priv = msm_host;
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
- ret = mmc_of_parse(host->mmc);
- if (ret)
- goto pltfm_free;
+ /* Extract platform data */
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "Failed to get slot index %d\n",
+ ret);
+ goto pltfm_free;
+ }
- sdhci_get_of_property(pdev);
+ /* skip the probe if eMMC isn't a boot device */
+ if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
+ ret = -ENODEV;
+ goto pltfm_free;
+ }
+
+ if (disable_slots & (1 << (ret - 1))) {
+ dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
+ ret);
+ ret = -ENODEV;
+ goto pltfm_free;
+ }
+
+ if (ret <= 2)
+ sdhci_slot[ret-1] = msm_host;
+
+ msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
+ msm_host);
+ if (!msm_host->pdata) {
+ dev_err(&pdev->dev, "DT parsing error\n");
+			ret = -EINVAL;
+			goto pltfm_free;
+ }
+ } else {
+ dev_err(&pdev->dev, "No device tree node\n");
+		ret = -ENOENT;
+		goto pltfm_free;
+ }
+
+ /* Setup Clocks */
/* Setup SDCC bus voter clock. */
- msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(msm_host->bus_clk)) {
+ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
/* Vote for max. clk rate for max. performance */
ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
if (ret)
@@ -567,113 +4192,379 @@
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
- dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
- goto bus_clk_disable;
+ msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (!IS_ERR(msm_host->pclk)) {
+ ret = clk_prepare_enable(msm_host->pclk);
+ if (ret)
+ goto bus_clk_disable;
}
-
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ atomic_set(&msm_host->controller_clock, 1);
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
+ msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(msm_host->clk)) {
ret = PTR_ERR(msm_host->clk);
- dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
goto pclk_disable;
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
+ /* Set to the minimum supported clock frequency */
+ ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
+ if (ret) {
+ dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
+ goto pclk_disable;
+ }
ret = clk_prepare_enable(msm_host->clk);
if (ret)
goto pclk_disable;
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+ msm_host->clk_rate = sdhci_msm_get_min_clock(host);
+ atomic_set(&msm_host->clks_on, 1);
- if (IS_ERR(msm_host->core_mem)) {
- dev_err(&pdev->dev, "Failed to remap registers\n");
- ret = PTR_ERR(msm_host->core_mem);
- goto clk_disable;
+ /* Setup CDC calibration fixed feedback clock */
+ msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
+ if (!IS_ERR(msm_host->ff_clk)) {
+ ret = clk_prepare_enable(msm_host->ff_clk);
+ if (ret)
+ goto clk_disable;
+ }
+
+ /* Setup CDC calibration sleep clock */
+ msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+ if (!IS_ERR(msm_host->sleep_clk)) {
+ ret = clk_prepare_enable(msm_host->sleep_clk);
+ if (ret)
+ goto ff_clk_disable;
+ }
+
+ msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
+ ret = sdhci_msm_bus_register(msm_host, pdev);
+ if (ret)
+ goto sleep_clk_disable;
+
+ if (msm_host->msm_bus_vote.client_handle)
+ INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
+ sdhci_msm_bus_work);
+ sdhci_msm_bus_voting(host, 1);
+
+ /* Setup regulators */
+ ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
+ if (ret) {
+ dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
+ goto bus_unregister;
}
/* Reset the core and Enable SDHC mode */
- writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
- CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+ core_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "core_mem");
+ if (!msm_host->mci_removed) {
+ if (!core_memres) {
+ dev_err(&pdev->dev, "Failed to get iomem resource\n");
+			ret = -ENODEV;
+			goto vreg_deinit;
+ }
+ msm_host->core_mem = devm_ioremap(&pdev->dev,
+ core_memres->start, resource_size(core_memres));
- /* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */
- usleep_range(1000, 5000);
- if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
- dev_err(&pdev->dev, "Stuck in reset\n");
- ret = -ETIMEDOUT;
- goto clk_disable;
+ if (!msm_host->core_mem) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = -ENOMEM;
+ goto vreg_deinit;
+ }
}
- /* Set HC_MODE_EN bit in HC_MODE register */
- writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ tlmm_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tlmm_mem");
+ if (tlmm_memres) {
+ tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
+ resource_size(tlmm_memres));
+
+ if (!tlmm_mem) {
+ dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
+ ret = -ENOMEM;
+ goto vreg_deinit;
+ }
+ writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
+ dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
+ &tlmm_memres->start, readl_relaxed(tlmm_mem));
+ }
+
+ /*
+ * Reset the vendor spec register to power on reset state.
+ */
+ writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
+ if (!msm_host->mci_removed) {
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+
+ /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
+ writel_relaxed(readl_relaxed(msm_host->core_mem +
+ CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
+ msm_host->core_mem + CORE_HC_MODE);
+ }
+ sdhci_set_default_hw_caps(msm_host, host);
+
+ /*
+	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
+ * be used as required later on.
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) |
+ CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ /*
+ * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
+ * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
+ * interrupt in GIC (by registering the interrupt handler), we need to
+ * ensure that any pending power irq interrupt status is acknowledged
+ * otherwise power irq interrupt handler would be fired prematurely.
+ */
+ irq_status = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS);
+ sdhci_msm_writel_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+ irq_ctl = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL);
+
+ if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
+ if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
+ irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
+ sdhci_msm_writel_relaxed(irq_ctl, host,
+ msm_host_offset->CORE_PWRCTL_CTL);
+
+ /*
+	 * Ensure that the above writes are propagated before interrupt enablement
+ * in GIC.
+ */
+ mb();
+
+ /*
+ * Following are the deviations from SDHC spec v3.0 -
+ * 1. Card detection is handled using separate GPIO.
+ * 2. Bus power control is handled by interacting with PMIC.
+ */
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+ host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
+ host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+ host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
+ host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
+ host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
+
+ if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+ host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
- SDHCI_VENDOR_VER_SHIFT));
-
- core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
- core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
- CORE_VERSION_MAJOR_SHIFT;
- core_minor = core_version & CORE_VERSION_MINOR_MASK;
- dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
- core_version, core_major, core_minor);
-
- /*
- * Support for some capabilities is not advertised by newer
- * controller versions and must be explicitly enabled.
- */
- if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
- caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
- caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
- writel_relaxed(caps, host->ioaddr +
- CORE_VENDOR_SPEC_CAPABILITIES0);
+ SDHCI_VENDOR_VER_SHIFT));
+ if (((host_version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
+ /*
+ * Add 40us delay in interrupt handler when
+		 * operating at initialization frequency (400KHz).
+ */
+ host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
+ /*
+ * Set Software Reset for DAT line in Software
+ * Reset Register (Bit 2).
+ */
+ host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
}
- /* Setup IRQ for handling power/voltage tasks with PMIC */
+ host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
+
+ /* Setup PWRCTL irq */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
- dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
- msm_host->pwr_irq);
- ret = msm_host->pwr_irq;
- goto clk_disable;
+ dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
+ msm_host->pwr_irq);
+		ret = msm_host->pwr_irq;
+		goto vreg_deinit;
}
-
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
if (ret) {
- dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
- goto clk_disable;
+ dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
+ msm_host->pwr_irq, ret);
+ goto vreg_deinit;
}
+ /* Enable pwr irq interrupts */
+ sdhci_msm_writel_relaxed(INT_MASK, host,
+ msm_host_offset->CORE_PWRCTL_MASK);
+
+#ifdef CONFIG_MMC_CLKGATE
+ /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
+ msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
+#endif
+
+ /* Set host capabilities */
+ msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
+ msm_host->mmc->caps |= msm_host->pdata->caps;
+ msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+ msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ msm_host->mmc->caps2 |= msm_host->pdata->caps2;
+ msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+ msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
+ msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+ msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
+ msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
+ msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
+ msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+
+ if (msm_host->pdata->nonremovable)
+ msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+ if (msm_host->pdata->nonhotplug)
+ msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
+
+ msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
+
+ init_completion(&msm_host->pwr_irq_completion);
+
+ if (gpio_is_valid(msm_host->pdata->status_gpio)) {
+ /*
+ * Set up the card detect GPIO in active configuration before
+ * configuring it as an IRQ. Otherwise, it can be in some
+ * weird/inconsistent state resulting in flood of interrupts.
+ */
+ sdhci_msm_setup_pins(msm_host->pdata, true);
+
+ /*
+ * This delay is needed for stabilizing the card detect GPIO
+ * line after changing the pull configs.
+ */
+ usleep_range(10000, 10500);
+ ret = mmc_gpio_request_cd(msm_host->mmc,
+ msm_host->pdata->status_gpio, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
+ __func__, ret);
+ goto vreg_deinit;
+ }
+ }
+
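+	/* Use a 64-bit DMA mask if both controller and platform support it */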
+ if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
+ (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
+ host->dma_mask = DMA_BIT_MASK(64);
+ mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+ mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
+ } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
+ host->dma_mask = DMA_BIT_MASK(32);
+ mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+ mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
+ } else {
+ dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
+ }
+
+ msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
+ "sdiowakeup_irq");
+ if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+ dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
+ msm_host->pdata->sdiowakeup_irq);
+ msm_host->is_sdiowakeup_enabled = true;
+ ret = request_irq(msm_host->pdata->sdiowakeup_irq,
+ sdhci_msm_sdiowakeup_irq,
+ IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ "sdhci-msm sdiowakeup", host);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
+ __func__, msm_host->pdata->sdiowakeup_irq, ret);
+ msm_host->pdata->sdiowakeup_irq = -1;
+ msm_host->is_sdiowakeup_enabled = false;
+ goto vreg_deinit;
+ } else {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+ msm_host->sdio_pending_processing = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ }
+
+ sdhci_msm_cmdq_init(host, pdev);
ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
+ goto vreg_deinit;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
+ msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
+ sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
+ msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+ msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev,
+ &msm_host->msm_bus_vote.max_bus_bw);
if (ret)
- goto clk_disable;
+ goto remove_host;
- return 0;
+ if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
+ msm_host->polling.show = show_polling;
+ msm_host->polling.store = store_polling;
+ sysfs_attr_init(&msm_host->polling.attr);
+ msm_host->polling.attr.name = "polling";
+ msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev, &msm_host->polling);
+ if (ret)
+ goto remove_max_bus_bw_file;
+ }
+ msm_host->auto_cmd21_attr.show = show_auto_cmd21;
+ msm_host->auto_cmd21_attr.store = store_auto_cmd21;
+ sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
+ msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
+ msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+ if (ret) {
+ pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+ }
+ /* Successful initialization */
+ goto out;
+
+remove_max_bus_bw_file:
+ device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+remove_host:
+ dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ pm_runtime_disable(&pdev->dev);
+ sdhci_remove_host(host, dead);
+vreg_deinit:
+ sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+bus_unregister:
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ sdhci_msm_bus_unregister(msm_host);
+sleep_clk_disable:
+ if (!IS_ERR(msm_host->sleep_clk))
+ clk_disable_unprepare(msm_host->sleep_clk);
+ff_clk_disable:
+ if (!IS_ERR(msm_host->ff_clk))
+ clk_disable_unprepare(msm_host->ff_clk);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
bus_clk_disable:
- if (!IS_ERR(msm_host->bus_clk))
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
sdhci_pltfm_free(pdev);
+out:
+ pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
return ret;
}
@@ -681,29 +4572,249 @@
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
- 0xffffffff);
+ 0xffffffff);
+ pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
+ if (!gpio_is_valid(msm_host->pdata->status_gpio))
+ device_remove_file(&pdev->dev, &msm_host->polling);
+ device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+ pm_runtime_disable(&pdev->dev);
sdhci_remove_host(host, dead);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
- if (!IS_ERR(msm_host->bus_clk))
- clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
+
+ sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+
+ sdhci_msm_setup_pins(pdata, true);
+ sdhci_msm_setup_pins(pdata, false);
+
+ if (msm_host->msm_bus_vote.client_handle) {
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ sdhci_msm_bus_unregister(msm_host);
+ }
return 0;
}
+#ifdef CONFIG_PM
+static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+ sdhci_is_valid_gpio_wakeup_int(msm_host) &&
+ mmc_card_wake_sdio_irq(host->mmc))) {
+ msm_host->sdio_pending_processing = false;
+ return 1;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (enable) {
+ /* configure DAT1 gpio if applicable */
+ if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+ msm_host->sdio_pending_processing = false;
+ ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+ if (!ret)
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
+ goto out;
+ } else {
+ pr_err("%s: sdiowakeup_irq(%d) invalid\n",
+ mmc_hostname(host->mmc), enable);
+ }
+ } else {
+ if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+ ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+ msm_host->sdio_pending_processing = false;
+ } else {
+			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
+ mmc_hostname(host->mmc), enable);
+
+ }
+ }
+out:
+ if (ret)
+ pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
+ mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
+ ret, msm_host->pdata->sdiowakeup_irq);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return ret;
+}
+
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ ktime_t start = ktime_get();
+
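+	/* For SDIO cards, keep the host irq enabled so card irqs are still served */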
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+ goto defer_disable_host_irq;
+
+ sdhci_cfg_irq(host, false, true);
+
+defer_disable_host_irq:
+ disable_irq(msm_host->pwr_irq);
+
+ /*
+ * Remove the vote immediately only if clocks are off in which
+ * case we might have queued work to remove vote but it may not
+ * be completed before runtime suspend or system suspend.
+ */
+ if (!atomic_read(&msm_host->clks_on)) {
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ }
+ trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+
+ return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ ktime_t start = ktime_get();
+
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+ goto defer_enable_host_irq;
+
+ sdhci_cfg_irq(host, true, true);
+
+defer_enable_host_irq:
+ enable_irq(msm_host->pwr_irq);
+
+ trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return 0;
+}
+
+static int sdhci_msm_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+ int sdio_cfg = 0;
+ ktime_t start = ktime_get();
+
+ if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+ (msm_host->mmc->slot.cd_irq >= 0))
+ disable_irq(msm_host->mmc->slot.cd_irq);
+
+ if (pm_runtime_suspended(dev)) {
+ pr_debug("%s: %s: already runtime suspended\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+ ret = sdhci_msm_runtime_suspend(dev);
+out:
+ sdhci_msm_disable_controller_clock(host);
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
+ if (sdio_cfg)
+ sdhci_cfg_irq(host, false, true);
+ }
+
+ trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return ret;
+}
+
+static int sdhci_msm_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+ int sdio_cfg = 0;
+ ktime_t start = ktime_get();
+
+ if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+ (msm_host->mmc->slot.cd_irq >= 0))
+ enable_irq(msm_host->mmc->slot.cd_irq);
+
+ if (pm_runtime_suspended(dev)) {
+ pr_debug("%s: %s: runtime suspended, defer system resume\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ ret = sdhci_msm_runtime_resume(dev);
+out:
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
+ if (sdio_cfg)
+ sdhci_cfg_irq(host, true, true);
+ }
+
+ trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return ret;
+}
+
+static int sdhci_msm_suspend_noirq(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+
+ /*
+ * ksdioirqd may be running, hence retry
+ * suspend in case the clocks are ON
+ */
+ if (atomic_read(&msm_host->clks_on)) {
+ pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
+ mmc_hostname(host->mmc), __func__);
+ ret = -EAGAIN;
+ }
+
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+ if (msm_host->sdio_pending_processing)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static const struct dev_pm_ops sdhci_msm_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
+ SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
+ NULL)
+ .suspend_noirq = sdhci_msm_suspend_noirq,
+};
+
+#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
+
+#else
+#define SDHCI_MSM_PMOPS NULL
+#endif
+static const struct of_device_id sdhci_msm_dt_match[] = {
+ {.compatible = "qcom,sdhci-msm"},
+ {.compatible = "qcom,sdhci-msm-v5"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
static struct platform_driver sdhci_msm_driver = {
- .probe = sdhci_msm_probe,
- .remove = sdhci_msm_remove,
- .driver = {
- .name = "sdhci_msm",
- .of_match_table = sdhci_msm_dt_match,
+ .probe = sdhci_msm_probe,
+ .remove = sdhci_msm_remove,
+ .driver = {
+ .name = "sdhci_msm",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_msm_dt_match,
+ .pm = SDHCI_MSM_PMOPS,
},
};
module_platform_driver(sdhci_msm_driver);
-MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
new file mode 100644
index 0000000..53b1953
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_MSM_H__
+#define __SDHCI_MSM_H__
+
+#include <linux/mmc/mmc.h>
+#include <linux/pm_qos.h>
+#include "sdhci-pltfm.h"
+
+/* This structure keeps information per regulator */
+struct sdhci_msm_reg_data {
+ /* voltage regulator handle */
+ struct regulator *reg;
+ /* regulator name */
+ const char *name;
+ /* voltage level to be set */
+ u32 low_vol_level;
+ u32 high_vol_level;
+ /* Load values for low power and high power mode */
+ u32 lpm_uA;
+ u32 hpm_uA;
+
+ /* is this regulator enabled? */
+ bool is_enabled;
+	/* does this regulator need to be always on? */
+ bool is_always_on;
+ /* is low power mode setting required for this regulator? */
+ bool lpm_sup;
+ bool set_voltage_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for a SDCC slot.
+ */
+struct sdhci_msm_slot_reg_data {
+ /* keeps VDD/VCC regulator info */
+ struct sdhci_msm_reg_data *vdd_data;
+ /* keeps VDD IO regulator info */
+ struct sdhci_msm_reg_data *vdd_io_data;
+};
+
+struct sdhci_msm_gpio {
+ u32 no;
+ const char *name;
+ bool is_enabled;
+};
+
+struct sdhci_msm_gpio_data {
+ struct sdhci_msm_gpio *gpio;
+ u8 size;
+};
+
+struct sdhci_msm_pin_data {
+ /*
+ * = 1 if controller pins are using gpios
+ * = 0 if controller has dedicated MSM pads
+ */
+ u8 is_gpio;
+ struct sdhci_msm_gpio_data *gpio_data;
+};
+
+struct sdhci_pinctrl_data {
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pins_active;
+ struct pinctrl_state *pins_sleep;
+};
+
+struct sdhci_msm_bus_voting_data {
+ struct msm_bus_scale_pdata *bus_pdata;
+ unsigned int *bw_vecs;
+ unsigned int bw_vecs_size;
+};
+
+struct sdhci_msm_cpu_group_map {
+ int nr_groups;
+ cpumask_t *mask;
+};
+
+struct sdhci_msm_pm_qos_latency {
+ s32 latency[SDHCI_POWER_POLICY_NUM];
+};
+
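+/* Aggregated PM QoS settings: IRQ voting plus cmdq/legacy CPU group voting */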
+struct sdhci_msm_pm_qos_data {
+ struct sdhci_msm_cpu_group_map cpu_group_map;
+ enum pm_qos_req_type irq_req_type;
+ int irq_cpu;
+ struct sdhci_msm_pm_qos_latency irq_latency;
+ struct sdhci_msm_pm_qos_latency *cmdq_latency;
+ struct sdhci_msm_pm_qos_latency *latency;
+ bool irq_valid;
+ bool cmdq_valid;
+ bool legacy_valid;
+};
+
+/*
+ * PM QoS group voting management - each defined cpu group is
+ * associated with one instance of this structure.
+ */
+struct sdhci_msm_pm_qos_group {
+ struct pm_qos_request req;
+ struct delayed_work unvote_work;
+ atomic_t counter;
+ s32 latency;
+};
+
+/* PM QoS HW IRQ voting */
+struct sdhci_msm_pm_qos_irq {
+ struct pm_qos_request req;
+ struct delayed_work unvote_work;
+ struct device_attribute enable_attr;
+ struct device_attribute status_attr;
+ atomic_t counter;
+ s32 latency;
+ bool enabled;
+};
+
+struct sdhci_msm_pltfm_data {
+ /* Supported UHS-I Modes */
+ u32 caps;
+
+ /* More capabilities */
+ u32 caps2;
+
+ unsigned long mmc_bus_width;
+ struct sdhci_msm_slot_reg_data *vreg_data;
+ bool nonremovable;
+ bool nonhotplug;
+ bool largeaddressbus;
+ bool pin_cfg_sts;
+ struct sdhci_msm_pin_data *pin_data;
+ struct sdhci_pinctrl_data *pctrl_data;
+ int status_gpio; /* card detection GPIO that is configured as IRQ */
+ struct sdhci_msm_bus_voting_data *voting_data;
+ u32 *sup_clk_table;
+ unsigned char sup_clk_cnt;
+ int sdiowakeup_irq;
+ u32 *sup_ice_clk_table;
+ unsigned char sup_ice_clk_cnt;
+ struct sdhci_msm_pm_qos_data pm_qos_data;
+ bool core_3_0v_support;
+ bool sdr104_wa;
+};
+
+struct sdhci_msm_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ bool is_max_bw_needed;
+ struct delayed_work vote_work;
+ struct device_attribute max_bus_bw;
+};
+
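+/*
+ * Inline Crypto Engine (ICE) hookup for this host: vendor ops, the ICE
+ * platform device and init-state tracking.
+ */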
+struct sdhci_msm_ice_data {
+ struct qcom_ice_variant_ops *vops;
+ struct completion async_done;
+ struct platform_device *pdev;
+ int state;
+};
+
+struct sdhci_msm_host {
+ struct platform_device *pdev;
+ void __iomem *core_mem; /* MSM SDCC mapped address */
+ void __iomem *cryptoio; /* ICE HCI mapped address */
+ bool ice_hci_support;
+ int pwr_irq; /* power irq */
+ struct clk *clk; /* main SD/MMC bus clock */
+ struct clk *pclk; /* SDHC peripheral bus clock */
+ struct clk *bus_clk; /* SDHC bus voter clock */
+ struct clk *ff_clk; /* CDC calibration fixed feedback clock */
+ struct clk *sleep_clk; /* CDC calibration sleep clock */
+ struct clk *ice_clk; /* SDHC peripheral ICE clock */
+ atomic_t clks_on; /* Set if clocks are enabled */
+ struct sdhci_msm_pltfm_data *pdata;
+ struct mmc_host *mmc;
+ struct sdhci_pltfm_data sdhci_msm_pdata;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ struct completion pwr_irq_completion;
+ struct sdhci_msm_bus_vote msm_bus_vote;
+ struct device_attribute polling;
+ u32 clk_rate; /* Keeps track of current clock rate that is set */
+ bool tuning_done;
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool en_auto_cmd21;
+ struct device_attribute auto_cmd21_attr;
+ bool is_sdiowakeup_enabled;
+ bool sdio_pending_processing;
+ atomic_t controller_clock;
+ bool use_cdclp533;
+ bool use_updated_dll_reset;
+ bool use_14lpp_dll;
+ bool enhanced_strobe;
+ bool rclk_delay_fix;
+ u32 caps_0;
+ struct sdhci_msm_ice_data ice;
+ u32 ice_clk_rate;
+ struct sdhci_msm_pm_qos_group *pm_qos;
+ int pm_qos_prev_cpu;
+ struct device_attribute pm_qos_group_enable_attr;
+ struct device_attribute pm_qos_group_status_attr;
+ bool pm_qos_group_enable;
+ struct sdhci_msm_pm_qos_irq pm_qos_irq;
+ bool tuning_in_progress;
+ bool mci_removed;
+ const struct sdhci_msm_offset *offset;
+};
+
+extern char *saved_command_line;
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async);
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency);
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency, int cpu);
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async);
+
+
+#endif /* __SDHCI_MSM_H__ */
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 387ae1c..a8b430f 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
#include "sdhci-pltfm.h"
+#define SDMMC_MC1R 0x204
+#define SDMMC_MC1R_DDR BIT(3)
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
@@ -103,11 +105,18 @@
sdhci_set_power_noreg(host, mode, vdd);
}
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+					 unsigned int timing)
+{
+ if (timing == MMC_TIMING_MMC_DDR52)
+ sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+ sdhci_set_uhs_signaling(host, timing);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
.set_power = sdhci_at91_set_power,
};
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1bb11e4..3c27401 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -559,16 +559,19 @@
};
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks = ESDHC_DEFAULT_QUIRKS |
+#ifdef CONFIG_PPC
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+#endif
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_be_ops,
};
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
- | SDHCI_QUIRK_NO_CARD_NO_RESET
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks = ESDHC_DEFAULT_QUIRKS |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_le_ops,
};
@@ -623,8 +626,7 @@
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
of_device_is_compatible(np, "fsl,p1020-esdhc") ||
- of_device_is_compatible(np, "fsl,t1040-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+ of_device_is_compatible(np, "fsl,t1040-esdhc"))
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3280f20..33b4fa6 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -23,6 +23,7 @@
struct sdhci_pltfm_host {
struct clk *clk;
+ void *priv; /* to handle quirks across io-accessor calls */
/* migrate from sdhci_of_host */
unsigned int clock;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a983ba0..53a6ae8 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -30,8 +30,12 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
+
+#include <trace/events/mmc.h>
#include "sdhci.h"
+#include "cmdq_hci.h"
#define DRIVER_NAME "sdhci"
@@ -40,67 +44,114 @@
#define MAX_TUNING_LOOP 40
+#define SDHCI_DBG_DUMP_RS_INTERVAL (10 * HZ)
+#define SDHCI_DBG_DUMP_RS_BURST 2
+
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
+static bool sdhci_check_state(struct sdhci_host *);
+
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable);
+
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static void sdhci_dump_state(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+#ifdef CONFIG_MMC_CLKGATE
+	pr_info("%s: clk: %d clk-gated: %d claimer: %s pwr: %d host->irq = %d\n",
+		mmc_hostname(mmc), host->clock, mmc->clk_gated,
+		mmc->claimer->comm, host->pwr,
+		(host->flags & SDHCI_HOST_IRQ_STATUS));
+#else
+	pr_info("%s: clk: %d claimer: %s pwr: %d\n",
+		mmc_hostname(mmc), host->clock,
+		mmc->claimer->comm, host->pwr);
+#endif
+ pr_info("%s: rpmstatus[pltfm](runtime-suspend:usage_count:disable_depth)(%d:%d:%d)\n",
+ mmc_hostname(mmc), mmc->parent->power.runtime_status,
+ atomic_read(&mmc->parent->power.usage_count),
+ mmc->parent->power.disable_depth);
+}
+
static void sdhci_dumpregs(struct sdhci_host *host)
{
- pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
- mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc,
+ "%s: 0x04=0x%08x 0x06=0x%08x 0x0E=0x%08x 0x30=0x%08x 0x34=0x%08x 0x38=0x%08x\n",
+ __func__,
+ sdhci_readw(host, SDHCI_BLOCK_SIZE),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT),
+ sdhci_readw(host, SDHCI_COMMAND),
+ sdhci_readl(host, SDHCI_INT_STATUS),
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+ mmc_stop_tracing(host->mmc);
- pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ mmc_hostname(host->mmc));
+
+ pr_info(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
sdhci_readw(host, SDHCI_HOST_VERSION));
- pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
sdhci_readw(host, SDHCI_BLOCK_SIZE),
sdhci_readw(host, SDHCI_BLOCK_COUNT));
- pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
sdhci_readl(host, SDHCI_ARGUMENT),
sdhci_readw(host, SDHCI_TRANSFER_MODE));
- pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
sdhci_readl(host, SDHCI_PRESENT_STATE),
sdhci_readb(host, SDHCI_HOST_CONTROL));
- pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
sdhci_readb(host, SDHCI_POWER_CONTROL),
sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
sdhci_readl(host, SDHCI_INT_STATUS));
- pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
sdhci_readl(host, SDHCI_INT_ENABLE),
sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
- sdhci_readw(host, SDHCI_ACMD12_ERR),
+ pr_info(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ host->auto_cmd_err_sts,
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_CAPABILITIES_1));
- pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
- pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Resp 1: 0x%08x | Resp 0: 0x%08x\n",
+ sdhci_readl(host, SDHCI_RESPONSE + 0x4),
+ sdhci_readl(host, SDHCI_RESPONSE));
+ pr_info(DRIVER_NAME ": Resp 3: 0x%08x | Resp 2: 0x%08x\n",
+ sdhci_readl(host, SDHCI_RESPONSE + 0xC),
+ sdhci_readl(host, SDHCI_RESPONSE + 0x8));
+ pr_info(DRIVER_NAME ": Host ctl2: 0x%08x\n",
sdhci_readw(host, SDHCI_HOST_CONTROL2));
if (host->flags & SDHCI_USE_ADMA) {
if (host->flags & SDHCI_USE_64_BIT_DMA)
- pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+ pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
readl(host->ioaddr + SDHCI_ADMA_ERROR),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
else
- pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
readl(host->ioaddr + SDHCI_ADMA_ERROR),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
}
- pr_err(DRIVER_NAME ": ===========================================\n");
+ if (host->ops->dump_vendor_regs)
+ host->ops->dump_vendor_regs(host);
+ sdhci_dump_state(host);
+ pr_info(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
@@ -166,6 +217,7 @@
{
unsigned long timeout;
+retry_reset:
sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
if (mask & SDHCI_RESET_ALL) {
@@ -176,19 +228,58 @@
}
/* Wait max 100 ms */
- timeout = 100;
+ timeout = 100000;
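+	/* polled below in 1 us steps, keeping the same ~100 ms budget */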
+
+ if (host->ops->check_power_status && host->pwr &&
+ (mask & SDHCI_RESET_ALL))
+ host->ops->check_power_status(host, REQ_BUS_OFF);
+
+ /* clear pending normal/error interrupt status */
+ sdhci_writel(host, sdhci_readl(host, SDHCI_INT_STATUS),
+ SDHCI_INT_STATUS);
/* hw clears the bit when it's done */
while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
if (timeout == 0) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND)
+ && host->ops->reset_workaround) {
+ if (!host->reset_wa_applied) {
+ /*
+ * apply the workaround and issue
+ * reset again.
+ */
+ host->ops->reset_workaround(host, 1);
+ host->reset_wa_applied = 1;
+ host->reset_wa_cnt++;
+ goto retry_reset;
+ } else {
+ pr_err("%s: Reset 0x%x failed with workaround\n",
+ mmc_hostname(host->mmc),
+ (int)mask);
+ /* clear the workaround */
+ host->ops->reset_workaround(host, 0);
+ host->reset_wa_applied = 0;
+ }
+ }
+
sdhci_dumpregs(host);
return;
}
timeout--;
- mdelay(1);
+ udelay(1);
}
+
+ if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND) &&
+ host->ops->reset_workaround && host->reset_wa_applied) {
+ pr_info("%s: Reset 0x%x successful with workaround\n",
+ mmc_hostname(host->mmc), (int)mask);
+ /* clear the workaround */
+ host->ops->reset_workaround(host, 0);
+ host->reset_wa_applied = 0;
+ }
+
}
EXPORT_SYMBOL_GPL(sdhci_reset);
@@ -227,7 +318,7 @@
SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
- SDHCI_INT_RESPONSE;
+ SDHCI_INT_RESPONSE | SDHCI_INT_AUTO_CMD_ERR;
if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
host->tuning_mode == SDHCI_TUNING_MODE_3)
@@ -274,9 +365,12 @@
struct sdhci_host *host = container_of(led, struct sdhci_host, led);
unsigned long flags;
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
- if (host->runtime_suspended)
+ if (host->runtime_suspended || sdhci_check_state(host))
goto out;
if (brightness == LED_OFF)
@@ -622,6 +716,9 @@
void *align;
char *buffer;
unsigned long flags;
+ u32 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+
+ trace_mmc_adma_table_post(command, data->sg_len);
if (data->flags & MMC_DATA_READ) {
bool has_unaligned = false;
@@ -660,6 +757,7 @@
u8 count;
struct mmc_data *data = cmd->data;
unsigned target_timeout, current_timeout;
+ u32 curr_clk = 0; /* In KHz */
/*
* If the host controller provides us with an incorrect timeout
@@ -705,7 +803,14 @@
* (1) / (2) > 2^6
*/
count = 0;
- current_timeout = (1 << 13) * 1000 / host->timeout_clk;
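+	/*
+	 * With SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK the timeout ticks derive
+	 * from the current bus clock (optionally divided by 4) instead of
+	 * timeout_clk.
+	 */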
+ if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK) {
+ curr_clk = host->clock / 1000;
+ if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
+ curr_clk /= 4;
+ current_timeout = (1 << 13) * 1000 / curr_clk;
+ } else {
+ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ }
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
@@ -713,10 +818,12 @@
break;
}
- if (count >= 0xF) {
- DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
- count = 0xE;
+ if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
+ count = 0xE;
+ }
}
return count;
@@ -748,6 +855,17 @@
}
}
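+/*
+ * ADMA manages its own buffer boundaries, so program a zero SDMA
+ * boundary in that case; legacy SDMA keeps the caller-supplied
+ * boundary argument.
+ */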
+static void sdhci_set_blk_size_reg(struct sdhci_host *host, unsigned int blksz,
+ unsigned int sdma_boundary)
+{
+ if (host->flags & SDHCI_USE_ADMA)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(0, blksz),
+ SDHCI_BLOCK_SIZE);
+ else
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(sdma_boundary, blksz),
+ SDHCI_BLOCK_SIZE);
+}
+
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
u8 ctrl;
@@ -762,7 +880,7 @@
WARN_ON(host->data);
/* Sanity checks */
- BUG_ON(data->blksz * data->blocks > 524288);
+ BUG_ON(data->blksz * data->blocks > host->mmc->max_req_size);
BUG_ON(data->blksz > host->mmc->max_blk_size);
BUG_ON(data->blocks > 65535);
@@ -777,6 +895,10 @@
host->flags |= SDHCI_REQ_USE_DMA;
+ if ((host->quirks2 & SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING) &&
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+
/*
* FIXME: This doesn't account for merging when mapping the
* scatterlist.
@@ -831,6 +953,7 @@
WARN_ON(1);
host->flags &= ~SDHCI_REQ_USE_DMA;
} else if (host->flags & SDHCI_USE_ADMA) {
+ trace_mmc_adma_table_pre(cmd->opcode, data->sg_len);
sdhci_adma_table_pre(host, data, sg_cnt);
sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
@@ -880,9 +1003,13 @@
sdhci_set_transfer_irqs(host);
/* Set the DMA boundary value and block size */
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
- data->blksz), SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, data->blksz, SDHCI_DEFAULT_BOUNDARY_ARG);
sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ MMC_TRACE(host->mmc,
+ "%s: 0x28=0x%08x 0x3E=0x%08x 0x06=0x%08x\n", __func__,
+ sdhci_readb(host, SDHCI_HOST_CONTROL),
+ sdhci_readw(host, SDHCI_HOST_CONTROL2),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
}
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
@@ -931,12 +1058,26 @@
}
}
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
mode |= SDHCI_TRNS_READ;
+ if (host->ops->toggle_cdr) {
+ if ((cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK))
+ host->ops->toggle_cdr(host, false);
+ else
+ host->ops->toggle_cdr(host, true);
+ }
+ }
+ if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+ host->ops->toggle_cdr(host, false);
if (host->flags & SDHCI_REQ_USE_DMA)
mode |= SDHCI_TRNS_DMA;
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+ MMC_TRACE(host->mmc, "%s: 0x00=0x%08x 0x0C=0x%08x\n", __func__,
+ sdhci_readw(host, SDHCI_ARGUMENT2),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE));
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
@@ -997,6 +1138,8 @@
host->data = NULL;
host->data_cmd = NULL;
+ MMC_TRACE(host->mmc, "%s: 0x24=0x%08x\n", __func__,
+ sdhci_readl(host, SDHCI_PRESENT_STATE));
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
(SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
sdhci_adma_table_post(host, data);
@@ -1082,7 +1225,7 @@
cmd->flags |= MMC_RSP_BUSY;
/* Wait max 10 ms */
- timeout = 10;
+ timeout = 10000;
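+	/* polled in 1 us steps, preserving the original 10 ms budget */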
mask = SDHCI_CMD_INHIBIT;
if (sdhci_data_line_cmd(cmd))
@@ -1103,7 +1246,7 @@
return;
}
timeout--;
- mdelay(1);
+ udelay(1);
}
timeout = jiffies;
@@ -1152,6 +1295,14 @@
cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
flags |= SDHCI_CMD_DATA;
+ if (cmd->data)
+ host->data_start_time = ktime_get();
+ trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
+ MMC_TRACE(host->mmc,
+ "%s: updated 0x8=0x%08x 0xC=0x%08x 0xE=0x%08x\n", __func__,
+ sdhci_readl(host, SDHCI_ARGUMENT),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE),
+ sdhci_readw(host, SDHCI_COMMAND));
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1174,8 +1325,14 @@
sdhci_readb(host,
SDHCI_RESPONSE + (3-i)*4-1);
}
+ MMC_TRACE(host->mmc,
+ "%s: resp 0: 0x%08x resp 1: 0x%08x resp 2: 0x%08x resp 3: 0x%08x\n",
+ __func__, cmd->resp[0], cmd->resp[1],
+ cmd->resp[2], cmd->resp[3]);
} else {
cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ MMC_TRACE(host->mmc, "%s: resp 0: 0x%08x\n",
+ __func__, cmd->resp[0]);
}
}
@@ -1335,6 +1492,10 @@
clock_set:
if (real_div)
*actual_clock = (host->max_clk * clk_mul) / real_div;
+
+ if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+ div = 0;
+
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1350,7 +1511,8 @@
host->mmc->actual_clock = 0;
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ if (host->clock)
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
@@ -1371,9 +1533,7 @@
return;
}
timeout--;
- spin_unlock_irq(&host->lock);
usleep_range(900, 1100);
- spin_lock_irq(&host->lock);
}
clk |= SDHCI_CLOCK_CARD_EN;
@@ -1428,6 +1588,8 @@
if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_OFF);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
} else {
@@ -1435,20 +1597,27 @@
* Spec says that we should clear the power reg before setting
* a new value. Some controllers don't seem to like this though.
*/
- if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_OFF);
+ }
/*
* At least the Marvell CaFe chip gets confused if we set the
* voltage and set turn on power at the same time, so set the
* voltage first.
*/
- if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_ON);
+ }
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_ON);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_on(host);
@@ -1479,6 +1648,105 @@
* *
\*****************************************************************************/
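+
+/* mmc_host enable()/disable() hooks: vote/unvote platform bus bandwidth */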
+static int sdhci_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->platform_bus_voting)
+ host->ops->platform_bus_voting(host, 1);
+
+ return 0;
+}
+
+static int sdhci_disable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->platform_bus_voting)
+ host->ops->platform_bus_voting(host, 0);
+
+ return 0;
+}
+
+static void sdhci_notify_halt(struct mmc_host *mmc, bool halt)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ pr_debug("%s: halt notification was sent, halt=%d\n",
+ mmc_hostname(mmc), halt);
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
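+		/*
+		 * Halted mode presumably requires the larger 16-byte
+		 * ADMA descriptors; restore the default otherwise.
+		 */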
+ if (halt)
+ host->desc_sz = 16;
+ else
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+ }
+}
+
+static inline void sdhci_update_power_policy(struct sdhci_host *host,
+ enum sdhci_power_policy policy)
+{
+ host->power_policy = policy;
+}
+
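+/*
+ * Map the generic mmc load hints onto the host power policy, then give
+ * the vendor notify_load hook a chance to act on them.
+ */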
+static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
+{
+ int err = 0;
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ switch (state) {
+ case MMC_LOAD_HIGH:
+ sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
+ break;
+ case MMC_LOAD_LOW:
+ sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (host->ops->notify_load)
+ err = host->ops->notify_load(host, state);
+
+ return err;
+}
+
+static bool sdhci_check_state(struct sdhci_host *host)
+{
+	/* the host is unusable with the clock or power off */
+	return !host->clock || !host->pwr;
+}
+
+static bool sdhci_check_auto_tuning(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
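+	/* only read/SDIO-extended data commands at >= 100 MHz qualify */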
+ if (((cmd->opcode != MMC_READ_SINGLE_BLOCK) &&
+ (cmd->opcode != MMC_READ_MULTIPLE_BLOCK) &&
+ (cmd->opcode != SD_IO_RW_EXTENDED)) || (host->clock < 100000000))
+ return false;
+ else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+ host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+ return true;
+ else
+ return false;
+}
+
+static int sdhci_get_tuning_cmd(struct sdhci_host *host)
+{
+ if (!host->mmc || !host->mmc->card)
+ return 0;
+	/*
+	 * If we are here, all auto-tuning preconditions have already been
+	 * met and the card is either an eMMC or SD/SDIO.
+	 */
+ if (mmc_card_mmc(host->mmc->card))
+ return MMC_SEND_TUNING_BLOCK_HS200;
+ else
+ return MMC_SEND_TUNING_BLOCK;
+}
+
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
@@ -1487,12 +1755,39 @@
host = mmc_priv(mmc);
- /* Firstly check card presence */
+ if (sdhci_check_state(host)) {
+ sdhci_dump_state(host);
+ WARN(1, "sdhci in bad state");
+ mrq->cmd->error = -EIO;
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ host->mrq = NULL;
+ sdhci_dumpregs(host);
+ mmc_request_done(host->mmc, mrq);
+ return;
+ }
+
+ /*
+ * Firstly check card presence from cd-gpio. The return could
+ * be one of the following possibilities:
+ * negative: cd-gpio is not available
+ * zero: cd-gpio is used, and card is removed
+ * one: cd-gpio is used, and card is present
+ */
present = mmc->ops->get_cd(mmc);
+ if (present < 0) {
+ /* If polling, assume that the card is always present. */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ present = 1;
+ else
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+ }
spin_lock_irqsave(&host->lock, flags);
- sdhci_led_activate(host);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ sdhci_led_activate(host);
/*
* Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
@@ -1509,6 +1804,15 @@
mrq->cmd->error = -ENOMEDIUM;
sdhci_finish_mrq(host, mrq);
} else {
+ if (host->ops->config_auto_tuning_cmd) {
+ if (sdhci_check_auto_tuning(host, mrq->cmd))
+ host->ops->config_auto_tuning_cmd(host, true,
+ sdhci_get_tuning_cmd(host));
+ else
+ host->ops->config_auto_tuning_cmd(host, false,
+ sdhci_get_tuning_cmd(host));
+ }
+
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@@ -1565,38 +1869,50 @@
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
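+
+/*
+ * Enable or disable the host IRQ line; the SDHCI_HOST_IRQ_STATUS flag
+ * keeps enable_irq()/disable_irq() calls balanced.
+ */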
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync)
+{
+ if (enable && !(host->flags & SDHCI_HOST_IRQ_STATUS)) {
+ enable_irq(host->irq);
+ host->flags |= SDHCI_HOST_IRQ_STATUS;
+ } else if (!enable && (host->flags & SDHCI_HOST_IRQ_STATUS)) {
+ if (sync)
+ disable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ host->flags &= ~SDHCI_HOST_IRQ_STATUS;
+ }
+}
+EXPORT_SYMBOL(sdhci_cfg_irq);
+
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
u8 ctrl;
-
- spin_lock_irqsave(&host->lock, flags);
+ int ret;
if (host->flags & SDHCI_DEVICE_DEAD) {
- spin_unlock_irqrestore(&host->lock, flags);
if (!IS_ERR(mmc->supply.vmmc) &&
ios->power_mode == MMC_POWER_OFF)
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
return;
}
- /*
- * Reset the chip on each power off.
- * Should clear out any weird states.
- */
- if (ios->power_mode == MMC_POWER_OFF) {
- sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
- sdhci_reinit(host);
- }
-
if (host->version >= SDHCI_SPEC_300 &&
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
- if (!ios->clock || ios->clock != host->clock) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->mmc && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ sdhci_cfg_irq(host, false, false);
+
+ if (ios->clock &&
+ ((ios->clock != host->clock) || (ios->timing != host->timing))) {
+ spin_unlock_irqrestore(&host->lock, flags);
host->ops->set_clock(host, ios->clock);
+ spin_lock_irqsave(&host->lock, flags);
host->clock = ios->clock;
if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
@@ -1611,11 +1927,48 @@
host->mmc->max_busy_timeout /= host->timeout_clk;
}
}
+ if (ios->clock && host->sdio_irq_async_status)
+ sdhci_enable_sdio_irq_nolock(host, false);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /*
+	 * The controller clocks may be off during power-up, and we may end
+	 * up enabling the card clock before giving power to the card.
+	 * Hence, during MMC_POWER_UP enable the controller clock and turn
+	 * on the regulators. mmc_power_up() provides the necessary delay
+	 * before the clocks to the card are turned on.
+ */
+ if (ios->power_mode & MMC_POWER_UP) {
+ if (host->ops->enable_controller_clock) {
+ ret = host->ops->enable_controller_clock(host);
+ if (ret) {
+ pr_err("%s: enabling controller clock: failed: %d\n",
+ mmc_hostname(host->mmc), ret);
+ } else {
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->clock) {
+ if (host->mmc && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ sdhci_cfg_irq(host, true, false);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
if (host->ops->set_power)
host->ops->set_power(host, ios->power_mode, ios->vdd);
else
- sdhci_set_power(host, ios->power_mode, ios->vdd);
+	if (!host->ops->enable_controller_clock &&
+	    (ios->power_mode & (MMC_POWER_UP | MMC_POWER_ON)))
+			sdhci_set_power(host, ios->power_mode, ios->vdd);
+
+ spin_lock_irqsave(&host->lock, flags);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1683,7 +2036,11 @@
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
- host->ops->set_clock(host, host->clock);
+ if (ios->clock) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ host->ops->set_clock(host, host->clock);
+ spin_lock_irqsave(&host->lock, flags);
+ }
}
/* Reset SD Clock Enable */
@@ -1710,10 +2067,15 @@
}
/* Re-enable SD Clock */
- host->ops->set_clock(host, host->clock);
+ if (ios->clock) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ host->ops->set_clock(host, host->clock);
+ spin_lock_irqsave(&host->lock, flags);
+ }
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ spin_unlock_irqrestore(&host->lock, flags);
/*
* Some (ENE) controllers go apeshit on some ios operation,
* signalling timeout and CRC errors even on CMD0. Resetting
@@ -1722,8 +2084,25 @@
if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
- mmiowb();
+ /*
+ * Reset the chip on each power off.
+ * Should clear out any weird states.
+ */
+ if (ios->power_mode == MMC_POWER_OFF) {
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ sdhci_reinit(host);
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+ }
+ if (!ios->clock)
+ host->ops->set_clock(host, ios->clock);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->mmc && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ sdhci_cfg_irq(host, true, false);
spin_unlock_irqrestore(&host->lock, flags);
+
+ mmiowb();
}
static int sdhci_get_cd(struct mmc_host *mmc)
@@ -1806,16 +2185,28 @@
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
- if (!(host->flags & SDHCI_DEVICE_DEAD)) {
- if (enable)
- host->ier |= SDHCI_INT_CARD_INT;
- else
- host->ier &= ~SDHCI_INT_CARD_INT;
+ u16 ctrl = 0;
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
- mmiowb();
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ return;
+
+ if (mmc_card_and_host_support_async_int(host->mmc)) {
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (enable)
+ ctrl |= SDHCI_CTRL_ASYNC_INT_ENABLE;
+ else
+ ctrl &= ~SDHCI_CTRL_ASYNC_INT_ENABLE;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
+
+ if (enable)
+ host->ier |= SDHCI_INT_CARD_INT;
+ else
+ host->ier &= ~SDHCI_INT_CARD_INT;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ mmiowb();
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1823,6 +2214,9 @@
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ if (enable)
+ pm_runtime_get_noresume(host->mmc->parent);
+
spin_lock_irqsave(&host->lock, flags);
if (enable)
host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1831,6 +2225,9 @@
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!enable)
+ pm_runtime_put_noidle(host->mmc->parent);
}
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -1856,6 +2253,8 @@
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
ctrl &= ~SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_IO_HIGH);
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios);
@@ -1895,6 +2294,8 @@
*/
ctrl |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_IO_LOW);
/* Some controller need to do more when switching */
if (host->ops->voltage_switch)
@@ -1950,6 +2351,17 @@
return 0;
}
+static int sdhci_enhanced_strobe(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = 0;
+
+ if (host->ops->enhanced_strobe)
+ err = host->ops->enhanced_strobe(host);
+
+ return err;
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1978,9 +2390,10 @@
switch (host->timing) {
/* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
- err = -EINVAL;
- goto out_unlock;
-
+ if (!(mmc->caps2 & MMC_CAP2_HS400_POST_TUNING)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
case MMC_TIMING_MMC_HS200:
/*
* Periodic re-tuning for HS400 is not expected to be needed, so
@@ -2056,14 +2469,11 @@
*/
if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, 128, 7);
else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, 64, 7);
} else {
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, 64, 7);
}
/*
@@ -2179,6 +2589,9 @@
if (host->version < SDHCI_SPEC_300)
return;
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+ return;
+
/*
* We only enable or disable Preset Value if they are not already
* enabled or disabled respectively. Otherwise, we bail out.
@@ -2214,6 +2627,9 @@
DMA_TO_DEVICE : DMA_FROM_DEVICE);
data->host_cookie = COOKIE_UNMAPPED;
+
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -2275,7 +2691,27 @@
spin_unlock_irqrestore(&host->lock, flags);
}
+static int sdhci_late_init(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->init)
+ host->ops->init(host);
+
+ return 0;
+}
+
+static void sdhci_force_err_irq(struct mmc_host *mmc, u64 errmask)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u16 mask = errmask & 0xFFFF;
+
+ pr_err("%s: Force raise error mask:0x%04x\n", __func__, mask);
+ sdhci_writew(host, mask, SDHCI_SET_INT_ERROR);
+}
+
static const struct mmc_host_ops sdhci_ops = {
+ .init = sdhci_late_init,
.request = sdhci_request,
.post_req = sdhci_post_req,
.pre_req = sdhci_pre_req,
@@ -2287,9 +2723,15 @@
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
+ .enhanced_strobe = sdhci_enhanced_strobe,
.select_drive_strength = sdhci_select_drive_strength,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
+ .enable = sdhci_enable,
+ .disable = sdhci_disable,
+ .notify_load = sdhci_notify_load,
+ .notify_halt = sdhci_notify_halt,
+ .force_err_irq = sdhci_force_err_irq,
};
/*****************************************************************************\
@@ -2362,12 +2804,17 @@
sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
+ } else {
+ if (host->quirks2 & SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT)
+ sdhci_reset(host, SDHCI_RESET_DATA);
}
if (!sdhci_has_requests(host))
- sdhci_led_deactivate(host);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ sdhci_led_deactivate(host);
host->mrqs_done[i] = NULL;
+ host->auto_cmd_err_sts = 0;
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
@@ -2423,6 +2870,11 @@
sdhci_dumpregs(host);
if (host->data) {
+ pr_info("%s: bytes to transfer: %d transferred: %d\n",
+ mmc_hostname(host->mmc),
+ (host->data->blksz * host->data->blocks),
+ (sdhci_readw(host, SDHCI_BLOCK_SIZE) & 0xFFF) *
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
} else if (host->data_cmd) {
@@ -2446,6 +2898,7 @@
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
+ u16 auto_cmd_status;
if (!host->cmd) {
/*
* SDHCI recovers from errors by resetting the cmd and data
@@ -2460,13 +2913,31 @@
return;
}
+ trace_mmc_cmd_rw_end(host->cmd->opcode, intmask,
+ sdhci_readl(host, SDHCI_RESPONSE));
+
if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
- SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
+ SDHCI_INT_END_BIT | SDHCI_INT_INDEX |
+ SDHCI_INT_AUTO_CMD_ERR)) {
if (intmask & SDHCI_INT_TIMEOUT)
host->cmd->error = -ETIMEDOUT;
else
host->cmd->error = -EILSEQ;
+ if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
+ auto_cmd_status = host->auto_cmd_err_sts;
+ pr_err_ratelimited("%s: %s: AUTO CMD err sts 0x%08x\n",
+ mmc_hostname(host->mmc), __func__, auto_cmd_status);
+ if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
+ SDHCI_AUTO_CMD_INDEX_ERR |
+ SDHCI_AUTO_CMD_ENDBIT_ERR))
+ host->cmd->error = -EIO;
+ else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
+ host->cmd->error = -ETIMEDOUT;
+ else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
+ host->cmd->error = -EILSEQ;
+ }
+
/*
* If this command initiates a data phase and a response
* CRC error is signalled, the card can start transferring
@@ -2476,10 +2947,13 @@
* If the card did not receive the command or returned an
* error which prevented it sending data, the data phase
* will time out.
+ *
+	 * Command INDEX or END_BIT errors are handled the same way.
*/
if (host->cmd->data &&
- (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
- SDHCI_INT_CRC) {
+ (((intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+ SDHCI_INT_CRC) || (host->cmd->error == -EILSEQ))) {
host->cmd = NULL;
return;
}
@@ -2528,12 +3002,16 @@
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
u32 command;
+ bool pr_msg = false;
+
+ command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+ trace_mmc_data_rw_end(command, intmask);
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
- command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
- if (command == MMC_SEND_TUNING_BLOCK ||
- command == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (!(host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) &&
+ (command == MMC_SEND_TUNING_BLOCK ||
+ command == MMC_SEND_TUNING_BLOCK_HS200)) {
host->tuning_done = 1;
wake_up(&host->buf_ready_int);
return;
@@ -2568,6 +3046,9 @@
sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
+ if (host->quirks2 &
+ SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD)
+ return;
}
/*
@@ -2590,8 +3071,7 @@
else if (intmask & SDHCI_INT_DATA_END_BIT)
host->data->error = -EILSEQ;
else if ((intmask & SDHCI_INT_DATA_CRC) &&
- SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ (command != MMC_BUS_TEST_R))
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -2600,10 +3080,29 @@
if (host->ops->adma_workaround)
host->ops->adma_workaround(host, intmask);
}
+ if (host->data->error) {
+ if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT
+ | SDHCI_INT_DATA_END_BIT)) {
+ command = SDHCI_GET_CMD(sdhci_readw(host,
+ SDHCI_COMMAND));
+ if ((command != MMC_SEND_TUNING_BLOCK_HS200) &&
+ (command != MMC_SEND_TUNING_BLOCK))
+ pr_msg = true;
+ } else {
+ pr_msg = true;
+ }
+ if (pr_msg && __ratelimit(&host->dbg_dump_rs)) {
+ pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
+ mmc_hostname(host->mmc), intmask,
+ host->data->error, ktime_to_ms(ktime_sub(
+ ktime_get(), host->data_start_time)));
- if (host->data->error)
+ if (!host->mmc->sdr104_wa ||
+ (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104))
+ sdhci_dumpregs(host);
+ }
sdhci_finish_data(host);
- else {
+ } else {
if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
sdhci_transfer_pio(host);
@@ -2649,6 +3148,58 @@
}
}
+#ifdef CONFIG_MMC_CQ_HCI
+static int sdhci_get_cmd_err(u32 intmask)
+{
+ if (intmask & SDHCI_INT_TIMEOUT)
+ return -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+ SDHCI_INT_INDEX))
+ return -EILSEQ;
+ return 0;
+}
+
+static int sdhci_get_data_err(u32 intmask)
+{
+ if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ return -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ return -EILSEQ;
+ else if (intmask & SDHCI_INT_ADMA_ERROR)
+ return -EIO;
+ return 0;
+}
+
+static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
+{
+ int err = 0;
+ u32 mask = 0;
+ irqreturn_t ret;
+
+ if (intmask & SDHCI_INT_CMD_MASK)
+ err = sdhci_get_cmd_err(intmask);
+ else if (intmask & SDHCI_INT_DATA_MASK)
+ err = sdhci_get_data_err(intmask);
+
+ ret = cmdq_irq(host->mmc, err);
+ if (err) {
+ /* Clear the error interrupts */
+ mask = intmask & SDHCI_INT_ERROR_MASK;
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
+ }
+ return ret;
+
+}
+
+#else
+static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
+{
+	pr_err("%s: received cmdq irq while CMDQ is disabled\n",
+ mmc_hostname(host->mmc));
+ return IRQ_NONE;
+}
+#endif
+
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
irqreturn_t result = IRQ_NONE;
@@ -2663,6 +3214,31 @@
return IRQ_NONE;
}
+ if (!host->clock && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card)) {
+ if (!mmc_card_and_host_support_async_int(host->mmc)) {
+ spin_unlock(&host->lock);
+ return IRQ_NONE;
+ }
+ /*
+		 * The async card interrupt is level sensitive and can be
+		 * received while the clocks are off. If the SDIO card has
+		 * asserted it, we need to disable host->irq here; the card
+		 * interrupt is disabled and host->irq re-enabled later.
+ */
+
+ pr_debug("%s: %s: sdio_async intr. received\n",
+ mmc_hostname(host->mmc), __func__);
+ sdhci_cfg_irq(host, false, false);
+ host->sdio_irq_async_status = true;
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
+ spin_unlock(&host->lock);
+ return result;
+ }
+
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
if (!intmask || intmask == 0xffffffff) {
result = IRQ_NONE;
@@ -2670,6 +3246,22 @@
}
do {
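+		/*
+		 * When CMDQ is active and not halted, give the CQE the
+		 * first chance to handle the interrupt.
+		 */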
+ if (host->mmc->card && mmc_card_cmdq(host->mmc->card) &&
+ !mmc_host_halt(host->mmc) && !mmc_host_cq_disable(host->mmc)) {
+ pr_debug("*** %s: cmdq intr: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ intmask);
+ result = sdhci_cmdq_irq(host, intmask);
+ if (result == IRQ_HANDLED)
+ goto out;
+ }
+
+ MMC_TRACE(host->mmc,
+ "%s: intmask: 0x%x\n", __func__, intmask);
+
+ if (intmask & SDHCI_INT_AUTO_CMD_ERR)
+ host->auto_cmd_err_sts = sdhci_readw(host,
+ SDHCI_AUTO_CMD_ERR);
/* Clear selected interrupts. */
mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
SDHCI_INT_BUS_POWER);
@@ -2708,11 +3300,19 @@
result = IRQ_WAKE_THREAD;
}
- if (intmask & SDHCI_INT_CMD_MASK)
+ if (intmask & SDHCI_INT_CMD_MASK) {
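+		/* slow-clearance controllers need ~40 us at low clock rates */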
+ if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+ (host->clock <= 400000))
+ udelay(40);
sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+ }
- if (intmask & SDHCI_INT_DATA_MASK)
+ if (intmask & SDHCI_INT_DATA_MASK) {
+ if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+ (host->clock <= 400000))
+ udelay(40);
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+ }
if (intmask & SDHCI_INT_BUS_POWER)
pr_err("%s: Card is consuming too much power!\n",
@@ -2777,8 +3377,11 @@
sdio_run_irqs(host->mmc);
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED) {
+ if (host->sdio_irq_async_status)
+ host->sdio_irq_async_status = false;
sdhci_enable_sdio_irq_nolock(host, true);
+ }
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2991,11 +3594,184 @@
host->flags = SDHCI_SIGNALING_330;
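+	/* init here so the lock is valid before sdhci_setup_host() runs */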
+ spin_lock_init(&host->lock);
+ ratelimit_state_init(&host->dbg_dump_rs, SDHCI_DBG_DUMP_RS_INTERVAL,
+ SDHCI_DBG_DUMP_RS_BURST);
+
return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+#ifdef CONFIG_MMC_CQ_HCI
+static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u8 ctrl;
+
+ if (host->version >= SDHCI_SPEC_200) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ ctrl |= SDHCI_CTRL_ADMA64;
+ else
+ ctrl |= SDHCI_CTRL_ADMA32;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+ if (host->ops->toggle_cdr)
+ host->ops->toggle_cdr(host, false);
+}
+
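+/*
+ * In CMDQ mode only the CQE summary interrupt and the error bits
+ * matter; the full legacy mask is restored when CMDQ is disabled.
+ */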
+static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 ier = 0;
+
+ if (clear) {
+ ier = SDHCI_INT_CMDQ_EN | SDHCI_INT_ERROR_MASK;
+ sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+ } else {
+ ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
+ SDHCI_INT_INDEX | SDHCI_INT_END_BIT |
+ SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+ SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+ SDHCI_INT_AUTO_CMD_ERR;
+ sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+ }
+}
+
+static void sdhci_cmdq_set_data_timeout(struct mmc_host *mmc, u32 val)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_writeb(host, val, SDHCI_TIMEOUT_CONTROL);
+}
+
+static void sdhci_cmdq_dump_vendor_regs(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_dumpregs(host);
+}
+
+static int sdhci_cmdq_init(struct sdhci_host *host, struct mmc_host *mmc,
+ bool dma64)
+{
+ return cmdq_init(host->cq_host, mmc, dma64);
+}
+
+static void sdhci_cmdq_set_block_size(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_set_blk_size_reg(host, 512, 0);
+}
+
+static void sdhci_enhanced_strobe_mask(struct mmc_host *mmc, bool set)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->enhanced_strobe_mask)
+ host->ops->enhanced_strobe_mask(host, set);
+}
+
+static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->clear_set_dumpregs)
+ host->ops->clear_set_dumpregs(host, set);
+}
+
+static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_writel(host, sdhci_readl(host, SDHCI_INT_ENABLE) |
+ SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+}
+#else
+static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
+{
+}
+
+static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
+{
+}
+
+static void sdhci_cmdq_set_data_timeout(struct mmc_host *mmc, u32 val)
+{
+}
+
+static void sdhci_cmdq_dump_vendor_regs(struct mmc_host *mmc)
+{
+}
+
+static int sdhci_cmdq_init(struct sdhci_host *host, struct mmc_host *mmc,
+			   bool dma64)
+{
+	return -ENOSYS;
+}
+
+static void sdhci_cmdq_set_block_size(struct mmc_host *mmc)
+{
+}
+
+static void sdhci_enhanced_strobe_mask(struct mmc_host *mmc, bool set)
+{
+}
+
+static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
+{
+}
+
+static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
+{
+}
+#endif
+
+static const struct cmdq_host_ops sdhci_cmdq_ops = {
+ .clear_set_irqs = sdhci_cmdq_clear_set_irqs,
+ .set_data_timeout = sdhci_cmdq_set_data_timeout,
+ .dump_vendor_regs = sdhci_cmdq_dump_vendor_regs,
+ .set_block_size = sdhci_cmdq_set_block_size,
+ .clear_set_dumpregs = sdhci_cmdq_clear_set_dumpregs,
+ .enhanced_strobe_mask = sdhci_enhanced_strobe_mask,
+ .post_cqe_halt = sdhci_cmdq_post_cqe_halt,
+ .set_transfer_params = sdhci_cmdq_set_transfer_params,
+};
+
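+/* 64-bit ADMA2 is only usable when dma_addr_t itself is 64 bits wide */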
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+ u32 caps;
+
+ caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+ sdhci_readl(host, SDHCI_CAPABILITIES);
+
+ if (caps & SDHCI_CAN_64BIT)
+ return 1;
+ return 0;
+}
+#else
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+ return 0;
+}
+#endif
+
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
struct mmc_host *mmc = host->mmc;
@@ -3061,6 +3837,7 @@
int sdhci_setup_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
+ u32 caps[2] = {0, 0};
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
@@ -3085,6 +3862,8 @@
sdhci_read_caps(host);
+ caps[0] = host->caps;
+
override_timeout_clk = host->timeout_clk;
if (host->version > SDHCI_SPEC_300) {
@@ -3122,7 +3901,7 @@
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (host->caps & SDHCI_CAN_64BIT)
+ if (sdhci_is_adma2_64bit(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -3280,6 +4059,9 @@
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ if (caps[0] & SDHCI_CAN_ASYNC_INT)
+ mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
@@ -3312,7 +4094,8 @@
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
mmc_card_is_removable(mmc) &&
- mmc_gpio_get_cd(host->mmc) < 0)
+ mmc_gpio_get_cd(host->mmc) < 0 &&
+ !(mmc->caps2 & MMC_CAP2_NONHOTPLUG))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
@@ -3403,10 +4186,15 @@
* value.
*/
max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
- if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
- int curr = regulator_get_current_limit(mmc->supply.vmmc);
- if (curr > 0) {
+ if (!max_current_caps) {
+		int curr = 0;
+ if (!IS_ERR(mmc->supply.vmmc))
+ curr = regulator_get_current_limit(mmc->supply.vmmc);
+ else if (host->ops->get_current_limit)
+ curr = host->ops->get_current_limit(host);
+
+ if (curr > 0) {
/* convert to SDHCI_MAX_CURRENT format */
curr = curr/1000; /* convert to mA */
curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
@@ -3481,8 +4269,6 @@
if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
host->flags |= SDHCI_SIGNALING_120;
- spin_lock_init(&host->lock);
-
/*
* Maximum number of segments. Depends on if the hardware
* can do scatter/gather or not.
@@ -3572,6 +4358,8 @@
init_waitqueue_head(&host->buf_ready_int);
+ host->flags |= SDHCI_HOST_IRQ_STATUS;
+
sdhci_init(host, 0);
ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
@@ -3586,31 +4374,54 @@
sdhci_dumpregs(host);
#endif
- ret = sdhci_led_register(host);
- if (ret) {
- pr_err("%s: Failed to register LED device: %d\n",
- mmc_hostname(mmc), ret);
- goto unirq;
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL)) {
+ ret = sdhci_led_register(host);
+ if (ret) {
+ pr_err("%s: Failed to register LED device: %d\n",
+ mmc_hostname(mmc), ret);
+ goto unirq;
+ }
}
mmiowb();
+ if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR) {
+ host->ier = (host->ier & ~SDHCI_INT_DATA_END_BIT);
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ if (mmc->caps2 & MMC_CAP2_CMD_QUEUE) {
+ bool dma64 = (host->flags & SDHCI_USE_64_BIT_DMA) ?
+ true : false;
+ ret = sdhci_cmdq_init(host, mmc, dma64);
+ if (ret)
+ pr_err("%s: CMDQ init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ else
+ host->cq_host->ops = &sdhci_cmdq_ops;
+ }
+
+ pr_info("%s: SDHCI controller on %s [%s] using %s in %s mode\n",
+ mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ (host->flags & SDHCI_USE_ADMA) ?
+ ((host->flags & SDHCI_USE_64_BIT_DMA) ?
+ "64-bit ADMA" : "32-bit ADMA") :
+ ((host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"),
+ ((mmc->caps2 & MMC_CAP2_CMD_QUEUE) && !ret) ?
+ "CMDQ" : "legacy");
+
+ sdhci_enable_card_detection(host);
+
ret = mmc_add_host(mmc);
if (ret)
goto unled;
- pr_info("%s: SDHCI controller on %s [%s] using %s\n",
- mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
- (host->flags & SDHCI_USE_ADMA) ?
- (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
- (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
-
- sdhci_enable_card_detection(host);
-
return 0;
unled:
- sdhci_led_unregister(host);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ sdhci_led_unregister(host);
unirq:
sdhci_do_reset(host, SDHCI_RESET_ALL);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
@@ -3666,9 +4477,10 @@
sdhci_disable_card_detection(host);
- mmc_remove_host(mmc);
+ mmc_remove_host(host->mmc);
- sdhci_led_unregister(host);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ sdhci_led_unregister(host);
if (!dead)
sdhci_do_reset(host, SDHCI_RESET_ALL);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2570455..d9e656a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -17,7 +17,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
-
+#include <linux/ratelimit.h>
#include <linux/mmc/host.h>
/*
@@ -141,22 +141,32 @@
#define SDHCI_INT_DATA_CRC 0x00200000
#define SDHCI_INT_DATA_END_BIT 0x00400000
#define SDHCI_INT_BUS_POWER 0x00800000
-#define SDHCI_INT_ACMD12ERR 0x01000000
+#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
- SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+ SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
+ SDHCI_INT_AUTO_CMD_ERR)
+
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
SDHCI_INT_BLK_GAP)
+
+#define SDHCI_INT_CMDQ_EN (0x1 << 14)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
-#define SDHCI_ACMD12_ERR 0x3C
+#define SDHCI_AUTO_CMD_ERR 0x3C
+#define SDHCI_AUTO_CMD12_NOT_EXEC 0x0001
+#define SDHCI_AUTO_CMD_TIMEOUT_ERR 0x0002
+#define SDHCI_AUTO_CMD_CRC_ERR 0x0004
+#define SDHCI_AUTO_CMD_ENDBIT_ERR 0x0008
+#define SDHCI_AUTO_CMD_INDEX_ERR 0x0010
+#define SDHCI_AUTO_CMD12_NOT_ISSUED 0x0080
#define SDHCI_HOST_CONTROL2 0x3E
#define SDHCI_CTRL_UHS_MASK 0x0007
@@ -174,6 +184,7 @@
#define SDHCI_CTRL_DRV_TYPE_D 0x0030
#define SDHCI_CTRL_EXEC_TUNING 0x0040
#define SDHCI_CTRL_TUNED_CLK 0x0080
+#define SDHCI_CTRL_ASYNC_INT_ENABLE 0x4000
#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define SDHCI_CAPABILITIES 0x40
@@ -195,6 +206,7 @@
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT 0x10000000
+#define SDHCI_CAN_ASYNC_INT 0x20000000
#define SDHCI_SUPPORT_SDR50 0x00000001
#define SDHCI_SUPPORT_SDR104 0x00000002
@@ -328,6 +340,12 @@
COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */
};
+enum sdhci_power_policy {
+ SDHCI_PERFORMANCE_MODE,
+ SDHCI_POWER_SAVE_MODE,
+ SDHCI_POWER_POLICY_NUM /* Always keep this one last */
+};
+
struct sdhci_host {
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
@@ -425,6 +443,83 @@
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
/* Broken Clock divider zero in controller */
#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
+/*
+ * Read Transfer Active/Write Transfer Active may not be de-asserted
+ * after the end of a transaction. Issue a reset for the DAT line.
+ */
+#define SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT (1<<17)
+/*
+ * Slow interrupt clearance at 400KHz may cause the host controller
+ * driver's interrupt handler to be called twice.
+ */
+#define SDHCI_QUIRK2_SLOW_INT_CLR (1<<18)
+
+/*
+ * If the base clock is scalable, there should be no further clock
+ * division, as the input clock itself is scaled down to the required
+ * frequency.
+ */
+#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK (1<<19)
+
+/*
+ * Ignore data timeout errors for R1B commands, as they carry no data
+ * and their busy timeout value could be larger than the maximum
+ * timeout value the controller can handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD (1<<20)
+
+/*
+ * The preset value registers are not properly initialized by some
+ * hardware, so preset values must not be enabled on such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE (1<<21)
+/*
+ * Some controllers define the usage of 0xF in the data timeout counter
+ * register (0x2E), which is actually a reserved value as per the
+ * specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT (1<<22)
+/*
+ * This is applicable to controllers that advertise the timeout clock
+ * value in the capabilities register (bits 5-0) as just 50MHz whereas
+ * the base clock frequency is 200MHz. The controller internally
+ * multiplies the value in the timeout control register by 4, assuming
+ * the driver always uses the fixed timeout clock value from the
+ * capabilities register to calculate the timeout. But when the driver
+ * uses SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK, the base clock frequency is
+ * controlled directly by the driver and its rate varies up to 200MHz.
+ * This quirk avoids the controller's multiplication in such cases,
+ * when the timeout is calculated from the base clock.
+ */
+#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 23)
+
+/*
+ * Some SDHC controllers are unable to handle a data-end bit error in
+ * the 1-bit mode of SDIO.
+ */
+#define SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR (1<<24)
+
+/* Use the reset workaround in case the sdhci reset times out */
+#define SDHCI_QUIRK2_USE_RESET_WORKAROUND (1<<26)
+
+/* Some controllers don't have any LED control */
+#define SDHCI_QUIRK2_BROKEN_LED_CONTROL (1<<27)
+
+/*
+ * Some controllers don't follow the tuning procedure defined in the spec.
+ * The tuning data has to be compared by the SW driver to validate the
+ * correct phase.
+ */
+#define SDHCI_QUIRK2_NON_STANDARD_TUNING (1 << 28)
+/*
+ * Some controllers may use PIO mode to work around HW issues in ADMA for
+ * eMMC tuning commands.
+ */
+#define SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING (1 << 29)
+
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -435,6 +530,7 @@
struct mmc_host *mmc; /* MMC structure */
struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
+ u64 coherent_dma_mask;
#if IS_ENABLED(CONFIG_LEDS_CLASS)
struct led_classdev led; /* LED control */
@@ -458,6 +554,7 @@
#define SDHCI_SIGNALING_330 (1<<14) /* Host is capable of 3.3V signaling */
#define SDHCI_SIGNALING_180 (1<<15) /* Host is capable of 1.8V signaling */
#define SDHCI_SIGNALING_120 (1<<16) /* Host is capable of 1.2V signaling */
+#define SDHCI_HOST_IRQ_STATUS (1<<17) /* host->irq status */
unsigned int version; /* SDHCI spec. version */
@@ -474,6 +571,7 @@
bool pending_reset; /* Cmd/data reset is pending */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
+ struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
struct mmc_command *data_cmd; /* Current data command */
struct mmc_data *data; /* Current data request */
@@ -525,6 +623,19 @@
#define SDHCI_TUNING_MODE_2 1
#define SDHCI_TUNING_MODE_3 2
+ ktime_t data_start_time;
+
+ enum sdhci_power_policy power_policy;
+
+ bool sdio_irq_async_status;
+
+ u32 auto_cmd_err_sts;
+ struct ratelimit_state dbg_dump_rs;
+ struct cmdq_host *cq_host;
+ int reset_wa_applied; /* reset workaround status */
+ ktime_t reset_wa_t; /* time when the reset workaround is applied */
+ int reset_wa_cnt; /* total number of times workaround is used */
+
unsigned long private[0] ____cacheline_aligned;
};
@@ -558,12 +669,34 @@
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ unsigned int (*get_max_segments)(void);
+#define REQ_BUS_OFF (1 << 0)
+#define REQ_BUS_ON (1 << 1)
+#define REQ_IO_LOW (1 << 2)
+#define REQ_IO_HIGH (1 << 3)
void (*card_event)(struct sdhci_host *host);
+ int (*enhanced_strobe)(struct sdhci_host *host);
+ void (*platform_bus_voting)(struct sdhci_host *host, u32 enable);
+ void (*toggle_cdr)(struct sdhci_host *host, bool enable);
+ void (*check_power_status)(struct sdhci_host *host, u32 req_type);
+ int (*config_auto_tuning_cmd)(struct sdhci_host *host,
+ bool enable,
+ u32 type);
+ int (*enable_controller_clock)(struct sdhci_host *host);
+ void (*clear_set_dumpregs)(struct sdhci_host *host, bool set);
+ void (*enhanced_strobe_mask)(struct sdhci_host *host, bool set);
+ void (*dump_vendor_regs)(struct sdhci_host *host);
void (*voltage_switch)(struct sdhci_host *host);
int (*select_drive_strength)(struct sdhci_host *host,
struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
+ int (*notify_load)(struct sdhci_host *host, enum mmc_load state);
+ void (*reset_workaround)(struct sdhci_host *host, u32 enable);
+ void (*init)(struct sdhci_host *host);
+ void (*pre_req)(struct sdhci_host *host, struct mmc_request *req);
+ void (*post_req)(struct sdhci_host *host, struct mmc_request *req);
+ unsigned int (*get_current_limit)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -698,4 +831,5 @@
extern int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync);
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 3779475..283ff7e 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -229,12 +229,10 @@
last_trx_part = curr_part - 1;
- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}
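
The rounding change above matters when a TRX image ends exactly on an erase-block boundary: rounddown() plus the loop's own "offset += blocksize" would skip the block that starts at the TRX end, while roundup() minus one block lands on it. A minimal standalone sketch of the arithmetic (values hypothetical, not taken from the patch):

#include <stdio.h>

#define ROUNDDOWN(x, a) ((x) / (a) * (a))
#define ROUNDUP(x, a)   (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int blocksize = 0x10000;	/* 64 KiB erase block */
	unsigned int trx_end = 0x30000;		/* TRX end, block-aligned */

	/* old: rounddown + loop increment -> 0x40000, block at 0x30000 skipped */
	printf("old next offset: 0x%x\n",
	       ROUNDDOWN(trx_end, blocksize) + blocksize);

	/* new: roundup - blocksize + loop increment -> 0x30000, block scanned */
	printf("new next offset: 0x%x\n",
	       ROUNDUP(trx_end, blocksize) - blocksize + blocksize);
	return 0;
}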
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba3..3971256 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 368bb07..481895b 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@
int work_done = 0;
u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
- u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+ u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
/* Handle bus state changes */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a0dabd4..7ab24c5 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -740,13 +740,18 @@
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode imode;
+ struct gs_identify_mode *imode;
int rc;
+ imode = kmalloc(sizeof(*imode), GFP_KERNEL);
+
+ if (!imode)
+ return -ENOMEM;
+
if (do_identify)
- imode.mode = GS_CAN_IDENTIFY_ON;
+ imode->mode = GS_CAN_IDENTIFY_ON;
else
- imode.mode = GS_CAN_IDENTIFY_OFF;
+ imode->mode = GS_CAN_IDENTIFY_OFF;
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@ -756,10 +761,12 @@
USB_RECIP_INTERFACE,
dev->channel,
0,
- &imode,
- sizeof(imode),
+ imode,
+ sizeof(*imode),
100);
+ kfree(imode);
+
return (rc > 0) ? 0 : rc;
}
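
The gs_usb change above fixes a classic USB pitfall: usb_control_msg() payloads must live in DMA-able (kmalloc'd) memory, and a stack variable is not — with CONFIG_VMAP_STACK the stack may not even be physically contiguous. A sketch of the general pattern (hypothetical helper, not part of the patch):

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch only: copy a caller-provided payload to the heap before a
 * control transfer, since stack buffers are not DMA-safe. */
static int send_vendor_ctrl(struct usb_device *udev, u8 request,
			    const void *data, u16 len)
{
	void *buf;
	int rc;

	buf = kmemdup(data, len, GFP_KERNEL);	/* DMA-able heap copy */
	if (!buf)
		return -ENOMEM;

	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			     USB_DIR_OUT | USB_TYPE_VENDOR |
			     USB_RECIP_INTERFACE,
			     0, 0, buf, len, 100);
	kfree(buf);
	return rc < 0 ? rc : 0;
}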
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da9..6b86353 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +124,19 @@
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -349,9 +351,9 @@
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
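
The cq.c hunks above replace the spinlock-plus-refcount lookup with an RCU read-side lookup. This is safe because mlx4_cq_free() now deletes the radix-tree entry first and then calls synchronize_irq(), so a CQ cannot be freed while an interrupt handler may still be using it. A sketch of the lookup pattern (hypothetical wrapper, assuming that same synchronize_irq() guarantee on the free path):

#include <linux/mlx4/device.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Sketch only: RCU protects the tree walk itself; using the object
 * after rcu_read_unlock() relies on the free path running
 * synchronize_irq() before releasing the memory. */
static struct mlx4_cq *cq_lookup(struct radix_tree_root *tree, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(tree, cqn);
	rcu_read_unlock();

	return cq;
}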
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4d3ddc2..5d48458 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -444,8 +444,14 @@
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75d07fa..b2ca8a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4020,49 +4020,51 @@
return err;
}
+#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
+#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
+#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
+
static const struct pci_device_id mlx4_pci_table[] = {
- /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
- /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+ /* MT25408 "Hermon" */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */
+ /* MT25458 ConnectX EN 10GBASE-T */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */
+ /* MT26468 ConnectX EN 10GigE PCIe Gen2*/
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
+ /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
+ /* MT26478 ConnectX2 40GigE PCIe Gen2 */
+ MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
+ /* MT25400 Family [ConnectX-2] */
+ MLX_VF(0x1002), /* Virtual Function */
/* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
- /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
- { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
+ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
+ MLX_VF(0x1004), /* Virtual Function */
+ MLX_GN(0x1005), /* MT27510 Family */
+ MLX_GN(0x1006), /* MT27511 Family */
+ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */
+ MLX_GN(0x1008), /* MT27521 Family */
+ MLX_GN(0x1009), /* MT27530 Family */
+ MLX_GN(0x100a), /* MT27531 Family */
+ MLX_GN(0x100b), /* MT27540 Family */
+ MLX_GN(0x100c), /* MT27541 Family */
+ MLX_GN(0x100d), /* MT27550 Family */
+ MLX_GN(0x100e), /* MT27551 Family */
+ MLX_GN(0x100f), /* MT27560 Family */
+ MLX_GN(0x1010), /* MT27561 Family */
+
+ /*
+ * See the mellanox_check_broken_intx_masking() quirk when
+ * adding devices
+ */
+
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c548bea..32f76bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 81d8e3b..21ce0b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,7 +82,7 @@
#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
#define MLX5_UMR_ALIGN (2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 90e81ae..e034dbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -563,6 +563,7 @@
int idx = 0;
int err = 0;
+ info->data = MAX_NUM_OF_ETHTOOL_RULES;
while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
err = mlx5e_ethtool_get_flow(priv, info, location);
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 5595724..b5d5519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@
struct netdev_notifier_changeupper_info *info)
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
- struct netdev_lag_upper_info *lag_upper_info;
+ struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded;
int bond_status = 0;
int num_slaves = 0;
@@ -303,7 +303,8 @@
if (!netif_is_lag_master(upper))
return 0;
- lag_upper_info = info->upper_info;
+ if (info->linking)
+ lag_upper_info = info->upper_info;
/* The event may still be of interest if the slave does not belong to
* us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7a196a0..d776db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@
if (err) {
dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
FW_INIT_TIMEOUT_MILI);
- goto out_err;
+ goto err_cmd_cleanup;
}
err = mlx5_core_enable_hca(dev, 0);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1a92de7..a2d218b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1059,12 +1059,70 @@
.get_mdio_data = sh_get_mdio,
};
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+ int free_num = 0;
+ int entry;
+ bool sent;
+
+ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
+ txdesc = &mdp->tx_ring[entry];
+ sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+ if (sent_only && !sent)
+ break;
+ /* TACT bit must be checked before all the following reads */
+ dma_rmb();
+ netif_info(mdp, tx_done, ndev,
+ "tx entry %d status 0x%08x\n",
+ entry, le32_to_cpu(txdesc->status));
+ /* Free the original skb. */
+ if (mdp->tx_skbuff[entry]) {
+ dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+ le32_to_cpu(txdesc->len) >> 16,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+ mdp->tx_skbuff[entry] = NULL;
+ free_num++;
+ }
+ txdesc->status = cpu_to_le32(TD_TFP);
+ if (entry >= mdp->num_tx_ring - 1)
+ txdesc->status |= cpu_to_le32(TD_TDLE);
+
+ if (sent) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+ }
+ }
+ return free_num;
+}
+
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ringsize, i;
+ if (mdp->rx_ring) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
+ if (mdp->rx_skbuff[i]) {
+ struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+ dma_unmap_single(&ndev->dev,
+ le32_to_cpu(rxdesc->addr),
+ ALIGN(mdp->rx_buf_sz, 32),
+ DMA_FROM_DEVICE);
+ }
+ }
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1073,27 +1131,18 @@
kfree(mdp->rx_skbuff);
mdp->rx_skbuff = NULL;
- /* Free Tx skb ringbuffer */
- if (mdp->tx_skbuff) {
- for (i = 0; i < mdp->num_tx_ring; i++)
- dev_kfree_skb(mdp->tx_skbuff[i]);
- }
- kfree(mdp->tx_skbuff);
- mdp->tx_skbuff = NULL;
-
- if (mdp->rx_ring) {
- ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
- mdp->rx_desc_dma);
- mdp->rx_ring = NULL;
- }
-
if (mdp->tx_ring) {
+ sh_eth_tx_free(ndev, false);
+
ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
dma_free_coherent(NULL, ringsize, mdp->tx_ring,
mdp->tx_desc_dma);
mdp->tx_ring = NULL;
}
+
+ /* Free Tx skb ringbuffer */
+ kfree(mdp->tx_skbuff);
+ mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
@@ -1341,43 +1390,6 @@
update_mac_address(ndev);
}
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- struct sh_eth_txdesc *txdesc;
- int free_num = 0;
- int entry;
-
- for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
- entry = mdp->dirty_tx % mdp->num_tx_ring;
- txdesc = &mdp->tx_ring[entry];
- if (txdesc->status & cpu_to_le32(TD_TACT))
- break;
- /* TACT bit must be checked before all the following reads */
- dma_rmb();
- netif_info(mdp, tx_done, ndev,
- "tx entry %d status 0x%08x\n",
- entry, le32_to_cpu(txdesc->status));
- /* Free the original skb. */
- if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
- le32_to_cpu(txdesc->len) >> 16,
- DMA_TO_DEVICE);
- dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
- mdp->tx_skbuff[entry] = NULL;
- free_num++;
- }
- txdesc->status = cpu_to_le32(TD_TFP);
- if (entry >= mdp->num_tx_ring - 1)
- txdesc->status |= cpu_to_le32(TD_TDLE);
-
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
- }
- return free_num;
-}
-
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
@@ -1620,7 +1632,7 @@
intr_status, mdp->cur_tx, mdp->dirty_tx,
(u32)ndev->state, edtrr);
/* dirty buffer free */
- sh_eth_txfree(ndev);
+ sh_eth_tx_free(ndev, true);
/* SH7712 BUG */
if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1679,7 +1691,7 @@
/* Clear Tx interrupts */
sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
- sh_eth_txfree(ndev);
+ sh_eth_tx_free(ndev, true);
netif_wake_queue(ndev);
}
@@ -2307,7 +2319,7 @@
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
- if (!sh_eth_txfree(ndev)) {
+ if (!sh_eth_tx_free(ndev, true)) {
netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d2e61e0..f7c6a40 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2709,7 +2709,7 @@
}
#define MACSEC_FEATURES \
- (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+ (NETIF_F_SG | NETIF_F_HIGHDMA)
static struct lock_class_key macsec_netdev_addr_lock_key;
static int macsec_dev_init(struct net_device *dev)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 26d6f0b..dc8ccac 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1140,6 +1140,7 @@
static void macvlan_port_destroy(struct net_device *dev)
{
struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ struct sk_buff *skb;
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
@@ -1148,7 +1149,15 @@
* but we need to cancel it and purge left skbs if any.
*/
cancel_work_sync(&port->bc_work);
- __skb_queue_purge(&port->bc_queue);
+
+ while ((skb = __skb_dequeue(&port->bc_queue))) {
+ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+ if (src)
+ dev_put(src->dev);
+
+ kfree_skb(skb);
+ }
kfree_rcu(port, rcu);
}
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7a240fc..4865221 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1438,8 +1438,6 @@
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
- } else {
- netif_rx_ni(skb);
}
return true;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 201ffa5..a9be26f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -552,16 +552,18 @@
EXPORT_SYMBOL(phy_mii_ioctl);
/**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * phy_start_aneg_priv - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancellation
*
* Description: Sanitizes the settings (if we're not autonegotiating
* them), and then calls the driver's config_aneg function.
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-int phy_start_aneg(struct phy_device *phydev)
+static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
{
+ bool trigger = 0;
int err;
mutex_lock(&phydev->lock);
@@ -586,10 +588,40 @@
}
}
+ /* Re-schedule a PHY state machine to check PHY status because
+ * negotiation may already be done and aneg interrupt may not be
+ * generated.
+ */
+ if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+ err = phy_aneg_done(phydev);
+ if (err > 0) {
+ trigger = true;
+ err = 0;
+ }
+ }
+
out_unlock:
mutex_unlock(&phydev->lock);
+
+ if (trigger)
+ phy_trigger_machine(phydev, sync);
+
return err;
}
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+ return phy_start_aneg_priv(phydev, true);
+}
EXPORT_SYMBOL(phy_start_aneg);
/**
@@ -617,7 +649,7 @@
* state machine runs.
*/
-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
if (sync)
cancel_delayed_work_sync(&phydev->state_queue);
@@ -639,7 +671,7 @@
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
@@ -1100,7 +1132,7 @@
mutex_unlock(&phydev->lock);
if (needs_aneg)
- err = phy_start_aneg(phydev);
+ err = phy_start_aneg_priv(phydev, false);
else if (do_suspend)
phy_suspend(phydev);
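
The phy.c rework above exists because cancel_delayed_work_sync() deadlocks when invoked from the very work item it is cancelling, so the state machine passes sync=false while external callers keep sync=true. A sketch of the pattern (hypothetical names, not the driver's actual code):

#include <linux/workqueue.h>

/* Sketch only: a worker must never wait synchronously on itself. */
static void retrigger(struct delayed_work *dwork, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(dwork);	/* external callers */
	else
		cancel_delayed_work(dwork);		/* from the work itself */

	queue_delayed_work(system_power_efficient_wq, dwork, 0);
}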
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
index 0184c96..3a45cf8 100644
--- a/drivers/net/ppp/pppolac.c
+++ b/drivers/net/ppp/pppolac.c
@@ -206,7 +206,9 @@
while ((skb = skb_dequeue(&delivery_queue))) {
struct sock *sk_udp = skb->sk;
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
- struct msghdr msg = { 0 };
+ struct msghdr msg = {
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
skb->len);
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
index d9e0603..cdb4fa1 100644
--- a/drivers/net/ppp/pppopns.c
+++ b/drivers/net/ppp/pppopns.c
@@ -189,7 +189,9 @@
while ((skb = skb_dequeue(&delivery_queue))) {
struct sock *sk_raw = skb->sk;
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
- struct msghdr msg = { 0 };
+ struct msghdr msg = {
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
skb->len);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index a2afb8e..80ef486 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1124,7 +1124,7 @@
goto nla_put_failure;
/* rule only needs to appear once */
- nlh->nlmsg_flags &= NLM_F_EXCL;
+ nlh->nlmsg_flags |= NLM_F_EXCL;
frh = nlmsg_data(nlh);
memset(frh, 0, sizeof(*frh));
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index f00d429..91594de 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -25,3 +25,5 @@
obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
+
+obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index e2512d5..eedf86b 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -528,6 +528,9 @@
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
+ if (!spec_priv->rfs_chan_spec_scan)
+ return 1;
+
/* Output buffers are full, no need to process anything
* since there is no space to put the result anyway
*/
@@ -1072,7 +1075,7 @@
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
+ if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
@@ -1086,6 +1089,9 @@
debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
+ if (!spec_priv->rfs_chan_spec_scan)
+ return;
+
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
debugfs_phy, spec_priv,
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 6c68fd9..4e111cb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -474,22 +474,23 @@
}
mutex_unlock(&wil->p2p_wdev_mutex);
- /* social scan on P2P_DEVICE is handled as p2p search */
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
- wil_p2p_is_social_scan(request)) {
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (!wil->p2p.p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
rc = -EIO;
goto out;
}
- wil->scan_request = request;
- wil->radio_wdev = wdev;
- rc = wil_p2p_search(wil, request);
- if (rc) {
- wil->radio_wdev = wil_to_wdev(wil);
- wil->scan_request = NULL;
+ /* social scan on P2P_DEVICE is handled as p2p search */
+ if (wil_p2p_is_social_scan(request)) {
+ wil->scan_request = request;
+ wil->radio_wdev = wdev;
+ rc = wil_p2p_search(wil, request);
+ if (rc) {
+ wil->radio_wdev = wil_to_wdev(wil);
+ wil->scan_request = NULL;
+ }
+ goto out;
}
- goto out;
}
(void)wil_p2p_stop_discovery(wil);
@@ -499,9 +500,9 @@
for (i = 0; i < request->n_ssids; i++) {
wil_dbg_misc(wil, "SSID[%d]", i);
- print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
- request->ssids[i].ssid,
- request->ssids[i].ssid_len);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
}
if (request->n_ssids)
@@ -538,8 +539,8 @@
}
if (request->ie_len)
- print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET,
- request->ie, request->ie_len);
+ wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ie, request->ie_len, true);
else
wil_dbg_misc(wil, "Scan has no IE's\n");
@@ -763,6 +764,8 @@
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
if (rc == 0) {
netif_carrier_on(ndev);
+ wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
+ wil->bss = bss;
/* Connect can take lots of time */
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
@@ -791,6 +794,7 @@
return 0;
}
+ wil->locally_generated_disc = true;
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
@@ -844,7 +848,8 @@
*/
wil_dbg_misc(wil, "mgmt_tx\n");
- print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+ wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
@@ -1177,18 +1182,18 @@
static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
{
- print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET,
- b->head, b->head_len);
- print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET,
- b->tail, b->tail_len);
- print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET,
- b->beacon_ies, b->beacon_ies_len);
- print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET,
- b->probe_resp, b->probe_resp_len);
- print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
- b->proberesp_ies, b->proberesp_ies_len);
- print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
- b->assocresp_ies, b->assocresp_ies_len);
+ wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->head, b->head_len, true);
+ wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->tail, b->tail_len, true);
+ wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->beacon_ies, b->beacon_ies_len, true);
+ wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->probe_resp, b->probe_resp_len, true);
+ wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->proberesp_ies, b->proberesp_ies_len, true);
+ wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ b->assocresp_ies, b->assocresp_ies_len, true);
}
/* internal functions for device reset and starting AP */
@@ -1282,6 +1287,7 @@
wil->pbss = pbss;
netif_carrier_on(ndev);
+ wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS);
rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
if (rc)
@@ -1297,6 +1303,7 @@
wmi_pcp_stop(wil);
err_pcp_start:
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
out:
mutex_unlock(&wil->mutex);
return rc;
@@ -1382,8 +1389,8 @@
wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
info->dtim_period);
wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
- print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
- info->ssid, info->ssid_len);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ info->ssid, info->ssid_len, true);
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
@@ -1403,6 +1410,7 @@
wil_dbg_misc(wil, "stop_ap\n");
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3e8cdf1..5648ebb 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -524,9 +524,8 @@
if (!buf)
return -ENOMEM;
- wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
- (const volatile void __iomem *)
- wil_blob->blob.data + pos, count);
+ wil_memcpy_fromio_32(buf, (const void __iomem *)
+ wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2c48419..36959a3 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -30,8 +30,8 @@
module_param(debug_fw, bool, 0444);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
-static bool oob_mode;
-module_param(oob_mode, bool, 0444);
+static u8 oob_mode;
+module_param(oob_mode, byte, 0444);
MODULE_PARM_DESC(oob_mode,
" enable out of the box (OOB) mode in FW, for diagnostics and certification");
@@ -135,14 +135,6 @@
*d++ = __raw_readl(s++);
}
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
- const volatile void __iomem *src, size_t count)
-{
- wil_halp_vote(wil);
- wil_memcpy_fromio_32(dst, src, count);
- wil_halp_unvote(wil);
-}
-
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count)
{
@@ -153,15 +145,6 @@
__raw_writel(*s++, d++);
}
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
- volatile void __iomem *dst,
- const void *src, size_t count)
-{
- wil_halp_vote(wil);
- wil_memcpy_toio_32(dst, src, count);
- wil_halp_unvote(wil);
-}
-
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -274,15 +257,20 @@
wil_bcast_fini(wil);
wil_update_net_queues_bh(wil, NULL, true);
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
if (test_bit(wil_status_fwconnected, wil->status)) {
clear_bit(wil_status_fwconnected, wil->status);
cfg80211_disconnected(ndev, reason_code,
- NULL, 0, false, GFP_KERNEL);
+ NULL, 0,
+ wil->locally_generated_disc,
+ GFP_KERNEL);
+ wil->locally_generated_disc = false;
} else if (test_bit(wil_status_fwconnecting, wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
+ wil->bss = NULL;
}
clear_bit(wil_status_fwconnecting, wil->status);
break;
@@ -304,10 +292,34 @@
{
struct wil6210_priv *wil = container_of(work,
struct wil6210_priv, disconnect_worker);
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_disconnect_event evt;
+ } __packed reply;
- mutex_lock(&wil->mutex);
- _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false);
- mutex_unlock(&wil->mutex);
+ if (test_bit(wil_status_fwconnected, wil->status))
+ /* connect succeeded after all */
+ return;
+
+ if (!test_bit(wil_status_fwconnecting, wil->status))
+ /* already disconnected */
+ return;
+
+ rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
+ WMI_DISCONNECT_EVENTID, &reply, sizeof(reply),
+ WIL6210_DISCONNECT_TO_MS);
+ if (rc) {
+ wil_err(wil, "disconnect error %d\n", rc);
+ return;
+ }
+
+ wil_update_net_queues_bh(wil, NULL, true);
+ netif_carrier_off(ndev);
+ cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
+ clear_bit(wil_status_fwconnecting, wil->status);
}
static void wil_connect_timer_fn(ulong x)
@@ -557,6 +569,12 @@
return -EAGAIN;
}
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
+{
+ if (wil->platform_ops.bus_request)
+ wil->platform_ops.bus_request(wil->platform_handle, kbps);
+}
+
/**
* wil6210_disconnect - disconnect one connection
* @wil: driver context
@@ -610,13 +628,25 @@
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
-static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
{
- wil_info(wil, "enable=%d\n", enable);
- if (enable)
+ wil_info(wil, "oob_mode to %d\n", mode);
+ switch (mode) {
+ case 0:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE |
+ BIT_USER_OOB_R2_MODE);
+ break;
+ case 1:
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
- else
+ break;
+ case 2:
wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+ wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE);
+ break;
+ default:
+ wil_err(wil, "invalid oob_mode: %d\n", mode);
+ }
}
static int wil_target_reset(struct wil6210_priv *wil)
@@ -1073,9 +1103,7 @@
napi_enable(&wil->napi_tx);
set_bit(wil_status_napi_en, wil->status);
- if (wil->platform_ops.bus_request)
- wil->platform_ops.bus_request(wil->platform_handle,
- WIL_MAX_BUS_REQUEST_KBPS);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
return 0;
}
@@ -1099,8 +1127,7 @@
set_bit(wil_status_resetting, wil->status);
- if (wil->platform_ops.bus_request)
- wil->platform_ops.bus_request(wil->platform_handle, 0);
+ wil6210_bus_request(wil, 0);
wil_disable_irq(wil);
if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
@@ -1163,6 +1190,7 @@
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
+ reinit_completion(&wil->halp.comp);
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 7260bef..2ae4fe8 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -71,6 +71,11 @@
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 0;
+ }
+
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
@@ -86,10 +91,14 @@
if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle);
- if (rc)
+ if (rc) {
wil_enable_irq(wil);
+ goto out;
+ }
}
+ set_bit(wil_status_suspended, wil->status);
+
out:
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
@@ -117,10 +126,13 @@
/* if netif up, bring hardware up
* During open(), IFF_UP set after actual device method
- * invocation. This prevent recursive call to wil_up()
+ * invocation. This prevents a recursive call to wil_up().
+ * wil_status_suspended will be cleared in wil_reset
*/
if (ndev->flags & IFF_UP)
rc = wil_up(wil);
+ else
+ clear_bit(wil_status_suspended, wil->status);
out:
wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4bccef3..734449d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -41,6 +41,7 @@
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
+#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
/**
@@ -140,6 +141,7 @@
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
+ #define BIT_USER_OOB_R2_MODE BIT(30)
#define RGF_USER_USAGE_8 (0x880020)
#define BIT_USER_PREVENT_DEEP_SLEEP BIT(0)
#define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1)
@@ -413,6 +415,7 @@
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
+ wil_status_suspended, /* suspend completed, device is suspended */
wil_status_last /* keep last */
};
@@ -616,6 +619,8 @@
u16 channel; /* relevant in AP mode */
int sinfo_gen;
u32 ap_isolate; /* no intra-BSS communication */
+ struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
+ int locally_generated_disc; /* relevant in STA mode */
/* interrupt moderation */
u32 tx_max_burst_duration;
u32 tx_interframe_timeout;
@@ -771,6 +776,12 @@
print_hex_dump_debug("DBG[ WMI]" prefix_str,\
prefix_type, rowsize, \
groupsize, buf, len, ascii)
+
+#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump_debug("DBG[MISC]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
#else /* defined(CONFIG_DYNAMIC_DEBUG) */
static inline
void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize,
@@ -783,18 +794,18 @@
int groupsize, const void *buf, size_t len, bool ascii)
{
}
+
+static inline
+void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize,
+ int groupsize, const void *buf, size_t len, bool ascii)
+{
+}
#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
-void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
- const volatile void __iomem *src,
- size_t count);
-void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
- volatile void __iomem *dst,
- const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
@@ -910,7 +921,7 @@
u8 type);
int wmi_abort_scan(struct wil6210_priv *wil);
void wil_abort_scan(struct wil6210_priv *wil, bool sync);
-
+void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0ede7f7..31d6ab9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -566,6 +566,7 @@
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (rc) {
netif_carrier_off(ndev);
+ wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
wil_err(wil, "cfg80211_connect_result with failure\n");
cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
NULL, 0,
@@ -573,12 +574,16 @@
GFP_KERNEL);
goto out;
} else {
- cfg80211_connect_result(ndev, evt->bssid,
- assoc_req_ie, assoc_req_ielen,
- assoc_resp_ie, assoc_resp_ielen,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ cfg80211_ref_bss(wiphy, wil->bss);
+ cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
+ wil->bss = NULL;
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (rc) {
@@ -1524,6 +1529,7 @@
wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
+ wil->locally_generated_disc = true;
if (del_sta) {
ether_addr_copy(del_sta_cmd.dst_mac, mac);
rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd,
@@ -1765,14 +1771,19 @@
void wmi_event_flush(struct wil6210_priv *wil)
{
+ ulong flags;
struct pending_wmi_event *evt, *t;
wil_dbg_wmi(wil, "event_flush\n");
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
list_del(&evt->list);
kfree(evt);
}
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index de19c7c..85d949e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2238,14 +2238,16 @@
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_p2p_info *p2p = &cfg->p2p;
struct brcmf_cfg80211_vif *vif;
+ enum nl80211_iftype iftype;
bool wait_for_disable = false;
int err;
brcmf_dbg(TRACE, "delete P2P vif\n");
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+ iftype = vif->wdev.iftype;
brcmf_cfg80211_arm_vif_event(cfg, vif);
- switch (vif->wdev.iftype) {
+ switch (iftype) {
case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
wait_for_disable = true;
@@ -2275,7 +2277,7 @@
BRCMF_P2P_DISABLE_TIMEOUT);
err = 0;
- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
brcmf_vif_clear_mgmt_ies(vif);
err = brcmf_p2p_release_p2p_if(vif);
}
@@ -2291,7 +2293,7 @@
brcmf_remove_interface(vif->ifp, true);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
- if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE)
p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
return err;
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 636f466..3c89a73 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012,2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012,2014-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -207,7 +207,10 @@
print_stack_trace(&wcnss_allocs[i].trace, 1);
}
}
+#else
+void wcnss_prealloc_check_memory_leak(void) {}
#endif
+EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
int wcnss_pre_alloc_reset(void)
{
@@ -223,6 +226,7 @@
return n;
}
+EXPORT_SYMBOL(wcnss_pre_alloc_reset);
static int __init wcnss_pre_alloc_init(void)
{
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 4e0c565..b7273be 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1422,7 +1422,7 @@
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
-#ifdef CONFIG_RT2X00_LIB_USB
+#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
if (rt2x00_is_usb(rt2x00dev)) {
usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 6005e14..662705e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -319,10 +319,8 @@
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -410,10 +408,8 @@
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);
- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -824,10 +820,6 @@
if (retval)
goto exit_free_device;
- retval = rt2x00lib_probe_dev(rt2x00dev);
- if (retval)
- goto exit_free_reg;
-
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
@@ -835,10 +827,17 @@
retval = -ENOMEM;
goto exit_free_reg;
}
-
init_usb_anchor(rt2x00dev->anchor);
+
+ retval = rt2x00lib_probe_dev(rt2x00dev);
+ if (retval)
+ goto exit_free_anchor;
+
return 0;
+exit_free_anchor:
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
+
exit_free_reg:
rt2x00usb_free_reg(rt2x00dev);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a17..351bac8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
if (rc < 0)
goto out_unlock;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
+
+ vfree(buf);
+ return rc;
+
out_unlock:
nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
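
The bus.c change above moves copy_to_user() outside the nvdimm bus lock: the copy can fault and sleep, and a fault path that itself needs the lock would deadlock. A sketch of the drop-lock-then-copy ordering (hypothetical names, a plain mutex standing in for the bus lock):

#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

/* Sketch only: finish the locked work, release the lock, then copy
 * the result out to userspace and free the bounce buffer. */
static long result_to_user(struct mutex *lock, void __user *p,
			   void *buf, size_t len)
{
	long rc = 0;

	mutex_lock(lock);
	/* ... produce the result into buf under the lock ... */
	mutex_unlock(lock);

	if (copy_to_user(p, buf, len))
		rc = -EFAULT;

	vfree(buf);
	return rc;
}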
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index d614493..dcb32f3 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -388,7 +388,7 @@
int alias_dpa_busy(struct device *dev, void *data)
{
- resource_size_t map_end, blk_start, new, busy;
+ resource_size_t map_end, blk_start, new;
struct blk_alloc_info *info = data;
struct nd_mapping *nd_mapping;
struct nd_region *nd_region;
@@ -429,29 +429,19 @@
retry:
/*
* Find the free dpa from the end of the last pmem allocation to
- * the end of the interleave-set mapping that is not already
- * covered by a blk allocation.
+ * the end of the interleave-set mapping.
*/
- busy = 0;
for_each_dpa_resource(ndd, res) {
+ if (strncmp(res->name, "pmem", 4) != 0)
+ continue;
if ((res->start >= blk_start && res->start < map_end)
|| (res->end >= blk_start
&& res->end <= map_end)) {
- if (strncmp(res->name, "pmem", 4) == 0) {
- new = max(blk_start, min(map_end + 1,
- res->end + 1));
- if (new != blk_start) {
- blk_start = new;
- goto retry;
- }
- } else
- busy += min(map_end, res->end)
- - max(nd_mapping->start, res->start) + 1;
- } else if (nd_mapping->start > res->start
- && map_end < res->end) {
- /* total eclipse of the PMEM region mapping */
- busy += nd_mapping->size;
- break;
+ new = max(blk_start, min(map_end + 1, res->end + 1));
+ if (new != blk_start) {
+ blk_start = new;
+ goto retry;
+ }
}
}
@@ -463,52 +453,11 @@
return 1;
}
- info->available -= blk_start - nd_mapping->start + busy;
+ info->available -= blk_start - nd_mapping->start;
return 0;
}
-static int blk_dpa_busy(struct device *dev, void *data)
-{
- struct blk_alloc_info *info = data;
- struct nd_mapping *nd_mapping;
- struct nd_region *nd_region;
- resource_size_t map_end;
- int i;
-
- if (!is_nd_pmem(dev))
- return 0;
-
- nd_region = to_nd_region(dev);
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- nd_mapping = &nd_region->mapping[i];
- if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
- break;
- }
-
- if (i >= nd_region->ndr_mappings)
- return 0;
-
- map_end = nd_mapping->start + nd_mapping->size - 1;
- if (info->res->start >= nd_mapping->start
- && info->res->start < map_end) {
- if (info->res->end <= map_end) {
- info->busy = 0;
- return 1;
- } else {
- info->busy -= info->res->end - map_end;
- return 0;
- }
- } else if (info->res->end >= nd_mapping->start
- && info->res->end <= map_end) {
- info->busy -= nd_mapping->start - info->res->start;
- return 0;
- } else {
- info->busy -= nd_mapping->size;
- return 0;
- }
-}
-
/**
* nd_blk_available_dpa - account the unused dpa of BLK region
* @nd_mapping: container of dpa-resource-root + labels
@@ -538,11 +487,7 @@
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "blk", 3) != 0)
continue;
-
- info.res = res;
- info.busy = resource_size(res);
- device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
- info.available -= info.busy;
+ info.available -= resource_size(res);
}
return info.available;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index da10b48..5f2feee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1204,8 +1204,8 @@
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if (ctrl->stripe_size)
- blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+ if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+ blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
@@ -1261,19 +1261,6 @@
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
- unsigned int max_hw_sectors;
-
- ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
- max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
- if (ctrl->max_hw_sectors) {
- ctrl->max_hw_sectors = min(max_hw_sectors,
- ctrl->max_hw_sectors);
- } else {
- ctrl->max_hw_sectors = max_hw_sectors;
- }
- }
-
nvme_set_queue_limits(ctrl, ctrl->admin_q);
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
@@ -2057,9 +2044,9 @@
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
*/
- if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- revalidate_disk(ns->disk);
-
+ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ continue;
+ revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d47f5a5..8edafd8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -121,7 +121,6 @@
u32 page_size;
u32 max_hw_sectors;
- u32 stripe_size;
u16 oncs;
u16 vid;
atomic_t abort_limit;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5e52034..8a9c186 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1983,8 +1983,10 @@
pci_set_drvdata(pdev, NULL);
- if (!pci_device_is_present(pdev))
+ if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+ nvme_dev_disable(dev, false);
+ }
flush_work(&dev->reset_work);
nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 366d8c3..c807c28 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -1,7 +1,7 @@
/*
* Device tree based initialization code for reserved memory.
*
- * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
+ * Copyright (c) 2013, 2015, 2017 The Linux Foundation. All Rights Reserved.
* Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
@@ -25,7 +25,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#define MAX_RESERVED_REGIONS 16
+#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
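
Doubling MAX_RESERVED_REGIONS matters because regions past the array limit
are silently dropped at early boot. A sketch of the save-time bounds check
this constant feeds (paraphrased from fdt_reserved_mem_save_node(), not part
of this patch):

    if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
            pr_err("not enough space for all defined regions.\n");
            return;  /* the region is lost once the table is full */
    }
    /* otherwise record the node in reserved_mem[reserved_mem_count++] */
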
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 6555eb7..6ae8964 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -120,6 +120,20 @@
If unsure, say N.
+config PCI_MSM
+ bool "MSM PCIe Controller driver"
+ depends on ARCH_QCOM && PCI
+ select PCI_DOMAINS
+ select PCI_DOMAINS_GENERIC
+ select PCI_MSI
+ help
+	  Enables PCIe functionality by configuring the PCIe core on
+	  MSM chipsets and enabling the ARM PCI framework extension.
+	  The PCIe core is essential for communication between the host
+	  and an endpoint.
+
+ If unsure, say N.
+
config PCI_LABEL
def_bool y if (DMI || ACPI)
select NLS
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index fdbbe41..a0fa943 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -24,13 +24,14 @@
#include <linux/kernel.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/regulator/rpm-smd-regulator.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/of_gpio.h>
-#include <linux/clk/msm-clk.h>
+#include <linux/clk/qcom.h>
#include <linux/reset.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
@@ -47,170 +48,27 @@
#include <linux/ipc_logging.h>
#include <linux/msm_pcie.h>
-#ifdef CONFIG_ARCH_MDMCALIFORNIUM
#define PCIE_VENDOR_ID_RCP 0x17cb
-#define PCIE_DEVICE_ID_RCP 0x0302
-
-#define PCIE20_L1SUB_CONTROL1 0x158
-#define PCIE20_PARF_DBI_BASE_ADDR 0x350
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
-
-#define TX_BASE 0x200
-#define RX_BASE 0x400
-#define PCS_BASE 0x800
-#define PCS_MISC_BASE 0x600
-
-#elif defined(CONFIG_ARCH_MSM8998)
-#define PCIE_VENDOR_ID_RCP 0x17cb
-#define PCIE_DEVICE_ID_RCP 0x0105
+#define PCIE_DEVICE_ID_RCP 0x0106
#define PCIE20_L1SUB_CONTROL1 0x1E4
#define PCIE20_PARF_DBI_BASE_ADDR 0x350
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
-#define TX_BASE 0
-#define RX_BASE 0
#define PCS_BASE 0x800
-#define PCS_MISC_BASE 0
-#else
-#define PCIE_VENDOR_ID_RCP 0x17cb
-#define PCIE_DEVICE_ID_RCP 0x0104
-
-#define PCIE20_L1SUB_CONTROL1 0x158
-#define PCIE20_PARF_DBI_BASE_ADDR 0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
-
-#define TX_BASE 0x1000
-#define RX_BASE 0x1200
-#define PCS_BASE 0x1400
-#define PCS_MISC_BASE 0
-#endif
-
-#define TX(n, m) (TX_BASE + n * m * 0x1000)
-#define RX(n, m) (RX_BASE + n * m * 0x1000)
#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
-#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + n * m * 0x1000)
-
-#define QSERDES_COM_BG_TIMER 0x00C
-#define QSERDES_COM_SSC_EN_CENTER 0x010
-#define QSERDES_COM_SSC_ADJ_PER1 0x014
-#define QSERDES_COM_SSC_ADJ_PER2 0x018
-#define QSERDES_COM_SSC_PER1 0x01C
-#define QSERDES_COM_SSC_PER2 0x020
-#define QSERDES_COM_SSC_STEP_SIZE1 0x024
-#define QSERDES_COM_SSC_STEP_SIZE2 0x028
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
-#define QSERDES_COM_CLK_ENABLE1 0x038
-#define QSERDES_COM_SYS_CLK_CTRL 0x03C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
-#define QSERDES_COM_PLL_IVCO 0x048
-#define QSERDES_COM_LOCK_CMP1_MODE0 0x04C
-#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
-#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
-#define QSERDES_COM_BG_TRIM 0x070
-#define QSERDES_COM_CLK_EP_DIV 0x074
-#define QSERDES_COM_CP_CTRL_MODE0 0x078
-#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
-#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
-#define QSERDES_COM_SYSCLK_EN_SEL 0x0AC
-#define QSERDES_COM_RESETSM_CNTRL 0x0B4
-#define QSERDES_COM_RESTRIM_CTRL 0x0BC
-#define QSERDES_COM_RESCODE_DIV_NUM 0x0C4
-#define QSERDES_COM_LOCK_CMP_EN 0x0C8
-#define QSERDES_COM_DEC_START_MODE0 0x0D0
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0DC
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0E0
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0E4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10C
-#define QSERDES_COM_VCO_TUNE_CTRL 0x124
-#define QSERDES_COM_VCO_TUNE_MAP 0x128
-#define QSERDES_COM_VCO_TUNE1_MODE0 0x12C
-#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
-#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
-#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
-#define QSERDES_COM_BG_CTRL 0x170
-#define QSERDES_COM_CLK_SELECT 0x174
-#define QSERDES_COM_HSCLK_SEL 0x178
-#define QSERDES_COM_CORECLK_DIV 0x184
-#define QSERDES_COM_CORE_CLK_EN 0x18C
-#define QSERDES_COM_C_READY_STATUS 0x190
-#define QSERDES_COM_CMN_CONFIG 0x194
-#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19C
-#define QSERDES_COM_DEBUG_BUS0 0x1A0
-#define QSERDES_COM_DEBUG_BUS1 0x1A4
-#define QSERDES_COM_DEBUG_BUS2 0x1A8
-#define QSERDES_COM_DEBUG_BUS3 0x1AC
-#define QSERDES_COM_DEBUG_BUS_SEL 0x1B0
-
-#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m) (TX(n, m) + 0x4C)
-#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m) (TX(n, m) + 0x64)
-#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
-#define QSERDES_TX_N_LANE_MODE(n, m) (TX(n, m) + 0x94)
-#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m) (TX(n, m) + 0xAC)
-
-#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m) (RX(n, m) + 0x010)
-#define QSERDES_RX_N_UCDR_SO_GAIN(n, m) (RX(n, m) + 0x01C)
-#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
-#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m) (RX(n, m) + 0x0D8)
-#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m) (RX(n, m) + 0x0DC)
-#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m) (RX(n, m) + 0x0E0)
-#define QSERDES_RX_N_SIGDET_ENABLES(n, m) (RX(n, m) + 0x110)
-#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m) (RX(n, m) + 0x11C)
-#define QSERDES_RX_N_SIGDET_LVL(n, m) (RX(n, m) + 0x118)
-#define QSERDES_RX_N_RX_BAND(n, m) (RX(n, m) + 0x120)
-
-#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x00)
-#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x04)
-#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x08)
-#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m) (PCS_MISC_PORT(n, m) + 0x0C)
-#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x14)
-#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x18)
-#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x1C)
-#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m) (PCS_MISC_PORT(n, m) + 0x20)
#define PCIE_N_SW_RESET(n, m) (PCS_PORT(n, m) + 0x00)
#define PCIE_N_POWER_DOWN_CONTROL(n, m) (PCS_PORT(n, m) + 0x04)
-#define PCIE_N_START_CONTROL(n, m) (PCS_PORT(n, m) + 0x08)
-#define PCIE_N_TXDEEMPH_M6DB_V0(n, m) (PCS_PORT(n, m) + 0x24)
-#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m) (PCS_PORT(n, m) + 0x28)
-#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m) (PCS_PORT(n, m) + 0x54)
-#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m) (PCS_PORT(n, m) + 0x58)
-#define PCIE_N_POWER_STATE_CONFIG1(n, m) (PCS_PORT(n, m) + 0x60)
-#define PCIE_N_POWER_STATE_CONFIG4(n, m) (PCS_PORT(n, m) + 0x6C)
-#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA0)
-#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m) (PCS_PORT(n, m) + 0xA4)
-#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m) (PCS_PORT(n, m) + 0xA8)
-#define PCIE_N_TEST_CONTROL4(n, m) (PCS_PORT(n, m) + 0x11C)
-#define PCIE_N_TEST_CONTROL5(n, m) (PCS_PORT(n, m) + 0x120)
-#define PCIE_N_TEST_CONTROL6(n, m) (PCS_PORT(n, m) + 0x124)
-#define PCIE_N_TEST_CONTROL7(n, m) (PCS_PORT(n, m) + 0x128)
#define PCIE_N_PCS_STATUS(n, m) (PCS_PORT(n, m) + 0x174)
-#define PCIE_N_DEBUG_BUS_0_STATUS(n, m) (PCS_PORT(n, m) + 0x198)
-#define PCIE_N_DEBUG_BUS_1_STATUS(n, m) (PCS_PORT(n, m) + 0x19C)
-#define PCIE_N_DEBUG_BUS_2_STATUS(n, m) (PCS_PORT(n, m) + 0x1A0)
-#define PCIE_N_DEBUG_BUS_3_STATUS(n, m) (PCS_PORT(n, m) + 0x1A4)
-#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1A8)
-#define PCIE_N_OSC_DTCT_ACTIONS(n, m) (PCS_PORT(n, m) + 0x1AC)
-#define PCIE_N_SIGDET_CNTRL(n, m) (PCS_PORT(n, m) + 0x1B0)
-#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m) (PCS_PORT(n, m) + 0x1DC)
-#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m) (PCS_PORT(n, m) + 0x1E0)
#define PCIE_COM_SW_RESET 0x400
#define PCIE_COM_POWER_DOWN_CONTROL 0x404
-#define PCIE_COM_START_CONTROL 0x408
-#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX 0x438
-#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX 0x43C
-#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX 0x440
-#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX 0x444
#define PCIE_COM_PCS_READY_STATUS 0x448
-#define PCIE_COM_DEBUG_BUS_0_STATUS 0x45C
-#define PCIE_COM_DEBUG_BUS_1_STATUS 0x460
-#define PCIE_COM_DEBUG_BUS_2_STATUS 0x464
-#define PCIE_COM_DEBUG_BUS_3_STATUS 0x468
#define PCIE20_PARF_SYS_CTRL 0x00
+#define PCIE20_PARF_PM_CTRL 0x20
#define PCIE20_PARF_PM_STTS 0x24
#define PCIE20_PARF_PCS_DEEMPH 0x34
#define PCIE20_PARF_PCS_SWING 0x38
@@ -227,6 +85,7 @@
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_PARF_BDF_TRANSLATE_N 0x250
+#define PCIE20_PARF_DEVICE_TYPE 0x1000
#define PCIE20_ELBI_VERSION 0x00
#define PCIE20_ELBI_SYS_CTRL 0x04
@@ -299,7 +158,7 @@
#define MAX_PROP_SIZE 32
#define MAX_RC_NAME_LEN 15
#define MSM_PCIE_MAX_VREG 4
-#define MSM_PCIE_MAX_CLK 9
+#define MSM_PCIE_MAX_CLK 12
#define MSM_PCIE_MAX_PIPE_CLK 1
#define MAX_RC_NUM 3
#define MAX_DEVICE_NUM 20
@@ -313,7 +172,7 @@
#define PCIE_CLEAR 0xDEADBEEF
#define PCIE_LINK_DOWN 0xFFFFFFFF
-#define MSM_PCIE_MAX_RESET 4
+#define MSM_PCIE_MAX_RESET 5
#define MSM_PCIE_MAX_PIPE_RESET 1
#define MSM_PCIE_MSI_PHY 0xa0000000
@@ -480,6 +339,11 @@
MSM_PCIE_LINK_DISABLED
};
+enum msm_pcie_boot_option {
+ MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
+ MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
+};
+
/* gpio info structure */
struct msm_pcie_gpio_info_t {
char *name;
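
The new msm_pcie_boot_option values are bitmask flags, so probe-time and
wake-time enumeration can be deferred independently. A short usage sketch
(the dev pointer is illustrative):

    /* defer probe-time enumeration; wake-triggered enumeration stays on */
    dev->boot_option = MSM_PCIE_NO_PROBE_ENUMERATION;

    if (!(dev->boot_option & MSM_PCIE_NO_PROBE_ENUMERATION))
            msm_pcie_enumerate(dev->rc_idx);
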
@@ -623,12 +487,11 @@
uint32_t wr_halt_size;
uint32_t cpl_timeout;
uint32_t current_bdf;
- short current_short_bdf;
uint32_t perst_delay_us_min;
uint32_t perst_delay_us_max;
uint32_t tlp_rd_size;
bool linkdown_panic;
- bool ep_wakeirq;
+ uint32_t boot_option;
uint32_t rc_idx;
uint32_t phy_ver;
@@ -728,18 +591,21 @@
static struct msm_pcie_reset_info_t
msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
{
+ {NULL, "pcie_0_core_reset", false},
{NULL, "pcie_phy_reset", false},
{NULL, "pcie_phy_com_reset", false},
{NULL, "pcie_phy_nocsr_com_phy_reset", false},
{NULL, "pcie_0_phy_reset", false}
},
{
+ {NULL, "pcie_1_core_reset", false},
{NULL, "pcie_phy_reset", false},
{NULL, "pcie_phy_com_reset", false},
{NULL, "pcie_phy_nocsr_com_phy_reset", false},
{NULL, "pcie_1_phy_reset", false}
},
{
+ {NULL, "pcie_2_core_reset", false},
{NULL, "pcie_phy_reset", false},
{NULL, "pcie_phy_com_reset", false},
{NULL, "pcie_phy_nocsr_com_phy_reset", false},
@@ -772,6 +638,9 @@
{NULL, "pcie_0_slv_axi_clk", 0, true, true},
{NULL, "pcie_0_ldo", 0, false, true},
{NULL, "pcie_0_smmu_clk", 0, false, false},
+ {NULL, "pcie_0_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_phy_refgen_clk", 0, false, false},
+ {NULL, "pcie_tbu_clk", 0, false, false},
{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
{NULL, "pcie_phy_aux_clk", 0, false, false}
},
@@ -783,6 +652,9 @@
{NULL, "pcie_1_slv_axi_clk", 0, true, true},
{NULL, "pcie_1_ldo", 0, false, true},
{NULL, "pcie_1_smmu_clk", 0, false, false},
+ {NULL, "pcie_1_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_phy_refgen_clk", 0, false, false},
+ {NULL, "pcie_tbu_clk", 0, false, false},
{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
{NULL, "pcie_phy_aux_clk", 0, false, false}
},
@@ -794,6 +666,9 @@
{NULL, "pcie_2_slv_axi_clk", 0, true, true},
{NULL, "pcie_2_ldo", 0, false, true},
{NULL, "pcie_2_smmu_clk", 0, false, false},
+ {NULL, "pcie_2_slv_q2a_axi_clk", 0, false, false},
+ {NULL, "pcie_phy_refgen_clk", 0, false, false},
+ {NULL, "pcie_tbu_clk", 0, false, false},
{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
{NULL, "pcie_phy_aux_clk", 0, false, false}
}
@@ -854,6 +729,8 @@
{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
};
+static int msm_pcie_config_device(struct pci_dev *dev, void *pdev);
+
#ifdef CONFIG_ARM
#define PCIE_BUS_PRIV_DATA(bus) \
(((struct pci_sys_data *)bus->sysdata)->private_data)
@@ -932,393 +809,9 @@
dev->rc_idx, info->name);
}
-#if defined(CONFIG_ARCH_FSM9010)
-#define PCIE20_PARF_PHY_STTS 0x3c
-#define PCIE2_PHY_RESET_CTRL 0x44
-#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
-#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
-#define PCIE20_PARF_PCS_SWING_CTRL1 0x88
-#define PCIE20_PARF_PCS_SWING_CTRL2 0x8c
-#define PCIE20_PARF_PCS_DEEMPH1 0x74
-#define PCIE20_PARF_PCS_DEEMPH2 0x78
-#define PCIE20_PARF_PCS_DEEMPH3 0x7c
-#define PCIE20_PARF_CONFIGBITS 0x84
-#define PCIE20_PARF_PHY_CTRL3 0x94
-#define PCIE20_PARF_PCS_CTRL 0x80
-
-#define TX_AMP_VAL 127
-#define PHY_RX0_EQ_GEN1_VAL 0
-#define PHY_RX0_EQ_GEN2_VAL 4
-#define TX_DEEMPH_GEN1_VAL 24
-#define TX_DEEMPH_GEN2_3_5DB_VAL 24
-#define TX_DEEMPH_GEN2_6DB_VAL 34
-#define PHY_TX0_TERM_OFFST_VAL 0
-
-static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
-{
-}
-
-static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
-{
- msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
- BIT(0), (assert) ? 1 : 0);
-}
-
-static void pcie_phy_init(struct msm_pcie_dev_t *dev)
-{
- PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
- dev->rc_idx);
-
- /* De-assert Phy SW Reset */
- pcie20_phy_reset(dev, 1);
-
- /* Program SSP ENABLE */
- if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
- BIT(0), 0);
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
- BIT(0)) == 0)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
- BIT(0), 1);
- /* Program Tx Amplitude */
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
- (BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
- TX_AMP_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
- BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
- TX_AMP_VAL);
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
- (BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
- TX_AMP_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
- BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
- TX_AMP_VAL);
- /* Program De-Emphasis */
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
- (BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
- TX_DEEMPH_GEN2_6DB_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
- BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
- TX_DEEMPH_GEN2_6DB_VAL);
-
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
- (BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
- TX_DEEMPH_GEN2_3_5DB_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
- BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
- TX_DEEMPH_GEN2_3_5DB_VAL);
-
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
- (BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
- TX_DEEMPH_GEN1_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
- BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
- TX_DEEMPH_GEN1_VAL);
-
- /* Program Rx_Eq */
- if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
- (BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
- BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);
-
- /* Program Tx0_term_offset */
- if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
- (BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
- PHY_TX0_TERM_OFFST_VAL)
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
- BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
- PHY_TX0_TERM_OFFST_VAL);
-
- /* Program REF_CLK source */
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
- (dev->ext_ref_clk) ? 1 : 0);
- /* disable Tx2Rx Loopback */
- if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
- msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
- BIT(1), 0);
- /* De-assert Phy SW Reset */
- pcie20_phy_reset(dev, 0);
-}
-
-static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
-{
-
- /* read PCIE20_PARF_PHY_STTS twice */
- readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
- if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0))
- return false;
- else
- return true;
-}
-#else
-static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
- u32 cntrl4_val, u32 cntrl5_val,
- u32 cntrl6_val, u32 cntrl7_val)
-{
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);
-
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_TEST_CONTROL4(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_TEST_CONTROL5(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_TEST_CONTROL6(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_TEST_CONTROL7(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
- dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
- dev->common_phy)));
-}
-
static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
{
int i, size;
- u32 write_val;
-
- if (dev->phy_ver >= 0x20) {
- PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
- dev->rc_idx);
- return;
- }
-
- PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);
-
- pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
- pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
- pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);
-
- for (i = 0; i < 3; i++) {
- write_val = 0x1 + i;
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
- dev->common_phy), write_val);
- PCIE_DUMP(dev,
- "PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
- dev->common_phy)));
-
- pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
- }
-
- pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
-
- if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
- pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
- pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
- pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
-
- for (i = 0; i < 8; i += 4) {
- write_val = 0x1 + i;
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
- dev->common_phy), write_val);
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
- dev->common_phy), write_val + 1);
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
- dev->common_phy), write_val + 2);
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
- dev->common_phy), write_val + 3);
-
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_0_STATUS(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_1_STATUS(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_2_STATUS(
- dev->rc_idx, dev->common_phy)));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_MISC_N_DEBUG_BUS_3_STATUS(
- dev->rc_idx, dev->common_phy)));
- }
-
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
- dev->rc_idx, dev->common_phy), 0);
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
- dev->rc_idx, dev->common_phy), 0);
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
- dev->rc_idx, dev->common_phy), 0);
- msm_pcie_write_reg(dev->phy,
- PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
- dev->rc_idx, dev->common_phy), 0);
- }
-
- for (i = 0; i < 2; i++) {
- write_val = 0x2 + i;
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
- write_val);
-
- PCIE_DUMP(dev,
- "PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
- PCIE_DUMP(dev,
- "PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
- PCIE_DUMP(dev,
- "PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
- PCIE_DUMP(dev,
- "PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
- PCIE_DUMP(dev,
- "PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
- dev->rc_idx,
- readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
- }
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);
-
- if (dev->common_phy) {
- msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
- 0x01);
- msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
- 0x02);
- msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
- 0x03);
- msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
- 0x04);
-
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_0_STATUS));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_1_STATUS));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_2_STATUS));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_3_STATUS));
-
- msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
- 0x05);
-
- PCIE_DUMP(dev,
- "PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
- PCIE_DUMP(dev,
- "PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
- dev->rc_idx,
- readl_relaxed(dev->phy +
- PCIE_COM_DEBUG_BUS_0_STATUS));
- }
size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
for (i = 0; i < size; i += 32) {
@@ -1336,181 +829,6 @@
}
}
-#ifdef CONFIG_ARCH_MDMCALIFORNIUM
-static void pcie_phy_init(struct msm_pcie_dev_t *dev)
-{
- u8 common_phy;
-
- PCIE_DBG(dev,
- "RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
- dev->rc_idx);
-
- if (dev->common_phy)
- common_phy = 1;
- else
- common_phy = 0;
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_SW_RESET(dev->rc_idx, common_phy),
- 0x01);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
- 0x03);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
-
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
-
- if (dev->tcsr) {
- PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
- dev->rc_idx, readl_relaxed(dev->tcsr));
-
- if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
- msm_pcie_write_reg(dev->phy,
- QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
- else
- msm_pcie_write_reg(dev->phy,
- QSERDES_COM_SYSCLK_EN_SEL, 0x04);
- }
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
-
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
- common_phy), 0x45);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
-
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
- 0x02);
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
- 0x12);
-
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
- 0x1C);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
- 0x14);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
- 0x01);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
- 0xDB);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
- common_phy),
- 0x4B);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
- 0x04);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
- 0x04);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
- 0x04);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
- 0x40);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
- 0x40);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
- 0x40);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
- 0x73);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
- 0x99);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
- 0x15);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
- 0x0E);
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
- 0x07);
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_SW_RESET(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
- 0x03);
-}
-
-static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
-{
-}
-
-static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
-{
- if (readl_relaxed(dev->phy +
- PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6))
- return false;
- else
- return true;
-}
-#else
static void pcie_phy_init(struct msm_pcie_dev_t *dev)
{
int i;
@@ -1532,64 +850,6 @@
phy_seq->delay + 1);
phy_seq++;
}
- return;
- }
-
- if (dev->common_phy)
- msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
-
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
-
- if (dev->phy_ver == 0x3) {
- msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
- msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
- }
-
- if (dev->common_phy) {
- msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
- msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
}
}
@@ -1597,18 +857,9 @@
{
int i;
struct msm_pcie_phy_info_t *phy_seq;
- u8 common_phy;
-
- if (dev->phy_ver >= 0x20)
- return;
PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);
- if (dev->common_phy)
- common_phy = 1;
- else
- common_phy = 0;
-
if (dev->port_phy_sequence) {
i = dev->port_phy_len;
phy_seq = dev->port_phy_sequence;
@@ -1621,93 +872,8 @@
phy_seq->delay + 1);
phy_seq++;
}
- return;
}
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
- common_phy), 0x45);
- msm_pcie_write_reg(dev->phy,
- QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
- 0x06);
-
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
- 0x1C);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
- 0x17);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
- 0x01);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
- 0xDB);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
- 0x18);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
- 0x04);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
- 0x04);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
- 0x4C);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
- 0x01);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
- 0x05);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
- common_phy), 0x4B);
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
- 0x14);
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
- 0x05);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
- 0x02);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
- 0xA3);
-
- if (dev->phy_ver == 0x3) {
- msm_pcie_write_reg(dev->phy,
- QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
- 0x19);
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
- 0x0E);
- }
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
- 0x03);
- usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);
-
- msm_pcie_write_reg(dev->phy,
- PCIE_N_SW_RESET(dev->rc_idx, common_phy),
- 0x00);
- msm_pcie_write_reg(dev->phy,
- PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
- 0x0A);
}
static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
@@ -1726,8 +892,6 @@
else
return true;
}
-#endif
-#endif
static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
{
@@ -1949,8 +1113,8 @@
dev->aer_enable ? "" : "not");
PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
dev->ext_ref_clk);
- PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
- dev->ep_wakeirq);
+ PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
+ dev->boot_option);
PCIE_DBG_FS(dev, "phy_ver is %d\n",
dev->phy_ver);
PCIE_DBG_FS(dev, "drv_ready is %d\n",
@@ -1969,8 +1133,6 @@
dev->msi_gicm_base);
PCIE_DBG_FS(dev, "bus_client: %d\n",
dev->bus_client);
- PCIE_DBG_FS(dev, "current short bdf: %d\n",
- dev->current_short_bdf);
PCIE_DBG_FS(dev, "smmu does %s exist\n",
dev->smmu_exist ? "" : "not");
PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
@@ -2417,8 +1579,16 @@
dev->res[base_sel - 1].base,
wr_offset, wr_mask, wr_value);
- msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset))
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ else
+ msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+ wr_offset, wr_mask, wr_value);
break;
case 13: /* dump all registers of base_sel */
@@ -2505,6 +1675,48 @@
}
EXPORT_SYMBOL(msm_pcie_debug_info);
+#ifdef CONFIG_SYSFS
+static ssize_t msm_pcie_enumerate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
+ dev_get_drvdata(dev);
+
+ if (pcie_dev)
+ msm_pcie_enumerate(pcie_dev->rc_idx);
+
+ return count;
+}
+
+static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
+
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+ int ret;
+
+ ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
+ if (ret)
+ PCIE_DBG_FS(dev,
+ "RC%d: failed to create sysfs enumerate node\n",
+ dev->rc_idx);
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+ if (dev->pdev)
+ device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
+}
+#else
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
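
With the enumerate attribute above, userspace can kick off enumeration after
boot rather than at probe. A hypothetical usage sketch; the platform-device
path is an assumption and varies by target:

    #include <fcntl.h>
    #include <unistd.h>

    int trigger_enumeration(void)
    {
            /* any write to the node invokes msm_pcie_enumerate() for this RC */
            int fd = open("/sys/devices/platform/soc/1c00000.qcom,pcie/enumerate",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            write(fd, "1", 1);
            close(fd);
            return 0;
    }
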
@@ -2514,7 +1726,7 @@
static struct dentry *dfile_wr_offset;
static struct dentry *dfile_wr_mask;
static struct dentry *dfile_wr_value;
-static struct dentry *dfile_ep_wakeirq;
+static struct dentry *dfile_boot_option;
static struct dentry *dfile_aer_enable;
static struct dentry *dfile_corr_counter_limit;
@@ -2528,13 +1740,14 @@
char str[MAX_MSG_LEN];
unsigned int testcase = 0;
int i;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
testcase = (testcase * 10) + (str[i] - '0');
if (!rc_sel)
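
The recurring size clamp above caps the copy at the caller-supplied count, so
copy_from_user() no longer reads past a short user buffer. The same clamp
written with the kernel's min_t() helper (an equivalent sketch, not part of
this patch):

    u32 size = min_t(u32, sizeof(str), count);

    memset(str, 0, sizeof(str));
    if (copy_from_user(str, buf, size))
            return -EFAULT;
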
@@ -2563,13 +1776,14 @@
char str[MAX_MSG_LEN];
int i;
u32 new_rc_sel = 0;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
@@ -2606,13 +1820,14 @@
int i;
u32 new_base_sel = 0;
char *base_sel_name;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
new_base_sel = (new_base_sel * 10) + (str[i] - '0');
if (!new_base_sel || new_base_sel > 5) {
@@ -2707,14 +1922,15 @@
unsigned long ret;
char str[MAX_MSG_LEN];
int i;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
wr_offset = 0;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
wr_offset = (wr_offset * 10) + (str[i] - '0');
pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
@@ -2733,14 +1949,15 @@
unsigned long ret;
char str[MAX_MSG_LEN];
int i;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
wr_mask = 0;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
wr_mask = (wr_mask * 10) + (str[i] - '0');
pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
@@ -2758,14 +1975,15 @@
unsigned long ret;
char str[MAX_MSG_LEN];
int i;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
wr_value = 0;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
wr_value = (wr_value * 10) + (str[i] - '0');
pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
@@ -2777,13 +1995,13 @@
.write = msm_pcie_set_wr_value,
};
-static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
+static ssize_t msm_pcie_set_boot_option(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long ret;
char str[MAX_MSG_LEN];
- u32 new_ep_wakeirq = 0;
+ u32 new_boot_option = 0;
int i;
memset(str, 0, sizeof(str));
@@ -2792,33 +2010,33 @@
return -EFAULT;
for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
- new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
+ new_boot_option = (new_boot_option * 10) + (str[i] - '0');
- if (new_ep_wakeirq <= 1) {
+ if (new_boot_option <= 1) {
for (i = 0; i < MAX_RC_NUM; i++) {
if (!rc_sel) {
- msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
+ msm_pcie_dev[0].boot_option = new_boot_option;
PCIE_DBG_FS(&msm_pcie_dev[0],
- "PCIe: RC0: ep_wakeirq is now %d\n",
- msm_pcie_dev[0].ep_wakeirq);
+ "PCIe: RC0: boot_option is now 0x%x\n",
+ msm_pcie_dev[0].boot_option);
break;
} else if (rc_sel & (1 << i)) {
- msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
+ msm_pcie_dev[i].boot_option = new_boot_option;
PCIE_DBG_FS(&msm_pcie_dev[i],
- "PCIe: RC%d: ep_wakeirq is now %d\n",
- i, msm_pcie_dev[i].ep_wakeirq);
+ "PCIe: RC%d: boot_option is now 0x%x\n",
+ i, msm_pcie_dev[i].boot_option);
}
}
} else {
- pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
- new_ep_wakeirq);
+ pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
+ new_boot_option);
}
return count;
}
-const struct file_operations msm_pcie_ep_wakeirq_ops = {
- .write = msm_pcie_set_ep_wakeirq,
+const struct file_operations msm_pcie_boot_option_ops = {
+ .write = msm_pcie_set_boot_option,
};
static ssize_t msm_pcie_set_aer_enable(struct file *file,
@@ -2884,14 +2102,15 @@
unsigned long ret;
char str[MAX_MSG_LEN];
int i;
+ u32 size = sizeof(str) < count ? sizeof(str) : count;
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, sizeof(str));
+ memset(str, 0, size);
+ ret = copy_from_user(str, buf, size);
if (ret)
return -EFAULT;
corr_counter_limit = 0;
- for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+ for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
@@ -2970,12 +2189,12 @@
goto wr_value_error;
}
- dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
+ dfile_boot_option = debugfs_create_file("boot_option", 0664,
dent_msm_pcie, 0,
- &msm_pcie_ep_wakeirq_ops);
- if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
- pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
- goto ep_wakeirq_error;
+ &msm_pcie_boot_option_ops);
+ if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
+ pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
+ goto boot_option_error;
}
dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
@@ -2998,8 +2217,8 @@
corr_counter_limit_error:
debugfs_remove(dfile_aer_enable);
aer_enable_error:
- debugfs_remove(dfile_ep_wakeirq);
-ep_wakeirq_error:
+ debugfs_remove(dfile_boot_option);
+boot_option_error:
debugfs_remove(dfile_wr_value);
wr_value_error:
debugfs_remove(dfile_wr_mask);
@@ -3026,7 +2245,7 @@
debugfs_remove(dfile_wr_offset);
debugfs_remove(dfile_wr_mask);
debugfs_remove(dfile_wr_value);
- debugfs_remove(dfile_ep_wakeirq);
+ debugfs_remove(dfile_boot_option);
debugfs_remove(dfile_aer_enable);
debugfs_remove(dfile_corr_counter_limit);
}
@@ -3257,7 +2476,7 @@
word_offset = where & ~0x3;
byte_offset = where & 0x3;
- mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
+ mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);
if (rc || !dev->enumerated) {
config_base = rc ? dev->dm_core : dev->conf;
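
The (u32) cast in the mask computation above is load-bearing: ~0 is a signed
int, and right-shifting a negative int keeps the sign bit on typical
arithmetic-shift targets, so the old mask never narrowed for sub-word config
accesses. A standalone illustration (hypothetical test program, not driver
code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int size = 1, byte_offset = 0;  /* a 1-byte config access */
            uint32_t fixed = ((uint32_t)~0 >> (8 * (4 - size)))
                             << (8 * byte_offset);
            uint32_t old = (uint32_t)((~0 >> (8 * (4 - size)))
                             << (8 * byte_offset));

            /* typically prints fixed=0x000000ff old=0xffffffff */
            printf("fixed=0x%08x old=0x%08x\n", fixed, old);
            return 0;
    }
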
@@ -3292,12 +2511,17 @@
writel_relaxed(wr_val, config_base + word_offset);
wmb(); /* ensure config data is written to hardware register */
- if (rd_val == PCIE_LINK_DOWN)
- PCIE_ERR(dev,
- "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
- rc_idx, bus->number, devfn, where, size);
- else if (dev->shadow_en)
- msm_pcie_save_shadow(dev, word_offset, wr_val, bdf, rc);
+ if (dev->shadow_en) {
+ if (rd_val == PCIE_LINK_DOWN &&
+ (readl_relaxed(config_base) == PCIE_LINK_DOWN))
+ PCIE_ERR(dev,
+ "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
+ rc_idx, bus->number, devfn,
+ where, size);
+ else
+ msm_pcie_save_shadow(dev, word_offset, wr_val,
+ bdf, rc);
+ }
PCIE_DBG3(dev,
"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
@@ -3444,8 +2668,8 @@
dev->rc_idx,
dev->vreg[i].name);
regulator_set_voltage(hdl,
- RPM_REGULATOR_CORNER_NONE,
- INT_MAX);
+ RPMH_REGULATOR_LEVEL_OFF,
+ RPMH_REGULATOR_LEVEL_MAX);
}
}
@@ -3474,8 +2698,8 @@
dev->rc_idx,
dev->vreg[i].name);
regulator_set_voltage(dev->vreg[i].hdl,
- RPM_REGULATOR_CORNER_NONE,
- INT_MAX);
+ RPMH_REGULATOR_LEVEL_OFF,
+ RPMH_REGULATOR_LEVEL_MAX);
}
}
}
@@ -3577,6 +2801,19 @@
for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
reset_info = &dev->reset[i];
if (reset_info->hdl) {
+ rc = reset_control_assert(reset_info->hdl);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: RC%d failed to assert reset for %s.\n",
+ dev->rc_idx, reset_info->name);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d successfully asserted reset for %s.\n",
+ dev->rc_idx, reset_info->name);
+
+ /* add a 1ms delay to ensure the reset is asserted */
+ usleep_range(1000, 1005);
+
rc = reset_control_deassert(reset_info->hdl);
if (rc)
PCIE_ERR(dev,
@@ -3681,6 +2918,19 @@
for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
pipe_reset_info = &dev->pipe_reset[i];
if (pipe_reset_info->hdl) {
+ rc = reset_control_assert(pipe_reset_info->hdl);
+ if (rc)
+ PCIE_ERR(dev,
+ "PCIe: RC%d failed to assert pipe reset for %s.\n",
+ dev->rc_idx, pipe_reset_info->name);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d successfully asserted pipe reset for %s.\n",
+ dev->rc_idx, pipe_reset_info->name);
+
+ /* add a 1ms delay to ensure the reset is asserted */
+ usleep_range(1000, 1005);
+
rc = reset_control_deassert(
pipe_reset_info->hdl);
if (rc)
@@ -3734,8 +2984,6 @@
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
- int i;
-
PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
/*
@@ -3791,27 +3039,6 @@
PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
}
-
- /* configure SMMU registers */
- if (dev->smmu_exist) {
- msm_pcie_write_reg(dev->parf,
- PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
- msm_pcie_write_reg(dev->parf,
- PCIE20_PARF_SID_OFFSET, 0);
-
- if (dev->enumerated) {
- for (i = 0; i < MAX_DEVICE_NUM; i++) {
- if (dev->pcidev_table[i].dev &&
- dev->pcidev_table[i].short_bdf) {
- msm_pcie_write_reg(dev->parf,
- PCIE20_PARF_BDF_TRANSLATE_N +
- dev->pcidev_table[i].short_bdf
- * 4,
- dev->pcidev_table[i].bdf >> 16);
- }
- }
- }
- }
}
static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
@@ -4459,6 +3686,13 @@
msm_pcie_restore_sec_config(dev);
}
+ /* configure PCIe to RC mode */
+ msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);
+
+ /* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
+ if (dev->l1_supported)
+ msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+
/* enable PCIe clocks and resets */
msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
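
The writes above go through the driver's read-modify-write helper. A sketch
of the semantics assumed here, with msm_pcie_write_mask(addr, clear, set)
clearing the mask bits before ORing in the value (inferred from usage, not a
copy of the helper):

    static inline void rmw_sketch(void __iomem *addr, u32 clear, u32 set)
    {
            u32 val = readl_relaxed(addr);

            val &= ~clear;
            val |= set;
            writel_relaxed(val, addr);
            wmb();  /* ensure the update reaches the hardware */
    }
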
@@ -4591,6 +3825,8 @@
do {
usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+ PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
+ dev->rc_idx, (val >> 12) & 0x3f);
} while ((!(val & XMLH_LINK_UP) ||
!msm_pcie_confirm_linkup(dev, false, false, NULL))
&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
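
The new debug print decodes the LTSSM state from bits [17:12] of
PCIE20_ELBI_SYS_STTS, hence the shift-and-mask. A minimal sketch; the
0x11 == L0 encoding is the usual DesignWare convention and an assumption
here:

    u32 ltssm = (val >> 12) & 0x3f;  /* 6-bit LTSSM state field */

    if (ltssm == 0x11)
            pr_info("PCIe link reached L0\n");  /* hypothetical consumer */
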
@@ -4618,6 +3854,9 @@
msm_pcie_config_link_state(dev);
+ if (dev->enumerated)
+ pci_walk_bus(dev->dev->bus, &msm_pcie_config_device, dev);
+
dev->link_status = MSM_PCIE_LINK_ENABLED;
dev->power_on = true;
dev->suspending = false;
@@ -4865,106 +4104,41 @@
return ret;
}
-int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
+static void msm_pcie_configure_sid(struct msm_pcie_dev_t *pcie_dev,
+ struct pci_dev *dev)
{
- struct pci_dev *pcidev;
- struct msm_pcie_dev_t *pcie_dev;
- struct pci_bus *bus;
- int i;
+ u32 offset;
+ u32 sid;
u32 bdf;
+ int ret;
- if (!dev) {
- pr_err("%s: PCIe: endpoint device passed in is NULL\n",
- __func__);
- return MSM_PCIE_ERROR;
- }
-
- pcidev = to_pci_dev(dev);
- if (!pcidev) {
- pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
- __func__);
- return MSM_PCIE_ERROR;
- }
-
- bus = pcidev->bus;
- if (!bus) {
- pr_err("%s: PCIe: Bus of PCI device is NULL\n",
- __func__);
- return MSM_PCIE_ERROR;
- }
-
- while (!pci_is_root_bus(bus))
- bus = bus->parent;
-
- pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
- if (!pcie_dev) {
- pr_err("%s: PCIe: Could not get PCIe structure\n",
- __func__);
- return MSM_PCIE_ERROR;
- }
-
- if (!pcie_dev->smmu_exist) {
+ ret = iommu_fwspec_get_id(&dev->dev, &sid);
+ if (ret) {
PCIE_DBG(pcie_dev,
- "PCIe: RC:%d: smmu does not exist\n",
+ "PCIe: RC%d: Device does not have a SID\n",
pcie_dev->rc_idx);
- return MSM_PCIE_ERROR;
- }
-
- PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
- pcie_dev->rc_idx, dev);
- PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
- pcie_dev->rc_idx, pcidev);
-
- *domain = pcie_dev->rc_idx;
-
- if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
- pcie_dev->current_short_bdf++;
- } else {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: No more short BDF left\n",
- pcie_dev->rc_idx);
- return MSM_PCIE_ERROR;
- }
-
- bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
-
- for (i = 0; i < MAX_DEVICE_NUM; i++) {
- if (pcie_dev->pcidev_table[i].bdf == bdf) {
- *sid = pcie_dev->smmu_sid_base +
- ((pcie_dev->rc_idx << 4) |
- pcie_dev->current_short_bdf);
-
- msm_pcie_write_reg(pcie_dev->parf,
- PCIE20_PARF_BDF_TRANSLATE_N +
- pcie_dev->current_short_bdf * 4,
- bdf >> 16);
-
- pcie_dev->pcidev_table[i].sid = *sid;
- pcie_dev->pcidev_table[i].short_bdf =
- pcie_dev->current_short_bdf;
- break;
- }
- }
-
- if (i == MAX_DEVICE_NUM) {
- pcie_dev->current_short_bdf--;
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d could not find BDF:%d\n",
- pcie_dev->rc_idx, bdf);
- return MSM_PCIE_ERROR;
+ return;
}
PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
- pcie_dev->rc_idx,
- bdf >> 24,
- bdf >> 19 & 0x1f,
- bdf >> 16 & 0x07,
- *sid);
+ "PCIe: RC%d: Device SID: 0x%x\n",
+ pcie_dev->rc_idx, sid);
- return 0;
+ bdf = BDF_OFFSET(dev->bus->number, dev->devfn);
+ offset = (sid - pcie_dev->smmu_sid_base) * 4;
+
+ if (offset >= MAX_SHORT_BDF_NUM * 4) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: Invalid SID offset: 0x%x. Should be less than 0x%x\n",
+ pcie_dev->rc_idx, offset, MAX_SHORT_BDF_NUM * 4);
+ return;
+ }
+
+ msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
+ msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_SID_OFFSET, 0);
+ msm_pcie_write_reg(pcie_dev->parf,
+ PCIE20_PARF_BDF_TRANSLATE_N + offset, bdf >> 16);
}
-EXPORT_SYMBOL(msm_pcie_configure_sid);
int msm_pcie_enumerate(u32 rc_idx)
{
@@ -5358,14 +4532,10 @@
PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
dev->rc_idx);
- if (!dev->enumerated) {
- PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx);
- if (dev->ep_wakeirq)
- schedule_work(&dev->handle_wake_work);
- else
- PCIE_DBG(dev,
- "wake irq is received but ep_wakeirq is not supported for RC%d.\n",
- dev->rc_idx);
+ if (!dev->enumerated && !(dev->boot_option &
+ MSM_PCIE_NO_WAKE_ENUMERATION)) {
+ PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
+ schedule_work(&dev->handle_wake_work);
} else {
PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
__pm_stay_awake(&dev->ws);
@@ -5511,7 +4681,7 @@
handle_aer_irq(irq, data);
break;
default:
- PCIE_ERR(dev,
+ PCIE_DUMP(dev,
"PCIe: RC%d: Unexpected event %d is caught!\n",
dev->rc_idx, i);
}
@@ -5523,35 +4693,84 @@
return IRQ_HANDLED;
}
-void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
+static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev)
{
- int pos, i;
- struct msm_pcie_dev_t *dev;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+ int bypass_en = 0;
- if (pcie_dev)
- dev = pcie_dev;
- else
- dev = irq_get_chip_data(irq);
-
- if (!dev) {
- pr_err("PCIe: device is null. IRQ:%d\n", irq);
+ if (!domain) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: client does not have an iommu domain\n",
+ dev->rc_idx);
return;
}
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+ if (!bypass_en) {
+ int ret;
+ phys_addr_t pcie_base_addr =
+ dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+ dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
+
+ ret = iommu_unmap(domain, iova, PAGE_SIZE);
+ if (ret != PAGE_SIZE)
+ PCIE_ERR(dev,
+ "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
+ dev->rc_idx, ret);
+ }
+}
+
+void msm_pcie_destroy_irq(unsigned int irq)
+{
+ int pos;
+ struct pci_dev *pdev = irq_get_chip_data(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
+ struct msi_desc *firstentry;
+ struct msm_pcie_dev_t *dev;
+ u32 nvec;
+ int firstirq;
+
+ if (!pdev) {
+ pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
+ return;
+ }
+
+ dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+ if (!dev) {
+ pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
+ return;
+ }
+
+ if (!entry) {
+ PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
+ dev->rc_idx, irq);
+ return;
+ }
+
+ firstentry = first_pci_msi_entry(pdev);
+ if (!firstentry) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
+ dev->rc_idx, irq);
+ return;
+ }
+
+ firstirq = firstentry->irq;
+ nvec = (1 << entry->msi_attrib.multiple);
+
if (dev->msi_gicm_addr) {
PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
- for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
- if (irq == dev->msi[i].num)
- break;
- if (i == MSM_PCIE_MAX_MSI) {
+ if (irq < firstirq || irq > firstirq + nvec - 1) {
PCIE_ERR(dev,
"Could not find irq: %d in RC%d MSI table\n",
irq, dev->rc_idx);
return;
}
-
- pos = i;
+ if (irq == firstirq + nvec - 1)
+ msm_pcie_unmap_qgic_addr(dev, pdev);
+ pos = irq - firstirq;
} else {
PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
pos = irq - irq_find_mapping(dev->irq_domain, 0);
@@ -5570,7 +4789,7 @@
void arch_teardown_msi_irq(unsigned int irq)
{
PCIE_GEN_DBG("irq %d deallocated\n", irq);
- msm_pcie_destroy_irq(irq, NULL);
+ msm_pcie_destroy_irq(irq);
}
void arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -5590,7 +4809,7 @@
continue;
nvec = 1 << entry->msi_attrib.multiple;
for (i = 0; i < nvec; i++)
- msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
+ arch_teardown_msi_irq(entry->irq + i);
}
}
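(The range check in msm_pcie_destroy_irq above relies on the standard multi-MSI encoding; a small self-contained sketch under that assumption:)

	/* msi_attrib.multiple stores log2 of the vector count, so a block
	 * of nvec = 1 << multiple IRQs starts at firstirq; irq belongs to
	 * the block iff firstirq <= irq < firstirq + nvec. */
	static int irq_in_msi_block(unsigned int irq, unsigned int firstirq,
				    unsigned int multiple)
	{
		unsigned int nvec = 1U << multiple;

		return irq >= firstirq && irq < firstirq + nvec;
	}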
@@ -5651,6 +4870,7 @@
PCIE_DBG(dev, "irq %d allocated\n", irq);
+ irq_set_chip_data(irq, pdev);
irq_set_msi_desc(irq, desc);
/* write msi vector and data */
@@ -5698,10 +4918,76 @@
return irq;
}
+static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
+ struct pci_dev *pdev,
+ struct msi_msg *msg)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+ struct iommu_domain_geometry geometry;
+ int ret, fastmap_en = 0, bypass_en = 0;
+ dma_addr_t iova;
+ phys_addr_t gicm_db_offset;
+
+ msg->address_hi = 0;
+ msg->address_lo = dev->msi_gicm_addr;
+
+ if (!domain) {
+ PCIE_DBG(dev,
+ "PCIe: RC%d: client does not have an iommu domain\n",
+ dev->rc_idx);
+ return 0;
+ }
+
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+
+ PCIE_DBG(dev,
+ "PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
+ dev->rc_idx, bypass_en ? "bypass" : "enabled",
+ pdev->bus->number, pdev->devfn);
+
+ if (bypass_en)
+ return 0;
+
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en);
+ if (fastmap_en) {
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry);
+ iova = geometry.aperture_start;
+ PCIE_DBG(dev,
+ "PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n",
+ dev->rc_idx, iova);
+ } else {
+ phys_addr_t pcie_base_addr;
+
+		/*
+		 * Use the PCIe DBI address as the IOVA since the client
+		 * cannot use this address for its own IOMMU mappings.
+		 * This prevents any conflicts between the PCIe host's
+		 * mapping and the client's.
+		 */
+ pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+ iova = rounddown(pcie_base_addr, PAGE_SIZE);
+ }
+
+ ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ PCIE_ERR(dev,
+ "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
+ dev->rc_idx, ret);
+ return -ENOMEM;
+ }
+
+ gicm_db_offset = dev->msi_gicm_addr -
+ rounddown(dev->msi_gicm_addr, PAGE_SIZE);
+ msg->address_lo = iova + gicm_db_offset;
+
+ return 0;
+}
+
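(A worked example of the doorbell arithmetic above, with illustrative numbers only — the IOVA and QGIC address are assumptions, not values from the driver:)

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SZ 0x1000ULL

	int main(void)
	{
		uint64_t iova = 0x10000000ULL;  /* assumed IOVA picked by the driver */
		uint64_t gicm = 0x17110040ULL;  /* assumed QGIC doorbell address */
		uint64_t page = gicm & ~(PAGE_SZ - 1);  /* 0x17110000 */

		/* One page is mapped; the MSI message reuses the IOVA plus
		 * the doorbell's offset inside that page. */
		printf("address_lo = 0x%llx\n",
		       (unsigned long long)(iova + (gicm - page)));  /* 0x10000040 */
		return 0;
	}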
static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
struct msi_desc *desc, int nvec)
{
- int irq, index, firstirq = 0;
+ int irq, index, ret, firstirq = 0;
struct msi_msg msg;
struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
@@ -5718,12 +5004,16 @@
firstirq = irq;
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_chip_data(irq, pdev);
}
/* write msi vector and data */
irq_set_msi_desc(firstirq, desc);
- msg.address_hi = 0;
- msg.address_lo = dev->msi_gicm_addr;
+
+ ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
+ if (ret)
+ return ret;
+
msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
write_msi_msg(firstirq, &msg);
@@ -5795,7 +5085,6 @@
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
return 0;
}
@@ -5953,6 +5242,28 @@
disable_irq(dev->wake_n);
}
+static int msm_pcie_config_device(struct pci_dev *dev, void *pdev)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)pdev;
+ u8 busnr = dev->bus->number;
+ u8 slot = PCI_SLOT(dev->devfn);
+ u8 func = PCI_FUNC(dev->devfn);
+
+ PCIE_DBG(pcie_dev, "PCIe: RC%d: configure PCI device %02x:%02x.%01x\n",
+ pcie_dev->rc_idx, busnr, slot, func);
+
+ msm_pcie_configure_sid(pcie_dev, dev);
+
+ return 0;
+}
+
+/* Hook to setup PCI device during PCI framework scan */
+int pcibios_add_device(struct pci_dev *dev)
+{
+ struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+ return msm_pcie_config_device(dev, pcie_dev);
+}
static int msm_pcie_probe(struct platform_device *pdev)
{
@@ -6041,12 +5352,12 @@
msm_pcie_dev[rc_idx].rc_idx,
msm_pcie_dev[rc_idx].smmu_sid_base);
- msm_pcie_dev[rc_idx].ep_wakeirq =
- of_property_read_bool((&pdev->dev)->of_node,
- "qcom,ep-wakeirq");
+ msm_pcie_dev[rc_idx].boot_option = 0;
+ ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
+ &msm_pcie_dev[rc_idx].boot_option);
PCIE_DBG(&msm_pcie_dev[rc_idx],
- "PCIe: EP of RC%d does %s assert wake when it is up.\n",
- rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
+ "PCIe: RC%d boot option is 0x%x.\n",
+ rc_idx, msm_pcie_dev[rc_idx].boot_option);
msm_pcie_dev[rc_idx].phy_ver = 1;
ret = of_property_read_u32((&pdev->dev)->of_node,
@@ -6228,7 +5539,6 @@
msm_pcie_dev[rc_idx].wake_counter = 0;
msm_pcie_dev[rc_idx].aer_enable = true;
msm_pcie_dev[rc_idx].power_on = false;
- msm_pcie_dev[rc_idx].current_short_bdf = 0;
msm_pcie_dev[rc_idx].use_msi = false;
msm_pcie_dev[rc_idx].use_pinctrl = false;
msm_pcie_dev[rc_idx].linkdown_panic = false;
@@ -6271,6 +5581,8 @@
msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
}
+ dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
+
ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
msm_pcie_dev[rc_idx].pdev);
@@ -6320,11 +5632,14 @@
goto decrease_rc_num;
}
+ msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
+
msm_pcie_dev[rc_idx].drv_ready = true;
- if (msm_pcie_dev[rc_idx].ep_wakeirq) {
+ if (msm_pcie_dev[rc_idx].boot_option &
+ MSM_PCIE_NO_PROBE_ENUMERATION) {
PCIE_DBG(&msm_pcie_dev[rc_idx],
- "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
+ "PCIe: RC%d will be enumerated by client or endpoint.\n",
rc_idx);
mutex_unlock(&pcie_drv.drv_lock);
return 0;
@@ -6484,11 +5799,16 @@
static void __exit pcie_exit(void)
{
+ int i;
+
PCIE_GEN_DBG("pcie:%s.\n", __func__);
platform_driver_unregister(&msm_pcie_driver);
msm_pcie_debugfs_exit();
+
+ for (i = 0; i < MAX_RC_NUM; i++)
+ msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
}
subsys_initcall_sync(pcie_init);
@@ -6569,12 +5889,12 @@
PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
pcie_dev->rc_idx);
- msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
-
if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
pinctrl_select_state(pcie_dev->pinctrl,
pcie_dev->pins_sleep);
+ msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+
PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
return ret;
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6abaf80..c3276ee 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -284,35 +284,16 @@
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static int thunder_pem_init(struct pci_config_window *cfg)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
{
- struct device *dev = cfg->parent;
- resource_size_t bar4_start;
- struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
- struct platform_device *pdev;
-
- /* Only OF support for now */
- if (!dev->of_node)
- return -EINVAL;
+ resource_size_t bar4_start;
pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
if (!pem_pci)
return -ENOMEM;
- pdev = to_platform_device(dev);
-
- /*
- * The second register range is the PEM bridge to the PCIe
- * bus. It has a different config access method than those
- * devices behind the bridge.
- */
- res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_pem) {
- dev_err(dev, "missing \"reg[1]\"property\n");
- return -EINVAL;
- }
-
pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
if (!pem_pci->pem_reg_base)
return -ENOMEM;
@@ -332,9 +313,32 @@
return 0;
}
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+		dev_err(dev, "missing \"reg[1]\" property\n");
+ return -EINVAL;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
static struct pci_ecam_ops pci_thunder_pem_ops = {
.bus_shift = 24,
- .init = thunder_pem_init,
+ .init = thunder_pem_platform_init,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = thunder_pem_config_read,
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 8ce0890..46ca8ed 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -44,8 +44,7 @@
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
- LIST_HEAD(res);
- struct resource res_mem;
+ LIST_HEAD(resources);
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -62,22 +61,23 @@
pcie->base_addr = bdev->addr;
- res_mem.start = bdev->addr_s[0];
- res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
- res_mem.name = "PCIe MEM space";
- res_mem.flags = IORESOURCE_MEM;
- pci_add_resource(&res, &res_mem);
+ pcie->mem.start = bdev->addr_s[0];
+ pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+ pcie->mem.name = "PCIe MEM space";
+ pcie->mem.flags = IORESOURCE_MEM;
+ pci_add_resource(&resources, &pcie->mem);
pcie->map_irq = iproc_pcie_bcma_map_irq;
- ret = iproc_pcie_setup(pcie, &res);
- if (ret)
+ ret = iproc_pcie_setup(pcie, &resources);
+ if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
-
- pci_free_resource_list(&res);
+ pci_free_resource_list(&resources);
+ return ret;
+ }
bcma_set_drvdata(bdev, pcie);
- return ret;
+ return 0;
}
static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index a3de087..7dcaddc 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -46,7 +46,7 @@
struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
- LIST_HEAD(res);
+ LIST_HEAD(resources);
int ret;
of_id = of_match_device(iproc_pcie_of_match_table, dev);
@@ -108,23 +108,24 @@
pcie->phy = NULL;
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
+ &iobase);
if (ret) {
- dev_err(dev,
- "unable to get PCI host bridge resources\n");
+ dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
}
pcie->map_irq = of_irq_parse_and_map_pci;
- ret = iproc_pcie_setup(pcie, &res);
- if (ret)
+ ret = iproc_pcie_setup(pcie, &resources);
+ if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
-
- pci_free_resource_list(&res);
+ pci_free_resource_list(&resources);
+ return ret;
+ }
platform_set_drvdata(pdev, pcie);
- return ret;
+ return 0;
}
static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e84d93c..fa42267 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -68,6 +68,7 @@
#ifdef CONFIG_ARM
struct pci_sys_data sysdata;
#endif
+ struct resource mem;
struct pci_bus *root_bus;
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 087a218..5d8151b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1634,6 +1634,7 @@
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
/*
@@ -2156,7 +2157,7 @@
{
if (dev->vpd) {
dev->vpd->len = 0;
- dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
+ dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
}
@@ -2240,6 +2241,27 @@
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
+#ifdef CONFIG_PCIE_IPROC_PLATFORM
+static void quirk_paxc_bridge(struct pci_dev *pdev)
+{
+ /* The PCI config space is shared with the PAXC root port and the first
+	 * Ethernet device. So, we need to work around this by telling the PCI
+ * code that the bridge is not an Ethernet device.
+ */
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+
+ /* MPSS is not being set properly (as it is currently 0). This is
+ * because that area of the PCI config space is hard coded to zero, and
+	 * is not modifiable by firmware. Set this to 2 (i.e., a 512-byte MPS)
+ * so that the MPS can be set to the real max value.
+ */
+ pdev->pcie_mpss = 2;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+#endif
+
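(For reference, PCIe encodes the maximum payload size as a power of two above 128 bytes, which is why an MPSS field of 2 corresponds to 512-byte payloads; a one-line sketch:)

	/* MPS bytes = 128 << mpss, so mpss = 2 gives 128 << 2 = 512. */
	static unsigned int mpss_to_bytes(unsigned int mpss)
	{
		return 128U << mpss;
	}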
/* Originally in EDAC sources for i82875P:
* Intel tells BIOS developers to hide device 6 which
* configures the overflow device access containing
@@ -3114,30 +3136,32 @@
{
dev->d3_delay = 0;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+/* C600 Series devices do not need 10ms d3_delay */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+/* Lynxpoint-H PCH devices do not need 10ms d3_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
/* Intel Cherrytrail devices do not need 10ms d3_delay */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
/*
* Some devices may pass our check in pci_intx_mask_supported() if
@@ -4137,6 +4161,26 @@
}
/*
+ * These QCOM root ports do provide ACS-like features to disable peer
+ * transactions and validate bus numbers in requests, but do not provide an
+ * actual PCIe ACS capability. Hardware supports source validation but it
+ * will report the issue as Completer Abort instead of ACS Violation.
+ * Hardware doesn't support peer-to-peer and each root port is a root
+ * complex with unique segment numbers. It is not possible for one root
+ * port to pass traffic to another root port. All PCIe transactions are
+ * terminated inside the root port.
+ */
+static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
+ int ret = acs_flags & ~flags ? 0 : 1;
+
+ dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret);
+
+ return ret;
+}
+
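(The quirk's return value reduces to a subset test on the ACS flags; a hedged sketch of that predicate, with hypothetical names:)

	#include <stdint.h>

	/* Claim the capability only when every requested ACS flag is one
	 * the hardware effectively provides (here RR | CR | UF | SV). */
	static int acs_flags_covered(uint16_t requested, uint16_t provided)
	{
		return (requested & ~provided) == 0;
	}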
+/*
* Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
* the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
* 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
@@ -4151,15 +4195,35 @@
*
* N.B. This doesn't fix what lspci shows.
*
+ * The 100 series chipset specification update includes this as errata #23[3].
+ *
+ * The 200 series chipset (Union Point) has the same bug according to the
+ * specification update (Intel 200 Series Chipset Family Platform Controller
+ * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001,
+ * Errata 22)[4]. Per the datasheet[5], root port PCI Device IDs for this
+ * chipset include:
+ *
+ * 0xa290-0xa29f PCI Express Root port #{0-16}
+ * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
+ *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
+ * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
+ * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
*/
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
- return pci_is_pcie(dev) &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
- ((dev->device & ~0xf) == 0xa110 ||
- (dev->device >= 0xa167 && dev->device <= 0xa16a));
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ switch (dev->device) {
+ case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
+ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+ return true;
+ }
+
+ return false;
}
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
@@ -4272,6 +4336,9 @@
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
+ /* QCOM QDF2xxx root ports */
+ { 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
+ { 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index ba6d5ce..304e206 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -97,6 +97,15 @@
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SDM830 platform.
+config PINCTRL_SDXPOORWILLS
+ tristate "Qualcomm Technologies Inc SDXPOORWILLS pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDXPOORWILLS platform.
+
config PINCTRL_MSM8996
tristate "Qualcomm MSM8996 pin controller driver"
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 5e05e897..4786960 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -17,5 +17,6 @@
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDM830) += pinctrl-sdm830.o
+obj-$(CONFIG_PINCTRL_SDXPOORWILLS) += pinctrl-sdxpoorwills.o
obj-$(CONFIG_PINCTRL_WCD) += pinctrl-wcd.o
obj-$(CONFIG_PINCTRL_LPI) += pinctrl-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
new file mode 100644
index 0000000..4a21eb6
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
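(A quick sketch of the register layout the PINGROUP macro above encodes: each GPIO gets a 0x1000-byte stride, with the I/O, interrupt-config and interrupt-status registers at fixed offsets inside it — e.g. gpio5: ctl 0x5000, io 0x5004, intr_cfg 0x5008, intr_status 0x500c. Helper names are illustrative, not driver API.)

	#include <stdint.h>

	#define REG_BASE 0x0u
	#define REG_SIZE 0x1000u /* per-GPIO stride */

	static uint32_t tlmm_ctl_reg(unsigned int id)     { return REG_BASE + REG_SIZE * id; }
	static uint32_t tlmm_io_reg(unsigned int id)      { return REG_BASE + 0x4 + REG_SIZE * id; }
	static uint32_t tlmm_intr_cfg(unsigned int id)    { return REG_BASE + 0x8 + REG_SIZE * id; }
	static uint32_t tlmm_intr_status(unsigned int id) { return REG_BASE + 0xc + REG_SIZE * id; }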
+static const struct pinctrl_pin_desc sdxpoorwills_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "SDC1_CLK"),
+ PINCTRL_PIN(101, "SDC1_CMD"),
+ PINCTRL_PIN(102, "SDC1_DATA"),
+ PINCTRL_PIN(103, "SDC2_CLK"),
+ PINCTRL_PIN(104, "SDC2_CMD"),
+ PINCTRL_PIN(105, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+
+static const unsigned int sdc1_clk_pins[] = { 100 };
+static const unsigned int sdc1_cmd_pins[] = { 101 };
+static const unsigned int sdc1_data_pins[] = { 102 };
+static const unsigned int sdc2_clk_pins[] = { 103 };
+static const unsigned int sdc2_cmd_pins[] = { 104 };
+static const unsigned int sdc2_data_pins[] = { 105 };
+
+enum sdxpoorwills_functions {
+ msm_mux_qdss_stm31,
+ msm_mux_blsp_uart1,
+ msm_mux_gpio,
+ msm_mux_uim2_data,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_uim2_present,
+ msm_mux_qdss_stm30,
+ msm_mux_uim2_reset,
+ msm_mux_blsp_i2c1,
+ msm_mux_qdss_stm29,
+ msm_mux_uim2_clk,
+ msm_mux_qdss_stm28,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_uart2,
+ msm_mux_qdss_stm23,
+ msm_mux_qdss3,
+ msm_mux_qdss_stm22,
+ msm_mux_qdss2,
+ msm_mux_blsp_i2c2,
+ msm_mux_qdss_stm21,
+ msm_mux_qdss1,
+ msm_mux_qdss_stm20,
+ msm_mux_qdss0,
+ msm_mux_pri_mi2s,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_uart3,
+ msm_mux_ext_dbg,
+ msm_mux_ldo_en,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp3,
+ msm_mux_qdss_stm19,
+ msm_mux_qdss12,
+ msm_mux_qdss_stm18,
+ msm_mux_qdss13,
+ msm_mux_qdss_stm17,
+ msm_mux_qdss14,
+ msm_mux_bimc_dte0,
+ msm_mux_native_tsens,
+ msm_mux_vsense_trigger,
+ msm_mux_qdss_stm26,
+ msm_mux_qdss9,
+ msm_mux_blsp_i2c4,
+ msm_mux_gcc_gp1,
+ msm_mux_qdss_stm25,
+ msm_mux_qdss10,
+ msm_mux_jitter_bist,
+ msm_mux_gcc_gp2,
+ msm_mux_qdss_stm24,
+ msm_mux_qdss11,
+ msm_mux_qdss_stm16,
+ msm_mux_qdss15,
+ msm_mux_bimc_dte1,
+ msm_mux_sec_mi2s,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_uart4,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_stm27,
+ msm_mux_qdss8,
+ msm_mux_ebi2_a,
+ msm_mux_qdss_stm3,
+ msm_mux_ebi2_lcd,
+ msm_mux_qdss_stm2,
+ msm_mux_pll_bist,
+ msm_mux_qdss_stm1,
+ msm_mux_qdss_stm0,
+ msm_mux_adsp_ext,
+ msm_mux_epm1,
+ msm_mux_m_voc,
+ msm_mux_native_char,
+ msm_mux_native_char1,
+ msm_mux_pa_indicator,
+ msm_mux_qdss_traceclk,
+ msm_mux_native_char0,
+ msm_mux_qlink_en,
+ msm_mux_qlink_req,
+ msm_mux_pll_test,
+ msm_mux_cri_trng,
+ msm_mux_wmss_reset,
+ msm_mux_native_char3,
+ msm_mux_nav_pps,
+ msm_mux_nav_dr,
+ msm_mux_native_char2,
+ msm_mux_native_tsense,
+ msm_mux_prng_rosc,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_pll_ref,
+ msm_mux_coex_uart,
+ msm_mux_qdss_stm11,
+ msm_mux_qdss_stm10,
+ msm_mux_ddr_pxi0,
+ msm_mux_ap2mdm_status,
+ msm_mux_ddr_bist,
+ msm_mux_mdm2ap_status,
+ msm_mux_ap2mdm_err,
+ msm_mux_mdm2ap_err,
+ msm_mux_ap2mdm_vdd,
+ msm_mux_mdm2ap_vdd,
+ msm_mux_ap2mdm_wake,
+ msm_mux_pciehost_rst,
+ msm_mux_blsp_spi1,
+ msm_mux_qdss_stm14,
+ msm_mux_pcie_wake,
+ msm_mux_mdm2ap_wake,
+ msm_mux_pci_e,
+ msm_mux_qdss_stm13,
+ msm_mux_i2s_mclk,
+ msm_mux_audio_ref,
+ msm_mux_ldo_update,
+ msm_mux_qdss_stm8,
+ msm_mux_qdss_stm7,
+ msm_mux_qdss4,
+ msm_mux_tgu_ch0,
+ msm_mux_pcie_clkreq,
+ msm_mux_qdss_stm9,
+ msm_mux_qdss_stm15,
+ msm_mux_mgpi_clk,
+ msm_mux_qdss_stm12,
+ msm_mux_qdss_tracectl,
+ msm_mux_atest_char,
+ msm_mux_qdss_stm6,
+ msm_mux_qdss5,
+ msm_mux_atest_char3,
+ msm_mux_qdss_stm5,
+ msm_mux_qdss6,
+ msm_mux_atest_char2,
+ msm_mux_qdss_stm4,
+ msm_mux_qdss7,
+ msm_mux_atest_char1,
+ msm_mux_uim1_data,
+ msm_mux_atest_char0,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_clk,
+ msm_mux_dbg_out,
+ msm_mux_gcc_plltest,
+ msm_mux_usb2phy_ac,
+ msm_mux_NA,
+};
+
+static const char * const qdss_stm31_groups[] = {
+ "gpio0",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22",
+ "gpio23",
+};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio54", "gpio55", "gpio56", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio73",
+ "gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79", "gpio80",
+ "gpio81", "gpio82", "gpio83", "gpio84", "gpio85", "gpio86", "gpio87",
+ "gpio88", "gpio89", "gpio90", "gpio91", "gpio92", "gpio93", "gpio94",
+ "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio0",
+};
+static const char * const ebi0_wrcdc_groups[] = {
+ "gpio0", "gpio2",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio1",
+};
+static const char * const qdss_stm30_groups[] = {
+ "gpio1",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio2",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3", "gpio74", "gpio75",
+};
+static const char * const qdss_stm29_groups[] = {
+ "gpio2",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio3",
+};
+static const char * const qdss_stm28_groups[] = {
+ "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio52", "gpio62", "gpio71",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio63", "gpio64", "gpio65",
+ "gpio66",
+};
+static const char * const qdss_stm23_groups[] = {
+ "gpio4",
+};
+static const char * const qdss3_groups[] = {
+ "gpio4",
+};
+static const char * const qdss_stm22_groups[] = {
+ "gpio5",
+};
+static const char * const qdss2_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7", "gpio65", "gpio66",
+};
+static const char * const qdss_stm21_groups[] = {
+ "gpio6",
+};
+static const char * const qdss1_groups[] = {
+ "gpio6",
+};
+static const char * const qdss_stm20_groups[] = {
+ "gpio7",
+};
+static const char * const qdss0_groups[] = {
+ "gpio7",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio52", "gpio62", "gpio71",
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ext_dbg_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio8",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio11",
+};
+static const char * const qdss_stm19_groups[] = {
+ "gpio12",
+};
+static const char * const qdss12_groups[] = {
+ "gpio12",
+};
+static const char * const qdss_stm18_groups[] = {
+ "gpio13",
+};
+static const char * const qdss13_groups[] = {
+ "gpio13",
+};
+static const char * const qdss_stm17_groups[] = {
+ "gpio14",
+};
+static const char * const qdss14_groups[] = {
+ "gpio14",
+};
+static const char * const bimc_dte0_groups[] = {
+ "gpio14", "gpio59",
+};
+static const char * const native_tsens_groups[] = {
+ "gpio14",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio14",
+};
+static const char * const qdss_stm26_groups[] = {
+ "gpio17",
+};
+static const char * const qdss9_groups[] = {
+ "gpio17",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio18", "gpio19", "gpio76", "gpio77",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio18",
+};
+static const char * const qdss_stm25_groups[] = {
+ "gpio18",
+};
+static const char * const qdss10_groups[] = {
+ "gpio18",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio19",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio19",
+};
+static const char * const qdss_stm24_groups[] = {
+ "gpio19",
+};
+static const char * const qdss11_groups[] = {
+ "gpio19",
+};
+static const char * const qdss_stm16_groups[] = {
+ "gpio15",
+};
+static const char * const qdss15_groups[] = {
+ "gpio15",
+};
+static const char * const bimc_dte1_groups[] = {
+ "gpio15", "gpio60",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71",
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23",
+ "gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio61",
+ "gpio88", "gpio88", "gpio89", "gpio89",
+};
+static const char * const qdss_stm27_groups[] = {
+ "gpio16",
+};
+static const char * const qdss8_groups[] = {
+ "gpio16",
+};
+static const char * const ebi2_a_groups[] = {
+ "gpio20",
+};
+static const char * const qdss_stm3_groups[] = {
+ "gpio20",
+};
+static const char * const ebi2_lcd_groups[] = {
+ "gpio21", "gpio22", "gpio23",
+};
+static const char * const qdss_stm2_groups[] = {
+ "gpio21",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio22",
+};
+static const char * const qdss_stm1_groups[] = {
+ "gpio22",
+};
+static const char * const qdss_stm0_groups[] = {
+ "gpio23",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const epm1_groups[] = {
+ "gpio25",
+};
+static const char * const m_voc_groups[] = {
+ "gpio25", "gpio46", "gpio59", "gpio61",
+};
+static const char * const native_char_groups[] = {
+ "gpio26",
+};
+static const char * const native_char1_groups[] = {
+ "gpio32",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio33",
+};
+static const char * const qdss_traceclk_groups[] = {
+ "gpio33",
+};
+static const char * const native_char0_groups[] = {
+ "gpio33",
+};
+static const char * const qlink_en_groups[] = {
+ "gpio34",
+};
+static const char * const qlink_req_groups[] = {
+ "gpio35",
+};
+static const char * const pll_test_groups[] = {
+ "gpio35",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio36",
+};
+static const char * const wmss_reset_groups[] = {
+ "gpio28",
+};
+static const char * const native_char3_groups[] = {
+ "gpio28",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio29", "gpio42", "gpio62",
+};
+static const char * const nav_dr_groups[] = {
+ "gpio29", "gpio42", "gpio62",
+};
+static const char * const native_char2_groups[] = {
+ "gpio29",
+};
+static const char * const native_tsense_groups[] = {
+ "gpio29",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio40",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio41",
+};
+static const char * const pll_ref_groups[] = {
+ "gpio42",
+};
+static const char * const coex_uart_groups[] = {
+ "gpio44", "gpio45",
+};
+static const char * const qdss_stm11_groups[] = {
+ "gpio44",
+};
+static const char * const qdss_stm10_groups[] = {
+ "gpio45",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const ap2mdm_status_groups[] = {
+ "gpio46",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio46", "gpio47", "gpio48", "gpio49",
+};
+static const char * const mdm2ap_status_groups[] = {
+ "gpio47",
+};
+static const char * const ap2mdm_err_groups[] = {
+ "gpio48",
+};
+static const char * const mdm2ap_err_groups[] = {
+ "gpio49",
+};
+static const char * const ap2mdm_vdd_groups[] = {
+ "gpio50",
+};
+static const char * const mdm2ap_vdd_groups[] = {
+ "gpio51",
+};
+static const char * const ap2mdm_wake_groups[] = {
+ "gpio52",
+};
+static const char * const pciehost_rst_groups[] = {
+ "gpio52",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio52", "gpio62", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+};
+static const char * const qdss_stm14_groups[] = {
+ "gpio52",
+};
+static const char * const pcie_wake_groups[] = {
+ "gpio53",
+};
+static const char * const mdm2ap_wake_groups[] = {
+ "gpio53",
+};
+static const char * const pci_e_groups[] = {
+ "gpio53", "gpio57",
+};
+static const char * const qdss_stm13_groups[] = {
+ "gpio53",
+};
+static const char * const i2s_mclk_groups[] = {
+ "gpio62",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio62",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio62",
+};
+static const char * const qdss_stm8_groups[] = {
+ "gpio62",
+};
+static const char * const qdss_stm7_groups[] = {
+ "gpio63",
+};
+static const char * const qdss4_groups[] = {
+ "gpio63",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio55",
+};
+static const char * const pcie_clkreq_groups[] = {
+ "gpio56",
+};
+static const char * const qdss_stm9_groups[] = {
+ "gpio56",
+};
+static const char * const qdss_stm15_groups[] = {
+ "gpio57",
+};
+static const char * const mgpi_clk_groups[] = {
+ "gpio60", "gpio71",
+};
+static const char * const qdss_stm12_groups[] = {
+ "gpio60",
+};
+static const char * const qdss_tracectl_groups[] = {
+ "gpio60",
+};
+static const char * const atest_char_groups[] = {
+ "gpio63",
+};
+static const char * const qdss_stm6_groups[] = {
+ "gpio64",
+};
+static const char * const qdss5_groups[] = {
+ "gpio64",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio64",
+};
+static const char * const qdss_stm5_groups[] = {
+ "gpio65",
+};
+static const char * const qdss6_groups[] = {
+ "gpio65",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio65",
+};
+static const char * const qdss_stm4_groups[] = {
+ "gpio66",
+};
+static const char * const qdss7_groups[] = {
+ "gpio66",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio66",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio67",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio67",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio68",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio69",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio70",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio71",
+};
+static const char * const gcc_plltest_groups[] = {
+ "gpio73", "gpio74",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio87",
+};
+
+static const struct msm_function sdxpoorwills_functions[] = {
+ FUNCTION(qdss_stm31),
+ FUNCTION(blsp_uart1),
+ FUNCTION(gpio),
+ FUNCTION(uim2_data),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(uim2_present),
+ FUNCTION(qdss_stm30),
+ FUNCTION(uim2_reset),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(qdss_stm29),
+ FUNCTION(uim2_clk),
+ FUNCTION(qdss_stm28),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_uart2),
+ FUNCTION(qdss_stm23),
+ FUNCTION(qdss3),
+ FUNCTION(qdss_stm22),
+ FUNCTION(qdss2),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(qdss_stm21),
+ FUNCTION(qdss1),
+ FUNCTION(qdss_stm20),
+ FUNCTION(qdss0),
+ FUNCTION(pri_mi2s),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_uart3),
+ FUNCTION(ext_dbg),
+ FUNCTION(ldo_en),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp3),
+ FUNCTION(qdss_stm19),
+ FUNCTION(qdss12),
+ FUNCTION(qdss_stm18),
+ FUNCTION(qdss13),
+ FUNCTION(qdss_stm17),
+ FUNCTION(qdss14),
+ FUNCTION(bimc_dte0),
+ FUNCTION(native_tsens),
+ FUNCTION(vsense_trigger),
+ FUNCTION(qdss_stm26),
+ FUNCTION(qdss9),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(gcc_gp1),
+ FUNCTION(qdss_stm25),
+ FUNCTION(qdss10),
+ FUNCTION(jitter_bist),
+ FUNCTION(gcc_gp2),
+ FUNCTION(qdss_stm24),
+ FUNCTION(qdss11),
+ FUNCTION(qdss_stm16),
+ FUNCTION(qdss15),
+ FUNCTION(bimc_dte1),
+ FUNCTION(sec_mi2s),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_uart4),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_stm27),
+ FUNCTION(qdss8),
+ FUNCTION(ebi2_a),
+ FUNCTION(qdss_stm3),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(qdss_stm2),
+ FUNCTION(pll_bist),
+ FUNCTION(qdss_stm1),
+ FUNCTION(qdss_stm0),
+ FUNCTION(adsp_ext),
+ FUNCTION(epm1),
+ FUNCTION(m_voc),
+ FUNCTION(native_char),
+ FUNCTION(native_char1),
+ FUNCTION(pa_indicator),
+ FUNCTION(qdss_traceclk),
+ FUNCTION(native_char0),
+ FUNCTION(qlink_en),
+ FUNCTION(qlink_req),
+ FUNCTION(pll_test),
+ FUNCTION(cri_trng),
+ FUNCTION(wmss_reset),
+ FUNCTION(native_char3),
+ FUNCTION(nav_pps),
+ FUNCTION(nav_dr),
+ FUNCTION(native_char2),
+ FUNCTION(native_tsense),
+ FUNCTION(prng_rosc),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(pll_ref),
+ FUNCTION(coex_uart),
+ FUNCTION(qdss_stm11),
+ FUNCTION(qdss_stm10),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ap2mdm_status),
+ FUNCTION(ddr_bist),
+ FUNCTION(mdm2ap_status),
+ FUNCTION(ap2mdm_err),
+ FUNCTION(mdm2ap_err),
+ FUNCTION(ap2mdm_vdd),
+ FUNCTION(mdm2ap_vdd),
+ FUNCTION(ap2mdm_wake),
+ FUNCTION(pciehost_rst),
+ FUNCTION(blsp_spi1),
+ FUNCTION(qdss_stm14),
+ FUNCTION(pcie_wake),
+ FUNCTION(mdm2ap_wake),
+ FUNCTION(pci_e),
+ FUNCTION(qdss_stm13),
+ FUNCTION(i2s_mclk),
+ FUNCTION(audio_ref),
+ FUNCTION(ldo_update),
+ FUNCTION(qdss_stm8),
+ FUNCTION(qdss_stm7),
+ FUNCTION(qdss4),
+ FUNCTION(tgu_ch0),
+ FUNCTION(pcie_clkreq),
+ FUNCTION(qdss_stm9),
+ FUNCTION(qdss_stm15),
+ FUNCTION(mgpi_clk),
+ FUNCTION(qdss_stm12),
+ FUNCTION(qdss_tracectl),
+ FUNCTION(atest_char),
+ FUNCTION(qdss_stm6),
+ FUNCTION(qdss5),
+ FUNCTION(atest_char3),
+ FUNCTION(qdss_stm5),
+ FUNCTION(qdss6),
+ FUNCTION(atest_char2),
+ FUNCTION(qdss_stm4),
+ FUNCTION(qdss7),
+ FUNCTION(atest_char1),
+ FUNCTION(uim1_data),
+ FUNCTION(atest_char0),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_clk),
+ FUNCTION(dbg_out),
+ FUNCTION(gcc_plltest),
+ FUNCTION(usb2phy_ac),
+};
+
+static const struct msm_pingroup sdxpoorwills_groups[] = {
+ PINGROUP(0, uim2_data, blsp_uart1, qdss_stm31, ebi0_wrcdc, NA, NA, NA,
+ NA, NA),
+ PINGROUP(1, uim2_present, blsp_uart1, qdss_stm30, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm29, ebi0_wrcdc,
+ NA, NA, NA, NA),
+ PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm28, NA, NA, NA,
+ NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, NA, qdss_stm23, qdss3, NA, NA, NA,
+ NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, NA, qdss_stm22, qdss2, NA, NA, NA,
+ NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm21, qdss1,
+ NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm20, qdss0,
+ NA, NA, NA),
+ PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, NA, NA,
+ NA, NA),
+ PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, NA, NA, NA, NA,
+ NA),
+ PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, NA,
+ NA, NA, NA),
+ PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg,
+ gcc_gp3, NA, NA, NA),
+ PINGROUP(12, pri_mi2s, NA, qdss_stm19, qdss12, NA, NA, NA, NA, NA),
+ PINGROUP(13, pri_mi2s, NA, qdss_stm18, qdss13, NA, NA, NA, NA, NA),
+ PINGROUP(14, pri_mi2s, NA, NA, qdss_stm17, qdss14, bimc_dte0,
+ native_tsens, vsense_trigger, NA),
+ PINGROUP(15, pri_mi2s, NA, NA, qdss_stm16, qdss15, NA, NA, bimc_dte1,
+ NA),
+ PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, NA,
+ NA, qdss_stm27, qdss8),
+ PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, NA,
+ qdss_stm26, qdss9, NA),
+ PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, gcc_gp1, NA,
+ qdss_stm25, qdss10, NA),
+ PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, jitter_bist,
+ gcc_gp2, NA, qdss_stm24, qdss11),
+ PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, NA, qdss_stm3,
+ NA, NA, NA),
+ PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, NA, NA,
+ qdss_stm2, NA, NA),
+ PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti,
+ blsp_uart4, pll_bist, NA, qdss_stm1),
+ PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1,
+ blsp_uart4, NA, qdss_stm0, NA),
+ PINGROUP(24, adsp_ext, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, m_voc, adsp_ext, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, NA, NA, NA, native_char, NA, NA, NA, NA, NA),
+ PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, wmss_reset, native_char3, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, NA, NA, nav_pps, nav_dr, NA, native_char2, native_tsense,
+ NA, NA),
+ PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, NA, native_char1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, pa_indicator, qdss_traceclk, native_char0, NA, NA, NA,
+ NA, NA),
+ PINGROUP(34, qlink_en, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, qlink_req, pll_test, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, NA, NA, cri_trng, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, prng_rosc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(40, NA, NA, cri_trng0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, NA, NA, cri_trng1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, nav_pps, NA, nav_dr, pll_ref, NA, NA, NA, NA, NA),
+ PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(44, coex_uart, NA, qdss_stm11, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, coex_uart, NA, qdss_stm10, ddr_pxi0, NA, NA, NA, NA, NA),
+ PINGROUP(46, m_voc, ddr_bist, ddr_pxi0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(47, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, NA, NA,
+ qdss_stm14, NA, NA),
+ PINGROUP(53, pci_e, NA, NA, qdss_stm13, NA, NA, NA, NA, NA),
+ PINGROUP(54, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, pcie_clkreq, NA, qdss_stm9, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, NA, qdss_stm15, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, qdss_cti, m_voc, bimc_dte0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, mgpi_clk, NA, qdss_stm12, qdss_tracectl, bimc_dte1, NA,
+ NA, NA, NA),
+ PINGROUP(61, qdss_cti, NA, m_voc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, i2s_mclk, nav_pps, nav_dr, audio_ref, blsp_spi1,
+ blsp_spi2, blsp_spi3, blsp_spi4, ldo_update),
+ PINGROUP(63, blsp_uart2, NA, qdss_stm7, qdss4, atest_char, NA, NA, NA,
+ NA),
+ PINGROUP(64, blsp_uart2, NA, qdss_stm6, qdss5, atest_char3, NA, NA, NA,
+ NA),
+ PINGROUP(65, blsp_uart2, blsp_i2c2, NA, qdss_stm5, qdss6, atest_char2,
+ NA, NA, NA),
+ PINGROUP(66, blsp_uart2, blsp_i2c2, NA, qdss_stm4, qdss7, atest_char1,
+ NA, NA, NA),
+ PINGROUP(67, uim1_data, atest_char0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4,
+ dbg_out, NA, NA, NA),
+ PINGROUP(72, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, blsp_spi1, NA, gcc_plltest, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, blsp_spi1, NA, blsp_i2c1, gcc_plltest, NA, NA, NA, NA),
+ PINGROUP(75, NA, blsp_spi1, NA, blsp_i2c1, NA, NA, NA, NA, NA),
+ PINGROUP(76, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, usb2phy_ac, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sdxpoorwills_pinctrl = {
+ .pins = sdxpoorwills_pins,
+ .npins = ARRAY_SIZE(sdxpoorwills_pins),
+ .functions = sdxpoorwills_functions,
+ .nfunctions = ARRAY_SIZE(sdxpoorwills_functions),
+ .groups = sdxpoorwills_groups,
+ .ngroups = ARRAY_SIZE(sdxpoorwills_groups),
+ .ngpios = 100,
+};
+
+static int sdxpoorwills_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sdxpoorwills_pinctrl);
+}
+
+static const struct of_device_id sdxpoorwills_pinctrl_of_match[] = {
+ { .compatible = "qcom,sdxpoorwills-pinctrl", },
+ { },
+};
+
+static struct platform_driver sdxpoorwills_pinctrl_driver = {
+ .driver = {
+ .name = "sdxpoorwills-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = sdxpoorwills_pinctrl_of_match,
+ },
+ .probe = sdxpoorwills_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sdxpoorwills_pinctrl_init(void)
+{
+ return platform_driver_register(&sdxpoorwills_pinctrl_driver);
+}
+arch_initcall(sdxpoorwills_pinctrl_init);
+
+static void __exit sdxpoorwills_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sdxpoorwills_pinctrl_driver);
+}
+module_exit(sdxpoorwills_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdxpoorwills pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdxpoorwills_pinctrl_of_match);
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 54a9cb2..3a6e214 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -144,4 +144,12 @@
If you choose to build it as a module, it will be called
msm_11ad_proxy.
+config SEEMP_CORE
+ tristate "SEEMP Core"
+ help
+	  This option enables QTI Snapdragon Smart Protection to detect
+ anomalies in various activities. It records task activities in
+ a log and rates the actions according to whether a typical user would
+ use the tools.
+
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 0bf87f4..cf24d7a 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -9,4 +9,5 @@
obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
obj-$(CONFIG_USB_BAM) += usb_bam.o
-obj-$(CONFIG_MSM_11AD) += msm_11ad/
\ No newline at end of file
+obj-$(CONFIG_MSM_11AD) += msm_11ad/
+obj-$(CONFIG_SEEMP_CORE) += seemp_core/
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index b7685cb..5fdb4e9 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2854,10 +2854,8 @@
gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
"gsi", 0);
- if (gsi_ctx->ipc_logbuf == NULL) {
- GSIERR("failed to get ipc_logbuf\n");
- return -ENOMEM;
- }
+ if (gsi_ctx->ipc_logbuf == NULL)
+		GSIERR("failed to create IPC log, continuing...\n");
gsi_ctx->dev = dev;
init_completion(&gsi_ctx->gen_ee_cmd_compl);
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index b1d1dfa..f5e23c68 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -29,6 +29,7 @@
static struct dentry *dent;
static char dbg_buff[4096];
+static void *gsi_ipc_logbuf_low;
static void gsi_wq_print_dp_stats(struct work_struct *work);
static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats);
@@ -758,22 +759,20 @@
if (kstrtos8(dbg_buff, 0, &option))
return -EFAULT;
+ mutex_lock(&gsi_ctx->mlock);
if (option) {
- if (!gsi_ctx->ipc_logbuf_low) {
- gsi_ctx->ipc_logbuf_low =
+ if (!gsi_ipc_logbuf_low) {
+ gsi_ipc_logbuf_low =
ipc_log_context_create(GSI_IPC_LOG_PAGES,
"gsi_low", 0);
+ if (gsi_ipc_logbuf_low == NULL)
+ TERR("failed to get ipc_logbuf_low\n");
}
-
- if (gsi_ctx->ipc_logbuf_low == NULL) {
- TERR("failed to get ipc_logbuf_low\n");
- return -EFAULT;
- }
+ gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low;
} else {
- if (gsi_ctx->ipc_logbuf_low)
- ipc_log_context_destroy(gsi_ctx->ipc_logbuf_low);
gsi_ctx->ipc_logbuf_low = NULL;
}
+ mutex_unlock(&gsi_ctx->mlock);
return count;
}
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index d45fa51..a37947b 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -126,6 +126,7 @@
__stringify(IPA_CLIENT_Q6_DECOMP_PROD),
__stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
__stringify(IPA_CLIENT_UC_USB_PROD),
+ __stringify(IPA_CLIENT_ETHERNET_PROD),
/* Below PROD client type is only for test purpose */
__stringify(IPA_CLIENT_TEST_PROD),
@@ -164,6 +165,7 @@
__stringify(IPA_CLIENT_Q6_DECOMP_CONS),
__stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
__stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+ __stringify(IPA_CLIENT_ETHERNET_CONS),
/* Below CONS client type is only for test purpose */
__stringify(IPA_CLIENT_TEST_CONS),
__stringify(IPA_CLIENT_TEST1_CONS),
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 51c930a..ae06d54 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -131,23 +131,23 @@
IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
inp->netdev_name);
memset(&param, 0, sizeof(param));
- param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
param.reg_params.user_data = ntn_ctx;
param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
param.floor_voltage = IPA_VOLTAGE_SVS;
ret = ipa_rm_create_resource(&param);
if (ret) {
- IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+ IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n");
return -EFAULT;
}
memset(&param, 0, sizeof(param));
- param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ param.name = IPA_RM_RESOURCE_ETHERNET_CONS;
param.request_resource = ipa_uc_ntn_cons_request;
param.release_resource = ipa_uc_ntn_cons_release;
ret = ipa_rm_create_resource(&param);
if (ret) {
- IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+ IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
goto fail_create_rm_cons;
}
@@ -177,13 +177,13 @@
memset(tx_prop, 0, sizeof(tx_prop));
tx_prop[0].ip = IPA_IP_v4;
- tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
sizeof(tx_prop[0].hdr_name));
tx_prop[1].ip = IPA_IP_v6;
- tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
sizeof(tx_prop[1].hdr_name));
@@ -194,7 +194,7 @@
memset(rx_prop, 0, sizeof(rx_prop));
rx_prop[0].ip = IPA_IP_v4;
- rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD;
rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
if (inp->is_meta_data_valid) {
rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -203,7 +203,7 @@
}
rx_prop[1].ip = IPA_IP_v6;
- rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD;
rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
if (inp->is_meta_data_valid) {
rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -229,9 +229,9 @@
fail:
kfree(hdr);
fail_alloc:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
fail_create_rm_cons:
- ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
return ret;
}
@@ -349,18 +349,18 @@
return -EINVAL;
}
- result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", result);
return result;
}
- result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (result == -EINPROGRESS) {
if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
10*HZ) == 0) {
- IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+ IPA_UC_OFFLOAD_ERR("ETH_PROD resource req time out\n");
result = -EFAULT;
goto fail;
}
@@ -384,7 +384,7 @@
return 0;
fail:
- ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
return result;
}
@@ -448,10 +448,10 @@
rm_profile.max_supported_bandwidth_mbps =
profile->max_supported_bw_mbps;
- if (profile->client == IPA_CLIENT_ODU_PROD) {
- resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
- } else if (profile->client == IPA_CLIENT_ODU_TETH_CONS) {
- resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ if (profile->client == IPA_CLIENT_ETHERNET_PROD) {
+ resource_name = IPA_RM_RESOURCE_ETHERNET_PROD;
+ } else if (profile->client == IPA_CLIENT_ETHERNET_CONS) {
+ resource_name = IPA_RM_RESOURCE_ETHERNET_CONS;
} else {
IPA_UC_OFFLOAD_ERR("not supported\n");
return -EINVAL;
@@ -473,22 +473,22 @@
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
- ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (ret) {
- IPA_UC_OFFLOAD_ERR("fail to release ODU_ADAPT_PROD res: %d\n",
+ IPA_UC_OFFLOAD_ERR("fail to release ETHERNET_PROD res: %d\n",
ret);
return -EFAULT;
}
- ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (ret) {
- IPA_UC_OFFLOAD_ERR("fail to del dep ODU->APPS, %d\n", ret);
+ IPA_UC_OFFLOAD_ERR("fail to del dep ETH_PROD->APPS, %d\n", ret);
return -EFAULT;
}
- ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
- ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+ ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
@@ -541,13 +541,13 @@
int len, result = 0;
struct ipa_ioc_del_hdr *hdr;
- if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
- IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD resource\n");
return -EFAULT;
}
- if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
- IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS resource\n");
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index 1431dcf..ea91b13 100644
--- a/drivers/platform/msm/ipa/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
__stringify(IPA_RM_RESOURCE_WLAN_PROD),
__stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
__stringify(IPA_RM_RESOURCE_MHI_PROD),
+ __stringify(IPA_RM_RESOURCE_ETHERNET_PROD),
__stringify(IPA_RM_RESOURCE_Q6_CONS),
__stringify(IPA_RM_RESOURCE_USB_CONS),
__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
@@ -36,6 +37,7 @@
__stringify(IPA_RM_RESOURCE_APPS_CONS),
__stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
__stringify(IPA_RM_RESOURCE_MHI_CONS),
+ __stringify(IPA_RM_RESOURCE_ETHERNET_CONS),
};
struct ipa_rm_profile_vote_type {
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index 6657bd9..9e74a3f 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@
case IPA_RM_RESOURCE_WLAN_PROD:
case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
case IPA_RM_RESOURCE_MHI_PROD:
+ case IPA_RM_RESOURCE_ETHERNET_PROD:
break;
default:
result = IPA_RM_INDEX_INVALID;
@@ -69,6 +70,7 @@
case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
case IPA_RM_RESOURCE_MHI_CONS:
case IPA_RM_RESOURCE_USB_DPL_CONS:
+ case IPA_RM_RESOURCE_ETHERNET_CONS:
break;
default:
result = IPA_RM_INDEX_INVALID;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 15bb7b4..fb42ef7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1584,6 +1584,7 @@
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc;
struct ipa_mem_buffer mem;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc;
if (memory_region_size == 0)
@@ -1603,7 +1604,7 @@
memset(mem.base, 0, mem.size);
cmd = kzalloc(sizeof(*cmd),
- GFP_KERNEL);
+ flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -2166,6 +2167,7 @@
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc = {0};
struct ipa_mem_buffer mem;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = 0;
phys_addr = ipa_ctx->ipa_wrapper_base +
@@ -2203,7 +2205,7 @@
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -2314,6 +2316,7 @@
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local *cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = 0;
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
@@ -2325,7 +2328,7 @@
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
rc = -ENOMEM;
@@ -2360,6 +2363,7 @@
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
@@ -2370,7 +2374,7 @@
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
@@ -2411,7 +2415,7 @@
memset(mem.base, 0, mem.size);
memset(&desc, 0, sizeof(desc));
- dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL);
+ dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
if (dma_cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
dma_free_coherent(ipa_ctx->pdev,
@@ -2462,6 +2466,7 @@
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_routing_init *v4_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2486,7 +2491,7 @@
entry++;
}
- v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+ v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 routing init command object\n");
rc = -ENOMEM;
@@ -2522,6 +2527,7 @@
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_routing_init *v6_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2546,7 +2552,7 @@
entry++;
}
- v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+ v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 routing init command object\n");
rc = -ENOMEM;
@@ -2582,6 +2588,7 @@
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_filter_init *v4_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2604,7 +2611,7 @@
entry++;
}
- v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+ v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 fliter init command object\n");
rc = -ENOMEM;
@@ -2640,6 +2647,7 @@
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_filter_init *v6_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2662,7 +2670,7 @@
entry++;
}
- v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+ v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 fliter init command object\n");
rc = -ENOMEM;
@@ -3671,6 +3679,7 @@
* pipe will be unsuspended as part of
* enabling IPA clocks
*/
+ mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
if (!atomic_read(
&ipa_ctx->sps_pm.dec_clients)
) {
@@ -3683,6 +3692,7 @@
1);
ipa_sps_process_irq_schedule_rel();
}
+ mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
} else {
resource = ipa2_get_rm_resource_from_ep(i);
res = ipa_rm_request_resource_with_timer(
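
The repeated kzalloc changes in ipa.c above all follow one idiom: compute the GFP mask once per function and OR in GFP_DMA only on targets where ipa_ctx->use_dma_zone is set, so immediate-command objects land in zone-DMA memory when the hardware needs it. Sketched with an illustrative context structure (my_ipa_ctx and alloc_imm_cmd are assumptions, not the driver's real names):

#include <linux/slab.h>
#include <linux/types.h>

struct my_ipa_ctx {
	bool use_dma_zone;	/* illustrative stand-in for ipa_ctx */
};

static void *alloc_imm_cmd(struct my_ipa_ctx *ctx, size_t len)
{
	/* GFP_KERNEL normally; add GFP_DMA only when the SoC requires it */
	gfp_t flag = GFP_KERNEL | (ctx->use_dma_zone ? GFP_DMA : 0);

	return kzalloc(len, flag);
}

The atomic-context call sites in ipa_hdr.c use the same expression with GFP_ATOMIC as the base.
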
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 964d6c8..a822f66 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -18,6 +18,7 @@
#include "ipa_i.h"
#include "ipa_trace.h"
+#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_INACTIVITY_TX 40
@@ -321,8 +322,8 @@
dma_address = desc->dma_address;
tx_pkt->no_unmap_dma = true;
}
- if (!dma_address) {
- IPAERR("failed to DMA wrap\n");
+ if (dma_mapping_error(ipa_ctx->pdev, dma_address)) {
+ IPAERR("dma_map_single failed\n");
goto fail_dma_map;
}
@@ -444,7 +445,7 @@
}
dma_addr = dma_map_single(ipa_ctx->pdev,
transfer.iovec, size, DMA_TO_DEVICE);
- if (!dma_addr) {
+ if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) {
IPAERR("dma_map_single failed for sps xfr buff\n");
kfree(transfer.iovec);
return -EFAULT;
@@ -492,6 +493,15 @@
tx_pkt->mem.base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ipa_ctx->pdev,
+ tx_pkt->mem.phys_base)) {
+ IPAERR("dma_map_single ");
+ IPAERR("failed\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
} else {
tx_pkt->mem.phys_base = desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
@@ -1099,16 +1109,18 @@
break;
ipa_wq_rx_common(ep->sys, iov.size);
- cnt += 5;
+ cnt += IPA_WAN_AGGR_PKT_CNT;
};
- if (cnt == 0) {
+ if (cnt == 0 || cnt < weight) {
ep->inactive_cycles++;
ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
ep->switch_to_intr = true;
delay = 0;
+ } else if (cnt < weight) {
+ delay = 0;
}
queue_delayed_work(ep->sys->wq,
&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
@@ -1870,8 +1882,8 @@
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
__func__, (void *)rx_pkt->data.dma_addr,
ptr, sys);
@@ -2026,18 +2038,20 @@
ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
}
+ spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
list_add_tail(&rx_pkt->link,
&ipa_ctx->wc_memb.wlan_comm_desc_list);
rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
}
@@ -2098,8 +2112,8 @@
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -2156,9 +2170,10 @@
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0)
+ if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) {
+ IPAERR("dma_map_single failure for rx_pkt\n");
goto fail_dma_mapping;
+ }
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
@@ -3176,14 +3191,9 @@
sys->repl_hdlr =
ipa_replenish_rx_cache;
}
- if (in->napi_enabled) {
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
- if (in->recycle_enabled) {
- sys->repl_hdlr =
- ipa_replenish_rx_cache_recycle;
- }
- }
+ if (in->napi_enabled && in->recycle_enabled)
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache_recycle;
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WAN_RX;
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
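
The dma_map_single() call sites above stop comparing the returned handle against 0 or ~0 and use dma_mapping_error() instead, which is the only check the DMA API guarantees: behind an IOMMU, 0 can be a perfectly valid bus address. A condensed sketch of the corrected pattern (device and buffer names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* never test addr against magic values; ask the DMA core */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}
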
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index e474a40..b60c7a6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1480,17 +1480,24 @@
void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
{
+ struct ipa_flt_tbl *tbl;
struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
mutex_lock(&ipa_ctx->lock);
if (ep->dflt_flt4_rule_hdl) {
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt4_rule_hdl = 0;
}
if (ep->dflt_flt6_rule_hdl) {
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt6_rule_hdl = 0;
}
mutex_unlock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 25f8923..046f77f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -268,6 +268,7 @@
struct ipa_mem_buffer mem;
struct ipa_hdr_init_system *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+ gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = -EFAULT;
if (ipa_generate_hdr_hw_tbl(&mem)) {
@@ -281,7 +282,7 @@
IPA_MEM_PART(apps_hdr_size));
goto fail_send_cmd;
} else {
- dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC);
+ dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
if (dma_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -303,7 +304,7 @@
IPA_MEM_PART(apps_hdr_size_ddr));
goto fail_send_cmd;
} else {
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("fail to alloc hdr init cmd\n");
rc = -ENOMEM;
@@ -359,6 +360,7 @@
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL;
struct ipa_register_write *reg_write_cmd = NULL;
+ gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = -EFAULT;
u32 proc_ctx_size;
u32 proc_ctx_ofst;
@@ -383,7 +385,7 @@
IPA_MEM_PART(apps_hdr_size));
goto fail_send_cmd1;
} else {
- dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC);
+ dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag);
if (dma_cmd_hdr == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -406,7 +408,7 @@
goto fail_send_cmd1;
} else {
hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd),
- GFP_ATOMIC);
+ flag);
if (hdr_init_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -431,7 +433,7 @@
goto fail_send_cmd1;
} else {
dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx),
- GFP_ATOMIC);
+ flag);
if (dma_cmd_ctx == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -456,7 +458,7 @@
goto fail_send_cmd1;
} else {
reg_write_cmd = kzalloc(sizeof(*reg_write_cmd),
- GFP_ATOMIC);
+ flag);
if (reg_write_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -722,6 +724,11 @@
entry->hdr,
entry->hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa_ctx->pdev,
+ entry->phys_base)) {
+ IPAERR("dma_map_single failure for entry\n");
+ goto fail_dma_mapping;
+ }
}
} else {
entry->is_hdr_proc_ctx = false;
@@ -798,6 +805,8 @@
list_del(&entry->link);
dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+ entry->is_hdr_proc_ctx = false;
bad_hdr_len:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->hdr_cache, entry);
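
The ipa_hdr.c hunk adds a mapping-failure check plus a matching fail_dma_mapping label placed after the existing unwind step that unmaps, since at that point nothing has been mapped yet. Kernel error paths rely on this ordering: labels undo work in reverse order of setup, and each new failure site jumps to the first label whose cleanup it actually needs. An illustrative skeleton (hdr_entry, add_hdr, and register_entry are assumed names, not the driver's real helpers):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct hdr_entry {			/* illustrative */
	dma_addr_t phys_base;
	bool is_hdr_proc_ctx;
};

static int register_entry(struct hdr_entry *e)	/* stub for a later step */
{
	return 0;
}

static int add_hdr(struct device *dev, struct kmem_cache *cache,
		   void *hdr, size_t len)
{
	struct hdr_entry *entry;

	entry = kmem_cache_zalloc(cache, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->phys_base = dma_map_single(dev, hdr, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, entry->phys_base))
		goto fail_dma_mapping;	/* nothing mapped yet: skip unmap */
	entry->is_hdr_proc_ctx = true;

	if (register_entry(entry))
		goto fail_register;	/* mapped: must unmap on the way out */

	return 0;

fail_register:
	dma_unmap_single(dev, entry->phys_base, len, DMA_TO_DEVICE);
fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;
	kmem_cache_free(cache, entry);
	return -ENOMEM;
}
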
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 672c620..cd575fe 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,8 +51,6 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
#define IPA_MAX_STATUS_STAT_NUM 30
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 2a68970..4b62927 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -538,6 +538,8 @@
mutex_unlock(&ipa_ctx->msg_lock);
if (copy_to_user(buf, &msg->meta,
sizeof(struct ipa_msg_meta))) {
+ kfree(msg);
+ msg = NULL;
ret = -EFAULT;
break;
}
@@ -546,6 +548,8 @@
if (msg->buff) {
if (copy_to_user(buf, msg->buff,
msg->meta.msg_len)) {
+ kfree(msg);
+ msg = NULL;
ret = -EFAULT;
break;
}
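
The ipa_intf.c reader fix above plugs a leak: once a message is popped off the queue the function owns it, so an -EFAULT return from copy_to_user() must free it before breaking out, exactly as the success path does further down. The shape of the fixed loop body, simplified with an illustrative signature:

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/errno.h>

/*
 * Illustrative reader body: msg wraps a metadata blob and an optional
 * payload, was dequeued by the caller, and is owned here - every exit
 * path must free it exactly once.
 */
static ssize_t copy_one_msg(char __user *buf, const void *meta,
			    size_t meta_len, const void *payload,
			    size_t payload_len, void *msg)
{
	if (copy_to_user(buf, meta, meta_len)) {
		kfree(msg);			/* leaked before the fix */
		return -EFAULT;
	}
	if (payload && copy_to_user(buf + meta_len, payload, payload_len)) {
		kfree(msg);
		return -EFAULT;
	}
	return meta_len + payload_len;
}
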
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 84849a2..21fdec0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -695,6 +695,7 @@
struct ipa_mem_buffer head;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u16 avail;
u32 num_modem_rt_index;
int rc = 0;
@@ -745,7 +746,7 @@
}
cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
- GFP_KERNEL);
+ flag);
if (cmd1 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -762,7 +763,7 @@
if (lcl) {
cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
- GFP_KERNEL);
+ flag);
if (cmd2 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 78d67a5..a50665c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -94,6 +94,7 @@
[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD] = 5,
[IPA_1_1][IPA_CLIENT_Q6_WAN_PROD] = -1,
[IPA_1_1][IPA_CLIENT_Q6_CMD_PROD] = -1,
+ [IPA_1_1][IPA_CLIENT_ETHERNET_PROD] = -1,
[IPA_1_1][IPA_CLIENT_HSIC1_CONS] = 14,
[IPA_1_1][IPA_CLIENT_WLAN1_CONS] = -1,
@@ -119,6 +120,7 @@
[IPA_1_1][IPA_CLIENT_MHI_CONS] = -1,
[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS] = 4,
[IPA_1_1][IPA_CLIENT_Q6_WAN_CONS] = -1,
+ [IPA_1_1][IPA_CLIENT_ETHERNET_CONS] = -1,
[IPA_2_0][IPA_CLIENT_HSIC1_PROD] = 12,
@@ -148,6 +150,7 @@
= 12,
[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
= 19,
+ [IPA_2_0][IPA_CLIENT_ETHERNET_PROD] = 12,
/* Only for test purpose */
[IPA_2_0][IPA_CLIENT_TEST_PROD] = 19,
[IPA_2_0][IPA_CLIENT_TEST1_PROD] = 19,
@@ -188,6 +191,7 @@
= 16,
[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
= 10,
+ [IPA_2_0][IPA_CLIENT_ETHERNET_CONS] = 1,
/* Only for test purpose */
[IPA_2_0][IPA_CLIENT_TEST_CONS] = 1,
[IPA_2_0][IPA_CLIENT_TEST1_CONS] = 1,
@@ -223,6 +227,7 @@
= -1,
[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
= -1,
+ [IPA_2_6L][IPA_CLIENT_ETHERNET_PROD] = -1,
/* Only for test purpose */
[IPA_2_6L][IPA_CLIENT_TEST_PROD] = 11,
[IPA_2_6L][IPA_CLIENT_TEST1_PROD] = 11,
@@ -263,6 +268,7 @@
= -1,
[IPA_2_6L][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
= -1,
+ [IPA_2_6L][IPA_CLIENT_ETHERNET_CONS] = -1,
/* Only for test purpose */
[IPA_2_6L][IPA_CLIENT_TEST_CONS] = 15,
[IPA_2_6L][IPA_CLIENT_TEST1_CONS] = 15,
@@ -457,6 +463,9 @@
clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
break;
+ case IPA_RM_RESOURCE_ETHERNET_CONS:
+ clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
+ break;
case IPA_RM_RESOURCE_USB_PROD:
clients->names[i++] = IPA_CLIENT_USB_PROD;
break;
@@ -468,6 +477,10 @@
break;
case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
clients->names[i++] = IPA_CLIENT_ODU_PROD;
+ break;
+ case IPA_RM_RESOURCE_ETHERNET_PROD:
+ clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
+ break;
default:
break;
}
@@ -507,7 +520,8 @@
client == IPA_CLIENT_WLAN3_CONS ||
client == IPA_CLIENT_WLAN4_CONS ||
client == IPA_CLIENT_ODU_EMB_CONS ||
- client == IPA_CLIENT_ODU_TETH_CONS)
+ client == IPA_CLIENT_ODU_TETH_CONS ||
+ client == IPA_CLIENT_ETHERNET_CONS)
return true;
return false;
@@ -3630,7 +3644,8 @@
meta.qmap_id = param_in->qmap_id;
if (param_in->client == IPA_CLIENT_USB_PROD ||
param_in->client == IPA_CLIENT_HSIC1_PROD ||
- param_in->client == IPA_CLIENT_ODU_PROD) {
+ param_in->client == IPA_CLIENT_ODU_PROD ||
+ param_in->client == IPA_CLIENT_ETHERNET_PROD) {
result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index db732c5..4672233 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -64,6 +64,7 @@
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
#define NAPI_WEIGHT 60
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
@@ -102,6 +103,7 @@
bool ipa_loaduC;
bool ipa_advertise_sg_support;
bool ipa_napi_enable;
+ u32 wan_rx_desc_size;
};
static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
@@ -1310,10 +1312,8 @@
ipa_to_apps_ep_cfg.priv = dev;
ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
- if (ipa_to_apps_ep_cfg.napi_enabled)
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
- else
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
mutex_lock(&ipa_to_apps_pipe_handle_guard);
if (atomic_read(&is_ssr)) {
@@ -1541,6 +1541,9 @@
memcpy(mux_channel[rmnet_index].vchannel_name,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
sizeof(mux_channel[rmnet_index].vchannel_name));
+ mux_channel[rmnet_index].vchannel_name[
+ IFNAMSIZ - 1] = '\0';
+
IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
mux_channel[rmnet_index].vchannel_name,
mux_channel[rmnet_index].mux_id,
@@ -1944,6 +1947,9 @@
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
+ int result;
+
+ ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
ipa_rmnet_drv_res->ipa_rmnet_ssr =
of_property_read_bool(pdev->dev.of_node,
"qcom,rmnet-ipa-ssr");
@@ -1966,6 +1972,18 @@
"qcom,ipa-napi-enable");
pr_info("IPA Napi Enable = %s\n",
ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+ /* Get IPA WAN RX desc fifo size */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wan-rx-desc-size",
+ &ipa_rmnet_drv_res->wan_rx_desc_size);
+ if (result)
+ pr_info("using default for wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+ else
+ IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+
return 0;
}
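
The rmnet_ipa.c probe path above shows the usual devicetree idiom for an optional tunable: seed the field with a default, then let of_property_read_u32() overwrite it only on success, so a missing qcom,wan-rx-desc-size property is not an error; the FIFO size handed to the connect path then becomes that descriptor count times sizeof(struct sps_iovec). A condensed sketch (property and default as in the hunk, helper name assumed):

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

#define DEFAULT_WAN_RX_DESC 1024

static u32 read_wan_rx_desc_size(struct platform_device *pdev)
{
	u32 val = DEFAULT_WAN_RX_DESC;	/* default survives a missing node */

	/* overwrites val only when the property exists and parses cleanly */
	if (of_property_read_u32(pdev->dev.of_node,
				 "qcom,wan-rx-desc-size", &val))
		pr_info("using default wan-rx-desc-size = %u\n", val);

	return val;
}
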
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index ca63518..30f5712 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2268,6 +2268,36 @@
desc[num_descs].len = cmd_pyld->len;
num_descs++;
}
+
+ /* disable statuses for modem producers */
+ if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+ reg_write.skip_pipeline_clear = false;
+ reg_write.pipeline_clear_options =
+ IPAHAL_HPS_CLEAR;
+ reg_write.offset =
+ ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
+ ep_idx);
+ reg_write.value = 0;
+ reg_write.value_mask = ~0;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_descs].type = IPA_IMM_CMD_DESC;
+ desc[num_descs].callback = ipa3_destroy_imm;
+ desc[num_descs].user1 = cmd_pyld;
+ desc[num_descs].pyld = cmd_pyld->data;
+ desc[num_descs].len = cmd_pyld->len;
+ num_descs++;
+ }
}
/* Will wait 500msecs for IPA tag process completion */
@@ -2367,11 +2397,11 @@
}
/**
- * _ipa_init_sram_v3_0() - Initialize IPA local SRAM.
+ * _ipa_init_sram_v3() - Initialize IPA local SRAM.
*
* Return codes: 0 for success, negative value for failure
*/
-int _ipa_init_sram_v3_0(void)
+int _ipa_init_sram_v3(void)
{
u32 *ipa_sram_mmio;
unsigned long phys_addr;
@@ -2414,7 +2444,10 @@
IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
- ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+ ipa3_sram_set_canary(ipa_sram_mmio,
+ (ipa_get_hw_type() >= IPA_HW_v3_5) ?
+ IPA_MEM_PART(uc_event_ring_ofst) :
+ IPA_MEM_PART(end_ofst));
iounmap(ipa_sram_mmio);
@@ -3587,6 +3620,8 @@
* pipe will be unsuspended as part of
* enabling IPA clocks
*/
+ mutex_lock(&ipa3_ctx->transport_pm.
+ transport_pm_mutex);
if (!atomic_read(
&ipa3_ctx->transport_pm.dec_clients)
) {
@@ -3599,6 +3634,8 @@
1);
ipa3_process_irq_schedule_rel();
}
+ mutex_unlock(&ipa3_ctx->transport_pm.
+ transport_pm_mutex);
} else {
resource = ipa3_get_rm_resource_from_ep(i);
res =
@@ -4064,6 +4101,7 @@
cdev_del(&ipa3_ctx->cdev);
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+ ipa3_free_dma_task_for_gsi();
ipa3_destroy_flt_tbl_idrs();
idr_destroy(&ipa3_ctx->ipa_idr);
kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
@@ -4343,11 +4381,8 @@
}
ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
- if (ipa3_ctx->logbuf == NULL) {
- IPAERR("failed to get logbuf\n");
- result = -ENOMEM;
- goto fail_logbuf;
- }
+ if (ipa3_ctx->logbuf == NULL)
+ IPAERR("failed to create IPC log, continue...\n");
ipa3_ctx->pdev = ipa_dev;
ipa3_ctx->uc_pdev = ipa_dev;
@@ -4607,6 +4642,13 @@
goto fail_rx_pkt_wrapper_cache;
}
+ /* allocate memory for DMA_TASK workaround */
+ result = ipa3_allocate_dma_task_for_gsi();
+ if (result) {
+ IPAERR("failed to allocate dma task\n");
+ goto fail_dma_task;
+ }
+
/* init the various list heads */
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
@@ -4747,6 +4789,8 @@
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
fail_alloc_chrdev_region:
+ ipa3_free_dma_task_for_gsi();
+fail_dma_task:
idr_destroy(&ipa3_ctx->ipa_idr);
kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
fail_rx_pkt_wrapper_cache:
@@ -4793,8 +4837,8 @@
fail_mem_ctrl:
kfree(ipa3_ctx->ipa_tz_unlock_reg);
fail_tz_unlock_reg:
- ipc_log_context_destroy(ipa3_ctx->logbuf);
-fail_logbuf:
+ if (ipa3_ctx->logbuf)
+ ipc_log_context_destroy(ipa3_ctx->logbuf);
kfree(ipa3_ctx);
ipa3_ctx = NULL;
fail_mem_ctx:
@@ -5491,13 +5535,6 @@
return result;
}
- result = of_platform_populate(pdev_p->dev.of_node,
- pdrv_match, NULL, &pdev_p->dev);
- if (result) {
- IPAERR("failed to populate platform\n");
- return result;
- }
-
if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
if (of_property_read_bool(pdev_p->dev.of_node,
"qcom,smmu-s1-bypass"))
@@ -5543,6 +5580,13 @@
}
}
+ result = of_platform_populate(pdev_p->dev.of_node,
+ pdrv_match, NULL, &pdev_p->dev);
+ if (result) {
+ IPAERR("failed to populate platform\n");
+ return result;
+ }
+
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3fb767c..ca77be9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -106,6 +106,7 @@
static char *active_clients_buf;
static s8 ep_reg_idx;
+static void *ipa_ipc_low_buff;
static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
@@ -1213,12 +1214,7 @@
"TX bamFifoUsageLow=%u\n"
"TX bamUtilCount=%u\n"
"TX num_db=%u\n"
- "TX num_unexpected_db=%u\n"
- "TX num_bam_int_handled=%u\n"
- "TX num_bam_int_in_non_running_state=%u\n"
- "TX num_qmb_int_handled=%u\n"
- "TX num_bam_int_handled_while_wait_for_bam=%u\n"
- "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ "TX num_qmb_int_handled=%u\n",
TX_STATS(num_pkts_processed),
TX_STATS(tail_ptr_val),
TX_STATS(num_db_fired),
@@ -1233,12 +1229,7 @@
TX_STATS(bam_stats.bamFifoUsageLow),
TX_STATS(bam_stats.bamUtilCount),
TX_STATS(num_db),
- TX_STATS(num_unexpected_db),
- TX_STATS(num_bam_int_handled),
- TX_STATS(num_bam_int_in_non_running_state),
- TX_STATS(num_qmb_int_handled),
- TX_STATS(num_bam_int_handled_while_wait_for_bam),
- TX_STATS(num_bam_int_handled_while_not_in_bam));
+ TX_STATS(num_qmb_int_handled));
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"RX max_outstanding_pkts=%u\n"
@@ -1254,12 +1245,7 @@
"RX bamFifoUsageHigh=%u\n"
"RX bamFifoUsageLow=%u\n"
"RX bamUtilCount=%u\n"
- "RX num_bam_int_handled=%u\n"
- "RX num_db=%u\n"
- "RX num_unexpected_db=%u\n"
- "RX num_pkts_in_dis_uninit_state=%u\n"
- "num_ic_inj_vdev_change=%u\n"
- "num_ic_inj_fw_desc_change=%u\n",
+ "RX num_db=%u\n",
RX_STATS(max_outstanding_pkts),
RX_STATS(num_pkts_processed),
RX_STATS(rx_ring_rp_value),
@@ -1273,12 +1259,7 @@
RX_STATS(bam_stats.bamFifoUsageHigh),
RX_STATS(bam_stats.bamFifoUsageLow),
RX_STATS(bam_stats.bamUtilCount),
- RX_STATS(num_bam_int_handled),
- RX_STATS(num_db),
- RX_STATS(num_unexpected_db),
- RX_STATS(num_pkts_in_dis_uninit_state),
- RX_STATS(num_bam_int_handled_while_not_in_bam),
- RX_STATS(num_bam_int_handled_while_in_bam_state));
+ RX_STATS(num_db));
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -1778,22 +1759,20 @@
if (kstrtos8(dbg_buff, 0, &option))
return -EFAULT;
+ mutex_lock(&ipa3_ctx->lock);
if (option) {
- if (!ipa3_ctx->logbuf_low) {
- ipa3_ctx->logbuf_low =
+ if (!ipa_ipc_low_buff) {
+ ipa_ipc_low_buff =
ipc_log_context_create(IPA_IPC_LOG_PAGES,
"ipa_low", 0);
}
-
- if (ipa3_ctx->logbuf_low == NULL) {
- IPAERR("failed to get logbuf_low\n");
- return -EFAULT;
- }
+ if (ipa_ipc_low_buff == NULL)
+ IPAERR("failed to get logbuf_low\n");
+ ipa3_ctx->logbuf_low = ipa_ipc_low_buff;
} else {
- if (ipa3_ctx->logbuf_low)
- ipc_log_context_destroy(ipa3_ctx->logbuf_low);
ipa3_ctx->logbuf_low = NULL;
}
+ mutex_unlock(&ipa3_ctx->lock);
return count;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 3e4bd79..89dd274 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -21,6 +21,7 @@
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
+#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
@@ -60,7 +61,6 @@
#define IPA_ODU_RX_POOL_SZ 64
#define IPA_SIZE_DL_CSUM_META_TRAILER 8
-#define IPA_GSI_EVT_RING_LEN 4096
#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
@@ -73,12 +73,6 @@
#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
-/*
- * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
- * IPA users still use sps_iovec size as FIFO element size.
- */
-#define IPA_FIFO_ELEMENT_SIZE 8
-
static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -421,7 +415,6 @@
}
kfree(gsi_xfer_elem_array);
- kfree(gsi_xfer_elem_array);
spin_unlock_bh(&sys->spinlock);
/* set the timer for sending the NOP descriptor */
@@ -788,14 +781,8 @@
sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
if (sys->ep->napi_enabled) {
- if (sys->ep->switch_to_intr) {
- ipa3_rx_switch_to_intr_mode(sys);
- IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
- sys->ep->switch_to_intr = false;
- sys->ep->inactive_cycles = 0;
- } else
- sys->ep->client_notify(sys->ep->priv,
- IPA_CLIENT_START_POLL, 0);
+ ipa3_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
} else
ipa3_handle_rx(sys);
}
@@ -868,7 +855,8 @@
snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
sys_in->client);
ep->sys->wq = alloc_workqueue(buff,
- WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+
if (!ep->sys->wq) {
IPAERR("failed to create wq for client %d\n",
sys_in->client);
@@ -879,7 +867,7 @@
snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
sys_in->client);
ep->sys->repl_wq = alloc_workqueue(buff,
- WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
if (!ep->sys->repl_wq) {
IPAERR("failed to create rep wq for client %d\n",
sys_in->client);
@@ -1016,6 +1004,7 @@
struct ipa3_ep_context *ep;
int empty;
int result;
+ int i;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1030,7 +1019,6 @@
ipa3_disable_data_path(clnt_hdl);
if (ep->napi_enabled) {
- ep->switch_to_intr = true;
do {
usleep_range(95, 105);
} while (atomic_read(&ep->sys->curr_polling_state));
@@ -1051,7 +1039,17 @@
if (IPA_CLIENT_IS_CONS(ep->client))
cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
- result = ipa3_stop_gsi_channel(clnt_hdl);
+ /* channel stop might fail on timeout if IPA is busy */
+ for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result == GSI_STATUS_SUCCESS)
+ break;
+
+ if (result != -GSI_STATUS_AGAIN &&
+ result != -GSI_STATUS_TIMED_OUT)
+ break;
+ }
+
if (result != GSI_STATUS_SUCCESS) {
IPAERR("GSI stop chan err: %d.\n", result);
ipa_assert();
@@ -2677,8 +2675,7 @@
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
struct ipa3_sys_context *sys)
{
- if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
- in->client == IPA_CLIENT_APPS_WAN_PROD) {
+ if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
sys->policy = IPA_POLICY_INTR_MODE;
sys->use_comm_evt_ring = false;
return 0;
@@ -2743,9 +2740,6 @@
sys->repl_hdlr =
ipa3_replenish_rx_cache;
}
- if (in->napi_enabled)
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
if (in->napi_enabled && in->recycle_enabled)
sys->repl_hdlr =
ipa3_replenish_rx_cache_recycle;
@@ -3442,7 +3436,13 @@
gsi_evt_ring_props.re_size =
GSI_EVT_RING_RE_SIZE_16B;
+ /*
+ * GSI ring length is calculated based on the desc_fifo_sz
+ * which was meant to define the BAM desc fifo. GSI descriptors
+ * are 16B as opposed to 8B for BAM.
+ */
gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;
+
gsi_evt_ring_props.ring_base_vaddr =
dma_alloc_coherent(ipa3_ctx->pdev,
gsi_evt_ring_props.ring_len,
@@ -3520,6 +3520,7 @@
if (!gsi_channel_props.ring_base_vaddr) {
IPAERR("fail to dma alloc %u bytes\n",
gsi_channel_props.ring_len);
+ result = -ENOMEM;
goto fail_alloc_channel_ring;
}
gsi_channel_props.ring_base_addr = dma_addr;
@@ -3658,7 +3659,7 @@
* function is executed in the softirq context
*
* if input budget is zero, the driver switches back to
- * interrupt mode
+ * interrupt mode.
*
* return number of polled packets, on error 0(zero)
*/
@@ -3667,8 +3668,8 @@
struct ipa3_ep_context *ep;
int ret;
int cnt = 0;
- unsigned int delay = 1;
struct ipa_mem_buffer mem_info = {0};
+ static int total_cnt;
IPADBG("\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@@ -3687,21 +3688,20 @@
break;
ipa3_wq_rx_common(ep->sys, mem_info.size);
- cnt += 5;
+ cnt += IPA_WAN_AGGR_PKT_CNT;
+ total_cnt++;
+
+ if (ep->sys->len == 0 || total_cnt >= ep->sys->rx_pool_sz) {
+ total_cnt = 0;
+ cnt = cnt - 1;
+ break;
+ }
};
- if (cnt == 0) {
- ep->inactive_cycles++;
+ if (cnt < weight) {
ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
-
- if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
- ep->switch_to_intr = true;
- delay = 0;
- }
- queue_delayed_work(ep->sys->wq,
- &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
- } else
- ep->inactive_cycles = 0;
+ queue_work(ep->sys->wq, &ep->sys->switch_to_intr_work.work);
+ }
return cnt;
}
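
ipa3_teardown_sys_pipe() above now tolerates a busy IPA: the channel stop is retried while the GSI layer reports AGAIN or TIMED_OUT, and only other errors (or exhausting the retry budget) fall through to the assert. The retry skeleton, with illustrative status values and a stubbed hardware call (STOP_MAX_RETRY and stop_channel are assumptions for the sketch):

/* Illustrative GSI-style status codes; the real driver negates them. */
enum { STATUS_SUCCESS = 0, STATUS_AGAIN = 3, STATUS_TIMED_OUT = 8 };
#define STOP_MAX_RETRY 10		/* assumed bound for the sketch */

static int stop_channel(unsigned int hdl)	/* stub for the HW call */
{
	(void)hdl;
	return -STATUS_AGAIN;
}

static int stop_channel_retry(unsigned int hdl)
{
	int ret = -STATUS_TIMED_OUT;
	int i;

	for (i = 0; i < STOP_MAX_RETRY; i++) {
		ret = stop_channel(hdl);
		if (ret == STATUS_SUCCESS)
			break;
		/* retry transient failures only; real errors bail out */
		if (ret != -STATUS_AGAIN && ret != -STATUS_TIMED_OUT)
			break;
	}
	return ret;	/* caller asserts if still unsuccessful */
}
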
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 0cc1206..ff763c4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1389,16 +1389,23 @@
void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
{
struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+ struct ipa3_flt_tbl *tbl;
mutex_lock(&ipa3_ctx->lock);
if (ep->dflt_flt4_rule_hdl) {
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt4_rule_hdl = 0;
}
if (ep->dflt_flt6_rule_hdl) {
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt6_rule_hdl = 0;
}
mutex_unlock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7419a64..3af4486 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -54,8 +54,11 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
+/*
+ * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
+ * IPA users still use sps_iovec size as FIFO element size.
+ */
+#define IPA_FIFO_ELEMENT_SIZE 8
#define IPA_MAX_STATUS_STAT_NUM 30
@@ -528,8 +531,6 @@
bool disconnect_in_progress;
u32 qmi_request_sent;
bool napi_enabled;
- bool switch_to_intr;
- int inactive_cycles;
u32 eot_in_poll_err;
/* sys MUST be the last element of this struct */
@@ -996,6 +997,11 @@
u32 size;
};
+struct ipa_dma_task_info {
+ struct ipa_mem_buffer mem;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+};
+
/**
* struct ipa3_context - IPA context
* @class: pointer to the struct class
@@ -1205,6 +1211,7 @@
struct ipa3_smp2p_info smp2p_info;
u32 ipa_tz_unlock_reg_num;
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
+ struct ipa_dma_task_info dma_task_info;
};
struct ipa3_plat_drv_res {
@@ -1239,7 +1246,9 @@
* Order and type of members should not be changed without a suitable change
* to DTS file or the code that reads it.
*
- * IPA v3.0 SRAM memory layout:
+ * IPA SRAM memory layout:
+ * +-------------------------+
+ * | UC MEM |
* +-------------------------+
* | UC INFO |
* +-------------------------+
@@ -1307,10 +1316,14 @@
* +-------------------------+
* | CANARY |
* +-------------------------+
+ * | CANARY |
+ * +-------------------------+
* | MODEM MEM |
* +-------------------------+
* | CANARY |
* +-------------------------+
+ * | UC EVENT RING | From IPA 3.5
+ * +-------------------------+
*/
struct ipa3_mem_partition {
u32 ofst_start;
@@ -1383,6 +1396,8 @@
u32 apps_v6_rt_hash_size;
u32 apps_v6_rt_nhash_ofst;
u32 apps_v6_rt_nhash_size;
+ u32 uc_event_ring_ofst;
+ u32 uc_event_ring_size;
};
struct ipa3_controller {
@@ -1837,7 +1852,7 @@
int ipa3_teth_bridge_driver_init(void);
void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
-int _ipa_init_sram_v3_0(void);
+int _ipa_init_sram_v3(void);
int _ipa_init_hdr_v3_0(void);
int _ipa_init_rt4_v3(void);
int _ipa_init_rt6_v3(void);
@@ -1981,4 +1996,6 @@
void ipa3_enable_dcd(void);
void ipa3_disable_prefetch(enum ipa_client_type client);
int ipa3_alloc_common_event_ring(void);
+int ipa3_allocate_dma_task_for_gsi(void);
+void ipa3_free_dma_task_for_gsi(void);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index b9f5755..da965e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -548,6 +548,8 @@
if (copy_to_user(buf, &msg->meta,
sizeof(struct ipa_msg_meta))) {
ret = -EFAULT;
+ kfree(msg);
+ msg = NULL;
break;
}
buf += sizeof(struct ipa_msg_meta);
@@ -556,6 +558,8 @@
if (copy_to_user(buf, msg->buff,
msg->meta.msg_len)) {
ret = -EFAULT;
+ kfree(msg);
+ msg = NULL;
break;
}
buf += msg->meta.msg_len;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 19c3de4a..73738bf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -53,6 +53,8 @@
static bool workqueues_stopped;
static bool ipa3_modem_init_cmplt;
static bool first_time_handshake;
+struct mutex ipa3_qmi_lock;
+
/* QMI A5 service */
static struct msg_desc ipa3_indication_reg_req_desc = {
@@ -610,12 +612,17 @@
req->filter_spec_ex_list_len);
}
- /* cache the qmi_filter_request */
- memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
- req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
@@ -655,12 +662,17 @@
req->filter_spec_ex_list_len);
}
- /* cache the qmi_filter_request */
- memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
- req, sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
- ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_ex_msg_cache[
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg]),
+ req,
+ sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01));
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg++;
+ ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_ex_msg %= 10;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
req_desc.max_msg_len =
QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01;
@@ -796,12 +808,17 @@
return -EINVAL;
}
- /* cache the qmi_filter_request */
- memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
- ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
- req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
- ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
- ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+ req,
+ sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+ ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
req_desc.max_msg_len =
QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
@@ -1339,3 +1356,13 @@
resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
}
+void ipa3_qmi_init(void)
+{
+ mutex_init(&ipa3_qmi_lock);
+}
+
+void ipa3_qmi_cleanup(void)
+{
+ mutex_destroy(&ipa3_qmi_lock);
+}
+
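
The QMI hunks above wrap every access to the request caches in ipa3_qmi_lock and re-check ipa3_qmi_ctx under it, because the context can be torn down (for example across SSR) concurrently with senders; once the context is gone, caching simply becomes a silent no-op instead of a NULL dereference. The shape of that guard, sketched in user space with illustrative types (qmi_ctx, cache_request, and CACHE_SLOTS are assumptions):

#include <pthread.h>
#include <string.h>

#define CACHE_SLOTS 10

struct req { int data; };		/* illustrative payload */

struct qmi_ctx {
	struct req cache[CACHE_SLOTS];
	int num;
};

static pthread_mutex_t qmi_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qmi_ctx *qmi_ctx;		/* may be NULLed by teardown */

static void cache_request(const struct req *req)
{
	pthread_mutex_lock(&qmi_lock);
	if (qmi_ctx) {			/* skip silently after teardown */
		memcpy(&qmi_ctx->cache[qmi_ctx->num], req, sizeof(*req));
		qmi_ctx->num = (qmi_ctx->num + 1) % CACHE_SLOTS;
	}
	pthread_mutex_unlock(&qmi_lock);
}
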
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 4fde261..6cd82f8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -204,6 +204,10 @@
void ipa3_q6_handshake_complete(bool ssr_bootup);
+void ipa3_qmi_init(void);
+
+void ipa3_qmi_cleanup(void);
+
#else /* CONFIG_RMNET_IPA3 */
static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
@@ -316,6 +320,14 @@
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+static inline void ipa3_qmi_init(void)
+{
+}
+
+static inline void ipa3_qmi_cleanup(void)
+{
+}
+
#endif /* CONFIG_RMNET_IPA3 */
#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index 30243da..ce47623 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -117,12 +117,7 @@
TX_STATS(bam_stats.bamFifoUsageLow);
TX_STATS(bam_stats.bamUtilCount);
TX_STATS(num_db);
- TX_STATS(num_unexpected_db);
- TX_STATS(num_bam_int_handled);
- TX_STATS(num_bam_int_in_non_running_state);
TX_STATS(num_qmb_int_handled);
- TX_STATS(num_bam_int_handled_while_wait_for_bam);
- TX_STATS(num_bam_int_handled_while_not_in_bam);
RX_STATS(max_outstanding_pkts);
RX_STATS(num_pkts_processed);
@@ -137,12 +132,7 @@
RX_STATS(bam_stats.bamFifoUsageHigh);
RX_STATS(bam_stats.bamFifoUsageLow);
RX_STATS(bam_stats.bamUtilCount);
- RX_STATS(num_bam_int_handled);
RX_STATS(num_db);
- RX_STATS(num_unexpected_db);
- RX_STATS(num_pkts_in_dis_uninit_state);
- RX_STATS(num_bam_int_handled_while_not_in_bam);
- RX_STATS(num_bam_int_handled_while_in_bam_state);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -253,7 +243,8 @@
ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
if (ep_ul->valid || ep_dl->valid) {
- IPAERR("EP already allocated.\n");
+ IPAERR("EP already allocated ul:%d dl:%d\n",
+ ep_ul->valid, ep_dl->valid);
return -EFAULT;
}
@@ -398,7 +389,7 @@
goto fail;
}
ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
- memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+ memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 946fc7e..79f0973 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -388,18 +388,9 @@
*@num_pkts_processed: Number of packets processed - cumulative
*@rx_ring_rp_value: Read pointer last advertized to the WLAN FW
*
- *@ntn_ch_err_type: Information about the channel error (if
- * available)
*@rx_ind_ring_stats:
*@bam_stats:
- *@num_bam_int_handled: Number of Bam Interrupts handled by FW
*@num_db: Number of times the doorbell was rung
- *@num_unexpected_db: Number of unexpected doorbells
- *@num_pkts_in_dis_uninit_state:
- *@num_bam_int_handled_while_not_in_bam: Number of Bam
- * Interrupts handled by FW
- *@num_bam_int_handled_while_in_bam_state: Number of Bam
- * Interrupts handled by FW
*/
struct NTN3RxInfoData_t {
u32 max_outstanding_pkts;
@@ -407,17 +398,12 @@
u32 rx_ring_rp_value;
struct IpaHwRingStats_t rx_ind_ring_stats;
struct IpaHwBamStats_t bam_stats;
- u32 num_bam_int_handled;
u32 num_db;
- u32 num_unexpected_db;
- u32 num_pkts_in_dis_uninit_state;
- u32 num_bam_int_handled_while_not_in_bam;
- u32 num_bam_int_handled_while_in_bam_state;
} __packed;
/**
- * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * struct NTN3TxInfoData_t - Structure holding the NTN Tx channel
* Ensure that this is always word aligned
*
*@num_pkts_processed: Number of packets processed - cumulative
@@ -427,27 +413,16 @@
*@tx_comp_ring_stats:
*@bam_stats:
*@num_db: Number of times the doorbell was rung
- *@num_unexpected_db: Number of unexpected doorbells
- *@num_bam_int_handled: Number of Bam Interrupts handled by FW
- *@num_bam_int_in_non_running_state: Number of Bam interrupts
- * while not in Running state
*@num_qmb_int_handled: Number of QMB interrupts handled
- *@num_bam_int_handled_while_wait_for_bam: Number of times the
- * Imm Cmd is injected due to fw_desc change
*/
-struct NTNTxInfoData_t {
+struct NTN3TxInfoData_t {
u32 num_pkts_processed;
u32 tail_ptr_val;
u32 num_db_fired;
struct IpaHwRingStats_t tx_comp_ring_stats;
struct IpaHwBamStats_t bam_stats;
u32 num_db;
- u32 num_unexpected_db;
- u32 num_bam_int_handled;
- u32 num_bam_int_in_non_running_state;
u32 num_qmb_int_handled;
- u32 num_bam_int_handled_while_wait_for_bam;
- u32 num_bam_int_handled_while_not_in_bam;
} __packed;
@@ -458,7 +433,7 @@
*/
struct Ipa3HwStatsNTNInfoData_t {
struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
- struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+ struct NTN3TxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
} __packed;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6cfe25d..f066d94 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -392,6 +392,11 @@
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_PCIE,
{ 13, 10, 8, 16, IPA_EE_AP } },
+ [IPA_3_0][IPA_CLIENT_ETHERNET_PROD] = {
+ 2, IPA_v3_0_GROUP_UL, true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ {2, 0, 8, 16, IPA_EE_UC} },
/* Only for test purpose */
[IPA_3_0][IPA_CLIENT_TEST_PROD] = {
1, IPA_v3_0_GROUP_UL, true,
@@ -517,6 +522,11 @@
QMB_MASTER_SELECT_PCIE,
{ 29, 14, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_0][IPA_CLIENT_ETHERNET_CONS] = {
+ 24, IPA_v3_0_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ {24, 3, 8, 8, IPA_EE_UC} },
/* Only for test purpose */
[IPA_3_0][IPA_CLIENT_TEST_CONS] = {
26, IPA_v3_0_GROUP_DL, false,
@@ -604,6 +614,7 @@
[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5][IPA_CLIENT_ETHERNET_PROD] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5][IPA_CLIENT_TEST_PROD] = {
0, IPA_v3_5_GROUP_UL_DL, true,
@@ -701,6 +712,7 @@
[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5][IPA_CLIENT_ETHERNET_CONS] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
[IPA_3_5][IPA_CLIENT_TEST_CONS] = {
@@ -792,6 +804,7 @@
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_DDR,
{ 8, 9, 8, 16, IPA_EE_AP } },
+ [IPA_3_5_MHI][IPA_CLIENT_ETHERNET_PROD] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5_MHI][IPA_CLIENT_TEST_PROD] = {
0, IPA_v3_5_MHI_GROUP_DDR, true,
@@ -889,6 +902,7 @@
QMB_MASTER_SELECT_PCIE,
{ 19, 13, 8, 8, IPA_EE_AP } },
[IPA_3_5_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5_MHI][IPA_CLIENT_ETHERNET_CONS] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5_MHI][IPA_CLIENT_TEST_CONS] = {
15, IPA_v3_5_MHI_GROUP_PCIE, false,
@@ -975,6 +989,7 @@
[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5_1][IPA_CLIENT_ETHERNET_PROD] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5_1][IPA_CLIENT_TEST_PROD] = {
0, IPA_v3_5_GROUP_UL_DL, true,
@@ -1068,6 +1083,7 @@
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5_1][IPA_CLIENT_ETHERNET_CONS] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5_1][IPA_CLIENT_TEST_CONS] = {
17, IPA_v3_5_GROUP_UL_DL,
@@ -1133,18 +1149,18 @@
static struct msm_bus_paths ipa_usecases_v3_0[] = {
{
- ARRAY_SIZE(ipa_init_vectors_v3_0),
- ipa_init_vectors_v3_0,
+ .num_paths = ARRAY_SIZE(ipa_init_vectors_v3_0),
+ .vectors = ipa_init_vectors_v3_0,
},
{
- ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
- ipa_nominal_perf_vectors_v3_0,
+ .num_paths = ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
+ .vectors = ipa_nominal_perf_vectors_v3_0,
},
};
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = {
- ipa_usecases_v3_0,
- ARRAY_SIZE(ipa_usecases_v3_0),
+ .usecase = ipa_usecases_v3_0,
+ .num_usecases = ARRAY_SIZE(ipa_usecases_v3_0),
.name = "ipa",
};
@@ -1231,6 +1247,9 @@
clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
break;
+ case IPA_RM_RESOURCE_ETHERNET_CONS:
+ clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
+ break;
case IPA_RM_RESOURCE_USB_PROD:
clients->names[i++] = IPA_CLIENT_USB_PROD;
break;
@@ -1242,6 +1261,10 @@
break;
case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
clients->names[i++] = IPA_CLIENT_ODU_PROD;
+ break;
+ case IPA_RM_RESOURCE_ETHERNET_PROD:
+ clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
+ break;
default:
break;
}
@@ -1282,7 +1305,8 @@
client == IPA_CLIENT_WLAN3_CONS ||
client == IPA_CLIENT_WLAN4_CONS ||
client == IPA_CLIENT_ODU_EMB_CONS ||
- client == IPA_CLIENT_ODU_TETH_CONS)
+ client == IPA_CLIENT_ODU_TETH_CONS ||
+ client == IPA_CLIENT_ETHERNET_CONS)
return true;
return false;
@@ -2742,7 +2766,8 @@
meta.qmap_id = param_in->qmap_id;
if (param_in->client == IPA_CLIENT_USB_PROD ||
param_in->client == IPA_CLIENT_HSIC1_PROD ||
- param_in->client == IPA_CLIENT_ODU_PROD) {
+ param_in->client == IPA_CLIENT_ODU_PROD ||
+ param_in->client == IPA_CLIENT_ETHERNET_PROD) {
result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
@@ -2884,17 +2909,54 @@
*/
int ipa3_init_mem_partition(struct device_node *node)
{
+ const size_t ram_mmap_v3_0_size = 70;
+ const size_t ram_mmap_v3_5_size = 72;
+ const size_t ram_mmap_current_version_size =
+ sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32);
+ const size_t version = ipa_get_hw_type();
int result;
IPADBG("Reading from DTS as u32 array\n");
- result = of_property_read_u32_array(node,
- "qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
- sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
- if (result) {
+ /*
+ * The size of ipa-ram-mmap array depends on the IPA version. The
+ * actual size can't be assumed because the DTS and the driver may be
+ * at different versions. The size of the array is monotonically
+ * increasing because obsolete entries are set to zero rather than
+ * deleted, so the possible sizes are in the range
+ * [ram_mmap_v3_0_size, ram_mmap_current_version_size]
+ */
+ result = of_property_read_variable_u32_array(node, "qcom,ipa-ram-mmap",
+ (u32 *)&ipa3_ctx->ctrl->mem_partition,
+ ram_mmap_v3_0_size, ram_mmap_current_version_size);
+
+ if (result <= 0) {
IPAERR("Read operation failed\n");
return -ENODEV;
}
+ if (version < IPA_HW_v3_0)
+ ipa_assert();
+ if (version < IPA_HW_v3_5) {
+ if (result != ram_mmap_v3_0_size) {
+ IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
+ return -ENODEV;
+ }
+ } else {
+ if (result != ram_mmap_v3_5_size) {
+ IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
+ return -ENODEV;
+ }
+
+ if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) {
+ IPAERR("UC EVENT RING OFST 0x%x is unaligned\n",
+ IPA_MEM_PART(uc_event_ring_ofst));
+ return -ENODEV;
+ }
+
+ IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n",
+ IPA_MEM_PART(uc_event_ring_ofst),
+ IPA_MEM_PART(uc_event_ring_size));
+ }
IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
IPA_MEM_PART(nat_size));
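For reference, the read above relies on of_property_read_variable_u32_array() returning the number of elements actually read (or a negative errno), which is then validated against the per-version sizes. A minimal sketch of that pattern, using the size constants defined above (the helper name is hypothetical):

	/* sketch only: read and validate a variable-length DT u32 array */
	static int example_read_ram_mmap(struct device_node *node, u32 *buf)
	{
		int n = of_property_read_variable_u32_array(node,
				"qcom,ipa-ram-mmap", buf,
				70 /* ram_mmap_v3_0_size */,
				72 /* ram_mmap_v3_5_size */);

		if (n < 0)
			return n;	/* property missing or length out of range */
		if (n != 70 && n != 72)
			return -ENODEV;	/* must match a known IPA version */
		return 0;
	}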
@@ -3142,7 +3204,7 @@
ctrl->clock_scaling_bw_threshold_turbo =
IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
- ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
+ ctrl->ipa_init_sram = _ipa_init_sram_v3;
ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
@@ -4138,6 +4200,7 @@
/* queue a work to start polling if we don't have one */
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
if (!atomic_read(&ep->sys->curr_polling_state)) {
+ ipa3_inc_acquire_wakelock();
atomic_set(&ep->sys->curr_polling_state, 1);
queue_work(ep->sys->wq, &ep->sys->work);
}
@@ -4190,6 +4253,51 @@
}
}
+int ipa3_allocate_dma_task_for_gsi(void)
+{
+ struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 };
+
+ IPADBG("Allocate mem\n");
+ ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
+ ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+ ipa3_ctx->dma_task_info.mem.size,
+ &ipa3_ctx->dma_task_info.mem.phys_base,
+ GFP_KERNEL);
+ if (!ipa3_ctx->dma_task_info.mem.base) {
+ IPAERR("no mem\n");
+ return -EFAULT;
+ }
+
+ cmd.flsh = 1;
+ cmd.size1 = ipa3_ctx->dma_task_info.mem.size;
+ cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base;
+ cmd.packet_size = ipa3_ctx->dma_task_info.mem.size;
+ ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
+ if (!ipa3_ctx->dma_task_info.cmd_pyld) {
+ IPAERR("failed to construct dma_task_32b_addr cmd\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ ipa3_ctx->dma_task_info.mem.size,
+ ipa3_ctx->dma_task_info.mem.base,
+ ipa3_ctx->dma_task_info.mem.phys_base);
+ memset(&ipa3_ctx->dma_task_info, 0,
+ sizeof(ipa3_ctx->dma_task_info));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void ipa3_free_dma_task_for_gsi(void)
+{
+ dma_free_coherent(ipa3_ctx->pdev,
+ ipa3_ctx->dma_task_info.mem.size,
+ ipa3_ctx->dma_task_info.mem.base,
+ ipa3_ctx->dma_task_info.mem.phys_base);
+ ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld);
+ memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info));
+}
+
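The two helpers above split what used to be lazy, on-demand allocation into an explicit allocate/free pair, so the stop path itself no longer allocates memory. A hedged sketch of how the pair is meant to bracket driver init and teardown (the call sites shown are assumptions, not part of this patch):

	static int example_init(void)
	{
		int ret = ipa3_allocate_dma_task_for_gsi();

		if (ret)
			return ret;
		/* from here on, ipa3_inject_dma_task_for_gsi() can assume
		 * dma_task_info is populated */
		return 0;
	}

	static void example_teardown(void)
	{
		ipa3_free_dma_task_for_gsi();
	}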
/**
* ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
*
@@ -4198,41 +4306,12 @@
*/
int ipa3_inject_dma_task_for_gsi(void)
{
- static struct ipa_mem_buffer mem = {0};
- struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0};
- static struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipa3_desc desc = {0};
- /* allocate the memory only for the very first time */
- if (!mem.base) {
- IPADBG("Allocate mem\n");
- mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
- mem.size,
- &mem.phys_base,
- GFP_KERNEL);
- if (!mem.base) {
- IPAERR("no mem\n");
- return -EFAULT;
- }
- }
- if (!cmd_pyld) {
- cmd.flsh = 1;
- cmd.size1 = mem.size;
- cmd.addr1 = mem.phys_base;
- cmd.packet_size = mem.size;
- cmd_pyld = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
- if (!cmd_pyld) {
- IPAERR("failed to construct dma_task_32b_addr cmd\n");
- return -EFAULT;
- }
- }
-
desc.opcode = ipahal_imm_cmd_get_opcode_param(
IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data;
+ desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
IPADBG("sending 1B packet to IPA\n");
@@ -4273,21 +4352,30 @@
memset(&mem, 0, sizeof(mem));
- for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
- IPADBG("Calling gsi_stop_channel\n");
+ if (IPA_CLIENT_IS_PROD(ep->client)) {
+ IPADBG("Calling gsi_stop_channel ch:%lu\n",
+ ep->gsi_chan_hdl);
res = gsi_stop_channel(ep->gsi_chan_hdl);
- IPADBG("gsi_stop_channel returned %d\n", res);
+ IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+ ep->gsi_chan_hdl, res);
+ goto end_sequence;
+ }
+
+ for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+ IPADBG("Calling gsi_stop_channel ch:%lu\n",
+ ep->gsi_chan_hdl);
+ res = gsi_stop_channel(ep->gsi_chan_hdl);
+ IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+ ep->gsi_chan_hdl, res);
if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
goto end_sequence;
- if (IPA_CLIENT_IS_CONS(ep->client)) {
- IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
- /* Send a 1B packet DMA_TASK to IPA and try again */
- res = ipa3_inject_dma_task_for_gsi();
- if (res) {
- IPAERR("Failed to inject DMA TASk for GSI\n");
- goto end_sequence;
- }
+ IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+ /* Send a 1B packet DMA_TASK to IPA and try again */
+ res = ipa3_inject_dma_task_for_gsi();
+ if (res) {
+ IPAERR("Failed to inject DMA TASk for GSI\n");
+ goto end_sequence;
}
/* sleep for short period to flush IPA */
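The reworked stop sequence treats the two pipe directions differently: a producer channel gets a single gsi_stop_channel() attempt, while a consumer channel retries up to IPA_GSI_CHANNEL_STOP_MAX_RETRY times, injecting a 1-byte DMA_TASK between attempts to flush IPA. A condensed, non-authoritative sketch of that control flow (the sleep bounds are assumptions):

	static int example_stop_gsi_channel(struct ipa3_ep_context *ep)
	{
		int i, res;

		if (IPA_CLIENT_IS_PROD(ep->client))
			return gsi_stop_channel(ep->gsi_chan_hdl); /* no retry */

		for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
			res = gsi_stop_channel(ep->gsi_chan_hdl);
			if (res != -GSI_STATUS_AGAIN &&
			    res != -GSI_STATUS_TIMED_OUT)
				return res;
			/* flush IPA with a 1B DMA_TASK packet, then retry */
			res = ipa3_inject_dma_task_for_gsi();
			if (res)
				return res;
			usleep_range(1000, 2000); /* assumed flush delay */
		}
		return -EFAULT;
	}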
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 3c8688e7..78fd90b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -81,6 +81,9 @@
__stringify(IPA_QSB_MAX_WRITES),
__stringify(IPA_QSB_MAX_READS),
__stringify(IPA_TX_CFG),
+ __stringify(IPA_IDLE_INDICATION_CFG),
+ __stringify(IPA_DPS_SEQUENCER_FIRST),
+ __stringify(IPA_HPS_SEQUENCER_FIRST),
};
static void ipareg_construct_dummy(enum ipahal_reg_name reg,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index cf9775b..a15bd04 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -66,6 +66,7 @@
((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
rmnet_ipa3_ctx->wwan_priv->net : NULL)
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
@@ -90,6 +91,7 @@
bool ipa_loaduC;
bool ipa_advertise_sg_support;
bool ipa_napi_enable;
+ u32 wan_rx_desc_size;
};
/**
@@ -1274,7 +1276,6 @@
{
int ret = 0;
struct ipa_sys_connect_params *ipa_wan_ep_cfg;
- struct rmnet_phys_ep_conf_s *ep_cfg;
IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;
@@ -1296,14 +1297,6 @@
in->u.ingress_format.agg_size;
ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
in->u.ingress_format.agg_count;
-
- if (ipa_wan_ep_cfg->napi_enabled) {
- ipa_wan_ep_cfg->recycle_enabled = true;
- ep_cfg = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(dev->rx_handler_data);
- ep_cfg->recycle = ipa_recycle_wan_skb;
- pr_info("Wan Recycle Enabled\n");
- }
}
}
@@ -1325,10 +1318,8 @@
ipa_wan_ep_cfg->priv = dev;
ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
- if (ipa_wan_ep_cfg->napi_enabled)
- ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
- else
- ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ ipa_wan_ep_cfg->desc_fifo_sz =
+ ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE;
mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard);
@@ -1664,6 +1655,9 @@
extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
sizeof(mux_channel[rmnet_index]
.vchannel_name));
+ mux_channel[rmnet_index].vchannel_name[
+ IFNAMSIZ - 1] = '\0';
+
IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
mux_channel[rmnet_index].vchannel_name,
mux_channel[rmnet_index].mux_id,
@@ -2012,6 +2006,9 @@
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
+ int result;
+
+ ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
ipa_rmnet_drv_res->ipa_rmnet_ssr =
of_property_read_bool(pdev->dev.of_node,
"qcom,rmnet-ipa-ssr");
@@ -2034,6 +2031,18 @@
"qcom,ipa-napi-enable");
pr_info("IPA Napi Enable = %s\n",
ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+ /* Get IPA WAN RX desc fifo size */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wan-rx-desc-size",
+ &ipa_rmnet_drv_res->wan_rx_desc_size);
+ if (result)
+ pr_info("using default for wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+ else
+ IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+
return 0;
}
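With this change the WAN consumer descriptor FIFO size is derived from a configurable descriptor count instead of a fixed constant: desc_fifo_sz = wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE. As a worked example (the 16-byte element size is an assumption, not stated in this patch):

	/* default of 256 descriptors, assumed 16 B per FIFO element */
	u32 desc_fifo_sz = 256 * 16;	/* = 4096 B descriptor FIFO */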
@@ -2351,32 +2360,41 @@
{
struct net_device *netdev = IPA_NETDEV();
struct ipa3_wwan_private *wwan_ptr;
+ int ret;
- IPAWANDBG_LOW("Enter...\n");
+ IPAWANDBG("Enter...\n");
+
if (netdev == NULL) {
IPAWANERR("netdev is NULL.\n");
- return 0;
+ ret = 0;
+ goto bail;
}
+ netif_tx_lock_bh(netdev);
wwan_ptr = netdev_priv(netdev);
if (wwan_ptr == NULL) {
IPAWANERR("wwan_ptr is NULL.\n");
- return 0;
+ ret = 0;
+ goto unlock_and_bail;
}
/* Do not allow A7 to suspend in case there are outstanding packets */
if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto unlock_and_bail;
}
/* Make sure that there is no Tx operation ongoing */
- netif_tx_lock_bh(netdev);
+ netif_stop_queue(netdev);
ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
- netif_tx_unlock_bh(netdev);
- IPAWANDBG_LOW("Exit\n");
+ ret = 0;
- return 0;
+unlock_and_bail:
+ netif_tx_unlock_bh(netdev);
+bail:
+ IPAWANDBG("Exit with %d\n", ret);
+ return ret;
}
/**
@@ -2393,10 +2411,10 @@
{
struct net_device *netdev = IPA_NETDEV();
- IPAWANDBG_LOW("Enter...\n");
+ IPAWANDBG("Enter...\n");
if (netdev)
netif_wake_queue(netdev);
- IPAWANDBG_LOW("Exit\n");
+ IPAWANDBG("Exit\n");
return 0;
}
@@ -3188,6 +3206,9 @@
mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
+
+ ipa3_qmi_init();
+
/* Register for Modem SSR */
rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
SUBSYS_MODEM,
@@ -3201,7 +3222,7 @@
static void __exit ipa3_wwan_cleanup(void)
{
int ret;
-
+ ipa3_qmi_cleanup();
mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
ret = subsys_notif_unregister_notifier(
rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 47da1b3..5595b7b 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -36,7 +36,7 @@
#define WIGIG_VENDOR (0x1ae9)
#define WIGIG_DEVICE (0x0310)
-#define SMMU_BASE 0x10000000 /* Device address range base */
+#define SMMU_BASE 0x20000000 /* Device address range base */
#define SMMU_SIZE ((SZ_1G * 4ULL) - SMMU_BASE)
#define WIGIG_ENABLE_DELAY 50
@@ -93,9 +93,12 @@
/* SMMU */
bool use_smmu; /* have SMMU enabled? */
- int smmu_bypass;
+ int smmu_s1_en;
int smmu_fast_map;
+ int smmu_coherent;
struct dma_iommu_mapping *mapping;
+ u32 smmu_base;
+ u32 smmu_size;
/* bus frequency scaling */
struct msm_bus_scale_pdata *bus_scale;
@@ -638,15 +641,20 @@
{
int atomic_ctx = 1;
int rc;
+ int force_pt_coherent = 1;
+ int smmu_bypass = !ctx->smmu_s1_en;
+ dma_addr_t iova_base = 0;
+ dma_addr_t iova_end = ctx->smmu_base + ctx->smmu_size - 1;
+ struct iommu_domain_geometry geometry;
if (!ctx->use_smmu)
return 0;
- dev_info(ctx->dev, "Initialize SMMU, bypass = %d, fastmap = %d\n",
- ctx->smmu_bypass, ctx->smmu_fast_map);
+ dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
+ smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
ctx->mapping = arm_iommu_create_mapping(&platform_bus_type,
- SMMU_BASE, SMMU_SIZE);
+ ctx->smmu_base, ctx->smmu_size);
if (IS_ERR_OR_NULL(ctx->mapping)) {
rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
@@ -662,23 +670,50 @@
goto release_mapping;
}
- if (ctx->smmu_bypass) {
+ if (smmu_bypass) {
rc = iommu_domain_set_attr(ctx->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
- &ctx->smmu_bypass);
+ &smmu_bypass);
if (rc) {
dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
rc);
goto release_mapping;
}
- } else if (ctx->smmu_fast_map) {
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_FAST,
- &ctx->smmu_fast_map);
- if (rc) {
- dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
- rc);
- goto release_mapping;
+ } else {
+ /* Set dma-coherent and page table coherency */
+ if (ctx->smmu_coherent) {
+ arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
+ rc = iommu_domain_set_attr(ctx->mapping->domain,
+ DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ &force_pt_coherent);
+ if (rc) {
+ dev_err(ctx->dev,
+ "Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
+ rc);
+ goto release_mapping;
+ }
+ }
+
+ if (ctx->smmu_fast_map) {
+ rc = iommu_domain_set_attr(ctx->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &ctx->smmu_fast_map);
+ if (rc) {
+ dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
+ rc);
+ goto release_mapping;
+ }
+ memset(&geometry, 0, sizeof(geometry));
+ geometry.aperture_start = iova_base;
+ geometry.aperture_end = iova_end;
+ rc = iommu_domain_set_attr(ctx->mapping->domain,
+ DOMAIN_ATTR_GEOMETRY,
+ &geometry);
+ if (rc) {
+ dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
+ rc);
+ goto release_mapping;
+ }
}
}
@@ -900,6 +935,7 @@
struct device_node *of_node = dev->of_node;
struct device_node *rc_node;
struct pci_dev *pcidev = NULL;
+ u32 smmu_mapping[2];
int rc;
u32 val;
@@ -954,8 +990,27 @@
ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
- ctx->smmu_bypass = 1;
- ctx->smmu_fast_map = 0;
+ ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
+ if (ctx->smmu_s1_en) {
+ ctx->smmu_fast_map = of_property_read_bool(
+ of_node, "qcom,smmu-fast-map");
+ ctx->smmu_coherent = of_property_read_bool(
+ of_node, "qcom,smmu-coherent");
+ }
+ rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
+ smmu_mapping, 2);
+ if (rc) {
+ dev_err(ctx->dev,
+ "Failed to read base/size smmu addresses %d, fallback to default\n",
+ rc);
+ ctx->smmu_base = SMMU_BASE;
+ ctx->smmu_size = SMMU_SIZE;
+ } else {
+ ctx->smmu_base = smmu_mapping[0];
+ ctx->smmu_size = smmu_mapping[1];
+ }
+ dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
+ ctx->smmu_base, ctx->smmu_size);
/*== execute ==*/
/* turn device on */
diff --git a/drivers/platform/msm/seemp_core/Makefile b/drivers/platform/msm/seemp_core/Makefile
new file mode 100644
index 0000000..a26db43
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Iinclude/linux
+obj-$(CONFIG_SEEMP_CORE) += seemp_core.o
+seemp_core-objs:= seemp_logk.o seemp_ringbuf.o seemp_event_encoder.o
\ No newline at end of file
diff --git a/drivers/platform/msm/seemp_core/seemp_event_encoder.c b/drivers/platform/msm/seemp_core/seemp_event_encoder.c
new file mode 100644
index 0000000..6d9aa81
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/seemp_event_encoder.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/seemp_param_id.h>
+#include "seemp_logk.h"
+#include "seemp_event_encoder.h"
+
+static char *scan_id(char *s);
+static void encode_seemp_section(char *section_start, char *section_eq,
+ char *section_end, bool param, bool numeric,
+ int id, __s32 numeric_value);
+
+static void check_param_range(char *section_eq, bool param,
+ bool *numeric, int val_len, __s32 *numeric_value)
+{
+ long long_value = 0;
+
+ if (param && *numeric) {
+ /* value must be 2-6 chars, i.e. within [-99999, 999999] */
+ *numeric = (val_len >= 2) && (val_len <= 6);
+ if (*numeric) {
+ if (kstrtol(section_eq + 1, 10, &long_value)
+ != 0) {
+ *numeric = false;
+ } else {
+ *numeric_value = (__s32)long_value;
+ /* We are checking whether the value
+ * lies within 16 bits
+ */
+ *numeric = (long_value >= -32768) &&
+ (long_value <= 32767);
+ }
+ }
+ }
+}
+
+void encode_seemp_params(struct seemp_logk_blk *blk)
+{
+ struct seemp_logk_blk tmp;
+ char *s = 0;
+ char *msg_section_start = 0;
+ char *msg_section_eq = 0;
+ char *msg_s = 0;
+
+ memcpy(tmp.payload.msg, blk->payload.msg, BLK_MAX_MSG_SZ);
+ s = tmp.payload.msg + 1;
+ tmp.payload.msg[BLK_MAX_MSG_SZ - 1] = 0; /* zero-terminate */
+
+ while (true) {
+ char *section_start = s;
+ char *section_eq = scan_id(s);
+ bool param = (section_eq - section_start >= 2) &&
+ (*section_eq == '=') && (section_eq[1] != ' ');
+ bool numeric = false;
+ int id = -1;
+ __s32 numeric_value = 0;
+ int id_len;
+ int val_len;
+ char ch;
+
+ if (param) {
+ id = param_id_index(section_start, section_eq);
+
+ if (id < 0)
+ param = false;
+ }
+
+ if (!param) {
+ s = section_eq;
+ while ((*s != 0) && (*s != ','))
+ s++;
+ } else {
+ s = section_eq + 1; /* equal sign */
+ numeric = (*s == '-') || ((*s >= '0') && (*s <= '9'));
+
+ if (numeric)
+ s++; /* first char of number */
+
+ while ((*s != 0) && (*s != ',')) {
+ if (*s == '=')
+ param = false;
+ else if (!((*s >= '0') && (*s <= '9')))
+ numeric = false;
+
+ s++;
+ }
+
+ if (param) {
+ id_len = section_eq - section_start;
+ val_len = s - (section_eq + 1);
+ param = (id_len >= 2) && (id_len <= 31)
+ && (val_len <= 31);
+ ch = *s;
+ *s = 0;
+
+ check_param_range(section_eq, param,
+ &numeric, val_len, &numeric_value);
+ *s = ch;
+ }
+ }
+
+ msg_section_start = blk->payload.msg + (section_start -
+ tmp.payload.msg);
+ msg_section_eq = blk->payload.msg + (section_eq -
+ tmp.payload.msg);
+ msg_s = blk->payload.msg + (s - tmp.payload.msg);
+ encode_seemp_section(msg_section_start, msg_section_eq,
+ msg_s, param, numeric, id, numeric_value);
+
+ if (*s == 0)
+ break;
+
+ s++;
+ }
+
+ blk->len = s - blk->payload.msg;
+}
+
+static char *scan_id(char *s)
+{
+ while ((*s == '_') ||
+ ((*s >= 'A') && (*s <= 'Z')) ||
+ ((*s >= 'a') && (*s <= 'z'))) {
+ s++;
+ }
+
+ return s;
+}
+
+static void encode_seemp_section(char *section_start, char *section_eq,
+ char *section_end, bool param, bool numeric,
+ int id, __s32 numeric_value) {
+ param = param && (section_eq + 1 < section_end);
+
+ if (!param) {
+ /* Encode skip section */
+ int skip_len = section_end - section_start;
+ char skip_len_hi = skip_len & 0xE0;
+ char skip_len_lo = skip_len & 0x1F;
+
+ if (skip_len < 32) {
+ section_start[-1] = 0xC0 | skip_len_lo;
+ /* [1:1:0:0 0000] */
+ } else {
+ section_start[-1] = 0xE0 | skip_len_lo;
+ /* [1:1:1:0 0000] */
+
+ if (skip_len_hi & 0x20)
+ section_start[0] |= 0x80;
+
+ if (skip_len_hi & 0x40)
+ section_start[1] |= 0x80;
+
+ if (skip_len_hi & 0x80)
+ section_start[2] |= 0x80;
+ }
+ } else {
+ /* Encode ID=VALUE section */
+ char id_len = section_eq - section_start;
+ char value_len = section_end - (section_eq + 1);
+
+ section_start[-1] = 0x00 | id_len;
+ *(__s16 *)section_start = id;
+ section_eq[0] = (!numeric ? 0x80 : 0x00) | value_len;
+
+ if (numeric)
+ *(__s16 *)(section_eq + 1) = numeric_value;
+ }
+}
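To make the header-byte layout above concrete: a skip section shorter than 32 bytes is fully described by 0xC0 | len, while longer skips use 0xE0 | (len & 0x1F) and spill the three high length bits into the MSBs of the following bytes. An illustrative helper for just the header byte (not part of the driver):

	static unsigned char example_skip_hdr(int skip_len)
	{
		if (skip_len < 32)
			return 0xC0 | (skip_len & 0x1F);  /* [1:1:0:l llll] */
		return 0xE0 | (skip_len & 0x1F);          /* [1:1:1:l llll] */
	}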
diff --git a/drivers/platform/msm/seemp_core/seemp_event_encoder.h b/drivers/platform/msm/seemp_core/seemp_event_encoder.h
new file mode 100644
index 0000000..7cb7274
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/seemp_event_encoder.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SEEMP_EVENT_ENCODER_H__
+#define __SEEMP_EVENT_ENCODER_H__
+
+#include "seemp_logk.h"
+
+void encode_seemp_params(struct seemp_logk_blk *blk);
+
+#endif /* __SEEMP_EVENT_ENCODER_H__ */
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c
new file mode 100644
index 0000000..ce073ed
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/seemp_logk.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "seemp: %s: " fmt, __func__
+
+#include "seemp_logk.h"
+#include "seemp_ringbuf.h"
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+#define MASK_BUFFER_SIZE 256
+#define FOUR_MB 4
+#define YEAR_BASE 1900
+
+static struct seemp_logk_dev *slogk_dev;
+
+static unsigned int ring_sz = FOUR_MB;
+
+/*
+ * default is best-effort; apps do not get blocked
+ */
+static unsigned int block_apps;
+
+
+/*
+ * When this flag is turned on, kmalloc is used for the ring buffer
+ * allocation; otherwise vmalloc is used. The default is vmalloc,
+ * since kmalloc has a limit of 4MB.
+ */
+unsigned int kmalloc_flag;
+
+static struct class *cl;
+
+static rwlock_t filter_lock;
+static struct seemp_source_mask *pmask;
+static unsigned int num_sources;
+
+static long seemp_logk_reserve_rdblks(
+ struct seemp_logk_dev *sdev, unsigned long arg);
+static long seemp_logk_set_mask(unsigned long arg);
+static long seemp_logk_set_mapping(unsigned long arg);
+static long seemp_logk_check_filter(unsigned long arg);
+
+void* (*seemp_logk_kernel_begin)(char **buf);
+
+void (*seemp_logk_kernel_end)(void *blck);
+
+/*
+ * kernel logging is done in four steps:
+ * (1) fetch a block, fill everything except the payload.
+ * (2) return the payload pointer to the caller.
+ * (3) the caller fills its data directly into the payload area.
+ * (4) the caller invokes finish_record() to finish writing.
+ */
+void *seemp_logk_kernel_start_record(char **buf)
+{
+ struct seemp_logk_blk *blk;
+ struct timespec now;
+ struct tm ts;
+ int idx;
+ int ret;
+
+ DEFINE_WAIT(write_wait);
+
+ ret = 0;
+ idx = 0;
+ now = current_kernel_time();
+ blk = ringbuf_fetch_wr_block(slogk_dev);
+ if (!blk) {
+ /*
+ * there is no blk to write
+ * if block_apps == 0; quietly return
+ */
+ if (!block_apps) {
+ *buf = NULL;
+ return NULL;
+ }
+ /*else wait for the blks to be available*/
+ while (1) {
+ mutex_lock(&slogk_dev->lock);
+ prepare_to_wait(&slogk_dev->writers_wq,
+ &write_wait, TASK_INTERRUPTIBLE);
+ ret = (slogk_dev->num_write_avail_blks <= 0);
+ if (!ret) {
+ /* don't have to wait*/
+ break;
+ }
+ mutex_unlock(&slogk_dev->lock);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ schedule();
+ }
+
+ finish_wait(&slogk_dev->writers_wq, &write_wait);
+ if (ret)
+ return NULL;
+
+ idx = slogk_dev->write_idx;
+ slogk_dev->write_idx =
+ (slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
+ slogk_dev->num_write_avail_blks--;
+ slogk_dev->num_write_in_prog_blks++;
+ slogk_dev->num_writers++;
+
+ blk = &slogk_dev->ring[idx];
+ /*mark block invalid*/
+ blk->status = 0x0;
+ mutex_unlock(&slogk_dev->lock);
+ }
+
+ blk->version = OBSERVER_VERSION;
+ blk->pid = current->tgid;
+ blk->tid = current->pid;
+ blk->uid = (current_uid()).val;
+ blk->sec = now.tv_sec;
+ blk->nsec = now.tv_nsec;
+ strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
+ time_to_tm(now.tv_sec, 0, &ts);
+ ts.tm_year += YEAR_BASE;
+ ts.tm_mon += 1;
+
+ snprintf(blk->ts, TS_SIZE, "%04ld-%02d-%02d %02d:%02d:%02d",
+ ts.tm_year, ts.tm_mon, ts.tm_mday,
+ ts.tm_hour, ts.tm_min, ts.tm_sec);
+
+ *buf = blk->payload.msg;
+
+ return blk;
+}
+
+void seemp_logk_kernel_end_record(void *blck)
+{
+ struct seemp_logk_blk *blk = (struct seemp_logk_blk *)blck;
+
+ if (blk) {
+ /*update status at the very end*/
+ blk->status |= 0x1;
+ blk->uid = (current_uid()).val;
+
+ ringbuf_finish_writer(slogk_dev, blk);
+ }
+}
+
+static int seemp_logk_usr_record(const char __user *buf, size_t count)
+{
+ struct seemp_logk_blk *blk;
+ struct seemp_logk_blk usr_blk;
+ struct seemp_logk_blk *local_blk;
+ struct timespec now;
+ struct tm ts;
+ int idx, ret;
+
+ DEFINE_WAIT(write_wait);
+
+ if (buf) {
+ local_blk = (struct seemp_logk_blk *)buf;
+ if (copy_from_user(&usr_blk.pid, &local_blk->pid,
+ sizeof(usr_blk.pid)) != 0)
+ return -EFAULT;
+ if (copy_from_user(&usr_blk.tid, &local_blk->tid,
+ sizeof(usr_blk.tid)) != 0)
+ return -EFAULT;
+ if (copy_from_user(&usr_blk.uid, &local_blk->uid,
+ sizeof(usr_blk.uid)) != 0)
+ return -EFAULT;
+ if (copy_from_user(&usr_blk.len, &local_blk->len,
+ sizeof(usr_blk.len)) != 0)
+ return -EFAULT;
+ if (copy_from_user(&usr_blk.payload, &local_blk->payload,
+ sizeof(struct blk_payload)) != 0)
+ return -EFAULT;
+ } else {
+ return -EFAULT;
+ }
+ idx = ret = 0;
+ now = current_kernel_time();
+ blk = ringbuf_fetch_wr_block(slogk_dev);
+ if (!blk) {
+ if (!block_apps)
+ return 0;
+ while (1) {
+ mutex_lock(&slogk_dev->lock);
+ prepare_to_wait(&slogk_dev->writers_wq,
+ &write_wait,
+ TASK_INTERRUPTIBLE);
+ ret = (slogk_dev->num_write_avail_blks <= 0);
+ if (!ret)
+ break;
+ mutex_unlock(&slogk_dev->lock);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ schedule();
+ }
+ finish_wait(&slogk_dev->writers_wq, &write_wait);
+ if (ret)
+ return -EINTR;
+
+ idx = slogk_dev->write_idx;
+ slogk_dev->write_idx =
+ (slogk_dev->write_idx + 1) % slogk_dev->num_tot_blks;
+ slogk_dev->num_write_avail_blks--;
+ slogk_dev->num_write_in_prog_blks++;
+ slogk_dev->num_writers++;
+ blk = &slogk_dev->ring[idx];
+ /*mark block invalid*/
+ blk->status = 0x0;
+ mutex_unlock(&slogk_dev->lock);
+ }
+ if (usr_blk.len > sizeof(struct blk_payload)-1)
+ usr_blk.len = sizeof(struct blk_payload)-1;
+
+ memcpy(&blk->payload, &usr_blk.payload, sizeof(struct blk_payload));
+ blk->pid = usr_blk.pid;
+ blk->uid = usr_blk.uid;
+ blk->tid = usr_blk.tid;
+ blk->sec = now.tv_sec;
+ blk->nsec = now.tv_nsec;
+ time_to_tm(now.tv_sec, 0, &ts);
+ ts.tm_year += YEAR_BASE;
+ ts.tm_mon += 1;
+ snprintf(blk->ts, TS_SIZE, "%02ld-%02d-%02d %02d:%02d:%02d",
+ ts.tm_year, ts.tm_mon, ts.tm_mday,
+ ts.tm_hour, ts.tm_min, ts.tm_sec);
+ strlcpy(blk->appname, current->comm, TASK_COMM_LEN);
+ blk->status |= 0x1;
+ ringbuf_finish_writer(slogk_dev, blk);
+ return ret;
+}
+
+static void seemp_logk_attach(void)
+{
+ seemp_logk_kernel_end = seemp_logk_kernel_end_record;
+ seemp_logk_kernel_begin = seemp_logk_kernel_start_record;
+}
+
+static void seemp_logk_detach(void)
+{
+ seemp_logk_kernel_begin = NULL;
+ seemp_logk_kernel_end = NULL;
+}
+
+static ssize_t
+seemp_logk_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return seemp_logk_usr_record(buf, count);
+}
+
+static int
+seemp_logk_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+
+ /*disallow seeks on this file*/
+ ret = nonseekable_open(inode, filp);
+ if (ret) {
+ pr_err("ret= %d\n", ret);
+ return ret;
+ }
+
+ slogk_dev->minor = iminor(inode);
+ filp->private_data = slogk_dev;
+
+ return 0;
+}
+
+static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index)
+{
+ unsigned int byte_num = index/8;
+ unsigned int bit_num = index%8;
+ unsigned char byte;
+
+ if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE)
+ return false;
+
+ byte = pVec[byte_num];
+
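+ /* inverted sense: a set bit in the user-supplied mask means the source is off */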
+ return !(byte & (1 << bit_num));
+}
+
+static long seemp_logk_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct seemp_logk_dev *sdev;
+ int ret;
+
+ sdev = (struct seemp_logk_dev *) filp->private_data;
+
+ if (cmd == SEEMP_CMD_RESERVE_RDBLKS) {
+ return seemp_logk_reserve_rdblks(sdev, arg);
+ } else if (cmd == SEEMP_CMD_RELEASE_RDBLKS) {
+ mutex_lock(&sdev->lock);
+ sdev->read_idx = (sdev->read_idx + sdev->num_read_in_prog_blks)
+ % sdev->num_tot_blks;
+ sdev->num_write_avail_blks += sdev->num_read_in_prog_blks;
+ ret = sdev->num_read_in_prog_blks;
+ sdev->num_read_in_prog_blks = 0;
+ /*wake up any waiting writers*/
+ mutex_unlock(&sdev->lock);
+ if (ret && block_apps)
+ wake_up_interruptible(&sdev->writers_wq);
+ } else if (cmd == SEEMP_CMD_GET_RINGSZ) {
+ if (copy_to_user((unsigned int *)arg, &sdev->ring_sz,
+ sizeof(unsigned int)))
+ return -EFAULT;
+ } else if (cmd == SEEMP_CMD_GET_BLKSZ) {
+ if (copy_to_user((unsigned int *)arg, &sdev->blk_sz,
+ sizeof(unsigned int)))
+ return -EFAULT;
+ } else if (cmd == SEEMP_CMD_SET_MASK) {
+ return seemp_logk_set_mask(arg);
+ } else if (cmd == SEEMP_CMD_SET_MAPPING) {
+ return seemp_logk_set_mapping(arg);
+ } else if (cmd == SEEMP_CMD_CHECK_FILTER) {
+ return seemp_logk_check_filter(arg);
+ } else {
+ pr_err("Invalid Request %X\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static long seemp_logk_reserve_rdblks(
+ struct seemp_logk_dev *sdev, unsigned long arg)
+{
+ int ret;
+ struct read_range rrange;
+
+ DEFINE_WAIT(read_wait);
+
+ mutex_lock(&sdev->lock);
+ if (sdev->num_writers > 0 || sdev->num_read_avail_blks <= 0) {
+ ret = -EPERM;
+ pr_debug("(reserve): blocking, cannot read.\n");
+ pr_debug("num_writers=%d num_read_avail_blks=%d\n",
+ sdev->num_writers,
+ sdev->num_read_avail_blks);
+ mutex_unlock(&sdev->lock);
+ /*
+ * unlock the device
+ * wait on a wait queue
+ * after wait, grab the dev lock again
+ */
+ while (1) {
+ mutex_lock(&sdev->lock);
+ prepare_to_wait(&sdev->readers_wq, &read_wait,
+ TASK_INTERRUPTIBLE);
+ ret = (sdev->num_writers > 0 ||
+ sdev->num_read_avail_blks <= 0);
+ if (!ret) {
+ /*don't have to wait*/
+ break;
+ }
+ mutex_unlock(&sdev->lock);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ schedule();
+ }
+
+ finish_wait(&sdev->readers_wq, &read_wait);
+ if (ret)
+ return -EINTR;
+ }
+
+ /*sdev->lock is held at this point*/
+ sdev->num_read_in_prog_blks = sdev->num_read_avail_blks;
+ sdev->num_read_avail_blks = 0;
+ rrange.start_idx = sdev->read_idx;
+ rrange.num = sdev->num_read_in_prog_blks;
+ mutex_unlock(&sdev->lock);
+
+ if (copy_to_user((unsigned int *)arg, &rrange,
+ sizeof(struct read_range)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long seemp_logk_set_mask(unsigned long arg)
+{
+ __u8 buffer[256];
+ int i;
+ unsigned int num_elements;
+
+ if (copy_from_user(&num_elements,
+ (unsigned int __user *) arg, sizeof(unsigned int)))
+ return -EFAULT;
+
+ read_lock(&filter_lock);
+ if (num_sources == 0) {
+ read_unlock(&filter_lock);
+ return -EINVAL;
+ }
+
+ if (num_elements == 0 ||
+ DIV_ROUND_UP(num_sources, 8) > MASK_BUFFER_SIZE) {
+ read_unlock(&filter_lock);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(buffer,
+ (__u8 *)arg, DIV_ROUND_UP(num_sources, 8))) {
+ read_unlock(&filter_lock);
+ return -EFAULT;
+ }
+
+ read_unlock(&filter_lock);
+ write_lock(&filter_lock);
+ if (num_elements != num_sources) {
+ write_unlock(&filter_lock);
+ return -EPERM;
+ }
+
+ for (i = 0; i < num_sources; i++) {
+ pmask[i].isOn =
+ seemp_logk_get_bit_from_vector(
+ (__u8 *)buffer, i);
+ }
+ write_unlock(&filter_lock);
+ return 0;
+}
+
+static long seemp_logk_set_mapping(unsigned long arg)
+{
+ __u32 num_elements;
+ __u32 *pbuffer;
+ int i;
+ struct seemp_source_mask *pnewmask;
+
+ if (copy_from_user(&num_elements,
+ (__u32 __user *)arg, sizeof(__u32)))
+ return -EFAULT;
+
+ if ((num_elements == 0) || (num_elements >
+ (UINT_MAX / sizeof(struct seemp_source_mask))))
+ return -EFAULT;
+
+ write_lock(&filter_lock);
+ if (pmask != NULL) {
+ /*
+ * Mask is getting set again.
+ * seemp_core was probably restarted.
+ */
+ struct seemp_source_mask *ptempmask;
+
+ num_sources = 0;
+ ptempmask = pmask;
+ pmask = NULL;
+ kfree(ptempmask);
+ }
+ write_unlock(&filter_lock);
+ pbuffer = kmalloc(sizeof(struct seemp_source_mask)
+ * num_elements, GFP_KERNEL);
+ if (pbuffer == NULL)
+ return -ENOMEM;
+
+ /*
+ * Use our new table as scratch space for now.
+ * We copy an ordered list of hash values into our buffer.
+ */
+ if (copy_from_user(pbuffer, &((__u32 __user *)arg)[1],
+ num_elements*sizeof(unsigned int))) {
+ kfree(pbuffer);
+ return -EFAULT;
+ }
+ /*
+ * We arrange the user data into a more usable form.
+ * This is done in-place.
+ */
+ pnewmask = (struct seemp_source_mask *) pbuffer;
+ for (i = num_elements - 1; i >= 0; i--) {
+ pnewmask[i].hash = pbuffer[i];
+ /* Observer is off by default*/
+ pnewmask[i].isOn = 0;
+ }
+ write_lock(&filter_lock);
+ pmask = pnewmask;
+ num_sources = num_elements;
+ write_unlock(&filter_lock);
+ return 0;
+}
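The argument layout implied by the copy_from_user() calls above is a __u32 element count followed immediately by that many 32-bit source hashes. A hypothetical userspace sketch (the fd and hash values are illustrative):

	#include <stdint.h>
	#include <sys/ioctl.h>

	/* assumed to match the kernel's _IO('z', 6) definition */
	#define SEEMP_CMD_SET_MAPPING _IO('z', 6)

	static int example_set_mapping(int fd)
	{
		/* element count, then that many source hashes */
		uint32_t map[4] = { 3, 0x1111aaaa, 0x2222bbbb, 0x3333cccc };

		return ioctl(fd, SEEMP_CMD_SET_MAPPING, (unsigned long)map);
	}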
+
+static long seemp_logk_check_filter(unsigned long arg)
+{
+ int i;
+ unsigned int hash = (unsigned int) arg;
+
+ /*
+ * This lock may be a bit long.
+ * If it is a problem, it can be fixed.
+ */
+ read_lock(&filter_lock);
+ for (i = 0; i < num_sources; i++) {
+ if (hash == pmask[i].hash) {
+ int result = pmask[i].isOn;
+
+ read_unlock(&filter_lock);
+ return result;
+ }
+ }
+ read_unlock(&filter_lock);
+ return 0;
+}
+
+static int seemp_logk_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ char *vptr;
+ unsigned long length, pfn;
+ unsigned long start = vma->vm_start;
+
+ length = vma->vm_end - vma->vm_start;
+
+ if (length > (unsigned long) slogk_dev->ring_sz) {
+ pr_err("len check failed\n");
+ return -EIO;
+ }
+
+ vma->vm_flags |= VM_RESERVED | VM_SHARED;
+ vptr = (char *) slogk_dev->ring;
+ ret = 0;
+
+ if (kmalloc_flag) {
+ ret = remap_pfn_range(vma,
+ start,
+ virt_to_phys((void *)
+ ((unsigned long)slogk_dev->ring)) >> PAGE_SHIFT,
+ length,
+ vma->vm_page_prot);
+ if (ret != 0) {
+ pr_err("remap_pfn_range() fails with ret = %d\n",
+ ret);
+ return -EAGAIN;
+ }
+ } else {
+ while (length > 0) {
+ pfn = vmalloc_to_pfn(vptr);
+
+ ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret < 0) {
+ pr_err("remap_pfn_range() fails with ret = %d\n",
+ ret);
+ return ret;
+ }
+ start += PAGE_SIZE;
+ vptr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ }
+ }
+
+ return 0;
+}
+
+static const struct file_operations seemp_logk_fops = {
+ .write = seemp_logk_write,
+ .open = seemp_logk_open,
+ .unlocked_ioctl = seemp_logk_ioctl,
+ .compat_ioctl = seemp_logk_ioctl,
+ .mmap = seemp_logk_mmap,
+};
+
+__init int seemp_logk_init(void)
+{
+ int ret;
+ dev_t devno = 0;
+
+ num_sources = 0;
+ kmalloc_flag = 0;
+ block_apps = 0;
+ pmask = NULL;
+
+ if (kmalloc_flag && ring_sz > FOUR_MB) {
+ pr_err("kmalloc cannot allocate > 4MB\n");
+ return -ENOMEM;
+ }
+
+ ring_sz = ring_sz * SZ_1M;
+ if (ring_sz <= 0) {
+ pr_err("Too small a ring_sz=%d\n", ring_sz);
+ return -EINVAL;
+ }
+
+ slogk_dev = kmalloc(sizeof(*slogk_dev), GFP_KERNEL);
+ if (slogk_dev == NULL)
+ return -ENOMEM;
+
+ slogk_dev->ring_sz = ring_sz;
+ slogk_dev->blk_sz = sizeof(struct seemp_logk_blk);
+ /*initialize ping-pong buffers*/
+ ret = ringbuf_init(slogk_dev);
+ if (ret < 0) {
+ pr_err("Init Failed, ret = %d\n", ret);
+ goto pingpong_fail;
+ }
+
+ ret = alloc_chrdev_region(&devno, 0, seemp_LOGK_NUM_DEVS,
+ seemp_LOGK_DEV_NAME);
+ if (ret < 0) {
+ pr_err("alloc_chrdev_region failed with ret = %d\n",
+ ret);
+ goto register_fail;
+ }
+
+ slogk_dev->major = MAJOR(devno);
+
+ pr_debug("logk: major# = %d\n", slogk_dev->major);
+
+ cl = class_create(THIS_MODULE, seemp_LOGK_DEV_NAME);
+ if (IS_ERR_OR_NULL(cl)) {
+ pr_err("class create failed\n");
+ goto cdev_fail;
+ }
+ if (IS_ERR_OR_NULL(device_create(cl, NULL, devno, NULL,
+ seemp_LOGK_DEV_NAME))) {
+ pr_err("device create failed\n");
+ goto class_destroy_fail;
+ }
+ cdev_init(&(slogk_dev->cdev), &seemp_logk_fops);
+
+ slogk_dev->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&(slogk_dev->cdev), MKDEV(slogk_dev->major, 0), 1);
+ if (ret) {
+ pr_err("cdev_add failed with ret = %d", ret);
+ goto class_destroy_fail;
+ }
+
+ seemp_logk_attach();
+ mutex_init(&slogk_dev->lock);
+ init_waitqueue_head(&slogk_dev->readers_wq);
+ init_waitqueue_head(&slogk_dev->writers_wq);
+ rwlock_init(&filter_lock);
+ return 0;
+class_destroy_fail:
+ class_destroy(cl);
+cdev_fail:
+ unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
+register_fail:
+ ringbuf_cleanup(slogk_dev);
+pingpong_fail:
+ kfree(slogk_dev);
+ return -EPERM;
+}
+
+__exit void seemp_logk_cleanup(void)
+{
+ dev_t devno = MKDEV(slogk_dev->major, slogk_dev->minor);
+
+ seemp_logk_detach();
+
+ cdev_del(&slogk_dev->cdev);
+
+ unregister_chrdev_region(devno, seemp_LOGK_NUM_DEVS);
+ ringbuf_cleanup(slogk_dev);
+ kfree(slogk_dev);
+
+ if (pmask != NULL) {
+ kfree(pmask);
+ pmask = NULL;
+ }
+}
+
+module_init(seemp_logk_init);
+module_exit(seemp_logk_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("seemp Observer");
+
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.h b/drivers/platform/msm/seemp_core/seemp_logk.h
new file mode 100644
index 0000000..1a41d4c
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/seemp_logk.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SEEMP_LOGK_H__
+#define __SEEMP_LOGK_H__
+
+#define OBSERVER_VERSION 0x01
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <asm/ioctls.h>
+
+#define seemp_LOGK_NUM_DEVS 1
+#define seemp_LOGK_DEV_NAME "seemplog"
+
+/*
+ * The logcat driver on Android uses four 256k ring buffers;
+ * here, we use two ring buffers of the same size,
+ * which we consider reasonable.
+ */
+#define FULL_BUF_SIZE (64 * 1024 * 1024)
+#define HALF_BUF_SIZE (32 * 1024 * 1024)
+#define FULL_BLOCKS (8 * 1024)
+#define HALF_BLOCKS (4 * 1024)
+
+#define READER_NOT_READY 0
+#define READER_READY 1
+
+#define MAGIC 'z'
+
+#define SEEMP_CMD_RESERVE_RDBLKS _IOR(MAGIC, 1, int)
+#define SEEMP_CMD_RELEASE_RDBLKS _IO(MAGIC, 2)
+#define SEEMP_CMD_GET_RINGSZ _IOR(MAGIC, 3, int)
+#define SEEMP_CMD_GET_BLKSZ _IOR(MAGIC, 4, int)
+#define SEEMP_CMD_SET_MASK _IO(MAGIC, 5)
+#define SEEMP_CMD_SET_MAPPING _IO(MAGIC, 6)
+#define SEEMP_CMD_CHECK_FILTER _IOR(MAGIC, 7, int)
+
+struct read_range {
+ int start_idx;
+ int num;
+};
+
+struct seemp_logk_dev {
+ unsigned int major;
+ unsigned int minor;
+
+ struct cdev cdev;
+ struct class *cls;
+ /*the full buffer*/
+ struct seemp_logk_blk *ring;
+ /*an array of blks*/
+ unsigned int ring_sz;
+ unsigned int blk_sz;
+
+ int num_tot_blks;
+
+ int num_write_avail_blks;
+ int num_write_in_prog_blks;
+
+ int num_read_avail_blks;
+ int num_read_in_prog_blks;
+
+ int num_writers;
+
+ /*
+ * there is always exactly one reader (the observer daemon),
+ * so there is no need for a num_readers variable
+ */
+
+ /*
+ * read_idx and write_idx advance circularly: each runs from zero
+ * up to num_tot_blks and wraps back to zero as the reader and
+ * writers make progress
+ */
+ int read_idx;
+
+ int write_idx;
+
+ /*
+ * wait queues
+ * readers_wq: implement wait for readers
+ * writers_wq: implement wait for writers
+ *
+ * whether writers are blocked or not is driven by the policy:
+ * case 1: (best_effort_logging == 1)
+ * writers are not blocked, and
+ * when there is no mem in the ring to store logs,
+ * the logs are simply dropped.
+ * case 2: (best_effort_logging == 0)
+ * when there is no mem in the ring to store logs,
+ * the process gets blocked until there is space.
+ */
+ wait_queue_head_t readers_wq;
+ wait_queue_head_t writers_wq;
+
+ /*
+ * protects everything in the device
+ * including ring buffer and all the num_ variables
+ * spinlock_t lock;
+ */
+ struct mutex lock;
+};
+
+#define BLK_SIZE 256
+#define BLK_HDR_SIZE 64
+#define TS_SIZE 20
+#define BLK_MAX_MSG_SZ (BLK_SIZE - BLK_HDR_SIZE)
+
+struct blk_payload {
+ __u32 api_id; /* event API id */
+ char msg[BLK_MAX_MSG_SZ]; /* event parameters */
+} __packed;
+
+struct seemp_logk_blk {
+ __u8 status; /* bits: 0->valid/invalid; 1-7: unused as of now! */
+ __u16 len; /* length of the payload */
+ __u8 version; /* version number */
+ __s32 pid; /* generating process's pid */
+ __s32 uid; /* generating process's uid - app specific */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char ts[TS_SIZE]; /* Time Stamp */
+ char appname[TASK_COMM_LEN];
+ struct blk_payload payload;
+} __packed;
+
+
+extern unsigned int kmalloc_flag;
+
+struct seemp_source_mask {
+ __u32 hash;
+ bool isOn;
+};
+#endif
diff --git a/drivers/platform/msm/seemp_core/seemp_ringbuf.c b/drivers/platform/msm/seemp_core/seemp_ringbuf.c
new file mode 100644
index 0000000..4558051
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/seemp_ringbuf.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "seemp: %s: " fmt, __func__
+
+#include "seemp_logk.h"
+#include "seemp_ringbuf.h"
+#include "seemp_event_encoder.h"
+
+/*initial function no need to hold ring_lock*/
+int ringbuf_init(struct seemp_logk_dev *sdev)
+{
+ char *buf;
+ unsigned long virt_addr;
+
+ if (kmalloc_flag) {
+ sdev->ring = kmalloc(sdev->ring_sz, GFP_KERNEL);
+ if (sdev->ring == NULL) {
+ pr_err("kmalloc failed, ring_sz= %d\n", sdev->ring_sz);
+ return -ENOMEM;
+ }
+
+ buf = (char *)sdev->ring;
+
+ /*reserve kmalloc memory as pages to make them remapable*/
+ for (virt_addr = (unsigned long)buf;
+ virt_addr < (unsigned long)buf + sdev->ring_sz;
+ virt_addr += PAGE_SIZE) {
+ SetPageReserved(virt_to_page((virt_addr)));
+ }
+ } else {
+ sdev->ring = vmalloc(sdev->ring_sz);
+ if (sdev->ring == NULL) {
+ pr_err("vmalloc failed, ring_sz = %d\n", sdev->ring_sz);
+ return -ENOMEM;
+ }
+ buf = (char *)sdev->ring;
+
+ /*reserve vmalloc memory as pages to make them remapable*/
+ for (virt_addr = (unsigned long)buf;
+ virt_addr < (unsigned long)buf + sdev->ring_sz;
+ virt_addr += PAGE_SIZE) {
+ SetPageReserved(vmalloc_to_page(
+ (unsigned long *) virt_addr));
+ }
+ }
+
+ memset(sdev->ring, 0, sdev->ring_sz);
+
+ sdev->num_tot_blks = (sdev->ring_sz / BLK_SIZE);
+ sdev->num_writers = 0;
+ sdev->write_idx = 0;
+ sdev->read_idx = 0;
+
+ sdev->num_write_avail_blks = sdev->num_tot_blks;
+ /*no. of blocks available for write*/
+ sdev->num_write_in_prog_blks = 0;
+ /*no. of blocks held by writers to perform writes*/
+
+ sdev->num_read_avail_blks = 0;
+ /*no. of blocks ready for read*/
+ sdev->num_read_in_prog_blks = 0;
+ /*no. of blocks held by the reader to perform read*/
+
+ return 0;
+}
+
+void ringbuf_cleanup(struct seemp_logk_dev *sdev)
+{
+ unsigned long virt_addr;
+
+ if (kmalloc_flag) {
+ for (virt_addr = (unsigned long)sdev->ring;
+ virt_addr < (unsigned long)sdev->ring + sdev->ring_sz;
+ virt_addr += PAGE_SIZE) {
+ /*clear all pages*/
+ ClearPageReserved(virt_to_page((unsigned long *)
+ virt_addr));
+ }
+ kfree(sdev->ring);
+ } else {
+ for (virt_addr = (unsigned long)sdev->ring;
+ virt_addr < (unsigned long)sdev->ring + sdev->ring_sz;
+ virt_addr += PAGE_SIZE) {
+ /*clear all pages*/
+ ClearPageReserved(vmalloc_to_page((unsigned long *)
+ virt_addr));
+ }
+ vfree(sdev->ring);
+ }
+}
+
+struct seemp_logk_blk *ringbuf_fetch_wr_block
+ (struct seemp_logk_dev *sdev)
+{
+ struct seemp_logk_blk *blk = NULL;
+ int idx;
+
+ mutex_lock(&sdev->lock);
+ if (sdev->num_write_avail_blks == 0) {
+ idx = -1;
+ mutex_unlock(&sdev->lock);
+ return blk;
+ }
+
+ idx = sdev->write_idx;
+ sdev->write_idx = (sdev->write_idx + 1) % sdev->num_tot_blks;
+ sdev->num_write_avail_blks--;
+ sdev->num_write_in_prog_blks++;
+ sdev->num_writers++;
+
+ blk = &sdev->ring[idx];
+ blk->status = 0x0;
+
+ mutex_unlock(&sdev->lock);
+ return blk;
+}
+
+void ringbuf_finish_writer(struct seemp_logk_dev *sdev,
+ struct seemp_logk_blk *blk)
+{
+ /* Encode seemp parameters in multi-threaded mode (before mutex lock) */
+ encode_seemp_params(blk);
+
+ /*
+ * finish writing...
+ * the calling process will no longer access this block.
+ */
+ mutex_lock(&sdev->lock);
+
+ sdev->num_writers--;
+ sdev->num_write_in_prog_blks--;
+ sdev->num_read_avail_blks++;
+
+ /*wake up any readers*/
+ if (sdev->num_writers == 0)
+ wake_up_interruptible(&sdev->readers_wq);
+
+ mutex_unlock(&sdev->lock);
+}
+
+int ringbuf_count_marked(struct seemp_logk_dev *sdev)
+{
+ int i;
+ unsigned int marked;
+
+ mutex_lock(&sdev->lock);
+ for (marked = 0, i = 0; i < sdev->num_tot_blks; i++)
+ if (sdev->ring[i].status & 0x1)
+ marked++;
+ mutex_unlock(&sdev->lock);
+
+ return marked;
+}
diff --git a/drivers/platform/msm/seemp_core/seemp_ringbuf.h b/drivers/platform/msm/seemp_core/seemp_ringbuf.h
new file mode 100644
index 0000000..3abdf77
--- /dev/null
+++ b/drivers/platform/msm/seemp_core/seemp_ringbuf.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SEEMP_RINGBUF_H__
+#define __SEEMP_RINGBUF_H__
+
+/*
+ * This header exports the ring buffer (ping-pong) API
+ */
+
+int ringbuf_init(struct seemp_logk_dev *sdev);
+struct seemp_logk_blk *ringbuf_fetch_wr_block
+(struct seemp_logk_dev *sdev);
+void ringbuf_finish_writer(struct seemp_logk_dev *sdev,
+ struct seemp_logk_blk *blk);
+void ringbuf_cleanup(struct seemp_logk_dev *sdev);
+int ringbuf_count_marked(struct seemp_logk_dev *sdev);
+
+#endif
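Taken together, the write path through this API is: fetch a free block, fill its payload, mark it valid, and hand it back so the reader can be woken. A minimal kernel-side sketch (the caller and message are hypothetical):

	static int example_log_one_event(struct seemp_logk_dev *sdev,
					 const char *msg)
	{
		struct seemp_logk_blk *blk = ringbuf_fetch_wr_block(sdev);

		if (!blk)
			return -ENOMEM; /* ring full; best-effort mode drops */
		strlcpy(blk->payload.msg, msg, BLK_MAX_MSG_SZ);
		blk->status |= 0x1;	/* mark the block valid */
		ringbuf_finish_writer(sdev, blk); /* encode, wake the reader */
		return 0;
	}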
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 79d64ea..c29b9b6 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -355,6 +355,32 @@
{}
};
+static const struct dmi_system_id amw0_whitelist[] __initconst = {
+ {
+ .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ {
+ .ident = "Gateway",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ },
+ },
+ {
+ .ident = "Packard Bell",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+ },
+ },
+ {}
+};
+
+/*
+ * This quirk table is only for the Acer/Gateway/Packard Bell family
+ * of machines that are supported by the acer-wmi driver.
+ */
static const struct dmi_system_id acer_quirks[] __initconst = {
{
.callback = dmi_matched,
@@ -464,6 +490,17 @@
},
.driver_data = &quirk_acer_travelmate_2490,
},
+ {}
+};
+
+/*
+ * This quirk list is for those non-Acer machines that have AMW0_GUID1
+ * but were supported by acer-wmi in the past. It is kept only for
+ * backward compatibility. Please do not add new machines here;
+ * non-Acer machines should be supported by the appropriate WMI
+ * drivers instead.
+ */
+static const struct dmi_system_id non_acer_quirks[] __initconst = {
{
.callback = dmi_matched,
.ident = "Fujitsu Siemens Amilo Li 1718",
@@ -598,6 +635,7 @@
{
if (!force_series) {
dmi_check_system(acer_quirks);
+ dmi_check_system(non_acer_quirks);
} else if (force_series == 2490) {
quirks = &quirk_acer_travelmate_2490;
}
@@ -1808,11 +1846,24 @@
return status;
}
+#define ACER_WMID_ACCEL_HID "BST0001"
+
static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
void *ctx, void **retval)
{
+ struct acpi_device *dev;
+
+ if (!strcmp(ctx, "SENR")) {
+ if (acpi_bus_get_device(ah, &dev))
+ return AE_OK;
+ if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ } else
+ return AE_OK;
+
*(acpi_handle *)retval = ah;
- return AE_OK;
+
+ return AE_CTRL_TERMINATE;
}
static int __init acer_wmi_get_handle(const char *name, const char *prop,
@@ -1839,7 +1890,7 @@
{
int err;
- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
+ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
if (err)
return err;
@@ -2108,6 +2159,24 @@
find_quirks();
/*
+ * The AMW0_GUID1 WMI GUID is found not only on the Acer family but
+ * also on other machines such as Lenovo, Fujitsu and Medion. In the
+ * past, the acer-wmi driver handled those non-Acer machines via its
+ * quirk list, but in practice it was loaded on any machine exposing
+ * AMW0_GUID1, even though such machines should be handled by the
+ * appropriate WMI drivers (e.g. fujitsu-laptop, ideapad-laptop).
+ * So require that a machine with AMW0_GUID1 is either in the
+ * Acer/Gateway/Packard Bell whitelist or already in the legacy
+ * quirk list.
+ */
+ if (wmi_has_guid(AMW0_GUID1) &&
+ !dmi_check_system(amw0_whitelist) &&
+ quirks == &quirk_unknown) {
+ pr_err("Unsupported machine has AMW0_GUID1, unable to load\n");
+ return -ENODEV;
+ }
+
+ /*
* Detect which ACPI-WMI interface we're using.
*/
if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
@@ -2177,10 +2246,11 @@
err = acer_wmi_input_setup();
if (err)
return err;
+ err = acer_wmi_accel_setup();
+ if (err)
+ return err;
}
- acer_wmi_accel_setup();
-
err = platform_driver_register(&acer_platform_driver);
if (err) {
pr_err("Unable to register platform driver\n");
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 6032b70..6eb2837 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -116,6 +116,10 @@
.wmi_backlight_native = true,
};
+static struct quirk_entry quirk_asus_x550lb = {
+ .xusb2pr = 0x01D9,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
quirks = dmi->driver_data;
@@ -407,6 +411,15 @@
},
.driver_data = &quirk_asus_ux303ub,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X550LB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"),
+ },
+ .driver_data = &quirk_asus_x550lb,
+ },
{},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ce6ca31..8499d3a 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -156,6 +156,11 @@
#define ASUS_FAN_CTRL_MANUAL 1
#define ASUS_FAN_CTRL_AUTO 2
+#define USB_INTEL_XUSB2PR 0xD0
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+
+static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+
struct bios_args {
u32 arg0;
u32 arg1;
@@ -1080,6 +1085,29 @@
return result;
}
+static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+{
+ struct pci_dev *xhci_pdev;
+ u32 orig_ports_available;
+ u32 ports_available = asus->driver->quirks->xusb2pr;
+
+ xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
+ NULL);
+
+ if (!xhci_pdev)
+ return;
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &orig_ports_available);
+
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
+ orig_ports_available, ports_available);
+}
+
/*
* Hwmon device
*/
@@ -2025,6 +2053,16 @@
return 0;
}
+static bool ashs_present(void)
+{
+ int i = 0;
+ while (ashs_ids[i]) {
+ if (acpi_dev_found(ashs_ids[i++]))
+ return true;
+ }
+ return false;
+}
+
/*
* WMI Driver
*/
@@ -2069,6 +2107,13 @@
if (err)
goto fail_leds;
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+ if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ asus->driver->wlan_ctrl_by_user = 1;
+
+ if (asus->driver->wlan_ctrl_by_user && ashs_present())
+ asus->driver->quirks->no_rfkill = 1;
+
if (!asus->driver->quirks->no_rfkill) {
err = asus_wmi_rfkill_init(asus);
if (err)
@@ -2087,6 +2132,9 @@
if (asus->driver->quirks->wmi_backlight_native)
acpi_video_set_dmi_backlight_type(acpi_backlight_native);
+ if (asus->driver->quirks->xusb2pr)
+ asus_wmi_set_xusb2pr(asus);
+
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
@@ -2105,10 +2153,6 @@
if (err)
goto fail_debugfs;
- asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
- asus->driver->wlan_ctrl_by_user = 1;
-
return 0;
fail_debugfs:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 0e19014..fdff626 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -53,6 +53,7 @@
* and let the ACPI interrupt to send out the key event.
*/
int no_display_toggle;
+ u32 xusb2pr;
bool (*i8042_filter)(unsigned char data, unsigned char str,
struct serio *serio);
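The xusb2pr quirk value is a port-routing mask for the Intel xHCI XUSB2PR config register written by asus_wmi_set_xusb2pr() above: each set bit routes the corresponding USB2 port to the xHCI controller instead of EHCI. A hypothetical decoding helper, assuming only that mask semantics (the function name is illustrative):

	#include <linux/bitops.h>
	#include <linux/printk.h>

	static void example_decode_xusb2pr(u32 mask)
	{
		int port;

		/* 0x01D9 (the X550LB quirk) sets bits 0, 3, 4, 6, 7 and 8 */
		for (port = 0; port < 32; port++)
			if (mask & BIT(port))
				pr_info("USB2 port %d -> xHCI\n", port);
	}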
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index efcc4e8..9f04957 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -28,6 +28,7 @@
#include <asm/cacheflush.h>
#include <asm/system_misc.h>
+#include <asm/memory.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/restart.h>
@@ -57,11 +58,17 @@
#ifdef CONFIG_QCOM_DLOAD_MODE
#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
#define DL_MODE_PROP "qcom,msm-imem-download_mode"
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_OFFSET_PROP "qcom,msm-imem-kaslr_offset"
+#endif
static int in_panic;
static void *dload_mode_addr;
static bool dload_mode_enabled;
static void *emergency_dload_mode_addr;
+#ifdef CONFIG_RANDOMIZE_BASE
+static void *kaslr_imem_addr;
+#endif
static bool scm_dload_supported;
static int dload_set(const char *val, struct kernel_param *kp);
@@ -420,6 +427,27 @@
pr_err("unable to map imem EDLOAD mode offset\n");
}
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_OFFSET_BIT_MASK 0x00000000FFFFFFFF
+ np = of_find_compatible_node(NULL, NULL, KASLR_OFFSET_PROP);
+ if (!np) {
+ pr_err("unable to find DT imem KASLR_OFFSET node\n");
+ } else {
+ kaslr_imem_addr = of_iomap(np, 0);
+ if (!kaslr_imem_addr)
+ pr_err("unable to map imem KASLR offset\n");
+ }
+
+ if (kaslr_imem_addr && scm_is_secure_device()) {
+ __raw_writel(0xdead4ead, kaslr_imem_addr);
+ __raw_writel(KASLR_OFFSET_BIT_MASK &
+ (kimage_vaddr - KIMAGE_VADDR), kaslr_imem_addr + 4);
+ __raw_writel(KASLR_OFFSET_BIT_MASK &
+ ((kimage_vaddr - KIMAGE_VADDR) >> 32),
+ kaslr_imem_addr + 8);
+ iounmap(kaslr_imem_addr);
+ }
+#endif
#endif
np = of_find_compatible_node(NULL, NULL,
"qcom,msm-imem-restart_reason");
@@ -484,4 +512,4 @@
{
return platform_driver_register(&msm_restart_driver);
}
-device_initcall(msm_restart_init);
+pure_initcall(msm_restart_init);
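The KASLR cookie written above occupies three consecutive 32-bit IMEM words: a 0xdead4ead magic, then the low and high halves of (kimage_vaddr - KIMAGE_VADDR). A sketch of how a consumer (e.g. a RAM-dump parser) might read it back; the helper name is illustrative:

	static u64 example_read_kaslr_offset(void __iomem *imem)
	{
		/* word 0 is the magic; without it there is no valid cookie */
		if (__raw_readl(imem) != 0xdead4ead)
			return 0;

		/* words 1 and 2 hold the low/high halves of the offset */
		return (u64)__raw_readl(imem + 4) |
		       ((u64)__raw_readl(imem + 8) << 32);
	}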
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index f6fa78f..46dc148 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -278,6 +278,7 @@
POWER_SUPPLY_ATTR(dp_dm),
POWER_SUPPLY_ATTR(input_current_limited),
POWER_SUPPLY_ATTR(input_current_now),
+ POWER_SUPPLY_ATTR(charge_qnovo_enable),
POWER_SUPPLY_ATTR(current_qnovo),
POWER_SUPPLY_ATTR(voltage_qnovo),
POWER_SUPPLY_ATTR(rerun_aicl),
@@ -306,6 +307,7 @@
POWER_SUPPLY_ATTR(die_health),
POWER_SUPPLY_ATTR(connector_health),
POWER_SUPPLY_ATTR(ctm_current_max),
+ POWER_SUPPLY_ATTR(hw_current_max),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 3659b92..54bef52 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -24,7 +25,7 @@
#include <linux/printk.h>
#include <linux/pm_wakeup.h>
#include <linux/slab.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
#define DRV_MAJOR_VERSION 1
#define DRV_MINOR_VERSION 0
@@ -36,6 +37,7 @@
#define PL_HW_ABSENT_VOTER "PL_HW_ABSENT_VOTER"
#define PL_VOTER "PL_VOTER"
#define RESTRICT_CHG_VOTER "RESTRICT_CHG_VOTER"
+#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
struct pl_data {
int pl_mode;
@@ -49,14 +51,16 @@
struct votable *pl_disable_votable;
struct votable *pl_awake_votable;
struct votable *hvdcp_hw_inov_dis_votable;
+ struct votable *usb_icl_votable;
struct work_struct status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
struct power_supply *main_psy;
struct power_supply *pl_psy;
struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
int charge_type;
- int main_settled_ua;
+ int total_settled_ua;
int pl_settled_ua;
struct class qcom_batt_class;
struct wakeup_source *pl_ws;
@@ -92,15 +96,10 @@
********/
static void split_settled(struct pl_data *chip)
{
- int slave_icl_pct;
+ int slave_icl_pct, total_current_ua;
int slave_ua = 0, main_settled_ua = 0;
union power_supply_propval pval = {0, };
- int rc;
-
- /* TODO some parallel chargers do not have a fine ICL resolution. For
- * them implement a psy interface which returns the closest lower ICL
- * for desired split
- */
+ int rc, total_settled_ua = 0;
if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
@@ -122,12 +121,31 @@
slave_icl_pct = max(0, chip->slave_pct - 10);
slave_ua = ((main_settled_ua + chip->pl_settled_ua)
* slave_icl_pct) / 100;
+ total_settled_ua = main_settled_ua + chip->pl_settled_ua;
}
- /* ICL_REDUCTION on main could be 0mA when pl is disabled */
- pval.intval = slave_ua;
+ total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+ if (total_current_ua < 0) {
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy) {
+ pr_err("Couldn't get usbpsy while splitting settled\n");
+ return;
+ }
+ /* no client is voting, so get the total current from charger */
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get max current rc=%d\n", rc);
+ return;
+ }
+ total_current_ua = pval.intval;
+ }
+
+ pval.intval = total_current_ua - slave_ua;
+ /* Set ICL on main charger */
rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_ICL_REDUCTION, &pval);
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
if (rc < 0) {
pr_err("Couldn't change slave suspend state rc=%d\n", rc);
return;
@@ -142,10 +160,12 @@
return;
}
- /* main_settled_ua represents the total capability of adapter */
- if (!chip->main_settled_ua)
- chip->main_settled_ua = main_settled_ua;
+ chip->total_settled_ua = total_settled_ua;
chip->pl_settled_ua = slave_ua;
+
+ pl_dbg(chip, PR_PARALLEL,
+ "Split total_current_ua=%d main_settled_ua=%d slave_ua=%d\n",
+ total_current_ua, main_settled_ua, slave_ua);
}
static ssize_t version_show(struct class *c, struct class_attribute *attr,
@@ -213,6 +233,10 @@
chip->restricted_charging_enabled = !!val;
+ /* disable parallel charger in case of restricted charging */
+ vote(chip->pl_disable_votable, RESTRICT_CHG_VOTER,
+ chip->restricted_charging_enabled, 0);
+
vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
chip->restricted_charging_enabled,
chip->restricted_current);
@@ -410,23 +434,28 @@
return rc;
}
- split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
- pval.intval = slave_fcc_ua;
- rc = power_supply_set_property(chip->pl_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
- if (rc < 0) {
- pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
- return rc;
- }
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+ split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
- chip->slave_fcc_ua = slave_fcc_ua;
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+ return rc;
+ }
- pval.intval = master_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
- if (rc < 0) {
- pr_err("Could not set main fcc, rc=%d\n", rc);
- return rc;
+ chip->slave_fcc_ua = slave_fcc_ua;
+
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
}
pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
@@ -487,6 +516,66 @@
return 0;
}
+#define ICL_STEP_UA 25000
+static int usb_icl_vote_callback(struct votable *votable, void *data,
+ int icl_ua, const char *client)
+{
+ int rc;
+ struct pl_data *chip = data;
+ union power_supply_propval pval = {0, };
+ bool rerun_aicl = false;
+
+ if (!chip->main_psy)
+ return 0;
+
+ if (client == NULL)
+ icl_ua = INT_MAX;
+
+ /*
+ * Disable parallel for new ICL vote - the call to split_settled will
+ * ensure that all the input current limit gets assigned to the main
+ * charger.
+ */
+ vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+
+ /* rerun AICL */
+ /* get the settled current */
+ rc = power_supply_get_property(chip->main_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+ return rc;
+ }
+
+ /* rerun AICL if new ICL is above settled ICL */
+ if (icl_ua > pval.intval)
+ rerun_aicl = true;
+
+ if (rerun_aicl) {
+ /* set a lower ICL */
+ pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA);
+ power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+ /* wait for ICL change */
+ msleep(100);
+ }
+
+ /* set the effective ICL */
+ pval.intval = icl_ua;
+ power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+ if (rerun_aicl)
+ /* wait for ICL change */
+ msleep(100);
+
+ vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
+
+ return 0;
+}
+
static void pl_disable_forever_work(struct work_struct *work)
{
struct pl_data *chip = container_of(work,
@@ -508,7 +597,7 @@
int rc;
chip->taper_pct = 100;
- chip->main_settled_ua = 0;
+ chip->total_settled_ua = 0;
chip->pl_settled_ua = 0;
if (!pl_disable) { /* enable */
@@ -596,13 +685,12 @@
static bool is_main_available(struct pl_data *chip)
{
- if (!chip->main_psy)
- chip->main_psy = power_supply_get_by_name("main");
+ if (chip->main_psy)
+ return true;
- if (!chip->main_psy)
- return false;
+ chip->main_psy = power_supply_get_by_name("main");
- return true;
+ return !!chip->main_psy;
}
static bool is_batt_available(struct pl_data *chip)
@@ -711,6 +799,7 @@
static void handle_settled_icl_change(struct pl_data *chip)
{
union power_supply_propval pval = {0, };
+ int new_total_settled_ua;
int rc;
if (get_effective_result(chip->pl_disable_votable))
@@ -730,9 +819,15 @@
return;
}
+ new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+ pl_dbg(chip, PR_PARALLEL,
+ "total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
+ chip->total_settled_ua, pval.intval,
+ new_total_settled_ua);
+
/* If ICL change is small skip splitting */
- if (abs((chip->main_settled_ua - chip->pl_settled_ua)
- - pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
+ if (abs(new_total_settled_ua - chip->total_settled_ua)
+ > MIN_ICL_CHANGE_DELTA_UA)
split_settled(chip);
} else {
rerun_election(chip->fcc_votable);
@@ -773,7 +868,18 @@
struct pl_data *chip = container_of(work,
struct pl_data, status_change_work);
- if (!is_main_available(chip))
+ if (!chip->main_psy && is_main_available(chip)) {
+ /*
+ * re-run election for FCC/FV/ICL once main_psy
+ * is available to ensure all votes are reflected
+ * on hardware
+ */
+ rerun_election(chip->usb_icl_votable);
+ rerun_election(chip->fcc_votable);
+ rerun_election(chip->fv_votable);
+ }
+
+ if (!chip->main_psy)
return;
if (!is_batt_available(chip))
@@ -855,6 +961,14 @@
goto destroy_votable;
}
+ chip->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+ usb_icl_vote_callback,
+ chip);
+ if (IS_ERR(chip->usb_icl_votable)) {
+ rc = PTR_ERR(chip->usb_icl_votable);
+ goto destroy_votable;
+ }
+
chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
pl_disable_vote_callback,
chip);
@@ -909,6 +1023,7 @@
destroy_votable(chip->pl_disable_votable);
destroy_votable(chip->fv_votable);
destroy_votable(chip->fcc_votable);
+ destroy_votable(chip->usb_icl_votable);
release_wakeup_source:
wakeup_source_unregister(chip->pl_ws);
cleanup:
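To make the split_settled() arithmetic above concrete, a small worked sketch with illustrative numbers (slave_pct = 50, main settled at 1.5 A, slave at 0.5 A, an effective USB_ICL vote of 2 A); this is a stand-alone illustration, not driver code:

	static void example_split(void)
	{
		int slave_pct = 50;
		int main_settled_ua = 1500000, pl_settled_ua = 500000;
		int total_current_ua = 2000000;	/* effective USB_ICL vote */

		/* slave keeps (slave_pct - 10)% of the settled total */
		int slave_icl_pct = max(0, slave_pct - 10);
		int slave_ua = ((main_settled_ua + pl_settled_ua) *
				slave_icl_pct) / 100;	/* 800000 uA */

		/* main charger is set to whatever the slave does not take */
		pr_info("main=%d uA slave=%d uA\n",
			total_current_ua - slave_ua, slave_ua);	/* 1200000 / 800000 */
	}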
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index c0ba5a9..5983b5c 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -29,7 +29,7 @@
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
#define fg_dbg(chip, reason, fmt, ...) \
do { \
@@ -46,10 +46,13 @@
&& (value) <= (right)))
/* Awake votable reasons */
-#define SRAM_READ "fg_sram_read"
-#define SRAM_WRITE "fg_sram_write"
-#define PROFILE_LOAD "fg_profile_load"
-#define DELTA_SOC "fg_delta_soc"
+#define SRAM_READ "fg_sram_read"
+#define SRAM_WRITE "fg_sram_write"
+#define PROFILE_LOAD "fg_profile_load"
+#define DELTA_SOC "fg_delta_soc"
+
+/* Delta BSOC votable reasons */
+#define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq"
#define DEBUG_PRINT_BUFFER_SIZE 64
/* 3 byte address + 1 space character */
@@ -125,11 +128,6 @@
FG_IRQ_MAX,
};
-/* WA flags */
-enum {
- DELTA_SOC_IRQ_WA = BIT(0),
-};
-
/*
* List of FG_SRAM parameters. Please add a parameter only if it is an entry
* that will be used either to configure an entity (e.g. termination current)
@@ -149,6 +147,7 @@
FG_SRAM_CC_SOC,
FG_SRAM_CC_SOC_SW,
FG_SRAM_ACT_BATT_CAP,
+ FG_SRAM_TIMEBASE,
/* Entries below here are configurable during initialization */
FG_SRAM_CUTOFF_VOLT,
FG_SRAM_EMPTY_VOLT,
@@ -159,14 +158,17 @@
FG_SRAM_ESR_TIMER_DISCHG_INIT,
FG_SRAM_ESR_TIMER_CHG_MAX,
FG_SRAM_ESR_TIMER_CHG_INIT,
+ FG_SRAM_ESR_PULSE_THRESH,
FG_SRAM_SYS_TERM_CURR,
FG_SRAM_CHG_TERM_CURR,
+ FG_SRAM_CHG_TERM_BASE_CURR,
FG_SRAM_DELTA_MSOC_THR,
FG_SRAM_DELTA_BSOC_THR,
FG_SRAM_RECHARGE_SOC_THR,
FG_SRAM_RECHARGE_VBATT_THR,
FG_SRAM_KI_COEFF_MED_DISCHG,
FG_SRAM_KI_COEFF_HI_DISCHG,
+ FG_SRAM_KI_COEFF_FULL_SOC,
FG_SRAM_ESR_TIGHT_FILTER,
FG_SRAM_ESR_BROAD_FILTER,
FG_SRAM_SLOPE_LIMIT,
@@ -206,6 +208,7 @@
enum wa_flags {
PMI8998_V1_REV_WA = BIT(0),
+ PM660_TSMC_OSC_WA = BIT(1),
};
enum slope_limit_status {
@@ -225,6 +228,7 @@
int empty_volt_mv;
int vbatt_low_thr_mv;
int chg_term_curr_ma;
+ int chg_term_base_curr_ma;
int sys_term_curr_ma;
int delta_soc_thr;
int recharge_soc_thr;
@@ -250,6 +254,8 @@
int esr_tight_lt_flt_upct;
int esr_broad_lt_flt_upct;
int slope_limit_temp;
+ int esr_pulse_thresh_ma;
+ int esr_meas_curr_ma;
int jeita_thresholds[NUM_JEITA_LEVELS];
int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
@@ -316,6 +322,23 @@
{ 128000, 4852 },
};
+/* each tuple is - <temperature in degC, Timebase> */
+static const struct fg_pt fg_tsmc_osc_table[] = {
+ { -20, 395064 },
+ { -10, 398114 },
+ { 0, 401669 },
+ { 10, 404641 },
+ { 20, 408856 },
+ { 25, 412449 },
+ { 30, 416532 },
+ { 40, 420289 },
+ { 50, 425020 },
+ { 60, 430160 },
+ { 70, 434175 },
+ { 80, 439475 },
+ { 90, 444992 },
+};
+
struct fg_chip {
struct device *dev;
struct pmic_revid_data *pmic_rev_id;
@@ -327,9 +350,11 @@
struct power_supply *dc_psy;
struct power_supply *parallel_psy;
struct iio_channel *batt_id_chan;
+ struct iio_channel *die_temp_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
+ struct votable *delta_bsoc_irq_en_votable;
struct fg_sram_param *sp;
struct fg_alg_flag *alg_flags;
int *debug_mask;
@@ -349,6 +374,7 @@
u32 rradc_base;
u32 wa_flags;
int batt_id_ohms;
+ int ki_coeff_full_soc;
int charge_status;
int prev_charge_status;
int charge_done;
@@ -370,8 +396,8 @@
bool esr_fcc_ctrl_en;
bool soc_reporting_ready;
bool esr_flt_cold_temp_en;
- bool bsoc_delta_irq_en;
bool slope_limit_en;
+ bool use_ima_single_mode;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
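fg_tsmc_osc_table above maps die temperature to an oscillator timebase and is consumed via fg_lerp() in fg_adjust_timebase() later in this series. A sketch of the piecewise-linear lookup, assuming fg_pt stores each tuple as x/y members; the helper is an illustrative stand-in, not the driver's fg_lerp():

	static int example_lerp(const struct fg_pt *pts, int n, int x)
	{
		int i;

		if (x <= pts[0].x)
			return pts[0].y;

		/* interpolate linearly between the bracketing tuples */
		for (i = 0; i < n - 1; i++)
			if (x < pts[i + 1].x)
				return pts[i].y +
					(x - pts[i].x) *
					(pts[i + 1].y - pts[i].y) /
					(pts[i + 1].x - pts[i].x);

		return pts[n - 1].y;
	}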
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 2dc7618..8a949bf 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -48,6 +48,10 @@
int rc;
u8 intf_ctl = 0;
+ fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "access: %d burst: %d\n",
+ access, burst);
+
+ WARN_ON(burst && chip->use_ima_single_mode);
intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
(burst ? MEM_ACS_BURST_BIT : 0);
@@ -175,6 +179,7 @@
{
int rc;
u8 dma_sts;
+ bool error_present;
rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
if (rc < 0) {
@@ -184,14 +189,13 @@
}
fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
- if (dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT)) {
- rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip),
- DMA_CLEAR_LOG_BIT, DMA_CLEAR_LOG_BIT);
- if (rc < 0) {
- pr_err("failed to write addr=0x%04x, rc=%d\n",
- MEM_IF_DMA_CTL(chip), rc);
- return rc;
- }
+ error_present = dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT);
+ rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), DMA_CLEAR_LOG_BIT,
+ error_present ? DMA_CLEAR_LOG_BIT : 0);
+ if (rc < 0) {
+ pr_err("failed to write addr=0x%04x, rc=%d\n",
+ MEM_IF_DMA_CTL(chip), rc);
+ return rc;
}
return 0;
@@ -293,7 +297,9 @@
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
- pr_err("Failed to check for ima errors rc=%d\n", rc);
+ if (rc != -EAGAIN)
+ pr_err("Failed to check for ima errors rc=%d\n",
+ rc);
return rc;
}
@@ -357,7 +363,12 @@
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
- pr_err("Failed to check for ima errors rc=%d\n", rc);
+ if (rc == -EAGAIN)
+ pr_err("IMA error cleared, address [%d %d] len %d\n",
+ address, offset, len);
+ else
+ pr_err("Failed to check for ima errors rc=%d\n",
+ rc);
return rc;
}
@@ -365,6 +376,15 @@
len -= num_bytes;
offset = byte_enable = 0;
+ if (chip->use_ima_single_mode && len) {
+ address++;
+ rc = fg_set_address(chip, address);
+ if (rc < 0) {
+ pr_err("failed to set address rc = %d\n", rc);
+ return rc;
+ }
+ }
+
rc = fg_check_iacs_ready(chip);
if (rc < 0) {
pr_debug("IACS_RDY failed rc=%d\n", rc);
@@ -403,22 +423,40 @@
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
- pr_err("Failed to check for ima errors rc=%d\n", rc);
+ if (rc == -EAGAIN)
+ pr_err("IMA error cleared, address [%d %d] len %d\n",
+ address, offset, len);
+ else
+ pr_err("Failed to check for ima errors rc=%d\n",
+ rc);
return rc;
}
- if (len && len < BYTES_PER_SRAM_WORD) {
- /*
- * Move to single mode. Changing address is not
- * required here as it must be in burst mode. Address
- * will get incremented internally by FG HW once the MSB
- * of RD_DATA is read.
- */
- rc = fg_config_access_mode(chip, FG_READ, 0);
- if (rc < 0) {
- pr_err("failed to move to single mode rc=%d\n",
- rc);
- return -EIO;
+ if (chip->use_ima_single_mode) {
+ if (len) {
+ address++;
+ rc = fg_set_address(chip, address);
+ if (rc < 0) {
+ pr_err("failed to set address rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ } else {
+ if (len && len < BYTES_PER_SRAM_WORD) {
+ /*
+ * Move to single mode. Changing address is not
+ * required here as it must be in burst mode.
+ * Address will get incremented internally by FG
+ * HW once the MSB of RD_DATA is read.
+ */
+ rc = fg_config_access_mode(chip, FG_READ,
+ false);
+ if (rc < 0) {
+ pr_err("failed to move to single mode rc=%d\n",
+ rc);
+ return -EIO;
+ }
}
}
@@ -489,6 +527,7 @@
u16 address, int offset, int len, bool access)
{
int rc = 0;
+ bool burst_mode = false;
if (!is_mem_access_available(chip, access))
return -EBUSY;
@@ -503,7 +542,8 @@
}
/* configure for the read/write, single/burst mode */
- rc = fg_config_access_mode(chip, access, (offset + len) > 4);
+ burst_mode = chip->use_ima_single_mode ? false : ((offset + len) > 4);
+ rc = fg_config_access_mode(chip, access, burst_mode);
if (rc < 0) {
pr_err("failed to set memory access rc = %d\n", rc);
return rc;
@@ -583,7 +623,7 @@
if (rc < 0) {
count++;
if (rc == -EAGAIN) {
- pr_err("IMA access failed retry_count = %d\n", count);
+ pr_err("IMA read failed retry_count = %d\n", count);
goto retry;
}
pr_err("failed to read SRAM address rc = %d\n", rc);
@@ -667,8 +707,8 @@
rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
if (rc < 0) {
count++;
- if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- pr_err("IMA access failed retry_count = %d\n", count);
+ if (rc == -EAGAIN) {
+ pr_err("IMA write failed retry_count = %d\n", count);
goto retry;
}
pr_err("failed to write SRAM address rc = %d\n", rc);
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
index bf2827f..cd0b2fb 100644
--- a/drivers/power/supply/qcom/fg-reg.h
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -167,6 +167,7 @@
/* BATT_INFO_ESR_PULL_DN_CFG */
#define ESR_PULL_DOWN_IVAL_MASK GENMASK(3, 2)
+#define ESR_PULL_DOWN_IVAL_SHIFT 2
#define ESR_MEAS_CUR_60MA 0x0
#define ESR_MEAS_CUR_120MA 0x1
#define ESR_MEAS_CUR_180MA 0x2
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index 39a0dcb6..10a1c54 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -18,9 +18,9 @@
#include <linux/slab.h>
#include <linux/string.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
-#define NUM_MAX_CLIENTS 8
+#define NUM_MAX_CLIENTS 16
#define DEBUG_FORCE_CLIENT "DEBUG_FORCE_CLIENT"
static DEFINE_SPINLOCK(votable_list_slock);
@@ -188,6 +188,38 @@
}
/**
+ * is_client_vote_enabled() -
+ * is_client_vote_enabled_locked() -
+ * The unlocked and locked variants of checking whether a client's
+ * vote is enabled.
+ * @votable: the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ * True if the client's vote is enabled; false otherwise.
+ */
+bool is_client_vote_enabled_locked(struct votable *votable,
+ const char *client_str)
+{
+ int client_id = get_client_id(votable, client_str);
+
+ if (client_id < 0)
+ return false;
+
+ return votable->votes[client_id].enabled;
+}
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str)
+{
+ bool enabled;
+
+ lock_votable(votable);
+ enabled = is_client_vote_enabled_locked(votable, client_str);
+ unlock_votable(votable);
+ return enabled;
+}
+
+/**
* get_client_vote() -
* get_client_vote_locked() -
* The unlocked and locked variants of getting a client's voted
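A short usage sketch of the pmic-voter API touched above, assuming create_votable()/vote()/get_effective_result() from <linux/pmic-voter.h>; the votable name, client strings and callback are illustrative. With VOTE_MIN the lowest enabled vote wins, which is why USB_ICL uses it:

	static int example_cb(struct votable *v, void *data, int value,
			      const char *client)
	{
		pr_info("effective value now %d (set by %s)\n", value, client);
		return 0;
	}

	static int example_votable_usage(void *data)
	{
		struct votable *vt = create_votable("EXAMPLE_ICL", VOTE_MIN,
						    example_cb, data);

		if (IS_ERR(vt))
			return PTR_ERR(vt);

		vote(vt, "USER_VOTER", true, 1500000);
		vote(vt, "THERMAL_VOTER", true, 900000);

		/* VOTE_MIN: effective result is 900000 */
		return get_effective_result(vt);
	}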
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 304d0cf..806460f 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -31,6 +31,8 @@
#define FG_MEM_INFO_PMI8998 0x0D
/* SRAM address and offset in ascending order */
+#define ESR_PULSE_THRESH_WORD 2
+#define ESR_PULSE_THRESH_OFFSET 3
#define SLOPE_LIMIT_WORD 3
#define SLOPE_LIMIT_OFFSET 0
#define CUTOFF_VOLT_WORD 5
@@ -45,11 +47,14 @@
#define ESR_UPD_TIGHT_LOW_TEMP_OFFSET 2
#define ESR_UPD_BROAD_LOW_TEMP_OFFSET 3
#define KI_COEFF_MED_DISCHG_WORD 9
+#define TIMEBASE_OFFSET 1
#define KI_COEFF_MED_DISCHG_OFFSET 3
#define KI_COEFF_HI_DISCHG_WORD 10
#define KI_COEFF_HI_DISCHG_OFFSET 0
#define KI_COEFF_LOW_DISCHG_WORD 10
#define KI_COEFF_LOW_DISCHG_OFFSET 2
+#define KI_COEFF_FULL_SOC_WORD 12
+#define KI_COEFF_FULL_SOC_OFFSET 2
#define DELTA_MSOC_THR_WORD 12
#define DELTA_MSOC_THR_OFFSET 3
#define DELTA_BSOC_THR_WORD 13
@@ -126,6 +131,7 @@
#define RECHARGE_SOC_THR_v2_WORD 14
#define RECHARGE_SOC_THR_v2_OFFSET 1
#define CHG_TERM_CURR_v2_WORD 15
+#define CHG_TERM_BASE_CURR_v2_OFFSET 0
#define CHG_TERM_CURR_v2_OFFSET 1
#define EMPTY_VOLT_v2_WORD 15
#define EMPTY_VOLT_v2_OFFSET 3
@@ -216,12 +222,17 @@
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+ 1, 100000, 390625, 0, fg_encode_default, NULL),
PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
fg_encode_default, NULL),
PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
fg_encode_default, NULL),
+ PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_WORD,
+ KI_COEFF_FULL_SOC_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
1, 512, 1000000, 0, fg_encode_default, NULL),
PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
@@ -251,6 +262,8 @@
fg_decode_cc_soc),
PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
1, 1, 0, NULL, fg_decode_default),
+ PARAM(TIMEBASE, KI_COEFF_MED_DISCHG_WORD, TIMEBASE_OFFSET, 2, 1000,
+ 61000, 0, fg_encode_default, NULL),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -266,6 +279,9 @@
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
+ PARAM(CHG_TERM_BASE_CURR, CHG_TERM_CURR_v2_WORD,
+ CHG_TERM_BASE_CURR_v2_OFFSET, 1, 1024, 1000, 0,
+ fg_encode_current, NULL),
PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
1, 2048, 100, 0, fg_encode_default, NULL),
PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
@@ -286,12 +302,17 @@
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+ 1, 100000, 390625, 0, fg_encode_default, NULL),
PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
fg_encode_default, NULL),
PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
fg_encode_default, NULL),
+ PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_WORD,
+ KI_COEFF_FULL_SOC_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
1, 512, 1000000, 0, fg_encode_default, NULL),
PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
@@ -525,7 +546,7 @@
}
#define CC_SOC_30BIT GENMASK(29, 0)
-static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+static int fg_get_charge_raw(struct fg_chip *chip, int *val)
{
int rc, cc_soc;
@@ -539,7 +560,7 @@
return 0;
}
-static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+static int fg_get_charge_counter(struct fg_chip *chip, int *val)
{
int rc, cc_soc;
@@ -981,6 +1002,29 @@
};
}
+static inline void get_esr_meas_current(int curr_ma, u8 *val)
+{
+ switch (curr_ma) {
+ case 60:
+ *val = ESR_MEAS_CUR_60MA;
+ break;
+ case 120:
+ *val = ESR_MEAS_CUR_120MA;
+ break;
+ case 180:
+ *val = ESR_MEAS_CUR_180MA;
+ break;
+ case 240:
+ *val = ESR_MEAS_CUR_240MA;
+ break;
+ default:
+ *val = ESR_MEAS_CUR_120MA;
+ break;
+ }
+
+ *val <<= ESR_PULL_DOWN_IVAL_SHIFT;
+}
+
static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
int flags)
{
@@ -1054,6 +1098,25 @@
fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
}
+static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
+ int enable, const char *client)
+{
+ struct fg_chip *chip = data;
+
+ if (!chip->irqs[BSOC_DELTA_IRQ].irq)
+ return 0;
+
+ if (enable) {
+ enable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+ enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+ } else {
+ disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+ disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+ }
+
+ return 0;
+}
+
static int fg_awake_cb(struct votable *votable, void *data, int awake,
const char *client)
{
@@ -1241,7 +1304,7 @@
chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
}
-static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
{
int rc, cc_soc_sw, cc_soc_delta_pct;
int64_t delta_cc_uah;
@@ -1263,30 +1326,39 @@
return 0;
}
-static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+#define BATT_SOC_32BIT GENMASK(31, 0)
+static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
{
- int rc, cc_soc_sw;
+ int rc, cc_soc_sw, batt_soc_msb;
- if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+ batt_soc_msb = batt_soc >> 24;
+ if (DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW) >
chip->dt.cl_start_soc) {
fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
- batt_soc);
+ batt_soc_msb);
return -EINVAL;
}
- chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+ chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc_msb,
FULL_SOC_RAW);
- rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+
+ /* Prime cc_soc_sw with battery SOC when capacity learning begins */
+ cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
+ BATT_SOC_32BIT);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
if (rc < 0) {
- pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
- return rc;
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ goto out;
}
chip->cl.init_cc_soc_sw = cc_soc_sw;
chip->cl.active = true;
fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
- batt_soc, chip->cl.init_cc_soc_sw);
- return 0;
+ batt_soc_msb, chip->cl.init_cc_soc_sw);
+out:
+ return rc;
}
static int fg_cap_learning_done(struct fg_chip *chip)
@@ -1318,7 +1390,7 @@
#define FULL_SOC_RAW 255
static void fg_cap_learning_update(struct fg_chip *chip)
{
- int rc, batt_soc;
+ int rc, batt_soc, batt_soc_msb;
mutex_lock(&chip->cl.lock);
@@ -1337,11 +1409,9 @@
goto out;
}
- /* We need only the most significant byte here */
- batt_soc = (u32)batt_soc >> 24;
-
+ batt_soc_msb = (u32)batt_soc >> 24;
fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
- chip->charge_status, chip->cl.active, batt_soc);
+ chip->charge_status, chip->cl.active, batt_soc_msb);
/* Initialize the starting point of learning capacity */
if (!chip->cl.active) {
@@ -1363,7 +1433,7 @@
if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
- batt_soc);
+ batt_soc_msb);
chip->cl.active = false;
chip->cl.init_cc_uah = 0;
}
@@ -1427,6 +1497,37 @@
return 0;
}
+#define KI_COEFF_FULL_SOC_DEFAULT 733
+static int fg_adjust_ki_coeff_full_soc(struct fg_chip *chip, int batt_temp)
+{
+ int rc, ki_coeff_full_soc;
+ u8 val;
+
+ if (batt_temp < 0)
+ ki_coeff_full_soc = 0;
+ else
+ ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT;
+
+ if (chip->ki_coeff_full_soc == ki_coeff_full_soc)
+ return 0;
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_FULL_SOC, ki_coeff_full_soc, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_full_soc, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->ki_coeff_full_soc = ki_coeff_full_soc;
+ fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_full_soc %d\n",
+ ki_coeff_full_soc);
+ return 0;
+}
+
static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
{
u8 buf;
@@ -1470,16 +1571,8 @@
return 0;
mutex_lock(&chip->charge_full_lock);
- if (!chip->charge_done && chip->bsoc_delta_irq_en) {
- disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
- disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
- chip->bsoc_delta_irq_en = false;
- } else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
- enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
- enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
- chip->bsoc_delta_irq_en = true;
- }
-
+ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER,
+ chip->charge_done, 0);
rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
&prop);
if (rc < 0) {
@@ -1598,6 +1691,9 @@
u64 scaling_factor;
u32 val = 0;
+ if (!chip->dt.rconn_mohms)
+ return 0;
+
rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -1696,6 +1792,9 @@
if (!chip->dt.auto_recharge_soc)
return 0;
+ if (recharge_soc < 0 || recharge_soc > FULL_CAPACITY)
+ return 0;
+
fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
rc = fg_sram_write(chip,
chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
@@ -1712,46 +1811,55 @@
static int fg_adjust_recharge_soc(struct fg_chip *chip)
{
int rc, msoc, recharge_soc, new_recharge_soc = 0;
+ bool recharge_soc_status;
if (!chip->dt.auto_recharge_soc)
return 0;
recharge_soc = chip->dt.recharge_soc_thr;
+ recharge_soc_status = chip->recharge_soc_adjusted;
/*
* If the input is present and charging had been terminated, adjust
* the recharge SOC threshold based on the monotonic SOC at which
* the charge termination had happened.
*/
- if (is_input_present(chip) && !chip->recharge_soc_adjusted
- && chip->charge_done) {
- /* Get raw monotonic SOC for calculation */
- rc = fg_get_msoc(chip, &msoc);
- if (rc < 0) {
- pr_err("Error in getting msoc, rc=%d\n", rc);
- return rc;
- }
+ if (is_input_present(chip)) {
+ if (chip->charge_done) {
+ if (!chip->recharge_soc_adjusted) {
+ /* Get raw monotonic SOC for calculation */
+ rc = fg_get_msoc(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting msoc, rc=%d\n",
+ rc);
+ return rc;
+ }
- /* Adjust the recharge_soc threshold */
- new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
- } else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
- || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ /* Adjust the recharge_soc threshold */
+ new_recharge_soc = msoc - (FULL_CAPACITY -
+ recharge_soc);
+ chip->recharge_soc_adjusted = true;
+ } else {
+ /* adjusted already, do nothing */
+ return 0;
+ }
+ } else {
+ /* Charging, do nothing */
+ return 0;
+ }
+ } else {
/* Restore the default value */
new_recharge_soc = recharge_soc;
+ chip->recharge_soc_adjusted = false;
}
- if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
- rc = fg_set_recharge_soc(chip, new_recharge_soc);
- if (rc) {
- pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
- return rc;
- }
-
- chip->recharge_soc_adjusted = (new_recharge_soc !=
- recharge_soc);
- fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
- new_recharge_soc);
+ rc = fg_set_recharge_soc(chip, new_recharge_soc);
+ if (rc < 0) {
+ chip->recharge_soc_adjusted = recharge_soc_status;
+ pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+ return rc;
}
+ fg_dbg(chip, FG_STATUS, "resume soc set to %d\n", new_recharge_soc);
return 0;
}
@@ -1862,7 +1970,7 @@
{
union power_supply_propval prop = {0, };
int rc;
- bool parallel_en = false;
+ bool parallel_en = false, qnovo_en = false;
if (is_parallel_charger_available(chip)) {
rc = power_supply_get_property(chip->parallel_psy,
@@ -1875,19 +1983,25 @@
parallel_en = prop.intval;
}
- fg_dbg(chip, FG_POWER_SUPPLY, "charge_status: %d parallel_en: %d esr_fcc_ctrl_en: %d\n",
- chip->charge_status, parallel_en, chip->esr_fcc_ctrl_en);
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &prop);
+ if (!rc)
+ qnovo_en = prop.intval;
+
+ fg_dbg(chip, FG_POWER_SUPPLY, "chg_sts: %d par_en: %d qnov_en: %d esr_fcc_ctrl_en: %d\n",
+ chip->charge_status, parallel_en, qnovo_en,
+ chip->esr_fcc_ctrl_en);
if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
- parallel_en) {
+ (parallel_en || qnovo_en)) {
if (chip->esr_fcc_ctrl_en)
return 0;
/*
- * When parallel charging is enabled, configure ESR FCC to
- * 300mA to trigger an ESR pulse. Without this, FG can ask
- * the main charger to increase FCC when it is supposed to
- * decrease it.
+ * When parallel charging or Qnovo is enabled, configure ESR
+ * FCC to 300mA to trigger an ESR pulse. Without this, FG can
+ * request the main charger to increase FCC when it is supposed
+ * to decrease it.
*/
rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
ESR_FAST_CRG_IVAL_MASK |
@@ -1906,8 +2020,8 @@
/*
* If we're here, then it means either the device is not in
- * charging state or parallel charging is disabled. Disable
- * ESR fast charge current control in SW.
+ * charging state or parallel charging / Qnovo is disabled.
+ * Disable ESR fast charge current control in SW.
*/
rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
ESR_FAST_CRG_CTL_EN_BIT, 0);
@@ -2004,6 +2118,11 @@
if (rc < 0)
pr_err("Error in configuring slope limiter rc:%d\n",
rc);
+
+ rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
+ if (rc < 0)
+ pr_err("Error in configuring ki_coeff_full_soc rc:%d\n",
+ rc);
}
fg_batt_avg_update(chip);
@@ -2156,6 +2275,35 @@
return count;
}
+static int fg_bp_params_config(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 buf;
+
+ /* This SRAM register is only present in v2.0 and above */
+ if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
+ chip->bp.float_volt_uv > 0) {
+ fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+ chip->bp.float_volt_uv / 1000, &buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+ chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, &buf,
+ chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing float_volt, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (chip->bp.vbatt_full_mv > 0) {
+ rc = fg_set_constant_chg_voltage(chip,
+ chip->bp.vbatt_full_mv * 1000);
+ if (rc < 0)
+ return rc;
+ }
+
+ return rc;
+}
+
#define PROFILE_LOAD_BIT BIT(0)
#define BOOTLOADER_LOAD_BIT BIT(1)
#define BOOTLOADER_RESTART_BIT BIT(2)
@@ -2176,6 +2324,17 @@
/* Check if integrity bit is set */
if (val & PROFILE_LOAD_BIT) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+
+ /* Whitelist the values */
+ val &= ~PROFILE_LOAD_BIT;
+ if (val != HLOS_RESTART_BIT && val != BOOTLOADER_LOAD_BIT &&
+ val != (BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT)) {
+ val |= PROFILE_LOAD_BIT;
+ pr_warn("Garbage value in profile integrity word: 0x%x\n",
+ val);
+ return true;
+ }
+
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -2323,6 +2482,11 @@
}
done:
+ rc = fg_bp_params_config(chip);
+ if (rc < 0)
+ pr_err("Error in configuring battery profile params, rc:%d\n",
+ rc);
+
rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
FG_IMA_DEFAULT);
if (rc < 0) {
@@ -2806,7 +2970,7 @@
pval->intval = chip->cyc_ctr.id;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
- rc = fg_get_cc_soc(chip, &pval->intval);
+ rc = fg_get_charge_raw(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
pval->intval = chip->cl.init_cc_uah;
@@ -2815,7 +2979,7 @@
pval->intval = chip->cl.learned_cc_uah;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
- rc = fg_get_cc_soc_sw(chip, &pval->intval);
+ rc = fg_get_charge_counter(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
rc = fg_get_time_to_full(chip, &pval->intval);
@@ -2974,27 +3138,6 @@
return rc;
}
- /* This SRAM register is only present in v2.0 and above */
- if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
- chip->bp.float_volt_uv > 0) {
- fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
- chip->bp.float_volt_uv / 1000, buf);
- rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
- chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
- chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
- if (rc < 0) {
- pr_err("Error in writing float_volt, rc=%d\n", rc);
- return rc;
- }
- }
-
- if (chip->bp.vbatt_full_mv > 0) {
- rc = fg_set_constant_chg_voltage(chip,
- chip->bp.vbatt_full_mv * 1000);
- if (rc < 0)
- return rc;
- }
-
fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -3015,6 +3158,21 @@
return rc;
}
+ if (!(chip->wa_flags & PMI8998_V1_REV_WA)) {
+ fg_encode(chip->sp, FG_SRAM_CHG_TERM_BASE_CURR,
+ chip->dt.chg_term_base_curr_ma, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].addr_word,
+ chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].addr_byte,
+ buf, chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing chg_term_base_curr, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
if (chip->dt.vbatt_low_thr_mv > 0) {
fg_encode(chip->sp, FG_SRAM_VBATT_LOW,
chip->dt.vbatt_low_thr_mv, buf);
@@ -3164,12 +3322,10 @@
return rc;
}
- if (chip->dt.rconn_mohms > 0) {
- rc = fg_rconn_config(chip);
- if (rc < 0) {
- pr_err("Error in configuring Rconn, rc=%d\n", rc);
- return rc;
- }
+ rc = fg_rconn_config(chip);
+ if (rc < 0) {
+ pr_err("Error in configuring Rconn, rc=%d\n", rc);
+ return rc;
}
fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
@@ -3192,6 +3348,24 @@
return rc;
}
+ fg_encode(chip->sp, FG_SRAM_ESR_PULSE_THRESH,
+ chip->dt.esr_pulse_thresh_ma, buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_word,
+ chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_byte, buf,
+ chip->sp[FG_SRAM_ESR_PULSE_THRESH].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing esr_pulse_thresh_ma, rc=%d\n", rc);
+ return rc;
+ }
+
+ get_esr_meas_current(chip->dt.esr_meas_curr_ma, &val);
+ rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+ ESR_PULL_DOWN_IVAL_MASK, val);
+ if (rc < 0) {
+ pr_err("Error in writing esr_meas_curr_ma, rc=%d\n", rc);
+ return rc;
+ }
+
return 0;
}
@@ -3200,6 +3374,40 @@
return fg_ima_init(chip);
}
+static int fg_adjust_timebase(struct fg_chip *chip)
+{
+ int rc = 0, die_temp;
+ s32 time_base = 0;
+ u8 buf[2] = {0};
+
+ if ((chip->wa_flags & PM660_TSMC_OSC_WA) && chip->die_temp_chan) {
+ rc = iio_read_channel_processed(chip->die_temp_chan, &die_temp);
+ if (rc < 0) {
+ pr_err("Error in reading die_temp, rc:%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_lerp(fg_tsmc_osc_table, ARRAY_SIZE(fg_tsmc_osc_table),
+ die_temp / 1000, &time_base);
+ if (rc < 0) {
+ pr_err("Error to lookup fg_tsmc_osc_table rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_encode(chip->sp, FG_SRAM_TIMEBASE, time_base, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_TIMEBASE].addr_word,
+ chip->sp[FG_SRAM_TIMEBASE].addr_byte, buf,
+ chip->sp[FG_SRAM_TIMEBASE].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing timebase, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
/* INTERRUPT HANDLERS STAY HERE */
static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
@@ -3216,20 +3424,19 @@
}
fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
- if (status & MEM_XCP_BIT) {
- rc = fg_clear_dma_errors_if_any(chip);
- if (rc < 0) {
- pr_err("Error in clearing DMA error, rc=%d\n", rc);
- return IRQ_HANDLED;
- }
- mutex_lock(&chip->sram_rw_lock);
+ mutex_lock(&chip->sram_rw_lock);
+ rc = fg_clear_dma_errors_if_any(chip);
+ if (rc < 0)
+ pr_err("Error in clearing DMA error, rc=%d\n", rc);
+
+ if (status & MEM_XCP_BIT) {
rc = fg_clear_ima_errors_if_any(chip, true);
if (rc < 0 && rc != -EAGAIN)
pr_err("Error in checking IMA errors rc:%d\n", rc);
- mutex_unlock(&chip->sram_rw_lock);
}
+ mutex_unlock(&chip->sram_rw_lock);
return IRQ_HANDLED;
}
@@ -3308,6 +3515,10 @@
if (rc < 0)
pr_err("Error in configuring slope limiter rc:%d\n", rc);
+ rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
+ if (rc < 0)
+ pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", rc);
+
if (!batt_psy_initialized(chip)) {
chip->last_batt_temp = batt_temp;
return IRQ_HANDLED;
@@ -3318,6 +3529,10 @@
chip->health = prop.intval;
if (chip->last_batt_temp != batt_temp) {
+ rc = fg_adjust_timebase(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting timebase, rc=%d\n", rc);
+
chip->last_batt_temp = batt_temp;
power_supply_changed(chip->batt_psy);
}
@@ -3387,6 +3602,10 @@
if (rc < 0)
pr_err("Error in validating ESR, rc=%d\n", rc);
+ rc = fg_adjust_timebase(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting timebase, rc=%d\n", rc);
+
if (batt_psy_initialized(chip))
power_supply_changed(chip->batt_psy);
@@ -3654,6 +3873,7 @@
#define DEFAULT_EMPTY_VOLT_MV 2800
#define DEFAULT_RECHARGE_VOLT_MV 4250
#define DEFAULT_CHG_TERM_CURR_MA 100
+#define DEFAULT_CHG_TERM_BASE_CURR_MA 75
#define DEFAULT_SYS_TERM_CURR_MA -125
#define DEFAULT_DELTA_SOC_THR 1
#define DEFAULT_RECHARGE_SOC_THR 95
@@ -3676,6 +3896,8 @@
#define DEFAULT_ESR_TIGHT_LT_FLT_UPCT 48829
#define DEFAULT_ESR_BROAD_LT_FLT_UPCT 148438
#define DEFAULT_ESR_CLAMP_MOHMS 20
+#define DEFAULT_ESR_PULSE_THRESH_MA 110
+#define DEFAULT_ESR_MEAS_CURR_MA 120
static int fg_parse_dt(struct fg_chip *chip)
{
struct device_node *child, *revid_node, *node = chip->dev->of_node;
@@ -3725,6 +3947,9 @@
case PM660_SUBTYPE:
chip->sp = pmi8998_v2_sram_params;
chip->alg_flags = pmi8998_v2_alg_flags;
+ chip->use_ima_single_mode = true;
+ if (chip->pmic_rev_id->fab_id == PM660_FAB_ID_TSMC)
+ chip->wa_flags |= PM660_TSMC_OSC_WA;
break;
default:
return -EINVAL;
@@ -3805,6 +4030,12 @@
else
chip->dt.sys_term_curr_ma = temp;
+ rc = of_property_read_u32(node, "qcom,fg-chg-term-base-current", &temp);
+ if (rc < 0)
+ chip->dt.chg_term_base_curr_ma = DEFAULT_CHG_TERM_BASE_CURR_MA;
+ else
+ chip->dt.chg_term_base_curr_ma = temp;
+
rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp);
if (rc < 0)
chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR;
@@ -3945,9 +4176,7 @@
pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
- if (rc < 0)
- chip->dt.rconn_mohms = -EINVAL;
- else
+ if (!rc)
chip->dt.rconn_mohms = temp;
rc = of_property_read_u32(node, "qcom,fg-esr-filter-switch-temp",
@@ -3995,6 +4224,22 @@
else
chip->dt.esr_clamp_mohms = temp;
+ chip->dt.esr_pulse_thresh_ma = DEFAULT_ESR_PULSE_THRESH_MA;
+ rc = of_property_read_u32(node, "qcom,fg-esr-pulse-thresh-ma", &temp);
+ if (!rc) {
+ /* ESR pulse qualification threshold range is 1-997 mA */
+ if (temp > 0 && temp < 997)
+ chip->dt.esr_pulse_thresh_ma = temp;
+ }
+
+ chip->dt.esr_meas_curr_ma = DEFAULT_ESR_MEAS_CURR_MA;
+ rc = of_property_read_u32(node, "qcom,fg-esr-meas-curr-ma", &temp);
+ if (!rc) {
+ /* ESR measurement current range is 60-240 mA */
+ if (temp >= 60 && temp <= 240)
+ chip->dt.esr_meas_curr_ma = temp;
+ }
+
return 0;
}
@@ -4005,6 +4250,9 @@
if (chip->awake_votable)
destroy_votable(chip->awake_votable);
+ if (chip->delta_bsoc_irq_en_votable)
+ destroy_votable(chip->delta_bsoc_irq_en_votable);
+
if (chip->batt_id_chan)
iio_channel_release(chip->batt_id_chan);
@@ -4026,6 +4274,7 @@
chip->irqs = fg_irqs;
chip->charge_status = -EINVAL;
chip->prev_charge_status = -EINVAL;
+ chip->ki_coeff_full_soc = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
dev_err(chip->dev, "Parent regmap is unavailable\n");
@@ -4042,11 +4291,34 @@
return rc;
}
+ rc = of_property_match_string(chip->dev->of_node,
+ "io-channel-names", "rradc_die_temp");
+ if (rc >= 0) {
+ chip->die_temp_chan = iio_channel_get(chip->dev,
+ "rradc_die_temp");
+ if (IS_ERR(chip->die_temp_chan)) {
+ if (PTR_ERR(chip->die_temp_chan) != -EPROBE_DEFER)
+ pr_err("rradc_die_temp unavailable %ld\n",
+ PTR_ERR(chip->die_temp_chan));
+ rc = PTR_ERR(chip->die_temp_chan);
+ chip->die_temp_chan = NULL;
+ return rc;
+ }
+ }
+
chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
chip);
if (IS_ERR(chip->awake_votable)) {
rc = PTR_ERR(chip->awake_votable);
- return rc;
+ goto exit;
+ }
+
+ chip->delta_bsoc_irq_en_votable = create_votable("FG_DELTA_BSOC_IRQ",
+ VOTE_SET_ANY,
+ fg_delta_bsoc_irq_en_cb, chip);
+ if (IS_ERR(chip->delta_bsoc_irq_en_votable)) {
+ rc = PTR_ERR(chip->delta_bsoc_irq_en_votable);
+ goto exit;
}
rc = fg_parse_dt(chip);
@@ -4073,7 +4345,7 @@
rc = fg_get_batt_id(chip);
if (rc < 0) {
pr_err("Error in getting battery id, rc:%d\n", rc);
- return rc;
+ goto exit;
}
rc = fg_get_batt_profile(chip);
@@ -4131,11 +4403,7 @@
disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
/* Keep BSOC_DELTA_IRQ irq disabled until we require it */
- if (fg_irqs[BSOC_DELTA_IRQ].irq) {
- disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
- disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
- chip->bsoc_delta_irq_en = false;
- }
+ rerun_election(chip->delta_bsoc_irq_en_votable);
rc = fg_debugfs_create(chip);
if (rc < 0) {
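The cc_soc_sw priming in fg_cap_learning_begin() above rescales a 32-bit fixed-point SOC fraction into the 30-bit CC_SOC domain. A worked sketch with an illustrative 25% SOC, reusing the BATT_SOC_32BIT/CC_SOC_30BIT masks defined in this file:

	#include <linux/math64.h>

	static void example_prime_cc_soc(void)
	{
		u32 batt_soc = BATT_SOC_32BIT / 4;	/* 25% of full scale */
		int cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
					  BATT_SOC_32BIT);

		/* ~25% of the 30-bit scale, i.e. about CC_SOC_30BIT / 4 */
		pr_info("cc_soc_sw primed to %d\n", cc_soc_sw);
	}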
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index cbfab30..eb97eb0 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -19,7 +19,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/qpnp/qpnp-revid.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
#define QNOVO_REVISION1 0x00
#define QNOVO_REVISION2 0x01
@@ -29,6 +29,8 @@
#define QNOVO_PTRAIN_STS 0x08
#define QNOVO_ERROR_STS 0x09
#define QNOVO_ERROR_BIT BIT(0)
+#define QNOVO_ERROR_STS2 0x0A
+#define QNOVO_ERROR_CHARGING_DISABLED BIT(1)
#define QNOVO_INT_RT_STS 0x10
#define QNOVO_INT_SET_TYPE 0x11
#define QNOVO_INT_POLARITY_HIGH 0x12
@@ -87,7 +89,16 @@
#define QNOVO_STRM_CTRL 0xA8
#define QNOVO_IADC_OFFSET_OVR_VAL 0xA9
#define QNOVO_IADC_OFFSET_OVR 0xAA
+
#define QNOVO_DISABLE_CHARGING 0xAB
+#define ERR_SWITCHER_DISABLED BIT(7)
+#define ERR_JEITA_SOFT_CONDITION BIT(6)
+#define ERR_BAT_OV BIT(5)
+#define ERR_CV_MODE BIT(4)
+#define ERR_BATTERY_MISSING BIT(3)
+#define ERR_SAFETY_TIMER_EXPIRED BIT(2)
+#define ERR_CHARGING_DISABLED BIT(1)
+#define ERR_JEITA_HARD_CONDITION BIT(0)
#define QNOVO_TR_IADC_OFFSET_0 0xF1
#define QNOVO_TR_IADC_OFFSET_1 0xF2
@@ -109,20 +120,6 @@
struct device_node *revid_dev_node;
};
-enum {
- QNOVO_NO_ERR_STS_BIT = BIT(0),
-};
-
-struct chg_props {
- bool charging;
- bool usb_online;
- bool dc_online;
-};
-
-struct chg_status {
- bool ok_to_qnovo;
-};
-
struct qnovo {
int base;
struct mutex write_lock;
@@ -141,13 +138,10 @@
s64 v_gain_mega;
struct notifier_block nb;
struct power_supply *batt_psy;
- struct power_supply *usb_psy;
- struct power_supply *dc_psy;
- struct chg_props cp;
- struct chg_status cs;
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
+ bool ok_to_qnovo;
};
static int debug_mask;
@@ -272,28 +266,22 @@
const char *client)
{
struct qnovo *chip = data;
- int rc = 0;
+ union power_supply_propval pval = {0};
+ int rc;
- if (disable) {
- rc = qnovo_batt_psy_update(chip, true);
- if (rc < 0)
- return rc;
- }
+ if (!is_batt_available(chip))
+ return -EINVAL;
- rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
- disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+ pval.intval = !disable;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ &pval);
if (rc < 0) {
- dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
- disable ? "disable" : "enable", rc);
- return rc;
+ pr_err("Couldn't set prop qnovo_enable rc = %d\n", rc);
+ return -EINVAL;
}
- if (!disable) {
- rc = qnovo_batt_psy_update(chip, false);
- if (rc < 0)
- return rc;
- }
-
+ rc = qnovo_batt_psy_update(chip, disable);
return rc;
}
@@ -325,36 +313,18 @@
return 0;
}
-static int qnovo_check_chg_version(struct qnovo *chip)
-{
- int rc;
-
- chip->pmic_rev_id = get_revid_data(chip->dt.revid_dev_node);
- if (IS_ERR(chip->pmic_rev_id)) {
- rc = PTR_ERR(chip->pmic_rev_id);
- if (rc != -EPROBE_DEFER)
- pr_err("Unable to get pmic_revid rc=%d\n", rc);
- return rc;
- }
-
- if ((chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
- && (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4)) {
- chip->wa_flags |= QNOVO_NO_ERR_STS_BIT;
- }
-
- return 0;
-}
-
enum {
VER = 0,
OK_TO_QNOVO,
- ENABLE,
+ QNOVO_ENABLE,
+ PT_ENABLE,
FV_REQUEST,
FCC_REQUEST,
PE_CTRL_REG,
PE_CTRL2_REG,
PTRAIN_STS_REG,
INT_RT_STS_REG,
+ ERR_STS2_REG,
PREST1,
PPULS1,
NREST1,
@@ -394,6 +364,12 @@
};
static struct param_info params[] = {
+ [PT_ENABLE] = {
+ .name = "PT_ENABLE",
+ .start_addr = QNOVO_PTRAIN_EN,
+ .num_regs = 1,
+ .units_str = "",
+ },
[FV_REQUEST] = {
.units_str = "uV",
},
@@ -424,6 +400,12 @@
.num_regs = 1,
.units_str = "",
},
+ [ERR_STS2_REG] = {
+ .name = "RAW_CHGR_ERR",
+ .start_addr = QNOVO_ERROR_STS2,
+ .num_regs = 1,
+ .units_str = "",
+ },
[PREST1] = {
.name = "PREST1",
.start_addr = QNOVO_PREST1_CTRL,
@@ -431,7 +413,7 @@
.reg_to_unit_multiplier = 5,
.reg_to_unit_divider = 1,
.min_val = 5,
- .max_val = 1275,
+ .max_val = 255,
.units_str = "mS",
},
[PPULS1] = {
@@ -440,8 +422,8 @@
.num_regs = 2,
.reg_to_unit_multiplier = 1600, /* converts to uC */
.reg_to_unit_divider = 1,
- .min_val = 0,
- .max_val = 104856000,
+ .min_val = 30000,
+ .max_val = 65535000,
.units_str = "uC",
},
[NREST1] = {
@@ -451,7 +433,7 @@
.reg_to_unit_multiplier = 5,
.reg_to_unit_divider = 1,
.min_val = 5,
- .max_val = 1275,
+ .max_val = 255,
.units_str = "mS",
},
[NPULS1] = {
@@ -460,8 +442,8 @@
.num_regs = 1,
.reg_to_unit_multiplier = 5,
.reg_to_unit_divider = 1,
- .min_val = 5,
- .max_val = 1275,
+ .min_val = 0,
+ .max_val = 255,
.units_str = "mS",
},
[PPCNT] = {
@@ -470,7 +452,7 @@
.num_regs = 1,
.reg_to_unit_multiplier = 1,
.reg_to_unit_divider = 1,
- .min_val = 0,
+ .min_val = 1,
.max_val = 255,
.units_str = "pulses",
},
@@ -480,8 +462,8 @@
.num_regs = 2,
.reg_to_unit_multiplier = 610350, /* converts to nV */
.reg_to_unit_divider = 1,
- .min_val = 0,
- .max_val = 5000000,
+ .min_val = 2200000,
+ .max_val = 4500000,
.units_str = "uV",
},
[PVOLT1] = {
@@ -506,8 +488,6 @@
.num_regs = 1,
.reg_to_unit_multiplier = 2,
.reg_to_unit_divider = 1,
- .min_val = 5,
- .max_val = 1275,
.units_str = "S",
},
[PREST2] = {
@@ -517,7 +497,7 @@
.reg_to_unit_multiplier = 5,
.reg_to_unit_divider = 1,
.min_val = 5,
- .max_val = 327675,
+ .max_val = 65535,
.units_str = "mS",
},
[PPULS2] = {
@@ -526,8 +506,8 @@
.num_regs = 2,
.reg_to_unit_multiplier = 1600, /* converts to uC */
.reg_to_unit_divider = 1,
- .min_val = 0,
- .max_val = 104856000,
+ .min_val = 30000,
+ .max_val = 65535000,
.units_str = "uC",
},
[NREST2] = {
@@ -538,7 +518,7 @@
.reg_to_unit_divider = 1,
.reg_to_unit_offset = -5,
.min_val = 5,
- .max_val = 1280,
+ .max_val = 255,
.units_str = "mS",
},
[NPULS2] = {
@@ -547,18 +527,18 @@
.num_regs = 1,
.reg_to_unit_multiplier = 5,
.reg_to_unit_divider = 1,
- .min_val = 5,
- .max_val = 1275,
+ .min_val = 0,
+ .max_val = 255,
.units_str = "mS",
},
[VLIM2] = {
- .name = "VLIM1",
+ .name = "VLIM2",
.start_addr = QNOVO_VLIM2_LSB_CTRL,
.num_regs = 2,
.reg_to_unit_multiplier = 610350, /* converts to nV */
.reg_to_unit_divider = 1,
- .min_val = 0,
- .max_val = 5000000,
+ .min_val = 2200000,
+ .max_val = 4500000,
.units_str = "uV",
},
[PVOLT2] = {
@@ -591,6 +571,8 @@
.num_regs = 1,
.reg_to_unit_multiplier = 1,
.reg_to_unit_divider = 1,
+ .min_val = 0,
+ .max_val = 255,
.units_str = "pulses",
},
[VMAX] = {
@@ -645,33 +627,73 @@
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->cs.ok_to_qnovo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
}
-static ssize_t enable_show(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
char *ubuf)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- int val;
+ int val = get_effective_result(chip->disable_votable);
- val = get_client_vote(chip->disable_votable, USER_VOTER);
- val = !val;
- return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
}
-static ssize_t enable_store(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
const char *ubuf, size_t count)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
unsigned long val;
- bool disable;
- if (kstrtoul(ubuf, 10, &val))
+ if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
- disable = !val;
+ vote(chip->disable_votable, USER_VOTER, !val, 0);
- vote(chip->disable_votable, USER_VOTER, disable, 0);
+ return count;
+}
+
+static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
+ char *ubuf)
+{
+ int i = attr - qnovo_attributes;
+ struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ u8 buf[2] = {0, 0};
+ u16 regval;
+ int rc;
+
+ rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+ if (rc < 0) {
+ pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+ return -EINVAL;
+ }
+ regval = buf[1] << 8 | buf[0];
+
+ return snprintf(ubuf, PAGE_SIZE, "%d\n",
+ (int)(regval & QNOVO_PTRAIN_EN_BIT));
+}
+
+static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
+ const char *ubuf, size_t count)
+{
+ struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ unsigned long val;
+ int rc = 0;
+
+ if (get_effective_result(chip->disable_votable))
+ return -EINVAL;
+
+ if (kstrtoul(ubuf, 0, &val))
+ return -EINVAL;
+
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+ (bool)val ? "enable" : "disable", rc);
+ return rc;
+ }
+
return count;
}
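
The two new class attributes are plain sysfs files, so the enable flow can be exercised from user space with ordinary file I/O. A minimal sketch, assuming the class files land under /sys/class/qnovo/ (the exact node path is platform-dependent):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        /* qnovo_enable clears the USER_VOTER disable vote first ... */
        if (write_attr("/sys/class/qnovo/qnovo_enable", "1"))
                perror("qnovo_enable");
        /* ... pt_enable_store() returns -EINVAL while qnovo is disabled */
        if (write_attr("/sys/class/qnovo/pt_enable", "1"))
                perror("pt_enable");
        return 0;
}

Note that pt_enable_store() deliberately rejects writes while the disable votable is in effect, so the order above matters.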
@@ -688,7 +710,7 @@
if (i == FCC_REQUEST)
val = chip->fcc_uA_request;
- return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
}
static ssize_t val_store(struct class *c, struct class_attribute *attr,
@@ -698,7 +720,7 @@
int i = attr - qnovo_attributes;
unsigned long val;
- if (kstrtoul(ubuf, 10, &val))
+ if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
if (i == FV_REQUEST)
@@ -707,6 +729,9 @@
if (i == FCC_REQUEST)
chip->fcc_uA_request = val;
+ if (!get_effective_result(chip->disable_votable))
+ qnovo_batt_psy_update(chip, false);
+
return count;
}
@@ -726,8 +751,7 @@
}
regval = buf[1] << 8 | buf[0];
- return snprintf(ubuf, PAGE_SIZE, "0x%04x%s\n",
- regval, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "0x%04x\n", regval);
}
static ssize_t reg_store(struct class *c, struct class_attribute *attr,
@@ -739,7 +763,7 @@
unsigned long val;
int rc;
- if (kstrtoul(ubuf, 16, &val))
+ if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
buf[0] = val & 0xFF;
@@ -774,7 +798,7 @@
/ params[i].reg_to_unit_divider)
- params[i].reg_to_unit_offset;
- return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
}
static ssize_t time_store(struct class *c, struct class_attribute *attr,
@@ -787,7 +811,7 @@
unsigned long val;
int rc;
- if (kstrtoul(ubuf, 10, &val))
+ if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
if (val < params[i].min_val || val > params[i].max_val) {
@@ -828,7 +852,11 @@
pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
return -EINVAL;
}
- regval_nA = buf[1] << 8 | buf[0];
+
+ if (buf[1] & BIT(5))
+ buf[1] |= GENMASK(7, 6);
+
+ regval_nA = (s16)(buf[1] << 8 | buf[0]);
regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
params[i].reg_to_unit_divider)
- params[i].reg_to_unit_offset;
@@ -841,11 +869,10 @@
gain = chip->internal_i_gain_mega;
}
- comp_val_nA = div_s64(regval_nA * gain, 1000000) + offset_nA;
+ comp_val_nA = div_s64(regval_nA * gain, 1000000) - offset_nA;
comp_val_uA = div_s64(comp_val_nA, 1000);
- return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
- comp_val_uA, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uA);
}
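
The hunk above sign-extends a 14-bit two's complement sample (sign in bit 13, i.e. BIT(5) of the high byte) into a signed 16-bit value before scaling. The same arithmetic as a standalone sketch, with BIT()/GENMASK() expanded into plain C:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 14-bit two's complement sample held in hi:lo. */
static int16_t sample_to_s16(uint8_t lo, uint8_t hi)
{
        if (hi & (1u << 5))     /* bit 13 is the sign bit */
                hi |= 0xC0;     /* propagate it into bits 14..15 */
        return (int16_t)(((uint16_t)hi << 8) | lo);
}

int main(void)
{
        printf("%d\n", sample_to_s16(0xFF, 0x3F)); /* 0x3FFF -> -1 */
        printf("%d\n", sample_to_s16(0x00, 0x20)); /* 0x2000 -> -8192 */
        printf("%d\n", sample_to_s16(0xFF, 0x1F)); /* 0x1FFF -> 8191 */
        return 0;
}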
static ssize_t voltage_show(struct class *c, struct class_attribute *attr,
@@ -875,8 +902,7 @@
comp_val_nV = div_s64(regval_nV * gain, 1000000) + offset_nV;
comp_val_uV = div_s64(comp_val_nV, 1000);
- return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
- comp_val_uV, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uV);
}
static ssize_t voltage_store(struct class *c, struct class_attribute *attr,
@@ -890,7 +916,7 @@
s64 regval_nV;
s64 gain, offset_nV;
- if (kstrtoul(ubuf, 10, &val_uV))
+ if (kstrtoul(ubuf, 0, &val_uV))
return -EINVAL;
if (val_uV < params[i].min_val || val_uV > params[i].max_val) {
@@ -947,8 +973,7 @@
gain = chip->internal_i_gain_mega;
comp_val_uC = div_s64(regval_uC * gain, 1000000);
- return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
- comp_val_uC, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uC);
}
static ssize_t coulomb_store(struct class *c, struct class_attribute *attr,
@@ -962,7 +987,7 @@
s64 regval;
s64 gain;
- if (kstrtoul(ubuf, 10, &val_uC))
+ if (kstrtoul(ubuf, 0, &val_uC))
return -EINVAL;
if (val_uC < params[i].min_val || val_uC > params[i].max_val) {
@@ -1014,15 +1039,14 @@
return -EINVAL;
}
- return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
- pval.intval, params[i].units_str);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", pval.intval);
}
static struct class_attribute qnovo_attributes[] = {
[VER] = __ATTR_RO(version),
[OK_TO_QNOVO] = __ATTR_RO(ok_to_qnovo),
- [ENABLE] = __ATTR(enable, 0644,
- enable_show, enable_store),
+ [QNOVO_ENABLE] = __ATTR_RW(qnovo_enable),
+ [PT_ENABLE] = __ATTR_RW(pt_enable),
[FV_REQUEST] = __ATTR(fv_uV_request, 0644,
val_show, val_store),
[FCC_REQUEST] = __ATTR(fcc_uA_request, 0644,
@@ -1031,10 +1055,12 @@
reg_show, reg_store),
[PE_CTRL2_REG] = __ATTR(PE_CTRL2_REG, 0644,
reg_show, reg_store),
- [PTRAIN_STS_REG] = __ATTR(PTRAIN_STS_REG, 0644,
- reg_show, reg_store),
- [INT_RT_STS_REG] = __ATTR(INT_RT_STS_REG, 0644,
- reg_show, reg_store),
+ [PTRAIN_STS_REG] = __ATTR(PTRAIN_STS_REG, 0444,
+ reg_show, NULL),
+ [INT_RT_STS_REG] = __ATTR(INT_RT_STS_REG, 0444,
+ reg_show, NULL),
+ [ERR_STS2_REG] = __ATTR(ERR_STS2_REG, 0444,
+ reg_show, NULL),
[PREST1] = __ATTR(PREST1_mS, 0644,
time_show, time_store),
[PPULS1] = __ATTR(PPULS1_uC, 0644,
@@ -1055,7 +1081,7 @@
time_show, NULL),
[PREST2] = __ATTR(PREST2_mS, 0644,
time_show, time_store),
- [PPULS2] = __ATTR(PPULS2_mS, 0644,
+ [PPULS2] = __ATTR(PPULS2_uC, 0644,
coulomb_show, coulomb_store),
[NREST2] = __ATTR(NREST2_mS, 0644,
time_show, time_store),
@@ -1073,8 +1099,8 @@
time_show, time_store),
[VMAX] = __ATTR(VMAX_uV, 0444,
voltage_show, NULL),
- [SNUM] = __ATTR(SNUM, 0644,
- time_show, time_store),
+ [SNUM] = __ATTR(SNUM, 0444,
+ time_show, NULL),
[VBATT] = __ATTR(VBATT_uV, 0444,
batt_prop_show, NULL),
[IBATT] = __ATTR(IBATT_uA, 0444,
@@ -1086,95 +1112,44 @@
__ATTR_NULL,
};
-static void get_chg_props(struct qnovo *chip, struct chg_props *cp)
+static int qnovo_update_status(struct qnovo *chip)
{
- union power_supply_propval pval;
u8 val = 0;
int rc;
+ bool ok_to_qnovo;
+ bool changed = false;
- cp->charging = true;
- rc = qnovo_read(chip, QNOVO_ERROR_STS, &val, 1);
+ rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- cp->charging = false;
+ ok_to_qnovo = false;
} else {
- cp->charging = (!(val & QNOVO_ERROR_BIT));
- }
-
- if (chip->wa_flags & QNOVO_NO_ERR_STS_BIT) {
/*
- * on v1.0 and v1.1 pmic's force charging to true
- * if things are not good to charge s/w gets a PTRAIN_DONE
- * interrupt
+ * For CV mode, keep qnovo enabled; userspace is expected to
+ * disable it after a few runs
*/
- cp->charging = true;
+ ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false;
}
- cp->usb_online = false;
- if (!chip->usb_psy)
- chip->usb_psy = power_supply_get_by_name("usb");
- if (chip->usb_psy) {
- rc = power_supply_get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_ONLINE, &pval);
- if (rc < 0)
- pr_err("Couldn't read usb online rc = %d\n", rc);
- else
- cp->usb_online = (bool)pval.intval;
+ if (chip->ok_to_qnovo ^ ok_to_qnovo) {
+
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0);
+ if (!ok_to_qnovo)
+ vote(chip->disable_votable, USER_VOTER, true, 0);
+
+ chip->ok_to_qnovo = ok_to_qnovo;
+ changed = true;
}
- cp->dc_online = false;
- if (!chip->dc_psy)
- chip->dc_psy = power_supply_get_by_name("dc");
- if (chip->dc_psy) {
- rc = power_supply_get_property(chip->dc_psy,
- POWER_SUPPLY_PROP_ONLINE, &pval);
- if (rc < 0)
- pr_err("Couldn't read dc online rc = %d\n", rc);
- else
- cp->dc_online = (bool)pval.intval;
- }
-}
-
-static void get_chg_status(struct qnovo *chip, const struct chg_props *cp,
- struct chg_status *cs)
-{
- cs->ok_to_qnovo = false;
-
- if (cp->charging &&
- (cp->usb_online || cp->dc_online))
- cs->ok_to_qnovo = true;
+ return changed;
}
static void status_change_work(struct work_struct *work)
{
struct qnovo *chip = container_of(work,
struct qnovo, status_change_work);
- bool notify_uevent = false;
- struct chg_props cp;
- struct chg_status cs;
- get_chg_props(chip, &cp);
- get_chg_status(chip, &cp, &cs);
-
- if (cs.ok_to_qnovo ^ chip->cs.ok_to_qnovo) {
- /*
- * when it is not okay to Qnovo charge, disable both voters,
- * so that when it becomes okay to Qnovo charge the user voter
- * has to specifically enable its vote to being Qnovo charging
- */
- if (!cs.ok_to_qnovo) {
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 1, 0);
- vote(chip->disable_votable, USER_VOTER, 1, 0);
- } else {
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 0, 0);
- }
- notify_uevent = true;
- }
-
- memcpy(&chip->cp, &cp, sizeof(struct chg_props));
- memcpy(&chip->cs, &cs, sizeof(struct chg_status));
-
- if (notify_uevent)
+ if (qnovo_update_status(chip))
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
}
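
qnovo_update_status() collapses the old chg_props/chg_status bookkeeping into a single error-status read plus two votes, and returns whether anything changed so the caller sends exactly one uevent per transition. The disable votable acts as a set-any aggregate: qnovo stays disabled while any client votes true. A simplified, self-contained model of that semantic (not the real pmic-voter API):

#include <stdbool.h>
#include <stdio.h>

enum { USER_VOTER, OK_TO_QNOVO_VOTER, NUM_VOTERS };

/* set-any votable: effective result is true if any client votes true */
static bool effective(const bool vote[NUM_VOTERS])
{
        for (int i = 0; i < NUM_VOTERS; i++)
                if (vote[i])
                        return true;
        return false;
}

int main(void)
{
        bool disable[NUM_VOTERS] = { [USER_VOTER] = true };

        /* status work clears only its own vote when charging is ok */
        disable[OK_TO_QNOVO_VOTER] = false;
        printf("disabled=%d\n", effective(disable)); /* still 1 */

        /* userspace must explicitly re-enable after a bad->good flip */
        disable[USER_VOTER] = false;
        printf("disabled=%d\n", effective(disable)); /* now 0 */
        return 0;
}

This mirrors why the hunk votes USER_VOTER back to true whenever ok_to_qnovo drops: a later recovery must not silently resume pulsing without userspace consent.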
@@ -1186,8 +1161,8 @@
if (ev != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if ((strcmp(psy->desc->name, "battery") == 0)
- || (strcmp(psy->desc->name, "usb") == 0))
+
+ if (strcmp(psy->desc->name, "battery") == 0)
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
@@ -1197,8 +1172,7 @@
{
struct qnovo *chip = data;
- /* disable user voter here */
- vote(chip->disable_votable, USER_VOTER, 0, 0);
+ qnovo_update_status(chip);
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
}
@@ -1211,7 +1185,14 @@
u8 vadc_offset, vadc_gain;
u8 val;
- vote(chip->disable_votable, USER_VOTER, 1, 0);
+ vote(chip->disable_votable, USER_VOTER, true, 0);
+
+ val = 0;
+ rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+ if (rc < 0) {
+ pr_err("Couldn't write iadc bitstream control rc = %d\n", rc);
+ return rc;
+ }
rc = qnovo_read(chip, QNOVO_IADC_OFFSET_0, &iadc_offset_external, 1);
if (rc < 0) {
@@ -1219,12 +1200,28 @@
return rc;
}
+ /* stored as an 8 bit 2's complement signed integer */
+ val = -1 * iadc_offset_external;
+ rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+ if (rc < 0) {
+ pr_err("Couldn't write iadc offset rc = %d\n", rc);
+ return rc;
+ }
+
rc = qnovo_read(chip, QNOVO_IADC_OFFSET_1, &iadc_offset_internal, 1);
if (rc < 0) {
pr_err("Couldn't read iadc internal offset rc = %d\n", rc);
return rc;
}
+ /* stored as an 8 bit 2's complement signed integer */
+ val = -1 * iadc_offset_internal;
+ rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+ if (rc < 0) {
+ pr_err("Couldn't write iadc offset rc = %d\n", rc);
+ return rc;
+ }
+
rc = qnovo_read(chip, QNOVO_IADC_GAIN_0, &iadc_gain_external, 1);
if (rc < 0) {
pr_err("Couldn't read iadc external gain rc = %d\n", rc);
@@ -1249,50 +1246,27 @@
return rc;
}
- chip->external_offset_nA = (s64)iadc_offset_external * IADC_LSB_NA;
- chip->internal_offset_nA = (s64)iadc_offset_internal * IADC_LSB_NA;
- chip->offset_nV = (s64)vadc_offset * VADC_LSB_NA;
+ chip->external_offset_nA = (s64)(s8)iadc_offset_external * IADC_LSB_NA;
+ chip->internal_offset_nA = (s64)(s8)iadc_offset_internal * IADC_LSB_NA;
+ chip->offset_nV = (s64)(s8)vadc_offset * VADC_LSB_NA;
chip->external_i_gain_mega
- = 1000000000 + (s64)iadc_gain_external * GAIN_LSB_FACTOR;
+ = 1000000000 + (s64)(s8)iadc_gain_external * GAIN_LSB_FACTOR;
chip->external_i_gain_mega
= div_s64(chip->external_i_gain_mega, 1000);
chip->internal_i_gain_mega
- = 1000000000 + (s64)iadc_gain_internal * GAIN_LSB_FACTOR;
+ = 1000000000 + (s64)(s8)iadc_gain_internal * GAIN_LSB_FACTOR;
chip->internal_i_gain_mega
= div_s64(chip->internal_i_gain_mega, 1000);
- chip->v_gain_mega = 1000000000 + (s64)vadc_gain * GAIN_LSB_FACTOR;
+ chip->v_gain_mega = 1000000000 + (s64)(s8)vadc_gain * GAIN_LSB_FACTOR;
chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
- val = 0;
- rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+ /* allow charger error conditions to disable qnovo, CV mode excluded */
+ val = ERR_SWITCHER_DISABLED | ERR_JEITA_SOFT_CONDITION | ERR_BAT_OV |
+ ERR_BATTERY_MISSING | ERR_SAFETY_TIMER_EXPIRED |
+ ERR_CHARGING_DISABLED | ERR_JEITA_HARD_CONDITION;
+ rc = qnovo_write(chip, QNOVO_DISABLE_CHARGING, &val, 1);
if (rc < 0) {
- pr_err("Couldn't write iadc bitsteam control rc = %d\n", rc);
- return rc;
- }
-
- rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
- if (rc < 0) {
- pr_err("Couldn't read iadc offset rc = %d\n", rc);
- return rc;
- }
-
- val *= -1;
- rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
- if (rc < 0) {
- pr_err("Couldn't write iadc offset rc = %d\n", rc);
- return rc;
- }
-
- rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
- if (rc < 0) {
- pr_err("Couldn't read iadc offset rc = %d\n", rc);
- return rc;
- }
-
- val *= -1;
- rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
- if (rc < 0) {
- pr_err("Couldn't write iadc offset rc = %d\n", rc);
+ pr_err("Couldn't write QNOVO_DISABLE_CHARGING rc = %d\n", rc);
return rc;
}
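
The (s64)(s8) double casts matter because the trim registers hold signed 8-bit values: the inner (s8) cast sign-extends, the outer (s64) widens for the fixed-point math. Without the inner cast, a raw byte of 0xFF would scale as +255 instead of -1. A quick illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t raw = 0xFF;                        /* trim register byte */
        long long wrong = (long long)raw;          /* 255: sign lost */
        long long right = (long long)(int8_t)raw;  /* -1: sign kept */

        printf("wrong=%lld right=%lld\n", wrong, right);
        return 0;
}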
@@ -1333,6 +1307,9 @@
irq_ptrain_done, rc);
return rc;
}
+
+ enable_irq_wake(irq_ptrain_done);
+
return rc;
}
@@ -1362,13 +1339,6 @@
return rc;
}
- rc = qnovo_check_chg_version(chip);
- if (rc < 0) {
- if (rc != -EPROBE_DEFER)
- pr_err("Couldn't check version rc=%d\n", rc);
- return rc;
- }
-
/* set driver data before resources request it */
platform_set_drvdata(pdev, chip);
@@ -1414,6 +1384,8 @@
goto unreg_notifier;
}
+ device_init_wakeup(chip->dev, true);
+
return rc;
unreg_notifier:
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index dab7888..e802fbd 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -11,6 +11,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -25,7 +26,7 @@
#include "smb-reg.h"
#include "smb-lib.h"
#include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
#define SMB2_DEFAULT_WPWR_UW 8000000
@@ -239,11 +240,12 @@
struct smb_dt_props {
int fcc_ua;
int usb_icl_ua;
- int otg_cl_ua;
int dc_icl_ua;
int boost_threshold_ua;
int fv_uv;
int wipower_max_uw;
+ int min_freq_khz;
+ int max_freq_khz;
u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
@@ -323,9 +325,9 @@
chip->dt.usb_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
- "qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+ "qcom,otg-cl-ua", &chg->otg_cl_ua);
if (rc < 0)
- chip->dt.otg_cl_ua = MICRO_1P5A;
+ chg->otg_cl_ua = MICRO_1P5A;
rc = of_property_read_u32(node,
"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
@@ -338,6 +340,18 @@
if (rc < 0)
chip->dt.boost_threshold_ua = MICRO_P1A;
+ rc = of_property_read_u32(node,
+ "qcom,min-freq-khz",
+ &chip->dt.min_freq_khz);
+ if (rc < 0)
+ chip->dt.min_freq_khz = -EINVAL;
+
+ rc = of_property_read_u32(node,
+ "qcom,max-freq-khz",
+ &chip->dt.max_freq_khz);
+ if (rc < 0)
+ chip->dt.max_freq_khz = -EINVAL;
+
rc = of_property_read_u32(node, "qcom,wipower-max-uw",
&chip->dt.wipower_max_uw);
if (rc < 0)
@@ -414,6 +428,7 @@
POWER_SUPPLY_PROP_BOOST_CURRENT,
POWER_SUPPLY_PROP_PE_START,
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -502,6 +517,9 @@
case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
break;
+ case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
+ rc = smblib_get_charge_current(chg, &val->intval);
+ break;
default:
pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
@@ -522,6 +540,12 @@
struct smb_charger *chg = &chip->chg;
int rc = 0;
+ mutex_lock(&chg->lock);
+ if (!chg->typec_present) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
switch (psp) {
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
rc = smblib_set_prop_usb_voltage_min(chg, val);
@@ -560,6 +584,8 @@
break;
}
+unlock:
+ mutex_unlock(&chg->lock);
return rc;
}
@@ -610,12 +636,12 @@
static enum power_supply_property smb2_usb_main_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_ICL_REDUCTION,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_TYPE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
POWER_SUPPLY_PROP_FCC_DELTA,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
/*
* TODO move the TEMP and TEMP_MAX properties here,
* and update the thermal balancer to look here
@@ -634,9 +660,6 @@
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
break;
- case POWER_SUPPLY_PROP_ICL_REDUCTION:
- val->intval = chg->icl_reduction_ua;
- break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
@@ -653,6 +676,9 @@
case POWER_SUPPLY_PROP_FCC_DELTA:
rc = smblib_get_prop_fcc_delta(chg, val);
break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = get_effective_result(chg->usb_icl_votable);
+ break;
default:
pr_debug("get prop %d is not supported in usb-main\n", psp);
rc = -EINVAL;
@@ -677,12 +703,12 @@
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
break;
- case POWER_SUPPLY_PROP_ICL_REDUCTION:
- rc = smblib_set_icl_reduction(chg, val->intval);
- break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_set_icl_current(chg, val->intval);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -838,7 +864,9 @@
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_QNOVO,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -858,6 +886,7 @@
{
struct smb_charger *chg = power_supply_get_drvdata(psy);
int rc = 0;
+ union power_supply_propval pval = {0, };
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -882,7 +911,14 @@
rc = smblib_get_prop_system_temp_level(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smblib_get_prop_charger_temp(chg, val);
+ /* do not query RRADC if charger is not present */
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0)
+ pr_err("Couldn't get usb present rc=%d\n", rc);
+
+ rc = -ENODATA;
+ if (pval.intval)
+ rc = smblib_get_prop_charger_temp(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
@@ -902,6 +938,9 @@
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ rc = smblib_get_prop_charge_qnovo_enable(chg, val);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
val->intval = chg->qnovo_fv_uv;
break;
@@ -977,12 +1016,17 @@
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ rc = smblib_set_prop_charge_qnovo_enable(chg, val);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
chg->qnovo_fv_uv = val->intval;
rc = rerun_election(chg->fv_votable);
break;
case POWER_SUPPLY_PROP_CURRENT_QNOVO:
chg->qnovo_fcc_ua = val->intval;
+ vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
+ val->intval != -EINVAL && val->intval < 2000000, 0);
rc = rerun_election(chg->fcc_votable);
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
@@ -1115,6 +1159,9 @@
struct regulator_config cfg = {};
int rc = 0;
+ if (chg->micro_usb_mode)
+ return 0;
+
chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
GFP_KERNEL);
if (!chg->vconn_vreg)
@@ -1311,10 +1358,12 @@
return rc;
}
- /* disable try.SINK mode */
- rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT, 0);
+ /* disable try.SINK mode and legacy cable IRQs */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT |
+ TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT |
+ TYPEC_LEGACY_CABLE_INT_EN_BIT, 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't set Type-C config rc=%d\n", rc);
return rc;
}
@@ -1325,6 +1374,39 @@
{
int rc;
+ /* Move to typeC mode */
+ /* configure FSM in idle state and disable UFP_ENABLE bit */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_DISABLE_CMD_BIT | UFP_EN_CMD_BIT,
+ TYPEC_DISABLE_CMD_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait for FSM to enter idle state */
+ msleep(200);
+ /* configure TypeC mode */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+ TYPE_C_OR_U_USB_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait for mode change before enabling FSM */
+ usleep_range(10000, 11000);
+ /* release FSM from idle state */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_DISABLE_CMD_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait for FSM to start */
+ msleep(100);
+ /* move to uUSB mode */
/* configure FSM in idle state */
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
@@ -1333,6 +1415,8 @@
return rc;
}
+ /* wait for FSM to enter idle state */
+ msleep(200);
/* configure micro USB mode */
rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
TYPE_C_OR_U_USB_BIT, TYPE_C_OR_U_USB_BIT);
@@ -1341,6 +1425,8 @@
return rc;
}
+ /* wait for mode change before enabling FSM */
+ usleep_range(10000, 11000);
/* release FSM from idle state */
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_DISABLE_CMD_BIT, 0);
@@ -1376,6 +1462,16 @@
smblib_get_charge_param(chg, &chg->param.dc_icl,
&chip->dt.dc_icl_ua);
+ if (chip->dt.min_freq_khz > 0) {
+ chg->param.freq_buck.min_u = chip->dt.min_freq_khz;
+ chg->param.freq_boost.min_u = chip->dt.min_freq_khz;
+ }
+
+ if (chip->dt.max_freq_khz > 0) {
+ chg->param.freq_buck.max_u = chip->dt.max_freq_khz;
+ chg->param.freq_boost.max_u = chip->dt.max_freq_khz;
+ }
+
/* set a slower soft start setting for OTG */
rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG2_REG,
ENG_SSUPPLY_IVREF_OTG_SS_MASK, OTG_SS_SLOW);
@@ -1386,7 +1482,8 @@
/* set OTG current limit */
rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
- chip->dt.otg_cl_ua);
+ (chg->wa_flags & OTG_WA) ?
+ chg->param.otg_cl.min_u : chg->otg_cl_ua);
if (rc < 0) {
pr_err("Couldn't set otg current limit rc=%d\n", rc);
return rc;
@@ -1420,10 +1517,12 @@
DEFAULT_VOTER, true, chip->dt.fv_uv);
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
- vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
- chip->dt.hvdcp_disable, 0);
vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
+ chip->dt.hvdcp_disable, 0);
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
true, 0);
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
@@ -1489,13 +1588,6 @@
return rc;
}
- rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
- QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
- return rc;
- }
-
/* configure step charging */
rc = smb2_config_step_charging(chip);
if (rc < 0) {
@@ -1520,6 +1612,16 @@
return rc;
}
+ /* disable h/w autonomous parallel charging control */
+ rc = smblib_masked_write(chg, MISC_CFG_REG,
+ STAT_PARALLEL_1400MA_EN_CFG_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't disable h/w autonomous parallel control rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* configure float charger options */
switch (chip->dt.float_option) {
case 1:
@@ -1608,6 +1710,15 @@
return rc;
}
+static int smb2_post_init(struct smb2 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+
+ rerun_election(chg->usb_irq_enable_votable);
+
+ return 0;
+}
+
static int smb2_chg_config_init(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -1649,7 +1760,7 @@
break;
case PM660_SUBTYPE:
chip->chg.smb_version = PM660_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA;
+ chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA;
chg->param.freq_buck = pm660_params.freq_buck;
chg->param.freq_boost = pm660_params.freq_boost;
chg->chg_freq.freq_5V = 600;
@@ -1947,6 +2058,16 @@
return rc;
}
+static void smb2_disable_interrupts(struct smb_charger *chg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+ if (smb2_irqs[i].irq > 0)
+ disable_irq(smb2_irqs[i].irq);
+ }
+}
+
#if defined(CONFIG_DEBUG_FS)
static int force_batt_psy_update_write(void *data, u64 val)
@@ -2074,7 +2195,7 @@
rc = smb2_init_vconn_regulator(chip);
if (rc < 0) {
pr_err("Couldn't initialize vconn regulator rc=%d\n",
- rc);
+ rc);
goto cleanup;
}
@@ -2137,6 +2258,8 @@
goto cleanup;
}
+ smb2_post_init(chip);
+
smb2_create_debugfs(chip);
rc = smblib_get_prop_usb_present(chg, &val);
@@ -2156,7 +2279,7 @@
rc = smblib_get_prop_batt_health(chg, &val);
if (rc < 0) {
pr_err("Couldn't get batt health rc=%d\n", rc);
- goto cleanup;
+ val.intval = POWER_SUPPLY_HEALTH_UNKNOWN;
}
batt_health = val.intval;
@@ -2167,6 +2290,8 @@
}
batt_charge_type = val.intval;
+ device_init_wakeup(chg->dev, true);
+
pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
usb_present, chg->usb_psy_desc.type,
batt_present, batt_health, batt_charge_type);
@@ -2205,6 +2330,9 @@
struct smb2 *chip = platform_get_drvdata(pdev);
struct smb_charger *chg = &chip->chg;
+ /* disable all interrupts */
+ smb2_disable_interrupts(chg);
+
/* configure power role for UFP */
smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, UFP_EN_CMD_BIT);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index eb6727b..7d5a8bd 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -21,7 +21,7 @@
#include "smb-lib.h"
#include "smb-reg.h"
#include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
#define smblib_err(chg, fmt, ...) \
pr_err("%s: %s: " fmt, chg->name, \
@@ -160,39 +160,14 @@
int smblib_icl_override(struct smb_charger *chg, bool override)
{
int rc;
- bool override_status;
- u8 stat;
- u16 reg;
- switch (chg->smb_version) {
- case PMI8998_SUBTYPE:
- reg = APSD_RESULT_STATUS_REG;
- break;
- case PM660_SUBTYPE:
- reg = AICL_STATUS_REG;
- break;
- default:
- smblib_dbg(chg, PR_MISC, "Unknown chip version=%x\n",
- chg->smb_version);
- return -EINVAL;
- }
+ rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+ ICL_OVERRIDE_AFTER_APSD_BIT,
+ override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
- rc = smblib_read(chg, reg, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read reg=%x rc=%d\n", reg, rc);
- return rc;
- }
- override_status = (bool)(stat & ICL_OVERRIDE_LATCH_BIT);
-
- if (override != override_status) {
- rc = smblib_masked_write(chg, CMD_APSD_REG,
- ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
- return rc;
- }
- }
- return 0;
+ return rc;
}
/********************
@@ -547,32 +522,24 @@
* HELPER FUNCTIONS *
********************/
-static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
+static void smblib_rerun_apsd(struct smb_charger *chg)
{
- const struct apsd_result *apsd_result;
+ int rc;
- /*
- * PD_INACTIVE_VOTER on hvdcp_disable_votable indicates whether
- * apsd rerun was tried earlier
- */
- if (get_client_vote(chg->hvdcp_disable_votable_indirect,
- PD_INACTIVE_VOTER)) {
- vote(chg->hvdcp_disable_votable_indirect,
- PD_INACTIVE_VOTER, false, 0);
- /* ensure hvdcp is enabled */
- if (!get_effective_result(
- chg->hvdcp_disable_votable_indirect)) {
- apsd_result = smblib_get_apsd_result(chg);
- if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
- /* rerun APSD */
- smblib_dbg(chg, PR_MISC, "rerun APSD\n");
- smblib_masked_write(chg, CMD_APSD_REG,
- APSD_RERUN_BIT,
- APSD_RERUN_BIT);
- }
- }
+ smblib_dbg(chg, PR_MISC, "re-running APSD\n");
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable HVDCP auth IRQ rc=%d\n",
+ rc);
}
- return 0;
+
+ rc = smblib_masked_write(chg, CMD_APSD_REG,
+ APSD_RERUN_BIT, APSD_RERUN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc);
}
static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
@@ -580,12 +547,13 @@
const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* if PD is active, APSD is disabled so won't have a valid result */
- if (chg->pd_active) {
+ if (chg->pd_active)
chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
- return apsd_result;
- }
+ else
+ chg->usb_psy_desc.type = apsd_result->pst;
- chg->usb_psy_desc.type = apsd_result->pst;
+ smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
+ apsd_result->name, chg->pd_active);
return apsd_result;
}
@@ -661,10 +629,13 @@
{
int rc;
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
- vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -689,6 +660,7 @@
chg->voltage_max_uv = MICRO_5V;
chg->usb_icl_delta_ua = 0;
chg->pulse_cnt = 0;
+ chg->uusb_apsd_rerun_done = false;
/* clear USB ICL vote for USB_PSY_VOTER */
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
@@ -700,28 +672,6 @@
if (rc < 0)
smblib_err(chg,
"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
-
- /* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
- rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
- rc);
-}
-
-static bool smblib_sysok_reason_usbin(struct smb_charger *chg)
-{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, SYSOK_REASON_STATUS_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get SYSOK_REASON_STATUS rc=%d\n", rc);
- /* assuming 'not usbin' in case of read failure */
- return false;
- }
-
- return stat & SYSOK_REASON_USBIN_BIT;
}
void smblib_suspend_on_debug_battery(struct smb_charger *chg)
@@ -747,7 +697,6 @@
int smblib_rerun_apsd_if_required(struct smb_charger *chg)
{
- const struct apsd_result *apsd_result;
union power_supply_propval val;
int rc;
@@ -760,21 +709,28 @@
if (!val.intval)
return 0;
- apsd_result = smblib_get_apsd_result(chg);
- if ((apsd_result->pst == POWER_SUPPLY_TYPE_UNKNOWN)
- || (apsd_result->pst == POWER_SUPPLY_TYPE_USB)) {
- /* rerun APSD */
- pr_info("Reruning APSD type = %s at bootup\n",
- apsd_result->name);
- rc = smblib_masked_write(chg, CMD_APSD_REG,
- APSD_RERUN_BIT,
- APSD_RERUN_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't rerun APSD rc = %d\n", rc);
- return rc;
+ /* fetch the DPDM regulator */
+ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+ "dpdm-supply", NULL)) {
+ chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+ if (IS_ERR(chg->dpdm_reg)) {
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
+ PTR_ERR(chg->dpdm_reg));
+ chg->dpdm_reg = NULL;
}
}
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+ rc = regulator_enable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
+ rc);
+ }
+
+ chg->uusb_apsd_rerun_done = true;
+ smblib_rerun_apsd(chg);
+
return 0;
}
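
The rerun path now mirrors the plugin handler: fetch the dpdm regulator lazily the first time it is needed, and enable it only if it is not already on, so repeated calls stay idempotent. A reduced model of that lazy-init-plus-guard pattern:

#include <stdbool.h>
#include <stdio.h>

struct regulator { bool enabled; };

static struct regulator dpdm_hw;        /* stands in for the real supply */
static struct regulator *dpdm_reg;      /* cached handle, NULL until used */

static void dpdm_enable(void)
{
        if (!dpdm_reg)
                dpdm_reg = &dpdm_hw;    /* fetch once */
        if (!dpdm_reg->enabled) {       /* enable only if not enabled */
                dpdm_reg->enabled = true;
                printf("dpdm regulator enabled\n");
        }
}

int main(void)
{
        dpdm_enable();
        dpdm_enable();  /* second call is a no-op */
        return 0;
}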
@@ -812,29 +768,12 @@
return 0;
}
-/*********************
- * VOTABLE CALLBACKS *
- *********************/
-
-static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
- int suspend, const char *client)
-{
- struct smb_charger *chg = data;
-
- /* resume input if suspend is invalid */
- if (suspend < 0)
- suspend = 0;
-
- return smblib_set_dc_suspend(chg, (bool)suspend);
-}
-
#define USBIN_25MA 25000
#define USBIN_100MA 100000
#define USBIN_150MA 150000
#define USBIN_500MA 500000
#define USBIN_900MA 900000
-
static int set_sdp_current(struct smb_charger *chg, int icl_ua)
{
int rc;
@@ -873,20 +812,18 @@
return rc;
}
-static int smblib_usb_icl_vote_callback(struct votable *votable, void *data,
- int icl_ua, const char *client)
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
{
- struct smb_charger *chg = data;
int rc = 0;
bool override;
union power_supply_propval pval;
/* suspend and return if 25mA or less is requested */
- if (client && (icl_ua < USBIN_25MA))
+ if (icl_ua < USBIN_25MA)
return smblib_set_usb_suspend(chg, true);
disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
- if (!client)
+ if (icl_ua == INT_MAX)
goto override_suspend_config;
rc = smblib_get_prop_typec_mode(chg, &pval);
@@ -904,8 +841,7 @@
goto enable_icl_changed_interrupt;
}
} else {
- rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
- icl_ua - chg->icl_reduction_ua);
+ rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
goto enable_icl_changed_interrupt;
@@ -915,7 +851,7 @@
override_suspend_config:
/* determine if override needs to be enforced */
override = true;
- if (client == NULL) {
+ if (icl_ua == INT_MAX) {
/* remove override if no voters - hw defaults is desired */
override = false;
} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
@@ -923,7 +859,7 @@
/* For std cable with type = SDP never override */
override = false;
else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
- && icl_ua - chg->icl_reduction_ua == 1500000)
+ && icl_ua == 1500000)
/*
* For std cable with type = CDP override only if
* current is not 1500mA
@@ -953,6 +889,22 @@
return rc;
}
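
With the callback refactored into a callable smblib_set_icl_current(), the old "client == NULL means no voters" convention is gone; INT_MAX now serves as the sentinel for "no restriction, fall back to hardware defaults". A sketch of the same convention in isolation:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* INT_MAX means "no effective vote": keep hw defaults, no override */
static void set_icl(int icl_ua)
{
        bool override = (icl_ua != INT_MAX);

        printf("icl_ua=%d override=%d\n", icl_ua, override);
}

int main(void)
{
        set_icl(1500000);       /* explicit limit: enforce override */
        set_icl(INT_MAX);       /* no voters: leave hw defaults */
        return 0;
}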
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+ int suspend, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ /* resume input if suspend is invalid */
+ if (suspend < 0)
+ suspend = 0;
+
+ return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -1046,6 +998,7 @@
struct smb_charger *chg = data;
int rc;
u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+ u8 stat;
/* vote to enable/disable HW autonomous INOV */
vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
@@ -1067,6 +1020,16 @@
return rc;
}
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
+ return rc;
+ }
+
+ /* re-run APSD if HVDCP was detected */
+ if (stat & QC_CHARGER_BIT)
+ smblib_rerun_apsd(chg);
+
return 0;
}
@@ -1089,16 +1052,6 @@
int rc;
if (apsd_disable) {
- /* Don't run APSD on CC debounce when APSD is disabled */
- rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
- APSD_START_ON_CC_BIT,
- 0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
- rc);
- return rc;
- }
-
rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
AUTO_SRC_DETECT_BIT,
0);
@@ -1114,15 +1067,6 @@
smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
return rc;
}
-
- rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
- APSD_START_ON_CC_BIT,
- APSD_START_ON_CC_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n",
- rc);
- return rc;
- }
}
return 0;
@@ -1159,6 +1103,42 @@
return rc;
}
+static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
+ void *data, int enable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (!chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq ||
+ !chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ return 0;
+
+ if (enable) {
+ enable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+ enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+ } else {
+ disable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+ disable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+ }
+
+ return 0;
+}
+
+static int smblib_typec_irq_disable_vote_callback(struct votable *votable,
+ void *data, int disable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ if (!chg->irq_info[TYPE_C_CHANGE_IRQ].irq)
+ return 0;
+
+ if (disable)
+ disable_irq_nosync(chg->irq_info[TYPE_C_CHANGE_IRQ].irq);
+ else
+ enable_irq(chg->irq_info[TYPE_C_CHANGE_IRQ].irq);
+
+ return 0;
+}
+
/*******************
* VCONN REGULATOR *
* *****************/
@@ -1167,7 +1147,7 @@
static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
- u8 otg_stat, stat4;
+ u8 otg_stat, val;
int rc = 0, i;
if (!chg->external_vconn) {
@@ -1198,17 +1178,12 @@
* VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used
* for Vconn, and it should be set with reverse polarity of CC_OUT.
*/
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
- return rc;
- }
-
smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
- stat4 = stat4 & CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
+ val = chg->typec_status[3] &
+ CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
- VCONN_EN_VALUE_BIT | stat4);
+ VCONN_EN_VALUE_BIT | val);
if (rc < 0) {
smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc);
return rc;
@@ -1281,11 +1256,14 @@
/*****************
* OTG REGULATOR *
*****************/
-
+#define MAX_RETRY 15
+#define MIN_DELAY_US 2000
+#define MAX_DELAY_US 9000
static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
- int rc;
+ int rc, retry_count = 0, min_delay = MIN_DELAY_US;
+ u8 stat;
smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
@@ -1304,6 +1282,42 @@
return rc;
}
+ if (chg->wa_flags & OTG_WA) {
+ /* check for softstart */
+ do {
+ usleep_range(min_delay, min_delay + 100);
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read OTG status rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (stat & BOOST_SOFTSTART_DONE_BIT) {
+ rc = smblib_set_charge_param(chg,
+ &chg->param.otg_cl, chg->otg_cl_ua);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't set otg limit\n");
+ break;
+ }
+
+ /* increase the delay for following iterations */
+ if (retry_count > 5)
+ min_delay = MAX_DELAY_US;
+ } while (retry_count++ < MAX_RETRY);
+
+ if (retry_count >= MAX_RETRY) {
+ smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ /* disable OTG if softstart failed */
+ smblib_write(chg, CMD_OTG_REG, 0);
return rc;
}
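
The OTG_WA branch polls BOOST_SOFTSTART_DONE_BIT, widening the poll interval after the first few attempts and giving up after MAX_RETRY iterations. The retry shape in isolation, with the register read stubbed out and usleep_range() approximated by usleep():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RETRY       15
#define MIN_DELAY_US    2000
#define MAX_DELAY_US    9000

/* stand-in for reading OTG_STATUS_REG and testing the softstart bit */
static bool softstart_done(int attempt)
{
        return attempt >= 7;    /* pretend hw settles on the 8th poll */
}

int main(void)
{
        int retry_count = 0, min_delay = MIN_DELAY_US;

        do {
                usleep(min_delay);
                if (softstart_done(retry_count)) {
                        printf("done after %d polls\n", retry_count + 1);
                        return 0;
                }
                if (retry_count > 5)    /* widen later polls */
                        min_delay = MAX_DELAY_US;
        } while (retry_count++ < MAX_RETRY);

        printf("softstart timed out\n");
        return 1;
}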
@@ -1316,6 +1330,14 @@
if (chg->otg_en)
goto unlock;
+ if (!chg->usb_icl_votable) {
+ chg->usb_icl_votable = find_votable("USB_ICL");
+
+ if (!chg->usb_icl_votable)
+ return -EINVAL;
+ }
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, true, 0);
+
rc = _smblib_vbus_regulator_enable(rdev);
if (rc >= 0)
chg->otg_en = true;
@@ -1337,6 +1359,17 @@
smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
}
+ if (chg->wa_flags & OTG_WA) {
+ /* set OTG current limit to minimum value */
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+ chg->param.otg_cl.min_u);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't set otg current limit rc=%d\n", rc);
+ return rc;
+ }
+ }
+
smblib_dbg(chg, PR_OTG, "disabling OTG\n");
rc = smblib_write(chg, CMD_OTG_REG, 0);
if (rc < 0) {
@@ -1345,7 +1378,6 @@
}
smblib_dbg(chg, PR_OTG, "start 1 in 8 mode\n");
- rc = smblib_write(chg, CMD_OTG_REG, 0);
rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
if (rc < 0) {
@@ -1369,6 +1401,8 @@
if (rc >= 0)
chg->otg_en = false;
+ if (chg->usb_icl_votable)
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
unlock:
mutex_unlock(&chg->otg_oc_lock);
return rc;
@@ -1497,6 +1531,21 @@
break;
}
+ if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
+ return 0;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
+ ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
+ if (!stat)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
return 0;
}
@@ -1682,6 +1731,23 @@
return 0;
}
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ val->intval = (bool)(stat & QNOVO_PT_ENABLE_CMD_BIT);
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -1733,6 +1799,10 @@
return -EINVAL;
chg->system_temp_level = val->intval;
+ /* disable parallel charging when a system temp level is set */
+ vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER,
+ chg->system_temp_level ? true : false, 0);
+
if (chg->system_temp_level == chg->thermal_levels)
return vote(chg->chg_disable_votable,
THERMAL_DAEMON_VOTER, true, 0);
@@ -1746,6 +1816,22 @@
return 0;
}
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+
+ rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
+ QNOVO_PT_ENABLE_CMD_BIT,
+ val->intval ? QNOVO_PT_ENABLE_CMD_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
int smblib_rerun_aicl(struct smb_charger *chg)
{
int rc, settled_icl_ua;
@@ -1951,7 +2037,7 @@
int rc = 0;
u8 stat;
- if (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0) {
+ if (get_client_vote_locked(chg->usb_icl_votable, USER_VOTER) == 0) {
val->intval = false;
return rc;
}
@@ -2060,23 +2146,13 @@
int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc = 0;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
- return rc;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
- stat);
-
- if (stat & CC_ATTACHED_BIT)
- val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1;
+ if (chg->typec_status[3] & CC_ATTACHED_BIT)
+ val->intval =
+ (bool)(chg->typec_status[3] & CC_ORIENTATION_BIT) + 1;
else
val->intval = 0;
- return rc;
+ return 0;
}
static const char * const smblib_typec_mode_name[] = {
@@ -2094,17 +2170,7 @@
static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_1_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
- return POWER_SUPPLY_TYPEC_NONE;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat);
-
- switch (stat) {
+ switch (chg->typec_status[0]) {
case 0:
return POWER_SUPPLY_TYPEC_NONE;
case UFP_TYPEC_RDSTD_BIT:
@@ -2122,17 +2188,7 @@
static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_2_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc);
- return POWER_SUPPLY_TYPEC_NONE;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_2 = 0x%02x\n", stat);
-
- switch (stat & DFP_TYPEC_MASK) {
+ switch (chg->typec_status[1] & DFP_TYPEC_MASK) {
case DFP_RA_RA_BIT:
return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
case DFP_RD_RD_BIT:
@@ -2153,28 +2209,17 @@
int smblib_get_prop_typec_mode(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ if (!(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
val->intval = POWER_SUPPLY_TYPEC_NONE;
- return rc;
- }
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
-
- if (!(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
- val->intval = POWER_SUPPLY_TYPEC_NONE;
- return rc;
+ return 0;
}
- if (stat & UFP_DFP_MODE_STATUS_BIT)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
val->intval = smblib_get_prop_dfp_mode(chg);
else
val->intval = smblib_get_prop_ufp_mode(chg);
- return rc;
+ return 0;
}
int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -2266,16 +2311,7 @@
int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc;
- u8 ctrl;
-
- rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG rc=%d\n",
- rc);
- return rc;
- }
- val->intval = ctrl & EXIT_SNK_BASED_ON_CC_BIT;
+ val->intval = chg->pd_hard_reset;
return 0;
}
@@ -2404,6 +2440,22 @@
return -EINVAL;
}
+ if (power_role == UFP_EN_CMD_BIT) {
+ /* disable PBS workaround when forcing sink mode */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ } else {
+ /* restore it back to 0xA5 */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ }
+
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, power_role);
if (rc < 0) {
@@ -2429,10 +2481,6 @@
return rc;
}
- if (chg->mode == PARALLEL_MASTER)
- vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
- min_uv > MICRO_5V, 0);
-
chg->voltage_min_uv = min_uv;
return rc;
}
@@ -2452,10 +2500,6 @@
}
chg->voltage_max_uv = max_uv;
- rc = smblib_rerun_aicl(chg);
- if (rc < 0)
- smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
-
return rc;
}
@@ -2463,103 +2507,101 @@
const union power_supply_propval *val)
{
int rc;
- u8 stat = 0;
- bool cc_debounced;
- bool orientation;
- bool pd_active = val->intval;
+ bool orientation, cc_debounced, sink_attached, hvdcp;
+ u8 stat;
- if (!get_effective_result(chg->pd_allowed_votable)) {
- smblib_err(chg, "PD is not allowed\n");
+ if (!get_effective_result(chg->pd_allowed_votable))
return -EINVAL;
- }
- vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
- vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
-
- /*
- * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
- * when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set
- * or when VCONN_EN_VALUE_BIT is set.
- */
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ rc = smblib_read(chg, APSD_STATUS_REG, &stat);
if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
return rc;
}
- if (pd_active) {
- orientation = stat & CC_ORIENTATION_BIT;
+ cc_debounced = (bool)
+ (chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+ sink_attached = (bool)
+ (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
+ hvdcp = stat & QC_CHARGER_BIT;
+
+ chg->pd_active = val->intval;
+ if (chg->pd_active) {
+ vote(chg->apsd_disable_votable, PD_VOTER, true, 0);
+ vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+ vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+
+ /*
+ * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2
+ * line when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override)
+ * is set or when VCONN_EN_VALUE_BIT is set.
+ */
+ orientation = chg->typec_status[3] & CC_ORIENTATION_BIT;
rc = smblib_masked_write(chg,
TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
VCONN_EN_ORIENTATION_BIT,
orientation ? 0 : VCONN_EN_ORIENTATION_BIT);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg,
"Couldn't enable vconn on CC line rc=%d\n", rc);
- return rc;
- }
+
+ /* SW controlled CC_OUT */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT, TYPEC_SPARE_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable SW cc_out rc=%d\n",
+ rc);
+
/*
* Enforce 500mA for PD until the real vote comes in later.
* It is guaranteed that pd_active is set prior to
* pd_current_max
*/
rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
- rc);
- return rc;
- }
+ rc);
+
+ /* since PD was found the cable must be non-legacy */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
/* clear USB ICL vote for DCP_VOTER */
rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote DCP from USB ICL rc=%d\n",
- rc);
-
- /* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
- rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't un-vote DCP from USB ICL rc=%d\n",
+ rc);
/* remove USB_PSY_VOTER */
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
- return rc;
- }
+ } else {
+ vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
+ vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
+ vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+ false, 0);
- /* pd active set, parallel charger can be enabled now */
- rc = vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
- false, 0);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't unvote PL_DELAY_HVDCP_VOTER rc=%d\n",
- rc);
- return rc;
- }
+ /* HW controlled CC_OUT */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n",
+ rc);
+
+ /*
+ * This WA should only run for HVDCP. Non-legacy SDP/CDP could
+ * draw more, but this WA will remove Rd causing VBUS to drop,
+ * and data could be interrupted. Non-legacy DCP could also draw
+ * more, but it may impact compliance.
+ */
+ if (!chg->typec_legacy_valid && cc_debounced &&
+ !sink_attached && hvdcp)
+ schedule_work(&chg->legacy_detection_work);
}
- /* CC pin selection s/w override in PD session; h/w otherwise. */
- rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
- TYPEC_SPARE_CFG_BIT,
- pd_active ? TYPEC_SPARE_CFG_BIT : 0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't change cc_out ctrl to %s rc=%d\n",
- pd_active ? "SW" : "HW", rc);
- return rc;
- }
-
- cc_debounced = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
- if (!pd_active && cc_debounced)
- try_rerun_apsd_for_hvdcp(chg);
-
- chg->pd_active = pd_active;
smblib_update_usb_type(chg);
power_supply_changed(chg->usb_psy);
-
return rc;
}
@@ -2627,12 +2669,6 @@
static struct reg_info cc2_detach_settings[] = {
{
- .reg = TYPE_C_CFG_REG,
- .mask = APSD_START_ON_CC_BIT,
- .val = 0,
- .desc = "TYPE_C_CFG_REG",
- },
- {
.reg = TYPE_C_CFG_2_REG,
.mask = TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
.val = TYPE_C_UFP_MODE_BIT,
@@ -2668,88 +2704,70 @@
static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
{
- int rc = 0;
- union power_supply_propval cc2_val = {0, };
+ int rc, ccout, ufp_mode;
+ u8 stat;
if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
- return rc;
+ return 0;
- if (chg->cc2_sink_detach_flag != CC2_SINK_NONE)
- return rc;
+ if (chg->cc2_detach_wa_active)
+ return 0;
- rc = smblib_get_prop_typec_cc_orientation(chg, &cc2_val);
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- smblib_err(chg, "Couldn't get cc orientation rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return rc;
}
- if (cc2_val.intval == 1)
- return rc;
+ ccout = (stat & CC_ATTACHED_BIT) ?
+ (!!(stat & CC_ORIENTATION_BIT) + 1) : 0;
+ ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ?
+ !(stat & UFP_DFP_MODE_STATUS_BIT) : 0;
- rc = smblib_get_prop_typec_mode(chg, &cc2_val);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
- return rc;
- }
+ if (ccout != 2)
+ return 0;
- switch (cc2_val.intval) {
- case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
- smblib_reg_block_update(chg, cc2_detach_settings);
- chg->cc2_sink_detach_flag = CC2_SINK_STD;
- schedule_work(&chg->rdstd_cc2_detach_work);
- break;
- case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
- case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
- chg->cc2_sink_detach_flag = CC2_SINK_MEDIUM_HIGH;
- break;
- default:
- break;
- }
+ if (!ufp_mode)
+ return 0;
+ chg->cc2_detach_wa_active = true;
+ /* The CC2 removal WA will cause a type-c-change IRQ storm */
+ smblib_reg_block_update(chg, cc2_detach_settings);
+ schedule_work(&chg->rdstd_cc2_detach_work);
return rc;
}
static int smblib_cc2_sink_removal_exit(struct smb_charger *chg)
{
- int rc = 0;
-
if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
- return rc;
+ return 0;
- if (chg->cc2_sink_detach_flag == CC2_SINK_STD) {
- cancel_work_sync(&chg->rdstd_cc2_detach_work);
- smblib_reg_block_restore(chg, cc2_detach_settings);
- }
+ if (!chg->cc2_detach_wa_active)
+ return 0;
- chg->cc2_sink_detach_flag = CC2_SINK_NONE;
-
- return rc;
+ chg->cc2_detach_wa_active = false;
+ cancel_work_sync(&chg->rdstd_cc2_detach_work);
+ smblib_reg_block_restore(chg, cc2_detach_settings);
+ return 0;
}
int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
const union power_supply_propval *val)
{
- int rc;
+ int rc = 0;
+ if (chg->pd_hard_reset == val->intval)
+ return rc;
+
+ chg->pd_hard_reset = val->intval;
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- EXIT_SNK_BASED_ON_CC_BIT,
- (val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
- if (rc < 0) {
- smblib_err(chg, "Could not set EXIT_SNK_BASED_ON_CC rc=%d\n",
+ EXIT_SNK_BASED_ON_CC_BIT,
+ (chg->pd_hard_reset) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set EXIT_SNK_BASED_ON_CC rc=%d\n",
rc);
- return rc;
- }
- vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0);
-
- if (val->intval)
- rc = smblib_cc2_sink_removal_enter(chg);
- else
- rc = smblib_cc2_sink_removal_exit(chg);
-
- if (rc < 0) {
- smblib_err(chg, "Could not detect cc2 removal rc=%d\n", rc);
- return rc;
- }
+ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER,
+ chg->pd_hard_reset, 0);
return rc;
}
@@ -2794,15 +2812,21 @@
#define TYPEC_DEFAULT_CURRENT_MA 900000
#define TYPEC_MEDIUM_CURRENT_MA 1500000
#define TYPEC_HIGH_CURRENT_MA 3000000
-static int smblib_get_charge_current(struct smb_charger *chg,
+int smblib_get_charge_current(struct smb_charger *chg,
int *total_current_ua)
{
const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
union power_supply_propval val = {0, };
- int rc, typec_source_rd, current_ua;
+ int rc = 0, typec_source_rd, current_ua;
bool non_compliant;
u8 stat5;
+ if (chg->pd_active) {
+ *total_current_ua =
+ get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+ return rc;
+ }
+
rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
if (rc < 0) {
smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
@@ -2877,39 +2901,12 @@
return 0;
}
-int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
-{
- int current_ua, rc;
-
- if (reduction_ua == 0) {
- vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
- } else {
- /*
- * No usb_icl voter means we are defaulting to hw chosen
- * max limit. We need a vote from s/w to enforce the reduction.
- */
- if (get_effective_result(chg->usb_icl_votable) == -EINVAL) {
- rc = smblib_get_charge_current(chg, &current_ua);
- if (rc < 0) {
- pr_err("Failed to get ICL rc=%d\n", rc);
- return rc;
- }
- vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, true,
- current_ua);
- }
- }
-
- chg->icl_reduction_ua = reduction_ua;
-
- return rerun_election(chg->usb_icl_votable);
-}
-
/************************
* PARALLEL PSY GETTERS *
************************/
int smblib_get_prop_slave_current_now(struct smb_charger *chg,
- union power_supply_propval *pval)
+ union power_supply_propval *pval)
{
if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
@@ -2946,6 +2943,14 @@
return IRQ_HANDLED;
}
+ if (chg->wa_flags & OTG_WA) {
+ if (stat & OTG_OC_DIS_SW_STS_RT_STS_BIT)
+ smblib_err(chg, "OTG disabled by hw\n");
+
+ /* not handling software based hiccups for PM660 */
+ return IRQ_HANDLED;
+ }
+
if (stat & OTG_OVERCURRENT_RT_STS_BIT)
schedule_work(&chg->otg_oc_work);
@@ -2964,7 +2969,7 @@
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
- rc);
+ rc);
return IRQ_HANDLED;
}
@@ -3069,24 +3074,43 @@
return IRQ_HANDLED;
}
-irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising)
{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
+ if (vbus_rising) {
+ /* use the typec flag even though it's not typec */
+ chg->typec_present = 1;
+ } else {
+ chg->typec_present = 0;
+ smblib_update_usb_type(chg);
+ extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
+ smblib_uusb_removal(chg);
+ }
+}
+
+static void smblib_typec_usb_plugin(struct smb_charger *chg, bool vbus_rising)
+{
+ if (vbus_rising)
+ smblib_cc2_sink_removal_exit(chg);
+ else
+ smblib_cc2_sink_removal_enter(chg);
+}
+
+#define PL_DELAY_MS 30000
+void smblib_usb_plugin_locked(struct smb_charger *chg)
+{
int rc;
u8 stat;
bool vbus_rising;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
- return IRQ_HANDLED;
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return;
}
vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
- smblib_set_opt_freq_buck(chg,
- vbus_rising ? chg->chg_freq.freq_5V :
- chg->chg_freq.freq_removal);
+ smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
+ chg->chg_freq.freq_removal);
/* fetch the DPDM regulator */
if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
@@ -3107,6 +3131,11 @@
smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
rc);
}
+
+ /* Schedule work to enable parallel charger */
+ vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
+ schedule_delayed_work(&chg->pl_enable_work,
+ msecs_to_jiffies(PL_DELAY_MS));
} else {
if (chg->wa_flags & BOOST_BACK_WA)
vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
@@ -3118,17 +3147,26 @@
smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
rc);
}
-
- if (chg->micro_usb_mode) {
- smblib_update_usb_type(chg);
- extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
- smblib_uusb_removal(chg);
- }
}
+ if (chg->micro_usb_mode)
+ smblib_micro_usb_plugin(chg, vbus_rising);
+ else
+ smblib_typec_usb_plugin(chg, vbus_rising);
+
power_supply_changed(chg->usb_psy);
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
- irq_data->name, vbus_rising ? "attached" : "detached");
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
+ vbus_rising ? "attached" : "detached");
+}
+
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ mutex_lock(&chg->lock);
+ smblib_usb_plugin_locked(chg);
+ mutex_unlock(&chg->lock);
return IRQ_HANDLED;
}
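
The handler above shows the locking convention this rework introduces: the threaded IRQ handler only takes chg->lock and delegates to a *_locked() body, so other contexts (the CC2 detach work later in the patch calls smblib_usb_typec_change() the same way) can share that body under the same mutex. A minimal sketch of the convention, with hypothetical names (struct foo, FOO_STATUS):

        /* sketch only: *_locked() functions expect foo->lock to be held */
        static void foo_handle_event_locked(struct foo *f)
        {
                lockdep_assert_held(&f->lock);  /* document and verify the contract */
                f->cached_status = readl(f->base + FOO_STATUS);
        }

        static irqreturn_t foo_irq_handler(int irq, void *data)
        {
                struct foo *f = data;

                /* threaded IRQ handlers run in process context, so a mutex is fine */
                mutex_lock(&f->lock);
                foo_handle_event_locked(f);
                mutex_unlock(&f->lock);
                return IRQ_HANDLED;
        }
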
@@ -3161,6 +3199,7 @@
|| (stat & AICL_DONE_BIT))
delay = 0;
+ cancel_delayed_work_sync(&chg->icl_change_work);
schedule_delayed_work(&chg->icl_change_work,
msecs_to_jiffies(delay));
}
@@ -3270,11 +3309,19 @@
if (chg->mode == PARALLEL_MASTER)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
- /* QC authentication done, parallel charger can be enabled now */
- vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
-
/* the APSD done handler will set the USB supply type */
apsd_result = smblib_get_apsd_result(chg);
+ if (get_effective_result(chg->hvdcp_hw_inov_dis_votable)) {
+ if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ /* force HVDCP2 to 9V if INOV is disabled */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+ FORCE_9V_BIT, FORCE_9V_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't force 9V HVDCP rc=%d\n", rc);
+ }
+ }
+
smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
apsd_result->name);
}
@@ -3288,9 +3335,10 @@
if (rising) {
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
false, 0);
- if (get_effective_result(chg->pd_disallowed_votable_indirect))
- /* could be a legacy cable, try doing hvdcp */
- try_rerun_apsd_for_hvdcp(chg);
+
+ /* enable HDC and ICL irq for QC2/3 charger */
+ if (qc_charger)
+ vote(chg->usb_irq_enable_votable, QC_VOTER, true, 0);
/*
* HVDCP detection timeout done
@@ -3300,15 +3348,6 @@
/* enforce DCP ICL if specified */
vote(chg->usb_icl_votable, DCP_VOTER,
chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
- /*
- * If adapter is not QC2.0/QC3.0 remove vote for parallel
- * disable.
- * Otherwise if adapter is QC2.0/QC3.0 wait for authentication
- * to complete.
- */
- if (!qc_charger)
- vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
- false, 0);
}
smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
@@ -3328,6 +3367,41 @@
rising ? "rising" : "falling");
}
+static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
+{
+ /* while PD is active it should have complete ICL control */
+ if (chg->pd_active)
+ return;
+
+ switch (pst) {
+ case POWER_SUPPLY_TYPE_USB:
+ /*
+ * USB_PSY will vote to increase the current to 500/900mA once
+ * enumeration is done. Ensure that USB_PSY has at least voted
+ * for 100mA before releasing the LEGACY_UNKNOWN vote
+ */
+ if (!is_client_vote_enabled(chg->usb_icl_votable,
+ USB_PSY_VOTER))
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, true, 100000);
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+ break;
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+ break;
+ case POWER_SUPPLY_TYPE_USB_HVDCP:
+ case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
+ break;
+ default:
+ smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 500000);
+ break;
+ }
+}
+
#define HVDCP_DET_MS 2500
static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
{
@@ -3337,6 +3411,10 @@
return;
apsd_result = smblib_update_usb_type(chg);
+
+ if (!chg->typec_legacy_valid)
+ smblib_force_legacy_icl(chg, apsd_result->pst);
+
switch (apsd_result->bit) {
case SDP_CHARGER_BIT:
case CDP_CHARGER_BIT:
@@ -3349,13 +3427,9 @@
break;
case OCP_CHARGER_BIT:
case FLOAT_CHARGER_BIT:
- /*
- * if not DCP then no hvdcp timeout happens. Enable
- * pd/parallel here.
- */
+ /* if not DCP then no HVDCP timeout happens; enable PD here */
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
false, 0);
- vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
break;
case DCP_CHARGER_BIT:
if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
@@ -3384,6 +3458,17 @@
}
smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+ if (chg->micro_usb_mode && (stat & APSD_DTC_STATUS_DONE_BIT)
+ && !chg->uusb_apsd_rerun_done) {
+ /*
+ * Force a re-run of APSD to handle slow-insertion-related
+ * charger mis-detection.
+ */
+ chg->uusb_apsd_rerun_done = true;
+ smblib_rerun_apsd(chg);
+ return IRQ_HANDLED;
+ }
+
smblib_handle_apsd_done(chg,
(bool)(stat & APSD_DTC_STATUS_DONE_BIT));
@@ -3417,65 +3502,6 @@
return IRQ_HANDLED;
}
-static void typec_source_removal(struct smb_charger *chg)
-{
- int rc;
-
- /* reset both usbin current and voltage votes */
- vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
- vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
-
- cancel_delayed_work_sync(&chg->hvdcp_detect_work);
-
- if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
- /* re-enable AUTH_IRQ_EN_CFG_BIT */
- rc = smblib_masked_write(chg,
- USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't enable QC auth setting rc=%d\n", rc);
- }
-
- /* reconfigure allowed voltage for HVDCP */
- rc = smblib_set_adapter_allowance(chg,
- USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
- if (rc < 0)
- smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
- rc);
-
- chg->voltage_min_uv = MICRO_5V;
- chg->voltage_max_uv = MICRO_5V;
-
- /* clear USB ICL vote for PD_VOTER */
- rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't un-vote PD from USB ICL rc=%d\n", rc);
-
- /* clear USB ICL vote for USB_PSY_VOTER */
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote USB_PSY from USB ICL rc=%d\n", rc);
-
- /* clear USB ICL vote for DCP_VOTER */
- rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
-
- /* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
- rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
- rc);
-}
-
-static void typec_source_insertion(struct smb_charger *chg)
-{
-}
-
static void typec_sink_insertion(struct smb_charger *chg)
{
/* when a sink is inserted we should not wait on hvdcp timeout to
@@ -3494,107 +3520,140 @@
static void smblib_handle_typec_removal(struct smb_charger *chg)
{
+ int rc;
+
+ chg->cc2_detach_wa_active = false;
+
+ /* reset APSD voters */
+ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+ vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
+
+ cancel_delayed_work_sync(&chg->pl_enable_work);
+ cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+ /* reset input current limit voters */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+ vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+ vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+ vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+
+ /* reset hvdcp voters */
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
+ vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0);
+
+ /* reset power delivery voters */
+ vote(chg->pd_allowed_votable, PD_VOTER, false, 0);
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
- vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
- vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
- vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
- /* reset votes from vbus_cc_short */
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- true, 0);
- vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
- true, 0);
- /*
- * cable could be removed during hard reset, remove its vote to
- * disable apsd
- */
- vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+ /* reset usb irq voters */
+ vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+ vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0);
+
+ /* reset parallel voters */
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
chg->vconn_attempts = 0;
chg->otg_attempts = 0;
chg->pulse_cnt = 0;
chg->usb_icl_delta_ua = 0;
+ chg->voltage_min_uv = MICRO_5V;
+ chg->voltage_max_uv = MICRO_5V;
+ chg->pd_active = 0;
+ chg->pd_hard_reset = 0;
+ chg->typec_legacy_valid = false;
- chg->usb_ever_removed = true;
+ /* enable APSD CC trigger for next insertion */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+ APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc);
- smblib_update_usb_type(chg);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /* re-enable AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
- typec_source_removal(chg);
+ /* reconfigure allowed voltage for HVDCP */
+ rc = smblib_set_adapter_allowance(chg,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+ rc);
+
+ /* enable DRP */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+
+ /* HW controlled CC_OUT */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_SPARE_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc);
+
+ /* restore crude sensor */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
+
typec_sink_removal(chg);
+ smblib_update_usb_type(chg);
}
static void smblib_handle_typec_insertion(struct smb_charger *chg,
- bool sink_attached, bool legacy_cable)
+ bool sink_attached)
{
- int rp;
- bool vbus_cc_short = false;
- bool valid_legacy_cable;
+ int rc;
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
- if (sink_attached) {
- typec_source_removal(chg);
+ /* disable APSD CC trigger since CC is attached */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG, APSD_START_ON_CC_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+ rc);
+
+ if (sink_attached)
typec_sink_insertion(chg);
- } else {
- typec_source_insertion(chg);
+ else
typec_sink_removal(chg);
- }
-
- valid_legacy_cable = legacy_cable &&
- (chg->usb_ever_removed || !smblib_sysok_reason_usbin(chg));
- vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
- valid_legacy_cable, 0);
-
- if (valid_legacy_cable) {
- rp = smblib_get_prop_ufp_mode(chg);
- if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
- || rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
- vbus_cc_short = true;
- smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
- rp);
- }
- }
-
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- vbus_cc_short, 0);
- vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
- vbus_cc_short, 0);
}
static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
- bool rising, bool sink_attached, bool legacy_cable)
+ bool rising, bool sink_attached)
{
int rc;
union power_supply_propval pval = {0, };
- if (rising)
- smblib_handle_typec_insertion(chg, sink_attached, legacy_cable);
- else
- smblib_handle_typec_removal(chg);
+ if (rising) {
+ if (!chg->typec_present) {
+ chg->typec_present = true;
+ smblib_dbg(chg, PR_MISC, "TypeC insertion\n");
+ smblib_handle_typec_insertion(chg, sink_attached);
+ }
+ } else {
+ if (chg->typec_present) {
+ chg->typec_present = false;
+ smblib_dbg(chg, PR_MISC, "TypeC removal\n");
+ smblib_handle_typec_removal(chg);
+ }
+ }
rc = smblib_get_prop_typec_mode(chg, &pval);
if (rc < 0)
smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
- /*
- * HW BUG - after cable is removed, medium or high rd reading
- * falls to std. Use it for signal of typec cc detachment in
- * software WA.
- */
- if (chg->cc2_sink_detach_flag == CC2_SINK_MEDIUM_HIGH
- && pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
-
- chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
-
- rc = smblib_masked_write(chg,
- TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- EXIT_SNK_BASED_ON_CC_BIT, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't get prop typec mode rc=%d\n",
- rc);
- }
-
smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
rising ? "rising" : "falling",
smblib_typec_mode_name[pval.intval]);
@@ -3620,50 +3679,54 @@
return IRQ_HANDLED;
}
+static void smblib_usb_typec_change(struct smb_charger *chg)
+{
+ int rc;
+ bool debounce_done, sink_attached;
+
+ rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG,
+ chg->typec_status, 5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't cache USB Type-C status rc=%d\n", rc);
+ return;
+ }
+
+ debounce_done =
+ (bool)(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+ sink_attached =
+ (bool)(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
+
+ smblib_handle_typec_debounce_done(chg, debounce_done, sink_attached);
+
+ if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT)
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
+
+ if (chg->typec_status[3] & TYPEC_VCONN_OVERCURR_STATUS_BIT)
+ schedule_work(&chg->vconn_oc_work);
+
+ power_supply_changed(chg->usb_psy);
+}
+
irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc;
- u8 stat4, stat5;
- bool debounce_done, sink_attached, legacy_cable;
- if (chg->micro_usb_mode)
- return smblib_handle_usb_typec_change_for_uusb(chg);
-
- /* WA - not when PD hard_reset WIP on cc2 in sink mode */
- if (chg->cc2_sink_detach_flag == CC2_SINK_STD)
- return IRQ_HANDLED;
-
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ if (chg->micro_usb_mode) {
+ smblib_handle_usb_typec_change_for_uusb(chg);
return IRQ_HANDLED;
}
- rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+ if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+ smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
+ chg->cc2_detach_wa_active ?
+ "cc2_detach_wa" : "typec_en_dis");
return IRQ_HANDLED;
}
- debounce_done = (bool)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
- sink_attached = (bool)(stat4 & UFP_DFP_MODE_STATUS_BIT);
- legacy_cable = (bool)(stat5 & TYPEC_LEGACY_CABLE_STATUS_BIT);
-
- smblib_handle_typec_debounce_done(chg,
- debounce_done, sink_attached, legacy_cable);
-
- if (stat4 & TYPEC_VBUS_ERROR_STATUS_BIT)
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n",
- irq_data->name);
-
- if (stat4 & TYPEC_VCONN_OVERCURR_STATUS_BIT)
- schedule_work(&chg->vconn_oc_work);
-
- power_supply_changed(chg->usb_psy);
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat4);
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_5 = 0x%02x\n", stat5);
+ mutex_lock(&chg->lock);
+ smblib_usb_typec_change(chg);
+ mutex_unlock(&chg->lock);
return IRQ_HANDLED;
}
@@ -3691,7 +3754,7 @@
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc;
+ int rc, usb_icl;
u8 stat;
if (!(chg->wa_flags & BOOST_BACK_WA))
@@ -3703,8 +3766,9 @@
return IRQ_HANDLED;
}
- if ((stat & USE_USBIN_BIT) &&
- get_effective_result(chg->usb_icl_votable) < USBIN_25MA)
+ /* skip suspending input if it's already suspended by some other voter */
+ usb_icl = get_effective_result(chg->usb_icl_votable);
+ if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl < USBIN_25MA)
return IRQ_HANDLED;
if (stat & USE_DCIN_BIT)
@@ -3742,12 +3806,7 @@
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
false, 0);
- if (get_effective_result(chg->pd_disallowed_votable_indirect))
- /* pd is still disabled, try hvdcp */
- try_rerun_apsd_for_hvdcp(chg);
- else
- /* notify pd now that pd is allowed */
- power_supply_changed(chg->usb_psy);
+ power_supply_changed(chg->usb_psy);
}
static void bms_update_work(struct work_struct *work)
@@ -3788,11 +3847,13 @@
static void rdstd_cc2_detach_work(struct work_struct *work)
{
int rc;
- u8 stat;
- struct smb_irq_data irq_data = {NULL, "cc2-removal-workaround"};
+ u8 stat4, stat5;
struct smb_charger *chg = container_of(work, struct smb_charger,
rdstd_cc2_detach_work);
+ if (!chg->cc2_detach_wa_active)
+ return;
+
/*
* WA steps -
* 1. Enable both UFP and DFP, wait for 10ms.
@@ -3800,7 +3861,7 @@
* 3. Removal detected if both TYPEC_DEBOUNCE_DONE_STATUS
* and TIMER_STAGE bits are gone, otherwise repeat all by
* work rescheduling.
- * Note, work will be cancelled when pd_hard_reset is 0.
+ * Note, work will be cancelled when USB_PLUGIN rises.
*/
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
@@ -3823,30 +3884,35 @@
usleep_range(30000, 31000);
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return;
}
- if (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
- goto rerun;
- rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+ rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
if (rc < 0) {
smblib_err(chg,
"Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc);
return;
}
- if (stat & TIMER_STAGE_2_BIT)
+
+ if ((stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
+ || (stat5 & TIMER_STAGE_2_BIT)) {
+ smblib_dbg(chg, PR_MISC, "rerunning DD=%d TS2BIT=%d\n",
+ (int)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT),
+ (int)(stat5 & TIMER_STAGE_2_BIT));
goto rerun;
+ }
- /* Bingo, cc2 removal detected */
+ smblib_dbg(chg, PR_MISC, "Bingo CC2 Removal detected\n");
+ chg->cc2_detach_wa_active = false;
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ EXIT_SNK_BASED_ON_CC_BIT, 0);
smblib_reg_block_restore(chg, cc2_detach_settings);
- chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
- irq_data.parent_data = chg;
- smblib_handle_usb_typec_change(0, &irq_data);
-
+ mutex_lock(&chg->lock);
+ smblib_usb_typec_change(chg);
+ mutex_unlock(&chg->lock);
return;
rerun:
@@ -3962,6 +4028,9 @@
int rc, i;
u8 stat;
+ if (chg->micro_usb_mode)
+ return;
+
smblib_err(chg, "over-current detected on VCONN\n");
if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
return;
@@ -4056,6 +4125,66 @@
smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
}
+static void smblib_pl_enable_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ pl_enable_work.work);
+
+ smblib_dbg(chg, PR_PARALLEL, "timer expired, enabling parallel\n");
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, false, 0);
+ vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+}
+
+static void smblib_legacy_detection_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ legacy_detection_work);
+ int rc;
+ u8 stat;
+ bool legacy, rp_high;
+
+ mutex_lock(&chg->lock);
+ chg->typec_en_dis_active = 1;
+ smblib_dbg(chg, PR_MISC, "running legacy unknown workaround\n");
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_DISABLE_CMD_BIT,
+ TYPEC_DISABLE_CMD_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable type-c rc=%d\n", rc);
+
+ /* wait for the adapter to turn off VBUS */
+ msleep(500);
+
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_DISABLE_CMD_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable type-c rc=%d\n", rc);
+
+ /* wait for type-c detection to complete */
+ msleep(100);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read typec stat5 rc = %d\n", rc);
+ goto unlock;
+ }
+
+ chg->typec_legacy_valid = true;
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+ legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
+ rp_high = smblib_get_prop_ufp_mode(chg) ==
+ POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+ if (!legacy || !rp_high)
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ false, 0);
+
+unlock:
+ chg->typec_en_dis_active = 0;
+ mutex_unlock(&chg->lock);
+}
+
static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
@@ -4072,13 +4201,19 @@
return rc;
}
+ chg->usb_icl_votable = find_votable("USB_ICL");
+ if (!chg->usb_icl_votable) {
+ rc = -EPROBE_DEFER;
+ return rc;
+ }
+
chg->pl_disable_votable = find_votable("PL_DISABLE");
if (!chg->pl_disable_votable) {
rc = -EPROBE_DEFER;
return rc;
}
vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
- vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+ vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
smblib_dc_suspend_vote_callback,
@@ -4088,14 +4223,6 @@
return rc;
}
- chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
- smblib_usb_icl_vote_callback,
- chg);
- if (IS_ERR(chg->usb_icl_votable)) {
- rc = PTR_ERR(chg->usb_icl_votable);
- return rc;
- }
-
chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN,
smblib_dc_icl_vote_callback,
chg);
@@ -4181,6 +4308,24 @@
return rc;
}
+ chg->usb_irq_enable_votable = create_votable("USB_IRQ_DISABLE",
+ VOTE_SET_ANY,
+ smblib_usb_irq_enable_vote_callback,
+ chg);
+ if (IS_ERR(chg->usb_irq_enable_votable)) {
+ rc = PTR_ERR(chg->usb_irq_enable_votable);
+ return rc;
+ }
+
+ chg->typec_irq_disable_votable = create_votable("TYPEC_IRQ_DISABLE",
+ VOTE_SET_ANY,
+ smblib_typec_irq_disable_vote_callback,
+ chg);
+ if (IS_ERR(chg->typec_irq_disable_votable)) {
+ rc = PTR_ERR(chg->typec_irq_disable_votable);
+ return rc;
+ }
+
return rc;
}
@@ -4206,6 +4351,8 @@
destroy_votable(chg->apsd_disable_votable);
if (chg->hvdcp_hw_inov_dis_votable)
destroy_votable(chg->hvdcp_hw_inov_dis_votable);
+ if (chg->typec_irq_disable_votable)
+ destroy_votable(chg->typec_irq_disable_votable);
}
static void smblib_iio_deinit(struct smb_charger *chg)
@@ -4226,6 +4373,7 @@
{
int rc = 0;
+ mutex_init(&chg->lock);
mutex_init(&chg->write_lock);
mutex_init(&chg->otg_oc_lock);
INIT_WORK(&chg->bms_update_work, bms_update_work);
@@ -4237,6 +4385,8 @@
INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+ INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
+ INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
chg->fake_capacity = -EINVAL;
switch (chg->mode) {
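
A large share of the smb-lib changes above go through the pmic-voter "votable" abstraction (create_votable(), find_votable(), vote(), get_effective_result()). As a mental model only — this is not the pmic-voter implementation — a VOTE_MIN votable keeps one value per client, and its effective result is the minimum over enabled clients, or -EINVAL when nobody has voted:

        #define MAX_CLIENTS     16

        struct sketch_votable {         /* hypothetical, simplified */
                bool    enabled[MAX_CLIENTS];
                int     value[MAX_CLIENTS];
        };

        static int sketch_effective_result(struct sketch_votable *v)
        {
                int i, eff = INT_MAX;
                bool any = false;

                for (i = 0; i < MAX_CLIENTS; i++) {
                        if (!v->enabled[i])
                                continue;
                        any = true;
                        eff = min(eff, v->value[i]);
                }
                return any ? eff : -EINVAL;
        }

That -EINVAL "no votes" convention is why the boost-back handler above now checks usb_icl >= 0 before comparing against USBIN_25MA: with no active voters there is no current limit to compare.
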
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 21ccd3c..b0d84f0 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -32,10 +32,12 @@
#define USER_VOTER "USER_VOTER"
#define PD_VOTER "PD_VOTER"
#define DCP_VOTER "DCP_VOTER"
+#define QC_VOTER "QC_VOTER"
#define PL_USBIN_USBIN_VOTER "PL_USBIN_USBIN_VOTER"
#define USB_PSY_VOTER "USB_PSY_VOTER"
#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define PL_QNOVO_VOTER "PL_QNOVO_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
#define USBIN_V_VOTER "USBIN_V_VOTER"
#define CHG_STATE_VOTER "CHG_STATE_VOTER"
@@ -47,17 +49,19 @@
#define PD_DISALLOWED_INDIRECT_VOTER "PD_DISALLOWED_INDIRECT_VOTER"
#define PD_HARD_RESET_VOTER "PD_HARD_RESET_VOTER"
#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER"
-#define LEGACY_CABLE_VOTER "LEGACY_CABLE_VOTER"
#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER"
#define BOOST_BACK_VOTER "BOOST_BACK_VOTER"
+#define USBIN_USBIN_BOOST_VOTER "USBIN_USBIN_BOOST_VOTER"
#define HVDCP_INDIRECT_VOTER "HVDCP_INDIRECT_VOTER"
#define MICRO_USB_VOTER "MICRO_USB_VOTER"
#define DEBUG_BOARD_VOTER "DEBUG_BOARD_VOTER"
#define PD_SUSPEND_SUPPORTED_VOTER "PD_SUSPEND_SUPPORTED_VOTER"
-#define PL_DELAY_HVDCP_VOTER "PL_DELAY_HVDCP_VOTER"
+#define PL_DELAY_VOTER "PL_DELAY_VOTER"
#define CTM_VOTER "CTM_VOTER"
#define SW_QC3_VOTER "SW_QC3_VOTER"
#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
+#define LEGACY_UNKNOWN_VOTER "LEGACY_UNKNOWN_VOTER"
+#define CC2_WA_VOTER "CC2_WA_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -68,18 +72,12 @@
NUM_MODES,
};
-enum cc2_sink_type {
- CC2_SINK_NONE = 0,
- CC2_SINK_STD,
- CC2_SINK_MEDIUM_HIGH,
- CC2_SINK_WA_DONE,
-};
-
enum {
QC_CHARGER_DETECTION_WA_BIT = BIT(0),
BOOST_BACK_WA = BIT(1),
TYPEC_CC2_REMOVAL_WA_BIT = BIT(2),
QC_AUTH_INTERRUPT_WA_BIT = BIT(3),
+ OTG_WA = BIT(4),
};
enum smb_irq_index {
@@ -232,6 +230,7 @@
int smb_version;
/* locks */
+ struct mutex lock;
struct mutex write_lock;
struct mutex ps_change_lock;
struct mutex otg_oc_lock;
@@ -271,6 +270,8 @@
struct votable *hvdcp_enable_votable;
struct votable *apsd_disable_votable;
struct votable *hvdcp_hw_inov_dis_votable;
+ struct votable *usb_irq_enable_votable;
+ struct votable *typec_irq_disable_votable;
/* work */
struct work_struct bms_update_work;
@@ -283,6 +284,8 @@
struct work_struct vconn_oc_work;
struct delayed_work otg_ss_done_work;
struct delayed_work icl_change_work;
+ struct delayed_work pl_enable_work;
+ struct work_struct legacy_detection_work;
/* cached status */
int voltage_min_uv;
@@ -305,17 +308,22 @@
int otg_attempts;
int vconn_attempts;
int default_icl_ua;
+ int otg_cl_ua;
+ bool uusb_apsd_rerun_done;
+ bool pd_hard_reset;
+ bool typec_present;
+ u8 typec_status[5];
+ bool typec_legacy_valid;
/* workaround flag */
u32 wa_flags;
- enum cc2_sink_type cc2_sink_detach_flag;
+ bool cc2_detach_wa_active;
+ bool typec_en_dis_active;
int boost_current_ua;
+ int temp_speed_reading_count;
/* extcon for VBUS / ID notification to USB for uUSB */
struct extcon_dev *extcon;
- bool usb_ever_removed;
-
- int icl_reduction_ua;
/* qnovo */
int qnovo_fcc_ua;
@@ -453,6 +461,8 @@
union power_supply_propval *val);
int smblib_get_prop_die_health(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_set_prop_pd_current_max(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
@@ -473,14 +483,17 @@
union power_supply_propval *val);
int smblib_set_prop_ship_mode(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+ const union power_supply_propval *val);
void smblib_suspend_on_debug_battery(struct smb_charger *chg);
int smblib_rerun_apsd_if_required(struct smb_charger *chg);
int smblib_get_prop_fcc_delta(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_icl_override(struct smb_charger *chg, bool override);
-int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
int smblib_dp_dm(struct smb_charger *chg, int val);
int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
+int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 54b6b38..3f260a4 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -628,6 +628,7 @@
#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
#define USBIN_OV_CH_LOAD_OPTION_BIT BIT(7)
+#define ICL_OVERRIDE_AFTER_APSD_BIT BIT(4)
#define USBIN_ICL_OPTIONS_REG (USBIN_BASE + 0x66)
#define CFG_USB3P0_SEL_BIT BIT(2)
@@ -918,6 +919,7 @@
#define MISC_CFG_REG (MISC_BASE + 0x52)
#define GSM_PA_ON_ADJ_SEL_BIT BIT(0)
+#define STAT_PARALLEL_1400MA_EN_CFG_BIT BIT(3)
#define TCC_DEBOUNCE_20MS_BIT BIT(5)
#define SNARL_BARK_BITE_WD_CFG_REG (MISC_BASE + 0x53)
@@ -1018,7 +1020,19 @@
#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG (MISC_BASE + 0xA0)
#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG (MISC_BASE + 0xA1)
+#define TM_IO_DTEST4_SEL (MISC_BASE + 0xE9)
+
/* CHGR FREQ Peripheral registers */
#define FREQ_CLK_DIV_REG (CHGR_FREQ_BASE + 0x50)
+/* SMB1355 specific registers */
+#define SMB1355_TEMP_COMP_STATUS_REG (MISC_BASE + 0x07)
+#define SKIN_TEMP_RST_HOT_BIT BIT(6)
+#define SKIN_TEMP_UB_HOT_BIT BIT(5)
+#define SKIN_TEMP_LB_HOT_BIT BIT(4)
+#define DIE_TEMP_TSD_HOT_BIT BIT(3)
+#define DIE_TEMP_RST_HOT_BIT BIT(2)
+#define DIE_TEMP_UB_HOT_BIT BIT(1)
+#define DIE_TEMP_LB_HOT_BIT BIT(0)
+
#endif /* __SMB2_CHARGER_REG_H */
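
The single-bit fields added above (ICL_OVERRIDE_AFTER_APSD_BIT, STAT_PARALLEL_1400MA_EN_CFG_BIT, the SMB1355 temperature status bits) are only ever touched through masked read-modify-write helpers such as smblib_masked_write(), never by whole-register stores. A generic sketch of that helper shape (hypothetical MMIO device; the real helpers go through the PMIC bus):

        /* write only the bits in 'mask', preserving the rest of the register */
        static void masked_write(void __iomem *base, u32 reg, u8 mask, u8 val)
        {
                u8 tmp = readb(base + reg);

                tmp &= ~mask;
                tmp |= val & mask;
                writeb(tmp, base + reg);
        }

For example, masked_write(base, USBIN_LOAD_CFG_REG, ICL_OVERRIDE_AFTER_APSD_BIT, ICL_OVERRIDE_AFTER_APSD_BIT) would set bit 4 without disturbing USBIN_OV_CH_LOAD_OPTION_BIT.
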
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index 0d1f2a6..b92a482 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1655,7 +1655,7 @@
switch (prop) {
case POWER_SUPPLY_PROP_CHARGING_ENABLED:
- val->intval = !chip->usb_suspended_status;
+ val->intval = !chip->parallel_charger_suspended;
break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
if (!chip->parallel_charger_suspended)
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 1e89a721..a29871b 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -28,7 +28,7 @@
#include "smb-reg.h"
#include "smb-lib.h"
#include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
#define SMB138X_DEFAULT_FCC_UA 1000000
#define SMB138X_DEFAULT_ICL_UA 1500000
@@ -45,6 +45,7 @@
#define STACKED_DIODE_EN_BIT BIT(2)
#define TDIE_AVG_COUNT 10
+#define MAX_SPEED_READING_TIMES 5
enum {
OOB_COMP_WA_BIT = BIT(0),
@@ -95,6 +96,7 @@
int dc_icl_ua;
int chg_temp_max_mdegc;
int connector_temp_max_mdegc;
+ int pl_mode;
};
struct smb138x {
@@ -102,6 +104,8 @@
struct smb_dt_props dt;
struct power_supply *parallel_psy;
u32 wa_flags;
+ struct pmic_revid_data *pmic_rev_id;
+ char *name;
};
static int __debug_mask;
@@ -126,8 +130,16 @@
union power_supply_propval pval;
int rc = 0, avg = 0, i;
struct smb_charger *chg = &chip->chg;
+ int die_avg_count;
- for (i = 0; i < TDIE_AVG_COUNT; i++) {
+ if (chg->temp_speed_reading_count < MAX_SPEED_READING_TIMES) {
+ chg->temp_speed_reading_count++;
+ die_avg_count = 1;
+ } else {
+ die_avg_count = TDIE_AVG_COUNT;
+ }
+
+ for (i = 0; i < die_avg_count; i++) {
pval.intval = 0;
rc = smblib_get_prop_charger_temp(chg, &pval);
if (rc < 0) {
@@ -137,7 +149,7 @@
}
avg += pval.intval;
}
- val->intval = avg / TDIE_AVG_COUNT;
+ val->intval = avg / die_avg_count;
return rc;
}
@@ -152,6 +164,19 @@
return -EINVAL;
}
+ rc = of_property_read_u32(node,
+ "qcom,parallel-mode", &chip->dt.pl_mode);
+ if (rc < 0)
+ chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+
+ /* check that smb1355 is configured to run in mid-mid mode */
+ if (chip->pmic_rev_id->pmic_subtype == SMB1355_SUBTYPE
+ && chip->dt.pl_mode != POWER_SUPPLY_PL_USBMID_USBMID) {
+ pr_err("Smb1355 can only run in MID-MID mode, saw = %d mode\n",
+ chip->dt.pl_mode);
+ return -EINVAL;
+ }
+
chip->dt.suspend_input = of_property_read_bool(node,
"qcom,suspend-input");
@@ -464,6 +489,30 @@
* PARALLEL PSY REGISTRATION *
*****************************/
+static int smb1355_get_prop_connector_health(struct smb138x *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ u8 temp;
+ int rc;
+
+ rc = smblib_read(chg, SMB1355_TEMP_COMP_STATUS_REG, &temp);
+ if (rc < 0) {
+ pr_err("Couldn't read comp stat reg rc = %d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (temp & SKIN_TEMP_RST_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (temp & SKIN_TEMP_UB_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (temp & SKIN_TEMP_LB_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+}
+
static int smb138x_get_prop_connector_health(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -520,15 +569,33 @@
POWER_SUPPLY_PROP_CHARGING_ENABLED,
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CHARGER_TEMP,
- POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static enum power_supply_property smb1355_parallel_props[] = {
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_PIN_ENABLED,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
+ POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -559,6 +626,13 @@
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
rc = smblib_get_usb_suspend(chg, &val->intval);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+ rc = smblib_get_prop_input_current_limited(chg, val);
+ else
+ val->intval = 0;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
break;
@@ -566,28 +640,46 @@
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- rc = smblib_get_prop_slave_current_now(chg, val);
- break;
- case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smb138x_get_prop_charger_temp(chip, val);
- break;
- case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
- rc = smblib_get_prop_charger_temp_max(chg, val);
- break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = "smb138x";
+ val->strval = chip->name;
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
- val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+ val->intval = chip->dt.pl_mode;
break;
case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
- val->intval = smb138x_get_prop_connector_health(chip);
+ if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ val->intval = smb138x_get_prop_connector_health(chip);
+ else
+ val->intval = smb1355_get_prop_connector_health(chip);
break;
case POWER_SUPPLY_PROP_SET_SHIP_MODE:
/* Not in ship mode as long as device is active */
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ rc = smb138x_get_prop_charger_temp(chip, val);
+ else
+ rc = smblib_get_prop_charger_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smblib_get_prop_charger_temp_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ rc = smblib_get_prop_slave_current_now(chg, val);
+ else
+ rc = -ENODATA;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if ((chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
+ && ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)))
+ rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+ &val->intval);
+ else
+ rc = -ENODATA;
+ break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
@@ -638,6 +730,12 @@
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+ rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+ val->intval);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
break;
@@ -665,7 +763,7 @@
return 0;
}
-static const struct power_supply_desc parallel_psy_desc = {
+static struct power_supply_desc parallel_psy_desc = {
.name = "parallel",
.type = POWER_SUPPLY_TYPE_PARALLEL,
.properties = smb138x_parallel_props,
@@ -693,6 +791,28 @@
return 0;
}
+static int smb1355_init_parallel_psy(struct smb138x *chip)
+{
+ struct power_supply_config parallel_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ parallel_cfg.drv_data = chip;
+ parallel_cfg.of_node = chg->dev->of_node;
+
+ /* change to smb1355's property list */
+ parallel_psy_desc.properties = smb1355_parallel_props;
+ parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props);
+ chip->parallel_psy = devm_power_supply_register(chg->dev,
+ &parallel_psy_desc,
+ &parallel_cfg);
+ if (IS_ERR(chip->parallel_psy)) {
+ pr_err("Couldn't register parallel power supply\n");
+ return PTR_ERR(chip->parallel_psy);
+ }
+
+ return 0;
+}
+
/******************************
* VBUS REGULATOR REGISTRATION *
******************************/
@@ -1012,7 +1132,6 @@
static int smb138x_setup_wa_flags(struct smb138x *chip)
{
- struct pmic_revid_data *pmic_rev_id;
struct device_node *revid_dev_node;
revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
@@ -1022,8 +1141,8 @@
return -EINVAL;
}
- pmic_rev_id = get_revid_data(revid_dev_node);
- if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ chip->pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
/*
* the revid peripheral must be registered, any failure
* here only indicates that the rev-id module has not
@@ -1032,14 +1151,14 @@
return -EPROBE_DEFER;
}
- switch (pmic_rev_id->pmic_subtype) {
+ switch (chip->pmic_rev_id->pmic_subtype) {
case SMB1381_SUBTYPE:
- if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+ if (chip->pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
chip->wa_flags |= OOB_COMP_WA_BIT;
break;
default:
pr_err("PMIC subtype %d not supported\n",
- pmic_rev_id->pmic_subtype);
+ chip->pmic_rev_id->pmic_subtype);
return -EINVAL;
}
@@ -1337,6 +1456,7 @@
chg->param = v1_params;
+ chip->name = "smb1381";
rc = smblib_init(chg);
if (rc < 0) {
pr_err("Couldn't initialize smblib rc=%d\n", rc);
@@ -1397,7 +1517,7 @@
return rc;
}
-static int smb138x_slave_probe(struct smb138x *chip)
+static int smb1355_slave_probe(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
int rc = 0;
@@ -1410,6 +1530,55 @@
goto cleanup;
}
+ rc = smb138x_parse_dt(chip);
+ if (rc < 0) {
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb138x_init_slave_hw(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize hardware rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb1355_init_parallel_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb138x_determine_initial_slave_status(chip);
+ if (rc < 0) {
+ pr_err("Couldn't determine initial status rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ rc = smb138x_request_interrupts(chip);
+ if (rc < 0) {
+ pr_err("Couldn't request interrupts rc=%d\n", rc);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ smblib_deinit(chg);
+ return rc;
+}
+
+static int smb1381_slave_probe(struct smb138x *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ chg->param = v1_params;
+
+ rc = smblib_init(chg);
+ if (rc < 0) {
+ pr_err("Couldn't initialize smblib rc=%d\n", rc);
+ goto cleanup;
+ }
chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
if (IS_ERR(chg->iio.temp_max_chan)) {
rc = PTR_ERR(chg->iio.temp_max_chan);
@@ -1449,6 +1618,16 @@
goto cleanup;
}
+ if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+ rc = smb138x_init_vbus_regulator(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize vbus regulator rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = smb138x_init_parallel_psy(chip);
if (rc < 0) {
pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
@@ -1467,23 +1646,71 @@
goto cleanup;
}
- return rc;
+ return 0;
cleanup:
smblib_deinit(chg);
- if (chip->parallel_psy)
- power_supply_unregister(chip->parallel_psy);
return rc;
}
+static int slave_probe(struct smb138x *chip)
+{
+ struct device_node *revid_dev_node;
+ int rc = 0;
+
+ revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property\n");
+ return -EINVAL;
+ }
+
+ chip->pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+ /*
+ * the revid peripheral must be registered, any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (chip->pmic_rev_id->pmic_subtype) {
+ case SMB1355_SUBTYPE:
+ chip->name = "smb1355";
+ rc = smb1355_slave_probe(chip);
+ break;
+ case SMB1381_SUBTYPE:
+ chip->name = "smb1381";
+ rc = smb1381_slave_probe(chip);
+ break;
+ default:
+ pr_err("Unsupported pmic subtype = 0x%02x\n",
+ chip->pmic_rev_id->pmic_subtype);
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ if (rc != -EPROBE_DEFER)
+ pr_err("Couldn't probe SMB138X rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static const struct of_device_id match_table[] = {
{
- .compatible = "qcom,smb138x-charger",
- .data = (void *) PARALLEL_MASTER
+ .compatible = "qcom,smb138x-charger",
+ .data = (void *) PARALLEL_MASTER,
},
{
- .compatible = "qcom,smb138x-parallel-slave",
- .data = (void *) PARALLEL_SLAVE
+ .compatible = "qcom,smb138x-parallel-slave",
+ .data = (void *) PARALLEL_SLAVE,
+ },
+ {
+ .compatible = "qcom,smb1355-parallel-slave",
+ .data = (void *) PARALLEL_SLAVE,
},
{ },
};
@@ -1530,7 +1757,7 @@
rc = smb138x_master_probe(chip);
break;
case PARALLEL_SLAVE:
- rc = smb138x_slave_probe(chip);
+ rc = slave_probe(chip);
break;
default:
pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
@@ -1544,7 +1771,8 @@
goto cleanup;
}
- pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
+ pr_info("%s probed successfully mode=%d pl_mode = %d\n",
+ chip->name, chip->chg.mode, chip->dt.pl_mode);
return rc;
cleanup:
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1..744d561 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@
return 0;
}
+static int rockchip_pwm_enable(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ bool enable,
+ enum pwm_polarity polarity)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ int ret;
+
+ if (enable) {
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+ }
+
+ pc->data->set_enable(chip, pwm, enable, polarity);
+
+ if (!enable)
+ clk_disable(pc->clk);
+
+ return 0;
+}
+
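
rockchip_pwm_enable() above exists to keep the clock reference count in step with the PWM state: the clock is taken before the enable bit is set, held for as long as the output runs, and dropped only after the disable write (which itself still needs the clock). The same idiom in isolation (hypothetical device, sketch only):

        static int foo_pwm_set_enabled(struct foo_pwm *pc, bool enable)
        {
                int ret;

                if (enable) {
                        ret = clk_enable(pc->clk);      /* ref held while PWM runs */
                        if (ret)
                                return ret;
                }

                /* register access is safe: the clock is running in both branches */
                writel(enable ? FOO_PWM_EN : 0, pc->base + FOO_PWM_CTRL);

                if (!enable)
                        clk_disable(pc->clk);   /* drop the ref taken at enable time */

                return 0;
        }
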
static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
@@ -207,22 +229,26 @@
return ret;
if (state->polarity != curstate.polarity && enabled) {
- pc->data->set_enable(chip, pwm, false, state->polarity);
+ ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
+ if (ret)
+ goto out;
enabled = false;
}
ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (ret) {
if (enabled != curstate.enabled)
- pc->data->set_enable(chip, pwm, !enabled,
- state->polarity);
-
+ rockchip_pwm_enable(chip, pwm, !enabled,
+ state->polarity);
goto out;
}
- if (state->enabled != enabled)
- pc->data->set_enable(chip, pwm, state->enabled,
- state->polarity);
+ if (state->enabled != enabled) {
+ ret = rockchip_pwm_enable(chip, pwm, state->enabled,
+ state->polarity);
+ if (ret)
+ goto out;
+ }
/*
* Update the state with the real hardware, which can differ a bit
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 07a0aef..c45fb0d 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -177,6 +177,7 @@
#define CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN BIT(27)
#define CPR4_REG_MISC 0x700
+#define CPR4_MISC_RESET_STEP_QUOT_LOOP_EN BIT(2)
#define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_MASK GENMASK(23, 20)
#define CPR4_MISC_MARGIN_TABLE_ROW_SELECT_SHIFT 20
#define CPR4_MISC_TEMP_SENSOR_ID_START_MASK GENMASK(27, 24)
@@ -723,6 +724,11 @@
int thread_id = 0;
u64 temp;
+ if (ctrl->reset_step_quot_loop_en)
+ cpr3_masked_write(ctrl, CPR4_REG_MISC,
+ CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
+ CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
+
if (ctrl->supports_hw_closed_loop) {
if (ctrl->saw_use_unit_mV)
pmic_step_size = ctrl->step_volt / 1000;
@@ -1355,6 +1361,11 @@
}
}
+ if (ctrl->reset_step_quot_loop_en)
+ cpr3_masked_write(ctrl, CPR4_REG_MISC,
+ CPR4_MISC_RESET_STEP_QUOT_LOOP_EN,
+ CPR4_MISC_RESET_STEP_QUOT_LOOP_EN);
+
if (ctrl->saw_use_unit_mV)
pmic_step_size = ctrl->step_volt / 1000;
cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
@@ -1382,6 +1393,11 @@
(ctrl->use_dynamic_step_quot
? CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN : 0));
+ if (ctrl->thread_count > 1)
+ cpr3_masked_write(ctrl, CPR4_REG_CPR_TIMER_CLAMP,
+ CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN,
+ CPR4_CPR_TIMER_CLAMP_THREAD_AGGREGATION_EN);
+
if (ctrl->voltage_settling_time) {
/*
* Configure the settling timer used to account for
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 570ddfc..8535020 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -756,6 +756,12 @@
* @panic_notifier: Notifier block registered to global panic notifier list.
* @support_ldo300_vreg: Boolean value which indicates that this CPR controller
* manages an underlying LDO regulator of type LDO300.
+ * @reset_step_quot_loop_en: Boolean value which indicates that this CPR
+ * controller should be configured to reset step_quot on
+ * each loop_en = 0 transition. This configuration allows
+ * the CPR controller to first use the default step_quot
+ * and then later switch to the run-time calibrated
+ * step_quot.
*
* This structure contains both configuration and runtime state data. The
* elements cpr_allowed_sw, use_hw_closed_loop, aggr_corner, cpr_enabled,
@@ -866,6 +872,7 @@
struct cpr3_panic_regs_info *panic_regs_info;
struct notifier_block panic_notifier;
bool support_ldo300_vreg;
+ bool reset_step_quot_loop_en;
};
/* Used for rounding voltages to the closest physically available set point. */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 648d396..3035155 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -1224,6 +1224,14 @@
}
/*
+ * Resetting step_quot to its default on each loop_en = 0 transition
+ * is optional.
+ */
+ ctrl->reset_step_quot_loop_en
+ = of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-reset-step-quot-loop-en");
+
+ /*
* Regulator device handles are not necessary for CPRh controllers
* since communication with the regulators is completely managed
* in hardware.
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index a93e7d8..b84d9f0 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -983,6 +983,9 @@
return -EINVAL;
}
+ vreg->fuse_corner_count = fuse_corners;
+ vreg->platform_fuses = fuse;
+
fuse->ro_sel = devm_kcalloc(vreg->thread->ctrl->dev, fuse_corners,
sizeof(*fuse->ro_sel), GFP_KERNEL);
fuse->init_voltage = devm_kcalloc(vreg->thread->ctrl->dev, fuse_corners,
@@ -1037,8 +1040,6 @@
vreg->speed_bin_fuse = fuse->speed_bin;
vreg->cpr_rev_fuse = fuse->cpr_fusing_rev;
- vreg->fuse_corner_count = fuse_corners;
- vreg->platform_fuses = fuse;
return 0;
}
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 3853ba9..19e03d0 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -18,6 +18,7 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -59,6 +60,7 @@
struct platform_device *pdev;
struct rtc_device *rtc_dev;
void __iomem *rtc_base; /* NULL if not initialized. */
+ struct clk *clk;
int tegra_rtc_irq; /* alarm and periodic irq */
spinlock_t tegra_rtc_lock;
};
@@ -326,6 +328,14 @@
if (info->tegra_rtc_irq <= 0)
return -EBUSY;
+ info->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk))
+ return PTR_ERR(info->clk);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0)
+ return ret;
+
/* set context info. */
info->pdev = pdev;
spin_lock_init(&info->tegra_rtc_lock);
@@ -346,7 +356,7 @@
ret = PTR_ERR(info->rtc_dev);
dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
ret);
- return ret;
+ goto disable_clk;
}
ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
@@ -356,12 +366,25 @@
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
ret);
- return ret;
+ goto disable_clk;
}
dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
return 0;
+
+disable_clk:
+ clk_disable_unprepare(info->clk);
+ return ret;
+}
+
+static int tegra_rtc_remove(struct platform_device *pdev)
+{
+ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(info->clk);
+
+ return 0;
}
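
The probe rework above is the standard goto-unwind pattern: once clk_prepare_enable() has succeeded, every later failure path must branch to a label that releases the clock, and the new tegra_rtc_remove() releases it on orderly removal. Reduced to its skeleton (hypothetical driver names, sketch only):

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo *info;
                int ret;

                info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                info->clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(info->clk))
                        return PTR_ERR(info->clk);      /* nothing to unwind yet */

                ret = clk_prepare_enable(info->clk);
                if (ret < 0)
                        return ret;

                ret = foo_register_devices(info);       /* hypothetical */
                if (ret)
                        goto disable_clk;               /* unwind in reverse order */

                platform_set_drvdata(pdev, info);
                return 0;

        disable_clk:
                clk_disable_unprepare(info->clk);
                return ret;
        }
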
#ifdef CONFIG_PM_SLEEP
@@ -413,6 +436,7 @@
MODULE_ALIAS("platform:tegra_rtc");
static struct platform_driver tegra_rtc_driver = {
+ .remove = tegra_rtc_remove,
.shutdown = tegra_rtc_shutdown,
.driver = {
.name = "tegra_rtc",
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bb2068..d314579 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@
#define ALUA_POLICY_SWITCH_ALL 1
static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@
kref_put(&pg->kref, release_port_group);
}
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
struct alua_queue_data *qdata, bool force)
{
@@ -870,8 +876,8 @@
unsigned long flags;
struct workqueue_struct *alua_wq = kaluad_wq;
- if (!pg)
- return;
+ if (!pg || scsi_device_get(sdev))
+ return false;
spin_lock_irqsave(&pg->lock, flags);
if (qdata) {
@@ -884,14 +890,12 @@
pg->flags |= ALUA_PG_RUN_RTPG;
kref_get(&pg->kref);
pg->rtpg_sdev = sdev;
- scsi_device_get(sdev);
start_queue = 1;
} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
pg->flags |= ALUA_PG_RUN_RTPG;
/* Do not queue if the worker is already running */
if (!(pg->flags & ALUA_PG_RUNNING)) {
kref_get(&pg->kref);
- sdev = NULL;
start_queue = 1;
}
}
@@ -900,13 +904,17 @@
alua_wq = kaluad_sync_wq;
spin_unlock_irqrestore(&pg->lock, flags);
- if (start_queue &&
- !queue_delayed_work(alua_wq, &pg->rtpg_work,
- msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
- if (sdev)
- scsi_device_put(sdev);
- kref_put(&pg->kref, release_port_group);
+ if (start_queue) {
+ if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+ sdev = NULL;
+ else
+ kref_put(&pg->kref, release_port_group);
}
+ if (sdev)
+ scsi_device_put(sdev);
+
+ return true;
}
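
The rewritten tail of alua_rtpg_queue() is a reference-ownership handoff: scsi_device_get() is taken up front, "sdev = NULL" marks the point where the queued work inherits that reference (alua_rtpg_work() puts it when done), and whatever is still owned locally at the end is released before returning. The same handoff in a device-neutral sketch (hypothetical my_obj/my_obj_release names):

        static bool queue_job_with_ref(struct my_obj *obj,
                                       struct workqueue_struct *wq)
        {
                if (!kref_get_unless_zero(&obj->ref))
                        return false;   /* object is already being torn down */

                if (!queue_delayed_work(wq, &obj->dwork, HZ)) {
                        /* work was already pending; its owner keeps the old ref */
                        kref_put(&obj->ref, my_obj_release);
                        return false;
                }

                /*
                 * On success the reference travels with the work item;
                 * the work function must kref_put() when it finishes.
                 */
                return true;
        }
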
/*
@@ -1007,11 +1015,13 @@
mutex_unlock(&h->init_mutex);
goto out;
}
- fn = NULL;
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- alua_rtpg_queue(pg, sdev, qdata, true);
+ if (alua_rtpg_queue(pg, sdev, qdata, true))
+ fn = NULL;
+ else
+ err = SCSI_DH_DEV_OFFLINED;
kref_put(&pg->kref, release_port_group);
out:
if (fn)
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012..87f5e694 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c..ad33238 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2153,8 +2153,6 @@
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
- BUG_ON(atomic_read(&vha->vref_count));
-
qla2x00_free_fcports(vha);
mutex_lock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e4..8e63a7b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3742,6 +3742,7 @@
struct qla8044_reset_template reset_tmplt;
struct qla_tgt_counters tgt_counters;
uint16_t bbcr;
+ wait_queue_head_t vref_waitq;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -3780,6 +3781,7 @@
mb(); \
if (__vha->flags.delete_progress) { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
__bail = 1; \
} else { \
__bail = 0; \
@@ -3788,6 +3790,7 @@
#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
} while (0)
/*
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296..8f12f6b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4356,6 +4356,7 @@
}
}
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52..3dfb54a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+ 10*HZ);
+
spin_lock_irqsave(&ha->vport_slock, flags);
- while (atomic_read(&vha->vref_count)) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- msleep(500);
-
- spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count)) {
+ ql_dbg(ql_dbg_vport, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ vha->vref_count = (atomic_t)ATOMIC_INIT(0);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
i++;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bea819e..734e592 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -968,8 +968,13 @@
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-	return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+	if (IS_P3P_TYPE(ha))
+		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+	else
+		return ((RD_REG_DWORD(&reg->host_status)) ==
+			ISP_REG_DISCONNECT);
}
/**************************************************************************
@@ -4045,6 +4050,7 @@
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
+ init_waitqueue_head(&vha->vref_waitq);
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
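
The qla2xxx changes replace a msleep() polling loop with the standard pattern of pairing every reference drop with a wake_up() so the deleter can sleep until the count reaches zero. A condensed sketch of that pattern with hypothetical my_obj_* names:

    #include <linux/atomic.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>
    #include <linux/wait.h>

    struct my_obj {
        atomic_t ref_count;
        wait_queue_head_t ref_waitq;    /* init_waitqueue_head() at setup */
    };

    static void my_obj_put(struct my_obj *obj)
    {
        atomic_dec(&obj->ref_count);
        /* wake the deleter in case this was the last reference */
        wake_up(&obj->ref_waitq);
    }

    static void my_obj_delete(struct my_obj *obj)
    {
        /* sleep until every reference is dropped, give up after 10s */
        if (!wait_event_timeout(obj->ref_waitq,
                                !atomic_read(&obj->ref_count), 10 * HZ))
            pr_warn("ref_count=%d still held at timeout\n",
                    atomic_read(&obj->ref_count));
        /* ... proceed with teardown ... */
    }
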
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b7889c7..c2ac982 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1982,6 +1982,22 @@
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2047,7 +2063,7 @@
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2133,7 +2149,7 @@
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2780,7 +2796,8 @@
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else
- rw_max = BLK_DEF_MAX_SECTORS;
+ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+ (sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
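
To see why the block size matters in sd_addressable_capacity(): with 4096-byte logical blocks, last_sector = (lba + 1) << (ilog2(4096) - 9) = (lba + 1) << 3, so a device reporting lba = 0x30000000 already exceeds a 32-bit sector_t even though lba itself fits comfortably in 32 bits. A standalone sketch of the same arithmetic, with sizeof(sector_t) passed explicitly for illustration:

    #include <linux/kernel.h>
    #include <linux/log2.h>

    static bool addressable(u64 lba, unsigned int sector_size,
                            size_t sizeof_sector)
    {
        u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);

        return !(sizeof_sector == 4 && last_sector > U32_MAX);
    }

    /* addressable(0x30000000, 4096, 4) == false: 0x180000008 sectors */
    /* addressable(0x30000000, 4096, 8) == true                       */
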
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c05cf3b..44c466b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1002,6 +1002,8 @@
result = get_user(val, ip);
if (result)
return result;
+ if (val > SG_MAX_CDB_SIZE)
+ return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index bed2bbd..e635973 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -833,6 +833,7 @@
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
@@ -859,10 +860,11 @@
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
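
The added sr.c checks treat the MODE SENSE reply as untrusted input: the reported data length must fit in the buffer that was requested, and the header plus block descriptor lengths must nest inside that data length before anything walks past them. The predicate, reduced to plain integers as a sketch:

    #include <linux/types.h>

    /* Sketch: reject a reply whose self-described lengths don't nest. */
    static bool mode_lengths_sane(unsigned int length, unsigned int header_len,
                                  unsigned int block_desc_len,
                                  unsigned int buf_len)
    {
        return length <= buf_len && header_len + block_desc_len <= length;
    }
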
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d326b80..ee23fc7 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -32,6 +32,7 @@
#include "ufs_quirks.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
+#include <linux/clk/qcom.h>
#define MAX_PROP_SIZE 32
#define VDDP_REF_CLK_MIN_UV 1200000
@@ -356,6 +357,28 @@
return err;
}
+static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+
+ /*
+	 * Configure the core and peripheral memory state of the UFS
+	 * clocks for when they are turned off.
+	 * This configuration is required to retain the ICE crypto
+	 * configuration (including keys) when core_clk_ice is turned
+	 * off, while powering down the non-ICE RAMs of the host
+	 * controller.
+ */
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk_ice"))
+ clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
+ clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
+ }
+}
+
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
@@ -364,6 +387,7 @@
switch (status) {
case PRE_CHANGE:
+ ufs_qcom_force_mem_config(hba);
ufs_qcom_power_up_sequence(hba);
/*
* The PHY PLL output is the source of tx/rx lane symbol
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
index b22a4c4..3210d60 100644
--- a/drivers/scsi/ufs/ufs_quirks.c
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,20 @@
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hD8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8aM1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "h08aM1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8GL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8HL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
END_FIX
};
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index f7182ed..0fa7d93 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -130,6 +130,15 @@
*/
#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 7)
+/*
+ * Some UFS devices may stop responding after switching from HS-G1 to HS-G3.
+ * These devices are found to work fine with a two-step switch:
+ * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
+ * devices applies this two-step gear switch workaround.
+ */
+#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH (1 << 8)
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
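
Operationally the quirk turns one power mode change into two, as the ufshcd.c hunk later in this patch implements. A sketch of the shape, where set_gear() is a hypothetical stand-in for ufshcd_change_power_mode() with both gear_tx and gear_rx updated, and the UFS_HS_G* constants come from unipro.h:

    static int set_gear(struct ufs_hba *hba, u32 gear);   /* hypothetical helper */

    static int switch_gear_g1_to_g3(struct ufs_hba *hba, bool two_step_quirk)
    {
        int ret;

        if (two_step_quirk) {
            ret = set_gear(hba, UFS_HS_G2);    /* intermediate hop */
            if (ret)
                return ret;
        }

        return set_gear(hba, UFS_HS_G3);
    }
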
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8772bcb..602c359 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -276,6 +276,7 @@
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
@@ -524,7 +525,7 @@
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
- if (!val)
+ if (!val || !*val)
return;
if (*val < 0x20 || *val > 0x7e)
@@ -1404,7 +1405,7 @@
* state to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state == REQ_CLKS_ON)) {
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
@@ -2386,7 +2387,7 @@
*
* Returns 0 in case of success, non-zero value in case of failure
*/
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
@@ -2400,8 +2401,13 @@
return sg_segments;
if (sg_segments) {
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
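
With UFSHCD_QUIRK_PRDT_BYTE_GRAN set, the controller expects the PRDT length (and, in a later hunk, the UPIU/PRDT offsets) in bytes rather than in entries/dwords. A small sketch of the two encodings, using a stand-in for struct ufshcd_sg_entry (four 32-bit words, 16 bytes):

    #include <linux/types.h>

    struct my_sg_entry {        /* stand-in for struct ufshcd_sg_entry */
        __le32 base_addr;
        __le32 upper_addr;
        __le32 reserved;
        __le32 size;
    };

    static u16 prd_table_length(u16 sg_segments, bool byte_gran)
    {
        /* byte granularity: length in bytes; default: entry count */
        if (byte_gran)
            return sg_segments * sizeof(struct my_sg_entry);
        return sg_segments;
    }

    /* prd_table_length(3, false) == 3; prd_table_length(3, true) == 48 */
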
@@ -2823,6 +2829,7 @@
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
+ case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
@@ -2913,7 +2920,7 @@
goto out;
}
- err = ufshcd_map_sg(lrbp);
+ err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
@@ -3670,7 +3677,7 @@
goto out;
}
- buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
if (!buff_ascii) {
dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
__func__, ascii_len);
@@ -3855,12 +3862,21 @@
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- utrdlp[i].response_upiu_offset =
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
+ utrdlp[i].prd_table_offset =
cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
+ utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
@@ -4308,15 +4324,25 @@
* mode hence full reinit is required to move link to HS speeds.
*/
if (ret || hba->full_init_linereset) {
+ int err;
+
hba->full_init_linereset = false;
ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
__func__, ret);
/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+	 * If link recovery fails then return the error code (-ENOLINK)
+	 * returned by ufshcd_link_recovery().
+	 * If link recovery succeeds then return -EAGAIN so that the
+	 * caller retries hibern8 enter.
*/
- ret = ufshcd_link_recovery(hba);
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else {
dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
ktime_to_us(ktime_get()));
@@ -4333,8 +4359,8 @@
ret = __ufshcd_uic_hibern8_enter(hba);
if (!ret)
goto out;
- /* Unable to recover the link, so no point proceeding */
- if (ret == -ENOLINK)
+ else if (ret != -EAGAIN)
+ /* Unable to recover the link, so no point proceeding */
BUG();
}
out:
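
The enter path now distinguishes "link recovered, safe to retry" (-EAGAIN) from an unrecoverable link (any other error), and the retry loop above honors that split. Its control flow, reduced to a sketch rather than the exact driver code:

    static int hibern8_enter_with_retries(struct ufs_hba *hba, int max_tries)
    {
        int ret = -EAGAIN;

        /* -EAGAIN means the link was recovered, so try again */
        while (max_tries-- && ret == -EAGAIN)
            ret = __ufshcd_uic_hibern8_enter(hba);

        return ret;    /* 0 on success, error code otherwise */
    }
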
@@ -4872,12 +4898,9 @@
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret) {
+ if (ret)
dev_err(hba->dev, "link startup failed %d\n", ret);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_host_regs(hba);
- }
+
return ret;
}
@@ -6229,7 +6252,7 @@
*/
ufshcd_set_eh_in_progress(hba);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
schedule_work(&hba->eh_work);
}
retval |= IRQ_HANDLED;
@@ -9370,6 +9393,33 @@
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
sizeof(struct ufs_pa_layer_attr));
+ /*
+ * Some UFS devices may stop responding after switching from
+	 * HS-G1 to HS-G3. These devices are found to work fine with a
+	 * two-step switch: HS-G1 to HS-G2 followed by HS-G2 to HS-G3.
+	 * If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH quirk is
+	 * enabled for such devices, this two-step gear switch
+	 * workaround is applied.
+ */
+ if ((hba->dev_info.quirks &
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
+ && (hba->pwr_info.gear_tx == UFS_HS_G1)
+ && (new_pwr_info.gear_tx == UFS_HS_G3)) {
+ /* scale up to G2 first */
+ new_pwr_info.gear_tx = UFS_HS_G2;
+ new_pwr_info.gear_rx = UFS_HS_G2;
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
+
+ /* scale up to G3 now */
+ new_pwr_info.gear_tx = UFS_HS_G3;
+ new_pwr_info.gear_rx = UFS_HS_G3;
+ /* now, fall through to set the HS-G3 */
+ }
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
} else {
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -9389,10 +9439,10 @@
new_pwr_info.pwr_rx = FASTAUTO_MODE;
}
}
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
}
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-
+out:
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
__func__, ret,
@@ -9455,10 +9505,29 @@
goto clk_scaling_unprepare;
}
+ /*
+ * If auto hibern8 is supported then put the link in
+ * hibern8 manually, this is to avoid auto hibern8
+ * racing during clock frequency scaling sequence.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+			/* link is in a bad state, skip the scale_up_gear error path */
+ return ret;
+ }
+
ret = ufshcd_scale_clks(hba, scale_up);
if (ret)
goto scale_up_gear;
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+			/* link is in a bad state, skip the scale_up_gear error path */
+ return ret;
+ }
+
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
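
Bracketing the frequency change with a manual hibern8 enter/exit serializes it against the auto hibern8 state machine: the link is parked before the clocks move and resumed only afterwards. The skeleton of that bracketing (error handling trimmed; function names as used above):

    static int scale_clks_h8_safe(struct ufs_hba *hba, bool scale_up)
    {
        int ret;

        if (ufshcd_is_auto_hibern8_supported(hba)) {
            ret = ufshcd_uic_hibern8_enter(hba);    /* park the link */
            if (ret)
                return ret;
        }

        ret = ufshcd_scale_clks(hba, scale_up);

        if (!ret && ufshcd_is_auto_hibern8_supported(hba))
            ret = ufshcd_uic_hibern8_exit(hba);     /* resume the link */

        return ret;
    }
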
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index b70606b..11916ac 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -303,6 +303,7 @@
* @pwr_change_notify: called before and after a power mode change
* is carried out to allow vendor specific capabilities
* to be set.
+ * @apply_dev_quirks: called to apply device specific quirks
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @full_reset: called during link recovery for handling variant specific
@@ -336,6 +337,7 @@
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
int (*full_reset)(struct ufs_hba *);
void (*dbg_register_dump)(struct ufs_hba *hba, bool no_sleep);
+ int (*phy_initialization)(struct ufs_hba *);
int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
u32 (*get_scale_down_gear)(struct ufs_hba *);
int (*set_bus_vote)(struct ufs_hba *, bool);
@@ -385,6 +387,9 @@
struct ufs_hba_variant_ops *vops;
struct ufs_hba_crypto_variant_ops *crypto_vops;
struct ufs_hba_pm_qos_variant_ops *pm_qos_vops;
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+ int (*resume)(struct ufs_hba *, enum ufs_pm_op);
+ void (*dbg_register_dump)(struct ufs_hba *hba);
};
/* clock gating state */
@@ -778,6 +783,11 @@
/* Auto hibern8 support is broken */
#define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 UFS_BIT(6)
+ /*
+	 * This quirk needs to be enabled if the host controller interprets
+	 * the values of PRDTO and PRDTL in the UTRD as byte offsets/lengths.
+ */
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
@@ -1237,6 +1247,14 @@
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->var && hba->var->vops && hba->var->vops->suspend)
		return hba->var->vops->suspend(hba, op);
	return 0;
}

+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
+		return hba->var->vops->apply_dev_quirks(hba);
+	return 0;
+}
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 602e196..a8cfa5e 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -125,6 +125,9 @@
#define PA_GRANULARITY_MIN_VAL 1
#define PA_GRANULARITY_MAX_VAL 6
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index cf715e5..3311380 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -214,6 +214,15 @@
deadlocks. It does not run during the bootup process, so it will
not catch any early lockups.
+config QPNP_PBS
+ tristate "PBS trigger support for QPNP PMIC"
+ depends on SPMI
+ help
+	  This driver supports configuring a software PBS trigger event through
+	  PBS RAM on Qualcomm Technologies, Inc. QPNP PMICs. It provides APIs
+	  for client drivers that want to send a PBS trigger event to the
+	  PBS RAM.
+
config QCOM_MEMORY_DUMP_V2
bool "QCOM Memory Dump V2 Support"
help
@@ -315,6 +324,17 @@
spcom provides clients/server API, although currently only one client
or server is allowed per logical channel.
+config MSM_SPSS_UTILS
+ depends on MSM_PIL
+ bool "Secure Processor Utilities"
+ help
+	  The spss-utils driver selects the Secure Processor firmware file
+	  name. The firmware file name for dev, test or production is
+	  selected based on two fuses.
+	  A different file name is used for different SPSS HW versions,
+	  because the SPSS firmware size is too small to support multiple
+	  HW versions.
+
config TRACER_PKT
bool "Tracer Packet"
help
@@ -625,3 +645,21 @@
This option enables driver for Data Capture and Compare engine. DCC
driver provides interface to configure DCC block and read back
captured data from DCC's internal SRAM.
+
+config QTI_RPM_STATS_LOG
+ bool "Qualcomm Technologies RPM Stats Driver"
+ depends on DEBUG_FS
+ default n
+ help
+ This option enables a driver which reads RPM messages from a shared
+ memory location. These messages provide statistical information about
+	  the low power modes that RPM enters. The driver outputs the messages
+	  via a debugfs node.
+
+config QCOM_FORCE_WDOG_BITE_ON_PANIC
+ bool "QCOM force watchdog bite"
+ depends on QCOM_WATCHDOG_V2
+ help
+ This forces a watchdog bite when the device restarts due to a
+	  kernel panic. On certain MSM SoCs, this provides additional
+	  debugging information.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 45384668..ba00ef10 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
+obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
@@ -30,6 +31,7 @@
obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o
obj-$(CONFIG_MSM_GLINK_SPI_XPRT) += glink_spi_xprt.o
obj-$(CONFIG_MSM_SPCOM) += spcom.o
+obj-$(CONFIG_MSM_SPSS_UTILS) += spss_utils.o
obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
obj-$(CONFIG_QTI_RPMH_API) += rpmh.o
@@ -66,3 +68,4 @@
obj-$(CONFIG_MSM_IDLE_STATS) += lpm-stats.o
obj-$(CONFIG_APSS_CORE_EA) += msm-core.o debug_core.o
obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
+obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 0c2ba4d..5cc04c0 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -241,18 +241,28 @@
static int cmd_db_dev_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct resource res;
+ void __iomem *dict;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ dict = of_iomap(pdev->dev.of_node, 0);
+ if (!dict) {
cmd_db_status = -ENOMEM;
goto failed;
}
- start_addr = devm_ioremap_resource(&pdev->dev, res);
+ /*
+	 * Read the start address and size of the command DB region
+	 * from the shared dictionary location
+ */
+ res.start = readl_relaxed(dict);
+ res.end = res.start + readl_relaxed(dict + 0x4);
+ res.flags = IORESOURCE_MEM;
+ iounmap(dict);
- cmd_db_header = devm_kzalloc(&pdev->dev, sizeof(*cmd_db_header),
- GFP_KERNEL);
+ start_addr = devm_ioremap_resource(&pdev->dev, &res);
+
+ cmd_db_header = devm_kzalloc(&pdev->dev,
+ sizeof(*cmd_db_header), GFP_KERNEL);
if (!cmd_db_header) {
cmd_db_status = -ENOMEM;
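
Rather than taking the command DB region from the "reg" property, the probe now maps a small shared dictionary whose first two words are the region's base address and size, builds a struct resource from them, and maps that. A condensed sketch (error checks trimmed; callers should IS_ERR-check the result):

    #include <linux/io.h>
    #include <linux/of_address.h>
    #include <linux/platform_device.h>

    static void __iomem *map_indirect_region(struct platform_device *pdev)
    {
        struct resource res;
        void __iomem *dict;

        dict = of_iomap(pdev->dev.of_node, 0);    /* shared dictionary */
        if (!dict)
            return NULL;

        res.start = readl_relaxed(dict);                   /* word 0: base */
        res.end   = res.start + readl_relaxed(dict + 0x4); /* word 1: size */
        res.flags = IORESOURCE_MEM;
        iounmap(dict);    /* dictionary no longer needed */

        return devm_ioremap_resource(&pdev->dev, &res);
    }
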
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index cb212b2..4d2f54d 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -121,6 +121,7 @@
struct mutex mutex;
void __iomem *ram_base;
uint32_t ram_size;
+ uint32_t ram_offset;
enum dcc_data_sink data_sink;
enum dcc_func_type func_type[DCC_MAX_LINK_LIST];
uint32_t ram_cfg;
@@ -517,9 +518,10 @@
/* 3. If in capture mode program DCC_RAM_CFG reg */
if (drvdata->func_type[list] == DCC_FUNC_TYPE_CAPTURE) {
- dcc_writel(drvdata, ram_cfg_base, DCC_LL_BASE(list));
- dcc_writel(drvdata, drvdata->ram_start,
- DCC_FD_BASE(list));
+ dcc_writel(drvdata, ram_cfg_base +
+ drvdata->ram_offset/4, DCC_LL_BASE(list));
+ dcc_writel(drvdata, drvdata->ram_start +
+ drvdata->ram_offset/4, DCC_FD_BASE(list));
dcc_writel(drvdata, 0, DCC_LL_TIMEOUT(list));
}
@@ -1342,6 +1344,11 @@
if (!drvdata->ram_base)
return -ENOMEM;
+ ret = of_property_read_u32(pdev->dev.of_node, "dcc-ram-offset",
+ &drvdata->ram_offset);
+ if (ret)
+ return -EINVAL;
+
drvdata->save_reg = of_property_read_bool(pdev->dev.of_node,
"qcom,save-reg");
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 11965a2..1455069 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -144,10 +144,10 @@
eud_work);
if (chip->int_status == EUD_INT_VBUS)
- extcon_set_cable_state_(chip->extcon, chip->extcon_id,
+ extcon_set_state_sync(chip->extcon, chip->extcon_id,
chip->usb_attach);
else if (chip->int_status == EUD_INT_CHGR)
- extcon_set_cable_state_(chip->extcon, chip->extcon_id,
+ extcon_set_state_sync(chip->extcon, chip->extcon_id,
chip->chgr_enable);
}
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 8cd5d3c..fd4c604 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1846,7 +1846,7 @@
/**
* ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if
- * it is not found.
+ * it is not found and get reference of context.
* @xprt_ctx: Transport to search for a matching channel.
* @name: Name of the desired channel.
*
@@ -1902,6 +1902,7 @@
spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
flags);
kfree(ctx);
+ rwref_get(&entry->ch_state_lhb2);
rwref_write_put(&xprt_ctx->xprt_state_lhb0);
return entry;
}
@@ -1935,6 +1936,7 @@
"%s: local:GLINK_CHANNEL_CLOSED\n",
__func__);
}
+ rwref_get(&ctx->ch_state_lhb2);
spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
rwref_write_put(&xprt_ctx->xprt_state_lhb0);
mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
@@ -2579,6 +2581,7 @@
GLINK_INFO_CH_XPRT(ctx, transport_ptr,
"%s: Channel not ready to be re-opened. State: %u\n",
__func__, ctx->local_open_state);
+ rwref_put(&ctx->ch_state_lhb2);
return ERR_PTR(-EBUSY);
}
@@ -2627,11 +2630,13 @@
ctx->local_open_state = GLINK_CHANNEL_CLOSED;
GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
__func__, ret);
+ rwref_put(&ctx->ch_state_lhb2);
return ERR_PTR(ret);
}
GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
__func__, ctx);
+ rwref_put(&ctx->ch_state_lhb2);
return ctx;
}
EXPORT_SYMBOL(glink_open);
@@ -4805,6 +4810,7 @@
GLINK_ERR_CH(ctx,
"%s: Duplicate remote open for rcid %u, name '%s'\n",
__func__, rcid, name);
+ rwref_put(&ctx->ch_state_lhb2);
glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
return;
}
@@ -4827,6 +4833,7 @@
if (do_migrate)
ch_migrate(NULL, ctx);
+ rwref_put(&ctx->ch_state_lhb2);
glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
}
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
index 0aeb0e8..3b540f3 100644
--- a/drivers/soc/qcom/glink_loopback_server.c
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,7 @@
{"LOOPBACK_CTL_APSS", "lpass", "smem"},
{"LOOPBACK_CTL_APSS", "dsps", "smem"},
{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
+ {"LOOPBACK_CTL_APSS", "wdsp", "spi"},
};
static DEFINE_MUTEX(ctl_ch_list_lock);
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index c837bd8..9810207 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -700,6 +700,7 @@
* edge: The G-Link edge name for the channel associated with
* this callback data
* do_cleanup_data: Structure containing the G-Link SSR do_cleanup message.
+ * cb_kref: Kref object to maintain cb_data reference.
*/
struct ssr_notify_data {
bool tx_done;
@@ -707,6 +708,7 @@
bool responded;
const char *edge;
struct do_cleanup_msg *do_cleanup_data;
+ struct kref cb_kref;
};
/**
@@ -741,6 +743,7 @@
int notify_list_len;
bool link_up;
spinlock_t link_up_lock;
+ spinlock_t cb_lock;
};
/**
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 266c0a2..3c4759c 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -822,6 +822,12 @@
einfo->remote_proc_id,
SMEM_ITEM_CACHED_FLAG);
if (!einfo->rx_fifo)
+ einfo->rx_fifo = smem_get_entry(
+ SMEM_GLINK_NATIVE_XPRT_FIFO_1,
+ &einfo->rx_fifo_size,
+ einfo->remote_proc_id,
+ 0);
+ if (!einfo->rx_fifo)
return false;
}
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index b24598a..4737288 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -115,6 +115,44 @@
static atomic_t responses_remaining = ATOMIC_INIT(0);
static wait_queue_head_t waitqueue;
+/**
+ * cb_data_release() - Free cb_data once its last reference is dropped
+ * @kref_ptr:	pointer to the kref embedded in cb_data.
+ *
+ * This function releases cb_data.
+ */
+static inline void cb_data_release(struct kref *kref_ptr)
+{
+ struct ssr_notify_data *cb_data;
+
+ cb_data = container_of(kref_ptr, struct ssr_notify_data, cb_kref);
+ kfree(cb_data);
+}
+
+/**
+ * check_and_get_cb_data() - Try to get reference to kref of cb_data
+ * @ss_info: pointer to subsystem info structure.
+ *
+ * Return: NULL if cb_data is NULL, pointer to cb_data otherwise
+ */
+static struct ssr_notify_data *check_and_get_cb_data(
+ struct subsys_info *ss_info)
+{
+ struct ssr_notify_data *cb_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ss_info->cb_lock, flags);
+ if (ss_info->cb_data == NULL) {
+ GLINK_SSR_LOG("<SSR> %s: cb_data is NULL\n", __func__);
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+		return NULL;
+ }
+ kref_get(&ss_info->cb_data->cb_kref);
+ cb_data = ss_info->cb_data;
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+ return cb_data;
+}
+
static void rx_done_cb_worker(struct work_struct *work)
{
struct rx_done_ch_work *rx_done_work =
@@ -340,8 +378,10 @@
if (WARN_ON(!ss_info->cb_data))
return;
- kfree(ss_info->cb_data);
+ spin_lock_irqsave(&ss_info->cb_lock, flags);
+ kref_put(&ss_info->cb_data->cb_kref, cb_data_release);
ss_info->cb_data = NULL;
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
kfree(close_work);
}
@@ -508,13 +548,18 @@
return -ENODEV;
}
handle = ss_info_channel->handle;
- ss_leaf_entry->cb_data = ss_info_channel->cb_data;
+ ss_leaf_entry->cb_data = check_and_get_cb_data(
+ ss_info_channel);
+ if (!ss_leaf_entry->cb_data) {
+ GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+ atomic_dec(&responses_remaining);
+ continue;
+ }
spin_lock_irqsave(&ss_info->link_up_lock, flags);
if (IS_ERR_OR_NULL(ss_info_channel->handle) ||
- !ss_info_channel->cb_data ||
!ss_info_channel->link_up ||
- ss_info_channel->cb_data->event
+ ss_leaf_entry->cb_data->event
!= GLINK_CONNECTED) {
GLINK_SSR_LOG(
@@ -527,6 +572,8 @@
spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
continue;
}
spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
@@ -537,6 +584,8 @@
GLINK_SSR_ERR(
"%s %s: Could not allocate do_cleanup_msg\n",
"<SSR>", __func__);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
return -ENOMEM;
}
@@ -568,6 +617,8 @@
__func__);
}
atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
continue;
}
@@ -597,10 +648,12 @@
__func__);
}
atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
continue;
}
-
sequence_number++;
+ kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
}
wait_ret = wait_event_timeout(waitqueue,
@@ -609,6 +662,21 @@
list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
notify_list_node) {
+ ss_info_channel =
+ get_info_for_subsystem(ss_leaf_entry->ssr_name);
+ if (ss_info_channel == NULL) {
+ GLINK_SSR_ERR(
+ "<SSR> %s: unable to find subsystem name\n",
+ __func__);
+ continue;
+ }
+
+ ss_leaf_entry->cb_data = check_and_get_cb_data(
+ ss_info_channel);
+ if (!ss_leaf_entry->cb_data) {
+ GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+ continue;
+ }
if (!wait_ret && !IS_ERR_OR_NULL(ss_leaf_entry->cb_data)
&& !ss_leaf_entry->cb_data->responded) {
GLINK_SSR_ERR("%s %s: Subsystem %s %s\n",
@@ -627,6 +695,7 @@
if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
ss_leaf_entry->cb_data->responded = false;
+ kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
}
complete(&notifications_successful_complete);
return 0;
@@ -645,6 +714,7 @@
struct glink_open_config open_cfg;
struct ssr_notify_data *cb_data = NULL;
void *handle = NULL;
+ unsigned long flags;
if (!ss_info) {
GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
@@ -661,7 +731,10 @@
cb_data->responded = false;
cb_data->event = GLINK_SSR_EVENT_INIT;
cb_data->edge = ss_info->edge;
+ spin_lock_irqsave(&ss_info->cb_lock, flags);
ss_info->cb_data = cb_data;
+ kref_init(&cb_data->cb_kref);
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
memset(&open_cfg, 0, sizeof(struct glink_open_config));
@@ -877,6 +950,7 @@
ss_info->link_state_handle = NULL;
ss_info->cb_data = NULL;
spin_lock_init(&ss_info->link_up_lock);
+ spin_lock_init(&ss_info->cb_lock);
nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
if (!nb) {
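
The cb_data lifetime now follows a lookup-and-pin idiom: kref_init() when the pointer is published under cb_lock, kref_get() by any reader that finds a non-NULL pointer under the same lock, and kref_put() by those readers and by the path that NULLs the pointer out. A minimal sketch with hypothetical my_* names:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_cb_data {
        struct kref kref;
        /* ... payload ... */
    };

    struct my_owner {
        spinlock_t lock;
        struct my_cb_data *cb_data;    /* may be NULLed concurrently */
    };

    static void my_cb_release(struct kref *kref)
    {
        kfree(container_of(kref, struct my_cb_data, kref));
    }

    /* Pin cb_data for use outside the lock; pairs with kref_put(). */
    static struct my_cb_data *my_get_cb_data(struct my_owner *owner)
    {
        struct my_cb_data *cb;
        unsigned long flags;

        spin_lock_irqsave(&owner->lock, flags);
        cb = owner->cb_data;
        if (cb)
            kref_get(&cb->kref);    /* safe: pointer valid under lock */
        spin_unlock_irqrestore(&owner->lock, flags);
        return cb;
    }
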
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 722127d..b759776 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -62,7 +62,7 @@
#define WLFW_CLIENT_ID 0x4b4e454c
#define MAX_PROP_SIZE 32
#define NUM_LOG_PAGES 10
-#define NUM_REG_LOG_PAGES 4
+#define NUM_LOG_LONG_PAGES 4
#define ICNSS_MAGIC 0x5abc5abc
#define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN"
@@ -77,14 +77,10 @@
ipc_log_string(icnss_ipc_log_context, _x); \
} while (0)
-#ifdef CONFIG_ICNSS_DEBUG
#define icnss_ipc_log_long_string(_x...) do { \
if (icnss_ipc_log_long_context) \
ipc_log_string(icnss_ipc_log_long_context, _x); \
} while (0)
-#else
-#define icnss_ipc_log_long_string(_x...)
-#endif
#define icnss_pr_err(_fmt, ...) do { \
pr_err(_fmt, ##__VA_ARGS__); \
@@ -110,28 +106,25 @@
##__VA_ARGS__); \
} while (0)
-#define icnss_reg_dbg(_fmt, ...) do { \
+#define icnss_pr_vdbg(_fmt, ...) do { \
pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_long_string("REG: " pr_fmt(_fmt), \
+ icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt), \
##__VA_ARGS__); \
} while (0)
#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
if (!(_condition)) { \
- icnss_pr_err("ASSERT at line %d\n", \
- __LINE__); \
+ icnss_pr_err("ASSERT at line %d\n", __LINE__); \
BUG_ON(1); \
} \
} while (0)
+
+bool ignore_qmi_timeout;
+#define ICNSS_QMI_ASSERT() ICNSS_ASSERT(ignore_qmi_timeout)
#else
-#define ICNSS_ASSERT(_condition) do { \
- if (!(_condition)) { \
- icnss_pr_err("ASSERT at line %d\n", \
- __LINE__); \
- WARN_ON(1); \
- } \
- } while (0)
+#define ICNSS_ASSERT(_condition) do { } while (0)
+#define ICNSS_QMI_ASSERT() do { } while (0)
#endif
enum icnss_debug_quirks {
@@ -156,10 +149,7 @@
module_param(dynamic_feature_mask, ullong, 0600);
void *icnss_ipc_log_context;
-
-#ifdef CONFIG_ICNSS_DEBUG
void *icnss_ipc_log_long_context;
-#endif
#define ICNSS_EVENT_PENDING 2989
@@ -181,6 +171,7 @@
struct icnss_event_pd_service_down_data {
bool crashed;
bool fw_rejuvenate;
+ bool wdog_bite;
};
struct icnss_driver_event {
@@ -205,6 +196,7 @@
ICNSS_PD_RESTART,
ICNSS_MSA0_ASSIGNED,
ICNSS_WLFW_EXISTS,
+ ICNSS_WDOG_BITE,
};
struct ce_irq_list {
@@ -212,6 +204,38 @@
irqreturn_t (*handler)(int, void *);
};
+struct icnss_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct icnss_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+static struct icnss_vreg_info icnss_vreg_info[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ICNSS_VREG_INFO_SIZE ARRAY_SIZE(icnss_vreg_info)
+
+static struct icnss_clk_info icnss_clk_info[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ICNSS_CLK_INFO_SIZE ARRAY_SIZE(icnss_clk_info)
+
struct icnss_stats {
struct {
uint32_t posted;
@@ -265,6 +289,7 @@
uint32_t rejuvenate_ack_req;
uint32_t rejuvenate_ack_resp;
uint32_t rejuvenate_ack_err;
+ uint32_t trigger_recovery;
};
#define MAX_NO_OF_MAC_ADDR 4
@@ -284,6 +309,8 @@
struct platform_device *pdev;
struct icnss_driver_ops *ops;
struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
+ struct icnss_vreg_info vreg_info[ICNSS_VREG_INFO_SIZE];
+ struct icnss_clk_info clk_info[ICNSS_CLK_INFO_SIZE];
u32 ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
phys_addr_t mem_base_pa;
void __iomem *mem_base_va;
@@ -310,8 +337,9 @@
u32 pwr_pin_result;
u32 phy_io_pin_result;
u32 rf_pin_result;
+ uint32_t nr_mem_region;
struct icnss_mem_region_info
- icnss_mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+ mem_region[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
struct dentry *root_dentry;
spinlock_t on_off_lock;
struct icnss_stats stats;
@@ -334,14 +362,24 @@
struct ramdump_device *msa0_dump_dev;
bool is_wlan_mac_set;
struct icnss_wlan_mac_addr wlan_mac_addr;
+ bool bypass_s1_smmu;
} *penv;
+#ifdef CONFIG_ICNSS_DEBUG
+static void icnss_ignore_qmi_timeout(bool ignore)
+{
+ ignore_qmi_timeout = ignore;
+}
+#else
+static void icnss_ignore_qmi_timeout(bool ignore) { }
+#endif
+
static void icnss_pm_stay_awake(struct icnss_priv *priv)
{
if (atomic_inc_return(&priv->pm_count) != 1)
return;
- icnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
+ icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
atomic_read(&priv->pm_count));
pm_stay_awake(&priv->pdev->dev);
@@ -358,7 +396,7 @@
if (r != 0)
return;
- icnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
+ icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
atomic_read(&priv->pm_count));
pm_relax(&priv->pdev->dev);
@@ -680,41 +718,220 @@
return ret;
}
+static int icnss_vreg_on(struct icnss_priv *priv)
+{
+ int ret = 0;
+ struct icnss_vreg_info *vreg_info;
+ int i;
+
+ for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+ vreg_info = &priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ icnss_pr_vdbg("Regulator %s being enabled\n", vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ icnss_pr_err("Regulator %s, can't set voltage: min_v: %u, max_v: %u, ret: %d\n",
+ vreg_info->name, vreg_info->min_v,
+ vreg_info->max_v, ret);
+ break;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+ if (ret < 0) {
+ icnss_pr_err("Regulator %s, can't set load: %u, ret: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua, ret);
+ break;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ icnss_pr_err("Regulator %s, can't enable: %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+ }
+
+ if (!ret)
+ return 0;
+
+ for (; i >= 0; i--) {
+ vreg_info = &priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ regulator_set_load(vreg_info->reg, 0);
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ }
+
+ return ret;
+}
+
+static int icnss_vreg_off(struct icnss_priv *priv)
+{
+ int ret = 0;
+ struct icnss_vreg_info *vreg_info;
+ int i;
+
+ for (i = ICNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ icnss_pr_vdbg("Regulator %s being disabled\n", vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ icnss_pr_err("Regulator %s, can't disable: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ icnss_pr_err("Regulator %s, can't set load: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+ if (ret)
+ icnss_pr_err("Regulator %s, can't set voltage: %d\n",
+ vreg_info->name, ret);
+ }
+
+ return ret;
+}
+
+static int icnss_clk_init(struct icnss_priv *priv)
+{
+ struct icnss_clk_info *clk_info;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+ clk_info = &priv->clk_info[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ icnss_pr_vdbg("Clock %s being enabled\n", clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ icnss_pr_err("Clock %s, can't set frequency: %u, ret: %d\n",
+ clk_info->name, clk_info->freq,
+ ret);
+ break;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+ if (ret) {
+ icnss_pr_err("Clock %s, can't enable: %d\n",
+ clk_info->name, ret);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ return 0;
+
+ for (; i >= 0; i--) {
+ clk_info = &priv->clk_info[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return ret;
+}
+
+static int icnss_clk_deinit(struct icnss_priv *priv)
+{
+ struct icnss_clk_info *clk_info;
+ int i;
+
+ for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+ clk_info = &priv->clk_info[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ icnss_pr_vdbg("Clock %s being disabled\n", clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return 0;
+}
+
static int icnss_hw_power_on(struct icnss_priv *priv)
{
int ret = 0;
- unsigned long flags;
icnss_pr_dbg("HW Power on: state: 0x%lx\n", priv->state);
- spin_lock_irqsave(&priv->on_off_lock, flags);
+ spin_lock(&priv->on_off_lock);
if (test_bit(ICNSS_POWER_ON, &priv->state)) {
- spin_unlock_irqrestore(&priv->on_off_lock, flags);
+ spin_unlock(&priv->on_off_lock);
return ret;
}
set_bit(ICNSS_POWER_ON, &priv->state);
- spin_unlock_irqrestore(&priv->on_off_lock, flags);
+ spin_unlock(&priv->on_off_lock);
+ ret = icnss_vreg_on(priv);
+ if (ret)
+ goto out;
+
+ ret = icnss_clk_init(priv);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ icnss_vreg_off(priv);
+out:
+ clear_bit(ICNSS_POWER_ON, &priv->state);
return ret;
}
static int icnss_hw_power_off(struct icnss_priv *priv)
{
int ret = 0;
- unsigned long flags;
if (test_bit(HW_ALWAYS_ON, &quirks))
return 0;
icnss_pr_dbg("HW Power off: 0x%lx\n", priv->state);
- spin_lock_irqsave(&priv->on_off_lock, flags);
+ spin_lock(&priv->on_off_lock);
if (!test_bit(ICNSS_POWER_ON, &priv->state)) {
- spin_unlock_irqrestore(&priv->on_off_lock, flags);
+ spin_unlock(&priv->on_off_lock);
return ret;
}
clear_bit(ICNSS_POWER_ON, &priv->state);
- spin_unlock_irqrestore(&priv->on_off_lock, flags);
+ spin_unlock(&priv->on_off_lock);
+
+ icnss_clk_deinit(priv);
+
+ ret = icnss_vreg_off(priv);
return ret;
}
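
icnss_vreg_on() follows the usual all-or-nothing rule: enable supplies in array order and, on the first failure, walk back down the array disabling whatever was already enabled. The loop shape, reduced to a sketch (the per-supply voltage/load steps are elided):

    #include <linux/regulator/consumer.h>

    static int enable_all(struct regulator **regs, int n)
    {
        int i, ret = 0;

        for (i = 0; i < n; i++) {
            if (!regs[i])
                continue;    /* optional supply absent */
            ret = regulator_enable(regs[i]);
            if (ret)
                break;
        }
        if (!ret)
            return 0;

        /* unwind in reverse, excluding the index that failed */
        for (i--; i >= 0; i--)
            if (regs[i])
                regulator_disable(regs[i]);
        return ret;
    }
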
@@ -760,7 +977,7 @@
}
EXPORT_SYMBOL(icnss_power_off);
-static int icnss_map_msa_permissions(struct icnss_priv *priv, u32 index)
+static int icnss_map_msa_permissions(struct icnss_mem_region_info *mem_region)
{
int ret = 0;
phys_addr_t addr;
@@ -773,10 +990,10 @@
int source_nelems = sizeof(source_vmlist)/sizeof(u32);
int dest_nelems = 0;
- addr = priv->icnss_mem_region[index].reg_addr;
- size = priv->icnss_mem_region[index].size;
+ addr = mem_region->reg_addr;
+ size = mem_region->size;
- if (!priv->icnss_mem_region[index].secure_flag) {
+ if (!mem_region->secure_flag) {
dest_vmids[2] = VMID_WLAN_CE;
dest_nelems = 3;
} else {
@@ -786,19 +1003,20 @@
ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
dest_vmids, dest_perms, dest_nelems);
if (ret) {
- icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
- index, &addr, size, ret);
+ icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
+ &addr, size, ret);
goto out;
}
- icnss_pr_dbg("Hypervisor map for region %u: source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
- index, source_vmlist[0], dest_nelems,
- dest_vmids[0], dest_vmids[1], dest_vmids[2]);
+
+ icnss_pr_dbg("Hypervisor map for source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
+ source_vmlist[0], dest_nelems, dest_vmids[0],
+ dest_vmids[1], dest_vmids[2]);
out:
return ret;
}
-static int icnss_unmap_msa_permissions(struct icnss_priv *priv, u32 index)
+static int icnss_unmap_msa_permissions(struct icnss_mem_region_info *mem_region)
{
int ret = 0;
phys_addr_t addr;
@@ -809,9 +1027,10 @@
int source_nelems = 0;
int dest_nelems = sizeof(dest_vmids)/sizeof(u32);
- addr = priv->icnss_mem_region[index].reg_addr;
- size = priv->icnss_mem_region[index].size;
- if (!priv->icnss_mem_region[index].secure_flag) {
+ addr = mem_region->reg_addr;
+ size = mem_region->size;
+
+ if (!mem_region->secure_flag) {
source_vmlist[2] = VMID_WLAN_CE;
source_nelems = 3;
} else {
@@ -822,14 +1041,13 @@
ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
dest_vmids, dest_perms, dest_nelems);
if (ret) {
- icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
- index, &addr, size, ret);
+ icnss_pr_err("Hyperviser unmap failed for PA=%pa size=%u err=%d\n",
+ &addr, size, ret);
goto out;
}
- icnss_pr_dbg("hypervisor unmap for region %u, source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
- index, source_nelems,
- source_vmlist[0], source_vmlist[1], source_vmlist[2],
- dest_vmids[0]);
+ icnss_pr_dbg("Hypervisor unmap for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
+ source_nelems, source_vmlist[0], source_vmlist[1],
+ source_vmlist[2], dest_vmids[0]);
out:
return ret;
}
@@ -837,34 +1055,37 @@
static int icnss_setup_msa_permissions(struct icnss_priv *priv)
{
int ret;
+ int i;
if (test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
return 0;
- ret = icnss_map_msa_permissions(priv, 0);
- if (ret)
- return ret;
+ for (i = 0; i < priv->nr_mem_region; i++) {
- ret = icnss_map_msa_permissions(priv, 1);
- if (ret)
- goto err_map_msa;
+ ret = icnss_map_msa_permissions(&priv->mem_region[i]);
+ if (ret)
+ goto err_unmap;
+ }
set_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
- return ret;
+ return 0;
-err_map_msa:
- icnss_unmap_msa_permissions(priv, 0);
+err_unmap:
+ for (i--; i >= 0; i--)
+ icnss_unmap_msa_permissions(&priv->mem_region[i]);
return ret;
}
static void icnss_remove_msa_permissions(struct icnss_priv *priv)
{
+ int i;
+
if (!test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
return;
- icnss_unmap_msa_permissions(priv, 0);
- icnss_unmap_msa_permissions(priv, 1);
+ for (i = 0; i < priv->nr_mem_region; i++)
+ icnss_unmap_msa_permissions(&priv->mem_region[i]);
clear_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
}
@@ -915,7 +1136,7 @@
icnss_pr_dbg("Receive mem_region_info_len: %d\n",
resp.mem_region_info_len);
- if (resp.mem_region_info_len > 2) {
+ if (resp.mem_region_info_len > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) {
icnss_pr_err("Invalid memory region length received: %d\n",
resp.mem_region_info_len);
ret = -EINVAL;
@@ -923,24 +1144,25 @@
}
penv->stats.msa_info_resp++;
+ penv->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
- penv->icnss_mem_region[i].reg_addr =
+ penv->mem_region[i].reg_addr =
resp.mem_region_info[i].region_addr;
- penv->icnss_mem_region[i].size =
+ penv->mem_region[i].size =
resp.mem_region_info[i].size;
- penv->icnss_mem_region[i].secure_flag =
+ penv->mem_region[i].secure_flag =
resp.mem_region_info[i].secure_flag;
icnss_pr_dbg("Memory Region: %d Addr: 0x%llx Size: 0x%x Flag: 0x%08x\n",
- i, penv->icnss_mem_region[i].reg_addr,
- penv->icnss_mem_region[i].size,
- penv->icnss_mem_region[i].secure_flag);
+ i, penv->mem_region[i].reg_addr,
+ penv->mem_region[i].size,
+ penv->mem_region[i].secure_flag);
}
return 0;
out:
penv->stats.msa_info_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -988,7 +1210,7 @@
out:
penv->stats.msa_ready_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1051,7 +1273,7 @@
out:
penv->stats.ind_register_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1120,7 +1342,7 @@
out:
penv->stats.cap_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1181,7 +1403,7 @@
out:
penv->stats.mode_req_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1231,7 +1453,7 @@
out:
penv->stats.cfg_req_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1284,7 +1506,7 @@
out:
penv->stats.ini_req_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1339,7 +1561,7 @@
goto out;
}
- if (!resp->data_valid || resp->data_len <= data_len) {
+ if (!resp->data_valid || resp->data_len < data_len) {
icnss_pr_err("Athdiag read data is invalid, data_valid = %u, data_len = %u\n",
resp->data_valid, resp->data_len);
ret = -EINVAL;
@@ -1450,7 +1672,7 @@
out:
priv->stats.rejuvenate_ack_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1524,7 +1746,7 @@
if (!penv || !penv->wlfw_clnt)
return;
- icnss_pr_dbg("Receiving Event in work queue context\n");
+ icnss_pr_vdbg("Receiving Event in work queue context\n");
do {
} while ((ret = qmi_recv_msg(penv->wlfw_clnt)) == 0);
@@ -1532,13 +1754,13 @@
if (ret != -ENOMSG)
icnss_pr_err("Error receiving message: %d\n", ret);
- icnss_pr_dbg("Receiving Event completed\n");
+ icnss_pr_vdbg("Receiving Event completed\n");
}
static void icnss_qmi_wlfw_clnt_notify(struct qmi_handle *handle,
enum qmi_event_type event, void *notify_priv)
{
- icnss_pr_dbg("QMI client notify: %d\n", event);
+ icnss_pr_vdbg("QMI client notify: %d\n", event);
if (!penv || !penv->wlfw_clnt)
return;
@@ -1553,11 +1775,29 @@
}
}
+static int icnss_call_driver_uevent(struct icnss_priv *priv,
+ enum icnss_uevent uevent, void *data)
+{
+ struct icnss_uevent_data uevent_data;
+
+ if (!priv->ops || !priv->ops->uevent)
+ return 0;
+
+ icnss_pr_dbg("Calling driver uevent state: 0x%lx, uevent: %d\n",
+ priv->state, uevent);
+
+ uevent_data.uevent = uevent;
+ uevent_data.data = data;
+
+ return priv->ops->uevent(&priv->pdev->dev, &uevent_data);
+}
+
static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
unsigned int msg_id, void *msg,
unsigned int msg_len, void *ind_cb_priv)
{
struct icnss_event_pd_service_down_data *event_data;
+ struct icnss_uevent_fw_down_data fw_down_data;
if (!penv)
return;
@@ -1582,11 +1822,16 @@
case QMI_WLFW_REJUVENATE_IND_V01:
icnss_pr_dbg("Received Rejuvenate Indication msg_id 0x%x, state: 0x%lx\n",
msg_id, penv->state);
+
+ icnss_ignore_qmi_timeout(true);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
if (event_data == NULL)
return;
event_data->crashed = true;
event_data->fw_rejuvenate = true;
+ fw_down_data.crashed = true;
+ icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_DOWN,
+ &fw_down_data);
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
0, event_data);
break;
@@ -1707,6 +1952,9 @@
if (!priv->ops || !priv->ops->probe)
return 0;
+ if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ return -EINVAL;
+
icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
icnss_hw_power_on(priv);
@@ -1727,17 +1975,39 @@
return ret;
}
+static int icnss_call_driver_shutdown(struct icnss_priv *priv)
+{
+ if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ goto out;
+
+ if (!priv->ops || !priv->ops->shutdown)
+ goto out;
+
+ icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
+
+ priv->ops->shutdown(&priv->pdev->dev);
+
+out:
+ return 0;
+}
+
static int icnss_pd_restart_complete(struct icnss_priv *priv)
{
int ret;
- clear_bit(ICNSS_PD_RESTART, &priv->state);
icnss_pm_relax(priv);
+ if (test_bit(ICNSS_WDOG_BITE, &priv->state)) {
+ icnss_call_driver_shutdown(priv);
+ clear_bit(ICNSS_WDOG_BITE, &priv->state);
+ }
+
+ clear_bit(ICNSS_PD_RESTART, &priv->state);
+
if (!priv->ops || !priv->ops->reinit)
goto out;
- if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
goto call_probe;
icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
@@ -1774,6 +2044,8 @@
set_bit(ICNSS_FW_READY, &penv->state);
+ icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_READY, NULL);
+
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
icnss_hw_power_off(penv);
@@ -1870,26 +2142,36 @@
clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
+ icnss_hw_power_off(penv);
+
return 0;
}
-static int icnss_call_driver_shutdown(struct icnss_priv *priv)
+static int icnss_fw_crashed(struct icnss_priv *priv,
+ struct icnss_event_pd_service_down_data *event_data)
{
- icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
+ icnss_pr_dbg("FW crashed, state: 0x%lx, wdog_bite: %d\n",
+ priv->state, event_data->wdog_bite);
set_bit(ICNSS_PD_RESTART, &priv->state);
clear_bit(ICNSS_FW_READY, &priv->state);
icnss_pm_stay_awake(priv);
- if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
- return 0;
+ if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL);
- if (!priv->ops || !priv->ops->shutdown)
- return 0;
+ if (event_data->wdog_bite) {
+ set_bit(ICNSS_WDOG_BITE, &priv->state);
+ goto out;
+ }
- priv->ops->shutdown(&priv->pdev->dev);
+ icnss_call_driver_shutdown(priv);
+ if (event_data->fw_rejuvenate)
+ wlfw_rejuvenate_ack_send_sync_msg(priv);
+
+out:
return 0;
}
@@ -1900,7 +2182,7 @@
struct icnss_event_pd_service_down_data *event_data = data;
if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
- return 0;
+ goto out;
if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
@@ -1910,18 +2192,15 @@
}
if (event_data->crashed)
- icnss_call_driver_shutdown(priv);
+ icnss_fw_crashed(priv, event_data);
else
icnss_call_driver_remove(priv);
- if (event_data->fw_rejuvenate)
- wlfw_rejuvenate_ack_send_sync_msg(priv);
-
out:
- ret = icnss_hw_power_off(priv);
-
kfree(data);
+ icnss_ignore_qmi_timeout(false);
+
return ret;
}
@@ -2046,8 +2325,9 @@
struct notif_data *notif = data;
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
modem_ssr_nb);
+ struct icnss_uevent_fw_down_data fw_down_data;
- icnss_pr_dbg("Modem-Notify: event %lu\n", code);
+ icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
if (code == SUBSYS_AFTER_SHUTDOWN &&
notif->crashed == CRASH_STATUS_ERR_FATAL) {
@@ -2063,7 +2343,10 @@
if (test_bit(ICNSS_PDR_ENABLED, &priv->state))
return NOTIFY_OK;
- icnss_pr_info("Modem went down, state: %lx\n", priv->state);
+ icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
+ priv->state, notif->crashed);
+
+ icnss_ignore_qmi_timeout(true);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
@@ -2072,6 +2355,12 @@
event_data->crashed = notif->crashed;
+ if (notif->crashed == CRASH_STATUS_WDOG_BITE)
+ event_data->wdog_bite = true;
+
+ fw_down_data.crashed = !!notif->crashed;
+ icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_EVENT_SYNC, event_data);
@@ -2135,31 +2424,47 @@
service_notifier_nb);
enum pd_subsys_state *state = data;
struct icnss_event_pd_service_down_data *event_data;
+ struct icnss_uevent_fw_down_data fw_down_data;
- switch (notification) {
- case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
- icnss_pr_info("Service down, data: 0x%p, state: 0x%lx\n", data,
- priv->state);
- event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+ icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
+ notification, priv->state);
- if (event_data == NULL)
- return notifier_from_errno(-ENOMEM);
+ if (notification != SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
+ goto done;
- if (state == NULL || *state != ROOT_PD_SHUTDOWN)
- event_data->crashed = true;
+ event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
- icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
- ICNSS_EVENT_SYNC, event_data);
- break;
- case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
- icnss_pr_dbg("Service up, state: 0x%lx\n", priv->state);
- break;
- default:
- icnss_pr_dbg("Service state Unknown, notification: 0x%lx, state: 0x%lx\n",
- notification, priv->state);
- return NOTIFY_DONE;
+ if (event_data == NULL)
+ return notifier_from_errno(-ENOMEM);
+
+ if (state == NULL) {
+ event_data->crashed = true;
+ goto event_post;
}
+ icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx\n",
+ *state, priv->state);
+
+ switch (*state) {
+ case ROOT_PD_WDOG_BITE:
+ event_data->crashed = true;
+ event_data->wdog_bite = true;
+ break;
+ case ROOT_PD_SHUTDOWN:
+ break;
+ default:
+ event_data->crashed = true;
+ break;
+ }
+
+event_post:
+ icnss_ignore_qmi_timeout(true);
+
+ fw_down_data.crashed = event_data->crashed;
+ icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+ icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
+ ICNSS_EVENT_SYNC, event_data);
+done:
return NOTIFY_OK;
}
@@ -2265,7 +2570,7 @@
return 0;
out:
- icnss_pr_err("PD restart not enabled: %d\n", ret);
+ icnss_pr_err("Failed to enable PD restart: %d\n", ret);
return ret;
}
@@ -2375,7 +2680,7 @@
goto out;
}
- icnss_pr_dbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+ icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
@@ -2401,7 +2706,7 @@
irq_entry->irq = irq;
irq_entry->handler = handler;
- icnss_pr_dbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
+ icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
penv->stats.ce_irqs[ce_id].request++;
out:
@@ -2420,7 +2725,7 @@
goto out;
}
- icnss_pr_dbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+ icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
@@ -2454,7 +2759,7 @@
return;
}
- icnss_pr_dbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+ icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
@@ -2478,7 +2783,7 @@
return;
}
- icnss_pr_dbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+ icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
@@ -2878,12 +3183,25 @@
goto out;
}
- if (!priv->service_notifier[0].handle) {
- icnss_pr_err("Invalid handle during recovery\n");
+ if (!test_bit(ICNSS_PDR_ENABLED, &priv->state)) {
+ icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
+ priv->state);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!priv->service_notifier || !priv->service_notifier[0].handle) {
+ icnss_pr_err("Invalid handle during recovery, state: 0x%lx\n",
+ priv->state);
ret = -EINVAL;
goto out;
}
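+ /* WARN_ON leaves a stack trace to show what triggered the recovery */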
+ WARN_ON(1);
+ icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
+ priv->state);
+ priv->stats.trigger_recovery++;
+
/*
* Initiate PDR, required only for the first instance
*/
@@ -2914,13 +3232,15 @@
goto map_fail;
}
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_ATOMIC,
- &atomic_ctx);
- if (ret < 0) {
- icnss_pr_err("Set atomic_ctx attribute failed, err = %d\n",
- ret);
- goto set_attr_fail;
+ if (!priv->bypass_s1_smmu) {
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx);
+ if (ret < 0) {
+ icnss_pr_err("Set atomic_ctx attribute failed, err = %d\n",
+ ret);
+ goto set_attr_fail;
+ }
}
ret = iommu_domain_set_attr(mapping->domain,
@@ -2959,6 +3279,114 @@
priv->smmu_mapping = NULL;
}
+static int icnss_get_vreg_info(struct device *dev,
+ struct icnss_vreg_info *vreg_info)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct regulator *reg;
+ const __be32 *prop;
+ int len = 0;
+ int i;
+
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+ if (PTR_ERR(reg) == -EPROBE_DEFER) {
+ icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ ret = PTR_ERR(reg);
+ goto out;
+ }
+
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (vreg_info->required) {
+ icnss_pr_err("Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto out;
+ } else {
+ icnss_pr_dbg("Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
+ }
+
+ vreg_info->reg = reg;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-config", vreg_info->name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+
+ icnss_pr_dbg("Got regulator config, prop: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len < (2 * sizeof(__be32))) {
+ icnss_pr_dbg("Property %s %s\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ goto done;
+ }
+
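+ /* Config cells, in order: min_v, max_v, load_ua, settle_delay */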
+ for (i = 0; (i * sizeof(__be32)) < len; i++) {
+ switch (i) {
+ case 0:
+ vreg_info->min_v = be32_to_cpup(&prop[0]);
+ break;
+ case 1:
+ vreg_info->max_v = be32_to_cpup(&prop[1]);
+ break;
+ case 2:
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ break;
+ case 3:
+ vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+ break;
+ default:
+ icnss_pr_dbg("Property %s, ignoring value at %d\n",
+ prop_name, i);
+ break;
+ }
+ }
+
+done:
+ icnss_pr_dbg("Regulator: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int icnss_get_clk_info(struct device *dev,
+ struct icnss_clk_info *clk_info)
+{
+ struct clk *handle;
+ int ret = 0;
+
+ handle = devm_clk_get(dev, clk_info->name);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ icnss_pr_err("Clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ goto out;
+ } else {
+ icnss_pr_dbg("Ignoring clock %s: %d\n", clk_info->name,
+ ret);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ icnss_pr_dbg("Clock: %s, freq: %u\n", clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+out:
+ return ret;
+}
+
static int icnss_fw_debug_show(struct seq_file *s, void *data)
{
struct icnss_priv *priv = s->private;
@@ -2969,6 +3397,7 @@
seq_puts(s, " VAL: 0 (Test mode disable)\n");
seq_puts(s, " VAL: 1 (WLAN FW test)\n");
seq_puts(s, " VAL: 2 (CCPM test)\n");
+ seq_puts(s, " VAL: 3 (Trigger Recovery)\n");
seq_puts(s, "\nCMD: dynamic_feature_mask\n");
seq_puts(s, " VAL: (64 bit feature mask)\n");
@@ -3223,6 +3652,9 @@
case ICNSS_WLFW_EXISTS:
seq_puts(s, "WLAN FW EXISTS");
continue;
+ case ICNSS_WDOG_BITE:
+ seq_puts(s, "MODEM WDOG BITE");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -3321,6 +3753,7 @@
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
+ ICNSS_STATS_DUMP(s, priv, trigger_recovery);
seq_puts(s, "\n<------------------ PM stats ------------------->\n");
ICNSS_STATS_DUMP(s, priv, pm_suspend);
@@ -3666,6 +4099,26 @@
if (ret == -EPROBE_DEFER)
goto out;
+ memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
+ for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
+ ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
+
+ if (ret)
+ goto out;
+ }
+
+ memcpy(priv->clk_info, icnss_clk_info, sizeof(icnss_clk_info));
+ for (i = 0; i < ICNSS_CLK_INFO_SIZE; i++) {
+ ret = icnss_get_clk_info(dev, &priv->clk_info[i]);
+ if (ret)
+ goto out;
+ }
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,smmu-s1-bypass"))
+ priv->bypass_s1_smmu = true;
+
+ icnss_pr_dbg("SMMU S1 BYPASS = %d\n", priv->bypass_s1_smmu);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
if (!res) {
icnss_pr_err("Memory base not found in DT\n");
@@ -3830,7 +4283,7 @@
return -EINVAL;
}
- icnss_pr_dbg("PM Suspend, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->pm_suspend ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3859,7 +4312,7 @@
return -EINVAL;
}
- icnss_pr_dbg("PM resume, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->pm_resume ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3888,7 +4341,7 @@
return -EINVAL;
}
- icnss_pr_dbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->suspend_noirq ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3917,7 +4370,7 @@
return -EINVAL;
}
- icnss_pr_dbg("PM resume_noirq, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->resume_noirq ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -3961,26 +4414,6 @@
},
};
-#ifdef CONFIG_ICNSS_DEBUG
-static void __init icnss_ipc_log_long_context_init(void)
-{
- icnss_ipc_log_long_context = ipc_log_context_create(NUM_REG_LOG_PAGES,
- "icnss_long", 0);
- if (!icnss_ipc_log_long_context)
- icnss_pr_err("Unable to create register log context\n");
-}
-
-static void __exit icnss_ipc_log_long_context_destroy(void)
-{
- ipc_log_context_destroy(icnss_ipc_log_long_context);
- icnss_ipc_log_long_context = NULL;
-}
-#else
-
-static void __init icnss_ipc_log_long_context_init(void) { }
-static void __exit icnss_ipc_log_long_context_destroy(void) { }
-#endif
-
static int __init icnss_initialize(void)
{
icnss_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES,
@@ -3988,7 +4421,10 @@
if (!icnss_ipc_log_context)
icnss_pr_err("Unable to create log context\n");
- icnss_ipc_log_long_context_init();
+ icnss_ipc_log_long_context = ipc_log_context_create(NUM_LOG_LONG_PAGES,
+ "icnss_long", 0);
+ if (!icnss_ipc_log_long_context)
+ icnss_pr_err("Unable to create log long context\n");
return platform_driver_register(&icnss_driver);
}
@@ -3998,8 +4434,8 @@
platform_driver_unregister(&icnss_driver);
ipc_log_context_destroy(icnss_ipc_log_context);
icnss_ipc_log_context = NULL;
-
- icnss_ipc_log_long_context_destroy();
+ ipc_log_context_destroy(icnss_ipc_log_long_context);
+ icnss_ipc_log_long_context = NULL;
}
diff --git a/drivers/soc/qcom/lpm-stats.c b/drivers/soc/qcom/lpm-stats.c
index 74a86ec..ee68433 100644
--- a/drivers/soc/qcom/lpm-stats.c
+++ b/drivers/soc/qcom/lpm-stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -425,7 +425,10 @@
uint64_t exit_time = 0;
/* Update time stats only when exit is preceded by enter */
- exit_time = stats->sleep_time;
+ if (stats->sleep_time < 0)
+ success = false;
+ else
+ exit_time = stats->sleep_time;
update_level_stats(&stats->time_stats[index], exit_time,
success);
}
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
index de2a1ce..4ec791c 100644
--- a/drivers/soc/qcom/msm-core.c
+++ b/drivers/soc/qcom/msm-core.c
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/uio_driver.h>
#include <asm/smp_plat.h>
+#include <asm/cputype.h>
#include <stdbool.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_core.h>
@@ -46,7 +47,6 @@
#define DEFAULT_TEMP 40
#define DEFAULT_LOW_HYST_TEMP 10
#define DEFAULT_HIGH_HYST_TEMP 5
-#define CLUSTER_OFFSET_FOR_MPIDR 8
#define MAX_CORES_PER_CLUSTER 4
#define MAX_NUM_OF_CLUSTERS 2
#define NUM_OF_CORNERS 10
@@ -291,12 +291,11 @@
int cpu = -1;
struct cpu_activity_info *node;
struct cpu_static_info *sp, *clear_sp;
- int cpumask, cluster, mpidr;
+ int cpumask, cluster;
bool pdata_valid[NR_CPUS] = {0};
get_user(cpumask, &argp->cpumask);
get_user(cluster, &argp->cluster);
- mpidr = cluster << 8;
pr_debug("%s: cpumask %d, cluster: %d\n", __func__, cpumask,
cluster);
@@ -304,10 +303,12 @@
if (!(cpumask & 0x01))
continue;
- mpidr |= i;
for_each_possible_cpu(cpu) {
- if (cpu_logical_map(cpu) == mpidr)
- break;
+ if ((cpu_topology[cpu].core_id != i) ||
+ (cpu_topology[cpu].cluster_id != cluster))
+ continue;
+
+ break;
}
}
@@ -348,10 +349,9 @@
for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
if (!(cpumask & 0x01))
continue;
- mpidr = (cluster << CLUSTER_OFFSET_FOR_MPIDR);
- mpidr |= i;
for_each_possible_cpu(cpu) {
- if (!(cpu_logical_map(cpu) == mpidr))
+ if (((cpu_topology[cpu].core_id != i) ||
+ (cpu_topology[cpu].cluster_id != cluster)))
continue;
node = &activity[cpu];
@@ -395,14 +395,12 @@
struct cpu_activity_info *node = NULL;
struct sched_params __user *argp = (struct sched_params __user *)arg;
int i, cpu = num_possible_cpus();
- int mpidr, cluster, cpumask;
+ int cluster, cpumask;
if (!argp)
return -EINVAL;
get_user(cluster, &argp->cluster);
- mpidr = (cluster << (MAX_CORES_PER_CLUSTER *
- MAX_NUM_OF_CLUSTERS));
get_user(cpumask, &argp->cpumask);
switch (cmd) {
@@ -414,8 +412,11 @@
case EA_VOLT:
for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
for_each_possible_cpu(cpu) {
- if (cpu_logical_map(cpu) == (mpidr | i))
- break;
+ if (((cpu_topology[cpu].core_id != i) ||
+ (cpu_topology[cpu].cluster_id != cluster)))
+ continue;
+
+ break;
}
}
if (cpu >= num_possible_cpus())
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index c977d1b..bf5a526 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -24,6 +24,9 @@
#define NUM_LNODES 3
#define MAX_STR_CL 50
+#define MSM_BUS_MAS_ALC 144
+#define MSM_BUS_RSC_APPS 8000
+
struct bus_search_type {
struct list_head link;
struct list_head node_list;
@@ -38,6 +41,7 @@
static LIST_HEAD(input_list);
static LIST_HEAD(apply_list);
static LIST_HEAD(commit_list);
+static LIST_HEAD(late_init_clist);
static LIST_HEAD(query_list);
DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
@@ -123,6 +127,9 @@
goto exit_bcm_add_bus_req;
}
+ if (cur_dev->node_info->bcm_req_idx != -1)
+ goto exit_bcm_add_bus_req;
+
if (!cur_dev->node_info->num_bcm_devs)
goto exit_bcm_add_bus_req;
@@ -179,8 +186,6 @@
cur_dev->node_info->bcm_req_idx = lnode_idx;
memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
- MSM_BUS_ERR("%s: Added %d entry to bcm %d @ %d\n", __func__,
- lnode->bus_dev_id, bcm_dev->node_info->id, lnode_idx);
}
exit_bcm_add_bus_req:
@@ -316,7 +321,6 @@
MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
goto exit_prune_path;
}
- MSM_BUS_ERR("%s: dest dev %d", __func__, dest);
lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
bcm_add_bus_req(dest_dev);
@@ -520,7 +524,6 @@
max_ib = max(max_ib,
max(bcm_dev->lnode_list[i].lnode_ib[ACTIVE_CTX],
bcm_dev->lnode_list[i].lnode_ib[DUAL_CTX]));
-
max_ab = max(max_ab,
bcm_dev->lnode_list[i].lnode_ab[ACTIVE_CTX] +
bcm_dev->lnode_list[i].lnode_ab[DUAL_CTX]);
@@ -531,9 +534,14 @@
bcm_dev->lnode_list[i].lnode_ab[ctx]);
}
}
-
bcm_dev->node_bw[ctx].max_ab = max_ab;
bcm_dev->node_bw[ctx].max_ib = max_ib;
+
+ max_ab = msm_bus_div64(max_ab, bcm_dev->bcmdev->unit_size);
+ max_ib = msm_bus_div64(max_ib, bcm_dev->bcmdev->unit_size);
+
+ bcm_dev->node_vec[ctx].vec_a = max_ab;
+ bcm_dev->node_vec[ctx].vec_b = max_ib;
}
exit_bcm_update_bus_req:
return;
@@ -598,44 +606,81 @@
}
}
+ max_query_ab = msm_bus_div64(max_query_ab,
+ bcm_dev->bcmdev->unit_size);
+ max_query_ib = msm_bus_div64(max_query_ib,
+ bcm_dev->bcmdev->unit_size);
+
bcm_dev->node_bw[ctx].max_query_ab = max_query_ab;
bcm_dev->node_bw[ctx].max_query_ib = max_query_ib;
-
}
exit_bcm_query_bus_req:
return;
}
-
-
-int bcm_remove_handoff_req(struct device *dev, void *data)
+static void bcm_update_alc_req(struct msm_bus_node_device_type *dev, int ctx)
{
struct msm_bus_node_device_type *bcm_dev = NULL;
int i;
- uint64_t max_ib = 0;
- uint64_t max_ab = 0;
+ uint64_t max_alc = 0;
+
+ if (!dev || !to_msm_bus_node(dev->node_info->bus_device)) {
+ MSM_BUS_ERR("Bus node pointer is Invalid");
+ goto exit_bcm_update_alc_req;
+ }
+
+ for (i = 0; i < dev->num_lnodes; i++)
+ max_alc = max(max_alc, dev->lnode_list[i].alc_idx[ctx]);
+
+ dev->node_bw[ctx].max_alc = max_alc;
+
+ bcm_dev = to_msm_bus_node(dev->node_info->bcm_devs[0]);
+
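+ /* The active-set request must also cover the dual (sleep) request */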
+ if (ctx == ACTIVE_CTX) {
+ max_alc = max(max_alc,
+ max(dev->node_bw[ACTIVE_CTX].max_alc,
+ dev->node_bw[DUAL_CTX].max_alc));
+ } else {
+ max_alc = dev->node_bw[ctx].max_alc;
+ }
+
+ bcm_dev->node_bw[ctx].max_alc = max_alc;
+ bcm_dev->node_vec[ctx].vec_a = max_alc;
+ bcm_dev->node_vec[ctx].vec_b = 0;
+
+exit_bcm_update_alc_req:
+ return;
+}
+
+int bcm_remove_handoff_req(struct device *dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_dev = NULL;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ struct msm_bus_node_device_type *cur_rsc = NULL;
int ret = 0;
rt_mutex_lock(&msm_bus_adhoc_lock);
- bcm_dev = to_msm_bus_node(dev);
- if (!bcm_dev) {
- MSM_BUS_ERR("%s: Null device ptr", __func__);
- goto exit_bcm_remove_handoff_req;
- }
-
- if (!bcm_dev->node_info->is_bcm_dev)
+ bus_dev = to_msm_bus_node(dev);
+ if (bus_dev->node_info->is_bcm_dev ||
+ bus_dev->node_info->is_fab_dev ||
+ bus_dev->node_info->is_rsc_dev)
goto exit_bcm_remove_handoff_req;
- for (i = 0; i < bcm_dev->num_lnodes; i++) {
- max_ib = max(max_ib,
- bcm_dev->lnode_list[i].lnode_ib[0]);
- max_ab = max(max_ab,
- bcm_dev->lnode_list[i].lnode_ab[0]);
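+ /* Only hand off votes that are routed to the APPS RSC */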
+ if (bus_dev->node_info->num_bcm_devs) {
+ cur_bcm = to_msm_bus_node(bus_dev->node_info->bcm_devs[0]);
+ if (cur_bcm->node_info->num_rsc_devs) {
+ cur_rsc =
+ to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+ if (cur_rsc->node_info->id != MSM_BUS_RSC_APPS)
+ goto exit_bcm_remove_handoff_req;
+ }
}
- bcm_dev->node_bw[0].max_ab = max_ab;
- bcm_dev->node_bw[0].max_ib = max_ib;
+ if (!bus_dev->dirty) {
+ list_add_tail(&bus_dev->link, &late_init_clist);
+ bus_dev->dirty = true;
+ }
exit_bcm_remove_handoff_req:
rt_mutex_unlock(&msm_bus_adhoc_lock);
@@ -684,7 +729,6 @@
sum_ab += bus_dev->lnode_list[i].lnode_query_ab[ctx];
}
- MSM_BUS_ERR("aggregate: query_ab:%llu\n", sum_ab);
bus_dev->node_bw[ctx].sum_query_ab = sum_ab;
bus_dev->node_bw[ctx].max_query_ib = max_ib;
@@ -766,6 +810,18 @@
INIT_LIST_HEAD(&commit_list);
}
+void commit_late_init_data(void)
+{
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ msm_bus_commit_data(&late_init_clist);
+ INIT_LIST_HEAD(&late_init_clist);
+
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
static void add_node_to_clist(struct msm_bus_node_device_type *node)
{
struct msm_bus_node_device_type *node_parent =
@@ -870,6 +926,63 @@
return ret;
}
+static int update_alc_vote(struct device *alc_dev, uint64_t act_req_fa_lat,
+ uint64_t act_req_idle_time, uint64_t slp_req_fa_lat,
+ uint64_t slp_req_idle_time, uint64_t cur_fa_lat,
+ uint64_t cur_idle_time, int idx, int ctx)
+{
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int curr_idx, i;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(alc_dev)) {
+ MSM_BUS_ERR("%s: No source device", __func__);
+ ret = -ENODEV;
+ goto exit_update_alc_vote;
+ }
+
+ if (idx < 0) {
+ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, idx);
+ ret = -ENXIO;
+ goto exit_update_alc_vote;
+ }
+
+ dev_info = to_msm_bus_node(alc_dev);
+ curr_idx = idx;
+
+ if (curr_idx >= dev_info->num_lnodes) {
+ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+ __func__, curr_idx, dev_info->num_lnodes);
+ ret = -ENXIO;
+ goto exit_update_alc_vote;
+ }
+
+ lnode = &dev_info->lnode_list[curr_idx];
+ if (!lnode) {
+ MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+ __func__, curr_idx);
+ ret = -ENXIO;
+ goto exit_update_alc_vote;
+ }
+
+ /*
+ * Add aggregation and mapping logic once LUT is avail.
+ * Use default values for time being.
+ */
+ lnode->alc_idx[ACTIVE_CTX] = 12;
+ lnode->alc_idx[DUAL_CTX] = 0;
+
+ for (i = 0; i < NUM_CTX; i++)
+ bcm_update_alc_req(dev_info, i);
+
+ add_node_to_clist(dev_info);
+
+exit_update_alc_vote:
+ return ret;
+}
+
static int query_path(struct device *src_dev, int dest, uint64_t act_req_ib,
uint64_t act_req_bw, uint64_t slp_req_ib,
uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
@@ -1160,6 +1273,40 @@
}
client->pdata = pdata;
+ if (pdata->alc) {
+ client->curr = -1;
+ lnode = kzalloc(sizeof(int), GFP_KERNEL);
+
+ if (ZERO_OR_NULL_PTR(lnode)) {
+ MSM_BUS_ERR("%s: Error allocating lnode!", __func__);
+ goto exit_lnode_malloc_fail;
+ }
+ client->src_pnode = lnode;
+
+ client->src_devs = kzalloc(sizeof(struct device *),
+ GFP_KERNEL);
+ if (IS_ERR_OR_NULL(client->src_devs)) {
+ MSM_BUS_ERR("%s: Error allocating src_dev!", __func__);
+ goto exit_src_dev_malloc_fail;
+ }
+ src = MSM_BUS_MAS_ALC;
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+ if (IS_ERR_OR_NULL(dev)) {
+ MSM_BUS_ERR("%s:Failed to find alc device",
+ __func__);
+ goto exit_invalid_data;
+ }
+ gen_lnode(dev, MSM_BUS_MAS_ALC, 0, pdata->name);
+ bcm_add_bus_req(dev);
+
+ client->src_devs[0] = dev;
+
+ handle = gen_handle(client);
+ goto exit_register_client;
+ }
+
lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(lnode)) {
MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
@@ -1293,6 +1440,58 @@
return ret;
}
+static int update_client_alc(struct msm_bus_client *client, bool log_trns,
+ unsigned int idx)
+{
+ int lnode, cur_idx;
+ uint64_t req_idle_time, req_fal, dual_idle_time, dual_fal;
+ /* No current vote is tracked yet; pass zeros to update_alc_vote() */
+ uint64_t cur_idle_time = 0, cur_fal = 0;
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct device *src_dev;
+
+ if (!client) {
+ MSM_BUS_ERR("Client handle Null");
+ ret = -ENXIO;
+ goto exit_update_client_alc;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("Client pdata Null");
+ ret = -ENXIO;
+ goto exit_update_client_alc;
+ }
+
+ cur_idx = client->curr;
+ client->curr = idx;
+ req_fal = pdata->usecase_lat[idx].fal_ns;
+ req_idle_time = pdata->usecase_lat[idx].idle_t_ns;
+ lnode = client->src_pnode[0];
+ src_dev = client->src_devs[0];
+
+ if (pdata->active_only) {
+ dual_fal = 0;
+ dual_idle_time = 0;
+ } else {
+ dual_fal = req_fal;
+ dual_idle_time = req_idle_time;
+ }
+
+ ret = update_alc_vote(src_dev, req_fal, req_idle_time, dual_fal,
+ dual_idle_time, cur_fal, cur_idle_time, lnode,
+ pdata->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+ __func__, ret, pdata->active_only);
+ goto exit_update_client_alc;
+ }
+ commit_data();
+exit_update_client_alc:
+ return ret;
+}
+
static int query_usecase(struct msm_bus_client *client, bool log_trns,
unsigned int idx,
struct msm_bus_tcs_usecase *tcs_usecase)
@@ -1483,8 +1682,13 @@
MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
cl, index, client->curr, client->pdata->usecase->num_paths);
- msm_bus_dbg_client_data(client->pdata, index, cl);
- ret = update_client_paths(client, log_transaction, index);
+
+ if (pdata->alc)
+ ret = update_client_alc(client, log_transaction, index);
+ else {
+ msm_bus_dbg_client_data(client->pdata, index, cl);
+ ret = update_client_paths(client, log_transaction, index);
+ }
if (ret) {
pr_err("%s: Err updating path\n", __func__);
goto exit_update_request;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
index 5908122..015edb3 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, 2014-2016, The Linux Foundation. All rights
+/* Copyright (c) 2010-2012, 2014-2017, The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -439,7 +439,6 @@
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->clid == clid) {
- debugfs_remove(cldata->file);
list_del(&cldata->list);
kfree(cldata);
break;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 51e03c3..c950367 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -45,6 +45,7 @@
static struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
static struct msm_bus_node_device_type *cur_rsc;
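+/*
+ * True until msm_bus_late_init(): while set, zero-vote BCMs stay on the
+ * commit lists so bootloader handoff votes are not dropped prematurely.
+ */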
+static bool init_time = true;
struct bcm_db {
uint32_t unit_size;
@@ -265,27 +266,23 @@
}
static int tcs_cmd_gen(struct msm_bus_node_device_type *cur_bcm,
- struct tcs_cmd *cmd, uint64_t ib,
- uint64_t ab, bool commit)
+ struct tcs_cmd *cmd, uint64_t vec_a,
+ uint64_t vec_b, bool commit)
{
int ret = 0;
bool valid = true;
- if (ib == 0 && ab == 0) {
+ if (vec_a == 0 && vec_b == 0)
valid = false;
- } else {
- do_div(ib, cur_bcm->bcmdev->unit_size);
- do_div(ab, cur_bcm->bcmdev->unit_size);
- }
- if (ib > BCM_TCS_CMD_VOTE_MASK)
- ib = BCM_TCS_CMD_VOTE_MASK;
+ if (vec_a > BCM_TCS_CMD_VOTE_MASK)
+ vec_a = BCM_TCS_CMD_VOTE_MASK;
- if (ab > BCM_TCS_CMD_VOTE_MASK)
- ab = BCM_TCS_CMD_VOTE_MASK;
+ if (vec_b > BCM_TCS_CMD_VOTE_MASK)
+ vec_b = BCM_TCS_CMD_VOTE_MASK;
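+ /* Pack the clamped votes plus commit/valid flags into the TCS command */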
cmd->addr = cur_bcm->bcmdev->addr;
- cmd->data = BCM_TCS_CMD(commit, valid, ab, ib);
+ cmd->data = BCM_TCS_CMD(commit, valid, vec_a, vec_b);
cmd->complete = commit;
return ret;
@@ -314,7 +311,12 @@
if (list_empty(&cur_bcm_clist[i]))
continue;
list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
- if (cur_bcm->updated) {
+ if (cur_bcm->updated ||
+ (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+ init_time == true)) {
if (last_tcs != -1 &&
list_is_last(&cur_bcm->link,
&cur_bcm_clist[i])) {
@@ -333,10 +335,10 @@
idx++;
}
tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
- cur_bcm->node_bw[ACTIVE_CTX].max_ib,
- cur_bcm->node_bw[ACTIVE_CTX].max_ab, commit);
- k++;
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a,
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
last_tcs = k;
+ k++;
cur_bcm->updated = true;
}
}
@@ -352,25 +354,26 @@
continue;
list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
commit = false;
- if ((cur_bcm->node_bw[DUAL_CTX].max_ab ==
- cur_bcm->node_bw[ACTIVE_CTX].max_ab) &&
- (cur_bcm->node_bw[DUAL_CTX].max_ib ==
- cur_bcm->node_bw[ACTIVE_CTX].max_ib)) {
+ if ((cur_bcm->node_vec[DUAL_CTX].vec_a ==
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a) &&
+ (cur_bcm->node_vec[DUAL_CTX].vec_b ==
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b)) {
if (last_tcs != -1 &&
list_is_last(&cur_bcm->link,
&cur_bcm_clist[i])) {
- cmdlist_wake[k].data |=
+ cmdlist_wake[last_tcs].data |=
BCM_TCS_CMD_COMMIT_MASK;
- cmdlist_sleep[k].data |=
+ cmdlist_sleep[last_tcs].data |=
BCM_TCS_CMD_COMMIT_MASK;
- cmdlist_wake[k].complete = true;
- cmdlist_sleep[k].complete = true;
+ cmdlist_wake[last_tcs].complete = true;
+ cmdlist_sleep[last_tcs].complete = true;
idx++;
}
continue;
}
last_tcs = k;
n_sleep[idx]++;
+ n_wake[idx]++;
if (list_is_last(&cur_bcm->link,
&cur_bcm_clist[i])) {
commit = true;
@@ -378,11 +381,11 @@
}
tcs_cmd_gen(cur_bcm, &cmdlist_wake[k],
- cur_bcm->node_bw[ACTIVE_CTX].max_ib,
- cur_bcm->node_bw[ACTIVE_CTX].max_ab, commit);
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a,
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
tcs_cmd_gen(cur_bcm, &cmdlist_sleep[k],
- cur_bcm->node_bw[DUAL_CTX].max_ib,
- cur_bcm->node_bw[DUAL_CTX].max_ab, commit);
+ cur_bcm->node_vec[DUAL_CTX].vec_a,
+ cur_bcm->node_vec[DUAL_CTX].vec_b, commit);
k++;
}
}
@@ -485,10 +488,11 @@
cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
- if (cur_bcm->node_bw[DUAL_CTX].max_ab == 0 &&
- cur_bcm->node_bw[ACTIVE_CTX].max_ab == 0 &&
- cur_bcm->node_bw[DUAL_CTX].max_ib == 0 &&
- cur_bcm->node_bw[ACTIVE_CTX].max_ib == 0) {
+ if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+ init_time == false) {
cur_bcm->dirty = false;
list_del_init(&cur_bcm->link);
}
@@ -550,25 +554,31 @@
if (list_empty(&cur_bcm_clist[i]))
continue;
list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
- if ((cur_bcm->node_bw[DUAL_CTX].max_ab !=
- cur_bcm->node_bw[ACTIVE_CTX].max_ab) ||
- (cur_bcm->node_bw[DUAL_CTX].max_ib !=
- cur_bcm->node_bw[ACTIVE_CTX].max_ib)) {
+ if ((cur_bcm->node_vec[DUAL_CTX].vec_a !=
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a) ||
+ (cur_bcm->node_vec[DUAL_CTX].vec_b !=
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b)) {
cnt_sleep++;
cnt_wake++;
}
- if (!cur_bcm->updated)
- cnt_active++;
+ if (cur_bcm->updated ||
+ (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+ init_time == true))
+ continue;
+ cnt_active++;
}
cnt_vcd++;
}
- MSM_BUS_ERR("%s: cmd_gen\n", __func__);
n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
- cmdlist_active = kcalloc(cnt_active, sizeof(struct tcs_cmd),
+ if (cnt_active)
+ cmdlist_active = kcalloc(cnt_active, sizeof(struct tcs_cmd),
GFP_KERNEL);
if (cnt_sleep && cnt_wake) {
cmdlist_wake = kcalloc(cnt_wake, sizeof(struct tcs_cmd),
@@ -580,7 +590,11 @@
cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
ret = rpmh_invalidate(cur_mbox);
- ret = rpmh_write_passthru(cur_mbox, cur_rsc->rscdev->req_state,
+ if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE)
+ ret = rpmh_write(cur_mbox, cur_rsc->rscdev->req_state,
+ cmdlist_active, cnt_active);
+ else
+ ret = rpmh_write_passthru(cur_mbox, cur_rsc->rscdev->req_state,
cmdlist_active, n_active);
ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
@@ -1086,6 +1100,7 @@
node_info->name = pdata_node_info->name;
node_info->id = pdata_node_info->id;
+ node_info->bcm_req_idx = -1;
node_info->bus_device_id = pdata_node_info->bus_device_id;
node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
@@ -1289,6 +1304,7 @@
bus_node->node_info = node_info;
bus_node->ap_owned = pdata->ap_owned;
+ bus_node->dirty = false;
bus_dev->of_node = pdata->of_node;
if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
@@ -1531,21 +1547,22 @@
goto exit_device_probe;
}
}
- if (pdata->info[i].node_info->is_bcm_dev)
+ if (pdata->info[i].node_info->is_bcm_dev) {
ret = msm_bus_bcm_init(node_dev, &pdata->info[i]);
if (ret) {
MSM_BUS_ERR("%s: Error intializing bcm %d",
__func__, pdata->info[i].node_info->id);
goto exit_device_probe;
}
- if (pdata->info[i].node_info->is_rsc_dev)
+ }
+ if (pdata->info[i].node_info->is_rsc_dev) {
ret = msm_bus_rsc_init(pdev, node_dev, &pdata->info[i]);
if (ret) {
MSM_BUS_ERR("%s: Error intializing rsc %d",
__func__, pdata->info[i].node_info->id);
goto exit_device_probe;
}
-
+ }
}
ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
@@ -1652,9 +1669,11 @@
int rc;
MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
+ init_time = false;
rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
bcm_remove_handoff_req);
+ commit_late_init_data();
return rc;
}
subsys_initcall(msm_bus_device_init_driver);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c
index 856dcce..fd72ae6 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
{
struct msm_bus_scale_pdata *pdata = NULL;
struct msm_bus_paths *usecase = NULL;
+ struct msm_bus_lat_vectors *usecase_lat = NULL;
int i = 0, j, ret, num_usecases = 0, num_paths, len;
const uint32_t *vec_arr = NULL;
bool mem_err = false;
@@ -85,6 +86,42 @@
pr_debug("Using dual context by default\n");
}
+ pdata->alc = of_property_read_bool(of_node, "qcom,msm-bus,alc-voter");
+
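+ /*
+ * Hypothetical DT fragment for an ALC voter (values illustrative):
+ * qcom,msm-bus,alc-voter;
+ * qcom,msm-bus,vectors-alc = <150 3000>, <0 0>;
+ * Each <fal idle_time> pair is scaled by KBTOB() into fal_ns/idle_t_ns.
+ */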
+ if (pdata->alc) {
+ usecase_lat = devm_kzalloc(&pdev->dev,
+ (sizeof(struct msm_bus_lat_vectors) *
+ pdata->num_usecases), GFP_KERNEL);
+ if (!usecase_lat) {
+ mem_err = true;
+ goto err;
+ }
+
+ vec_arr = of_get_property(of_node,
+ "qcom,msm-bus,vectors-alc", &len);
+ if (vec_arr == NULL) {
+ pr_err("Error: Lat vector array not found\n");
+ goto err;
+ }
+
+ if (len != num_usecases * sizeof(uint32_t) * 2) {
+ pr_err("Error: Length-error on getting vectors\n");
+ goto err;
+ }
+
+ for (i = 0; i < num_usecases; i++) {
+ int index = i * 2;
+
+ usecase_lat[i].fal_ns = (uint64_t)
+ KBTOB(be32_to_cpu(vec_arr[index]));
+ usecase_lat[i].idle_t_ns = (uint64_t)
+ KBTOB(be32_to_cpu(vec_arr[index + 1]));
+ }
+
+ pdata->usecase_lat = usecase_lat;
+ return pdata;
+ }
+
usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
pdata->num_usecases), GFP_KERNEL);
if (!usecase) {
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index f415735..fad7afa 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -29,6 +29,7 @@
uint64_t lnode_ab[NUM_CTX];
uint64_t lnode_query_ib[NUM_CTX];
uint64_t lnode_query_ab[NUM_CTX];
+ uint64_t alc_idx[NUM_CTX];
int next;
struct device *next_dev;
struct list_head link;
@@ -61,11 +62,17 @@
uint64_t sum_query_ab;
uint64_t max_query_ib;
uint64_t max_query_ab;
+ uint64_t max_alc;
uint64_t cur_clk_hz;
uint32_t util_used;
uint32_t vrail_used;
};
+struct nodevector {
+ uint64_t vec_a;
+ uint64_t vec_b;
+};
+
struct msm_bus_rsc_device_type {
struct rpmh_client *mbox;
struct list_head bcm_clist[VCD_MAX_CNT];
@@ -167,6 +174,7 @@
int num_lnodes;
struct link_node *lnode_list;
struct nodebw node_bw[NUM_CTX];
+ struct nodevector node_vec[NUM_CTX];
struct list_head link;
struct list_head query_link;
struct nodeclk clk[NUM_CTX];
@@ -193,6 +201,7 @@
int throttle_en, uint64_t lim_bw);
int msm_bus_commit_data(struct list_head *clist);
int bcm_remove_handoff_req(struct device *dev, void *data);
+void commit_late_init_data(void);
int msm_bus_query_gen(struct list_head *qlist,
struct msm_bus_tcs_usecase *tcs_usecase);
void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 2471d27..519bb611 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -625,14 +625,17 @@
return -ENETRESET;
}
+ mutex_lock(&devp->ch_lock);
if (!glink_rx_intent_exists(devp->handle, count)) {
ret = glink_queue_rx_intent(devp->handle, devp, count);
if (ret) {
GLINK_PKT_ERR("%s: failed to queue_rx_intent ret[%d]\n",
__func__, ret);
+ mutex_unlock(&devp->ch_lock);
return ret;
}
}
+ mutex_unlock(&devp->ch_lock);
GLINK_PKT_INFO("Begin %s on glink_pkt_dev id:%d buffer_size %zu\n",
__func__, devp->i, count);
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index fb3d7d9..c5ba279 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -75,6 +75,10 @@
#define MSS_RESTART_ID 0xA
#define MSS_MAGIC 0XAABADEAD
+
+#define MSS_PDC_OFFSET 8
+#define MSS_PDC_MASK BIT(MSS_PDC_OFFSET)
+
enum scm_cmd {
PAS_MEM_SETUP_CMD = 2,
};
@@ -204,6 +208,33 @@
clk_disable_unprepare(drv->ahb_clk);
}
+static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
+{
+ u32 val = 0;
+
+ if (drv->pdc_sync) {
+ val = readl_relaxed(drv->pdc_sync);
+ if (pdc_sync)
+ val |= MSS_PDC_MASK;
+ else
+ val &= ~MSS_PDC_MASK;
+ writel_relaxed(val, drv->pdc_sync);
+ /* Ensure PDC is written before next write */
+ wmb();
+ udelay(2);
+ }
+}
+
+static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
+{
+ if (drv->alt_reset) {
+ writel_relaxed(val, drv->alt_reset);
+ /* Ensure alt reset is written before restart reg */
+ wmb();
+ udelay(2);
+ }
+}
+
static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
{
int ret = 0;
@@ -235,6 +266,32 @@
return ret;
}
+static int pil_mss_assert_resets(struct q6v5_data *drv)
+{
+ int ret = 0;
+
+ pil_mss_pdc_sync(drv, true);
+ pil_mss_alt_reset(drv, 1);
+ ret = pil_mss_restart_reg(drv, 1);
+
+ return ret;
+}
+
+static int pil_mss_deassert_resets(struct q6v5_data *drv)
+{
+ int ret = 0;
+
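+ /* Release resets in the reverse order of pil_mss_assert_resets() */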
+ ret = pil_mss_restart_reg(drv, 0);
+ if (ret)
+ return ret;
+ /* Wait 6 32kHz sleep cycles for reset */
+ udelay(200);
+ pil_mss_alt_reset(drv, 0);
+ pil_mss_pdc_sync(drv, false);
+
+ return ret;
+}
+
static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
{
struct device *dev = drv->desc.dev;
@@ -304,7 +361,10 @@
ret);
}
- ret = pil_mss_restart_reg(drv, 1);
+ pil_mss_assert_resets(drv);
+ /* Wait 6 32kHz sleep cycles for reset */
+ udelay(200);
+ ret = pil_mss_deassert_resets(drv);
if (drv->is_booted) {
pil_mss_disable_clks(drv);
@@ -450,6 +510,7 @@
{
struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
phys_addr_t start_addr = pil_get_entry_addr(pil);
+ u32 debug_val;
int ret;
if (drv->mba_dp_phys)
@@ -463,15 +524,22 @@
if (ret)
goto err_power;
- /* Deassert reset to subsystem and wait for propagation */
- ret = pil_mss_restart_reg(drv, 0);
- if (ret)
- goto err_restart;
-
ret = pil_mss_enable_clks(drv);
if (ret)
goto err_clks;
+ /* Save state of modem debug register before full reset */
+ debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+
+ /* Assert reset to subsystem */
+ pil_mss_assert_resets(drv);
+ /* Wait 6 32kHz sleep cycles for reset */
+ udelay(200);
+ ret = pil_mss_deassert_resets(drv);
+ if (ret)
+ goto err_restart;
+
+ writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
if (modem_dbg_cfg)
writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
@@ -519,12 +587,11 @@
err_q6v5_reset:
modem_log_rmb_regs(drv->rmb_base);
+err_restart:
pil_mss_disable_clks(drv);
if (drv->ahb_clk_vote)
clk_disable_unprepare(drv->ahb_clk);
err_clks:
- pil_mss_restart_reg(drv, 1);
-err_restart:
pil_mss_power_down(drv);
err_power:
return ret;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 2cbbe2e..df0c609c 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -266,8 +266,8 @@
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"rmb_base");
q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
- if (!q6->rmb_base)
- return -ENOMEM;
+ if (IS_ERR(q6->rmb_base))
+ return PTR_ERR(q6->rmb_base);
drv->rmb_base = q6->rmb_base;
q6_desc->ops = &pil_msa_mss_ops_selfauth;
}
@@ -284,6 +284,20 @@
if (!q6->restart_reg)
return -ENOMEM;
+ q6->pdc_sync = NULL;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pdc_sync");
+ if (res) {
+ q6->pdc_sync = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ }
+
+ q6->alt_reset = NULL;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "alt_reset");
+ if (res) {
+ q6->alt_reset = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ }
+
q6->vreg = NULL;
prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index d9d6c72..b41a173 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -120,6 +120,18 @@
goto err_qdss_vote;
}
+ ret = clk_prepare_enable(drv->prng_clk);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for prng(rc:%d)\n", ret);
+ goto err_prng_vote;
+ }
+
+ ret = clk_prepare_enable(drv->axis2_clk);
+ if (ret) {
+ dev_err(pil->dev, "Failed to vote for axis2(rc:%d)\n", ret);
+ goto err_axis2_vote;
+ }
+
ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
if (ret) {
dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
@@ -157,6 +169,10 @@
err_cx_mode:
regulator_set_voltage(drv->vreg_cx, 0, uv);
err_cx_voltage:
+ clk_disable_unprepare(drv->axis2_clk);
+err_axis2_vote:
+ clk_disable_unprepare(drv->prng_clk);
+err_prng_vote:
clk_disable_unprepare(drv->qdss_clk);
err_qdss_vote:
clk_disable_unprepare(drv->pnoc_clk);
@@ -189,6 +205,8 @@
clk_disable_unprepare(drv->xo);
clk_disable_unprepare(drv->pnoc_clk);
clk_disable_unprepare(drv->qdss_clk);
+ clk_disable_unprepare(drv->prng_clk);
+ clk_disable_unprepare(drv->axis2_clk);
}
EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
@@ -628,8 +646,8 @@
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!drv->reg_base)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(drv->reg_base))
+ return drv->reg_base;
desc = &drv->desc;
ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
@@ -763,6 +781,24 @@
drv->qdss_clk = NULL;
}
+ if (of_property_match_string(pdev->dev.of_node,
+ "qcom,proxy-clock-names", "prng_clk") >= 0) {
+ drv->prng_clk = devm_clk_get(&pdev->dev, "prng_clk");
+ if (IS_ERR(drv->prng_clk))
+ return ERR_CAST(drv->prng_clk);
+ } else {
+ drv->prng_clk = NULL;
+ }
+
+ if (of_property_match_string(pdev->dev.of_node,
+ "qcom,proxy-clock-names", "axis2_clk") >= 0) {
+ drv->axis2_clk = devm_clk_get(&pdev->dev, "axis2_clk");
+ if (IS_ERR(drv->axis2_clk))
+ return ERR_CAST(drv->axis2_clk);
+ } else {
+ drv->axis2_clk = NULL;
+ }
+
drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
if (IS_ERR(drv->vreg_cx))
return ERR_CAST(drv->vreg_cx);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
index 49aee97..9b4c811 100644
--- a/drivers/soc/qcom/pil-q6v5.h
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
struct clk *snoc_axi_clk;
struct clk *mnoc_axi_clk;
struct clk *qdss_clk;
+ struct clk *prng_clk;
+ struct clk *axis2_clk;
void __iomem *axi_halt_base; /* Halt base of q6, mss,
* nc are in same 4K page
*/
@@ -42,6 +44,8 @@
void __iomem *axi_halt_mss;
void __iomem *axi_halt_nc;
void __iomem *restart_reg;
+ void __iomem *pdc_sync;
+ void __iomem *alt_reset;
struct regulator *vreg;
struct regulator *vreg_cx;
struct regulator *vreg_mx;
diff --git a/drivers/soc/qcom/qpnp-pbs.c b/drivers/soc/qcom/qpnp-pbs.c
new file mode 100644
index 0000000..287c8a2
--- /dev/null
+++ b/drivers/soc/qcom/qpnp-pbs.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "PBS: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/qpnp/qpnp-pbs.h>
+
+#define QPNP_PBS_DEV_NAME "qcom,qpnp-pbs"
+
+#define PBS_CLIENT_TRIG_CTL 0x42
+#define PBS_CLIENT_SW_TRIG_BIT BIT(7)
+#define PBS_CLIENT_SCRATCH1 0x50
+#define PBS_CLIENT_SCRATCH2 0x51
+
+static LIST_HEAD(pbs_dev_list);
+static DEFINE_MUTEX(pbs_list_lock);
+
+struct qpnp_pbs {
+ struct platform_device *pdev;
+ struct device *dev;
+ struct device_node *dev_node;
+ struct regmap *regmap;
+ struct mutex pbs_lock;
+ struct list_head link;
+
+ u32 base;
+};
+
+static int qpnp_pbs_read(struct qpnp_pbs *pbs, u32 address,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = pbs->pdev;
+
+ rc = regmap_bulk_read(pbs->regmap, address, val, count);
+ if (rc)
+ pr_err("Failed to read address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
+
+ return rc;
+}
+
+static int qpnp_pbs_write(struct qpnp_pbs *pbs, u16 address,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = pbs->pdev;
+
+ rc = regmap_bulk_write(pbs->regmap, address, val, count);
+ if (rc < 0)
+ pr_err("Failed to write address =0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
+ else
+ pr_debug("Wrote 0x%02X to addr 0x%04x\n", *val, address);
+
+ return rc;
+}
+
+static int qpnp_pbs_masked_write(struct qpnp_pbs *pbs, u16 address,
+ u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(pbs->regmap, address, mask, val);
+ if (rc < 0)
+ pr_err("Failed to write address 0x%04X, rc = %d\n",
+ address, rc);
+ else
+ pr_debug("Wrote 0x%02X to addr 0x%04X\n",
+ val, address);
+
+ return rc;
+}
+
+static struct qpnp_pbs *get_pbs_client_node(struct device_node *dev_node)
+{
+ struct qpnp_pbs *pbs;
+
+ mutex_lock(&pbs_list_lock);
+ list_for_each_entry(pbs, &pbs_dev_list, link) {
+ if (dev_node == pbs->dev_node) {
+ mutex_unlock(&pbs_list_lock);
+ return pbs;
+ }
+ }
+
+ mutex_unlock(&pbs_list_lock);
+ return ERR_PTR(-EINVAL);
+}
+
+static int qpnp_pbs_wait_for_ack(struct qpnp_pbs *pbs, u8 bit_pos)
+{
+ int rc = 0;
+ u16 retries = 2000, dly = 1000;
+ u8 val;
+
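+ /* Poll SCRATCH2 roughly every 1 ms, for up to ~2 s */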
+ while (retries--) {
+ rc = qpnp_pbs_read(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to read register %x rc = %d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ return rc;
+ }
+
+ if (val == 0xFF) {
+ /* PBS error - clear SCRATCH2 register */
+ val = 0;
+ rc = qpnp_pbs_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to clear register %x rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ return rc;
+ }
+
+ pr_err("NACK from PBS for bit %d\n", bit_pos);
+ return -EINVAL;
+ }
+
+ if (val & BIT(bit_pos)) {
+ pr_debug("PBS sequence for bit %d executed!\n",
+ bit_pos);
+ break;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ if (!retries) {
+ pr_err("Timeout for PBS ACK/NACK for bit %d\n", bit_pos);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * qpnp_pbs_trigger_event - Trigger the PBS RAM sequence
+ * @dev_node: Device node of the PBS client
+ * @bitmap: Bitmap of the PBS sequence bits to trigger
+ *
+ * Returns 0 if the PBS RAM sequence executed successfully, < 0 on error.
+ *
+ * This function is used by client drivers to trigger execution of a
+ * PBS RAM sequence.
+ *
+ * The PBS trigger sequence involves
+ * 1. Setting the PBS sequence bit in PBS_CLIENT_SCRATCH1
+ * 2. Initiating the SW PBS trigger
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for
+ * completion of the sequence
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute
+ */
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap)
+{
+ struct qpnp_pbs *pbs;
+ int rc = 0;
+ u16 bit_pos = 0;
+ u8 val, mask = 0;
+
+ if (!dev_node)
+ return -EINVAL;
+
+ if (!bitmap) {
+ pr_err("Invalid bitmap passed by client\n");
+ return -EINVAL;
+ }
+
+ pbs = get_pbs_client_node(dev_node);
+ if (IS_ERR_OR_NULL(pbs)) {
+ pr_err("Unable to find the PBS dev_node\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pbs->pbs_lock);
+ rc = qpnp_pbs_read(pbs, pbs->base + PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto out;
+ }
+
+ if (val == 0xFF) {
+ /* PBS error - clear SCRATCH2 register */
+ val = 0;
+ rc = qpnp_pbs_write(pbs, pbs->base + PBS_CLIENT_SCRATCH2,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to clear register %x rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto out;
+ }
+ }
+
+ for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+ if (bitmap & BIT(bit_pos)) {
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH2 mask register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto error;
+ }
+
+ /*
+ * Set the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH1 register.
+ */
+ val = mask = BIT(bit_pos);
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH1, mask, val);
+ if (rc < 0) {
+ pr_err("Failed to set %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+ goto error;
+ }
+
+ /* Initiate the SW trigger */
+ val = mask = PBS_CLIENT_SW_TRIG_BIT;
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_TRIG_CTL, mask, val);
+ if (rc < 0) {
+ pr_err("Failed to write register %x rc=%d\n",
+ PBS_CLIENT_TRIG_CTL, rc);
+ goto error;
+ }
+
+ rc = qpnp_pbs_wait_for_ack(pbs, bit_pos);
+ if (rc < 0) {
+ pr_err("Error during wait_for_ack\n");
+ goto error;
+ }
+
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH1 register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+ goto error;
+ }
+
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH2 mask register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto error;
+ }
+
+ }
+ }
+
+error:
+ /* Clear all the requested bitmap */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base + PBS_CLIENT_SCRATCH1,
+ bitmap, 0);
+ if (rc < 0)
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+out:
+ mutex_unlock(&pbs->pbs_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pbs_trigger_event);
+
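+/*
+ * Example caller (a sketch; the "qcom,pbs-client" phandle name is
+ * illustrative and must match the client driver's own DT binding):
+ *
+ * struct device_node *pbs_node =
+ * of_parse_phandle(dev->of_node, "qcom,pbs-client", 0);
+ *
+ * if (pbs_node)
+ * rc = qpnp_pbs_trigger_event(pbs_node, BIT(0));
+ */
+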
+static int qpnp_pbs_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 val = 0;
+ struct qpnp_pbs *pbs;
+
+ pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+ if (!pbs)
+ return -ENOMEM;
+
+ pbs->pdev = pdev;
+ pbs->dev = &pdev->dev;
+ pbs->dev_node = pdev->dev.of_node;
+ pbs->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pbs->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &val);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ pbs->base = val;
+ mutex_init(&pbs->pbs_lock);
+
+ dev_set_drvdata(&pdev->dev, pbs);
+
+ mutex_lock(&pbs_list_lock);
+ list_add(&pbs->link, &pbs_dev_list);
+ mutex_unlock(&pbs_list_lock);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_pbs_match_table[] = {
+ { .compatible = QPNP_PBS_DEV_NAME },
+ {}
+};
+
+static struct platform_driver qpnp_pbs_driver = {
+ .driver = {
+ .name = QPNP_PBS_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qpnp_pbs_match_table,
+ },
+ .probe = qpnp_pbs_probe,
+};
+
+static int __init qpnp_pbs_init(void)
+{
+ return platform_driver_register(&qpnp_pbs_driver);
+}
+arch_initcall(qpnp_pbs_init);
+
+static void __exit qpnp_pbs_exit(void)
+{
+ return platform_driver_unregister(&qpnp_pbs_driver);
+}
+module_exit(qpnp_pbs_exit);
+
+MODULE_DESCRIPTION("QPNP PBS DRIVER");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_PBS_DEV_NAME);
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
new file mode 100644
index 0000000..15d8b1b
--- /dev/null
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -0,0 +1,381 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <asm/arch_timer.h>
+
+#define RPM_STATS_NUM_REC 2
+#define MSM_ARCH_TIMER_FREQ 19200000
+
+#define GET_PDATA_OF_ATTR(attr) \
+ (container_of(attr, struct msm_rpmstats_kobj_attr, ka)->pd)
+
+struct msm_rpmstats_record {
+ char name[32];
+ u32 id;
+ u32 val;
+};
+
+struct msm_rpmstats_platform_data {
+ phys_addr_t phys_addr_base;
+ u32 phys_size;
+};
+
+struct msm_rpmstats_private_data {
+ void __iomem *reg_base;
+ u32 num_records;
+ u32 read_idx;
+ u32 len;
+ char buf[320];
+ struct msm_rpmstats_platform_data *platform_data;
+};
+
+struct msm_rpm_stats_data {
+ u32 stat_type;
+ u32 count;
+ u64 last_entered_at;
+ u64 last_exited_at;
+ u64 accumulated;
+};
+
+struct msm_rpmstats_kobj_attr {
+ struct kobj_attribute ka;
+ struct msm_rpmstats_platform_data *pd;
+};
+
+static inline u64 get_time_in_sec(u64 counter)
+{
+ do_div(counter, MSM_ARCH_TIMER_FREQ);
+
+ return counter;
+}
+
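+/*
+ * Convert 19.2 MHz arch-timer ticks to msec. Dividing by the timer
+ * frequency first trades sub-second precision for multiply headroom.
+ */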
+static inline u64 get_time_in_msec(u64 counter)
+{
+ do_div(counter, MSM_ARCH_TIMER_FREQ);
+ counter *= MSEC_PER_SEC;
+
+ return counter;
+}
+
+static inline int msm_rpmstats_append_data_to_buf(char *buf,
+ struct msm_rpm_stats_data *data, int buflength)
+{
+ char stat_type[5];
+ u64 time_in_last_mode;
+ u64 time_since_last_mode;
+ u64 actual_last_sleep;
+
+ stat_type[4] = 0;
+ memcpy(stat_type, &data->stat_type, sizeof(u32));
+
+ time_in_last_mode = data->last_exited_at - data->last_entered_at;
+ time_in_last_mode = get_time_in_msec(time_in_last_mode);
+ time_since_last_mode = arch_counter_get_cntvct() - data->last_exited_at;
+ time_since_last_mode = get_time_in_sec(time_since_last_mode);
+ actual_last_sleep = get_time_in_msec(data->accumulated);
+
+ return snprintf(buf, buflength,
+ "RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
+ "time since last mode(sec):%llu\nactual last sleep(msec):%llu\n\n",
+ stat_type, data->count, time_in_last_mode,
+ time_since_last_mode, actual_last_sleep);
+}
+
+static inline u32 msm_rpmstats_read_long_register(void __iomem *regbase,
+ int index, int offset)
+{
+ return readl_relaxed(regbase + offset +
+ index * sizeof(struct msm_rpm_stats_data));
+}
+
+static inline u64 msm_rpmstats_read_quad_register(void __iomem *regbase,
+ int index, int offset)
+{
+ u64 dst;
+
+ memcpy_fromio(&dst,
+ regbase + offset + index * sizeof(struct msm_rpm_stats_data),
+ 8);
+ return dst;
+}
+
+static inline int msm_rpmstats_copy_stats(
+ struct msm_rpmstats_private_data *prvdata)
+{
+ void __iomem *reg;
+ struct msm_rpm_stats_data data;
+ int i, length;
+
+ reg = prvdata->reg_base;
+
+ for (i = 0, length = 0; i < prvdata->num_records; i++) {
+ data.stat_type = msm_rpmstats_read_long_register(reg, i,
+ offsetof(struct msm_rpm_stats_data,
+ stat_type));
+ data.count = msm_rpmstats_read_long_register(reg, i,
+ offsetof(struct msm_rpm_stats_data, count));
+ data.last_entered_at = msm_rpmstats_read_quad_register(reg,
+ i, offsetof(struct msm_rpm_stats_data,
+ last_entered_at));
+ data.last_exited_at = msm_rpmstats_read_quad_register(reg,
+ i, offsetof(struct msm_rpm_stats_data,
+ last_exited_at));
+ data.accumulated = msm_rpmstats_read_quad_register(reg,
+ i, offsetof(struct msm_rpm_stats_data,
+ accumulated));
+ length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
+ &data, sizeof(prvdata->buf) - length);
+ prvdata->read_idx++;
+ }
+
+ return length;
+}
+
+static inline unsigned long msm_rpmstats_read_register(void __iomem *regbase,
+ int index, int offset)
+{
+ return readl_relaxed(regbase + index * 12 + (offset + 1) * 4);
+}
+
+static ssize_t msm_rpmstats_file_read(struct file *file, char __user *bufu,
+ size_t count, loff_t *ppos)
+{
+ struct msm_rpmstats_private_data *prvdata;
+
+ prvdata = file->private_data;
+ if (!prvdata)
+ return -EINVAL;
+
+ if (!bufu || count == 0)
+ return -EINVAL;
+
+ if ((*ppos >= prvdata->len) &&
+ (prvdata->read_idx < prvdata->num_records)) {
+ prvdata->len = msm_rpmstats_copy_stats(prvdata);
+ *ppos = 0;
+ }
+
+ return simple_read_from_buffer(bufu, count, ppos,
+ prvdata->buf, prvdata->len);
+}
+
+static int msm_rpmstats_file_open(struct inode *inode, struct file *file)
+{
+ struct msm_rpmstats_private_data *prvdata;
+ struct msm_rpmstats_platform_data *pdata;
+
+ pdata = inode->i_private;
+
+ file->private_data = kzalloc(sizeof(*prvdata), GFP_KERNEL);
+ if (!file->private_data)
+ return -ENOMEM;
+
+ prvdata = file->private_data;
+
+ prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+ pdata->phys_size);
+ if (!prvdata->reg_base) {
+ kfree(file->private_data);
+ prvdata = NULL;
+ pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+ __func__, &pdata->phys_addr_base,
+ pdata->phys_size);
+ return -EBUSY;
+ }
+
+ prvdata->read_idx = prvdata->len = 0;
+ prvdata->platform_data = pdata;
+ prvdata->num_records = RPM_STATS_NUM_REC;
+
+ return 0;
+}
+
+static int msm_rpmstats_file_close(struct inode *inode, struct file *file)
+{
+ struct msm_rpmstats_private_data *private = file->private_data;
+
+ if (private->reg_base)
+ iounmap(private->reg_base);
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations msm_rpmstats_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_rpmstats_file_open,
+ .read = msm_rpmstats_file_read,
+ .release = msm_rpmstats_file_close,
+ .llseek = no_llseek,
+};
+
+static ssize_t rpmstats_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct msm_rpmstats_private_data *prvdata = NULL;
+ struct msm_rpmstats_platform_data *pdata = NULL;
+ ssize_t len;
+
+ pdata = GET_PDATA_OF_ATTR(attr);
+
+ prvdata =
+ kmalloc(sizeof(*prvdata), GFP_KERNEL);
+ if (!prvdata)
+ return -ENOMEM;
+
+ prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+ pdata->phys_size);
+ if (!prvdata->reg_base) {
+ kfree(prvdata);
+ pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+ __func__, &pdata->phys_addr_base,
+ pdata->phys_size);
+ return -EBUSY;
+ }
+
+ prvdata->read_idx = prvdata->len = 0;
+ prvdata->platform_data = pdata;
+ prvdata->num_records = RPM_STATS_NUM_REC;
+
+ if (prvdata->read_idx < prvdata->num_records)
+ prvdata->len = msm_rpmstats_copy_stats(prvdata);
+
+ /* "%s" avoids treating the stats buffer as a format string */
+ len = scnprintf(buf, PAGE_SIZE, "%s", prvdata->buf);
+ iounmap(prvdata->reg_base);
+ kfree(prvdata);
+ return len;
+}
+
+static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *rpmstats_kobj = NULL;
+ struct msm_rpmstats_kobj_attr *rpms_ka = NULL;
+ int ret = 0;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("%s: Cannot find module_kset\n", __func__);
+ return -ENODEV;
+ }
+
+ rpmstats_kobj = kobject_create_and_add("rpmstats", module_kobj);
+ if (!rpmstats_kobj) {
+ pr_err("%s: Cannot create rpmstats kobject\n", __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ rpms_ka = kzalloc(sizeof(*rpms_ka), GFP_KERNEL);
+ if (!rpms_ka) {
+ kobject_put(rpmstats_kobj);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sysfs_attr_init(&rpms_ka->ka.attr);
+ rpms_ka->pd = pd;
+ rpms_ka->ka.attr.mode = 0444;
+ rpms_ka->ka.attr.name = "stats";
+ rpms_ka->ka.show = rpmstats_show;
+ rpms_ka->ka.store = NULL;
+
+	ret = sysfs_create_file(rpmstats_kobj, &rpms_ka->ka.attr);
+	if (ret) {
+		kobject_put(rpmstats_kobj);
+		kfree(rpms_ka);
+	}
+
+fail:
+	return ret;
+}
+
+static int msm_rpmstats_probe(struct platform_device *pdev)
+{
+ struct dentry *dent = NULL;
+ struct msm_rpmstats_platform_data *pdata;
+ struct resource *res = NULL, *offset = NULL;
+ u32 offset_addr = 0;
+ void __iomem *phys_ptr = NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phys_addr_base");
+ if (!res)
+ return -EINVAL;
+
+ offset = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "offset_addr");
+ if (offset) {
+ /* Remap the rpm-stats pointer */
+ phys_ptr = ioremap_nocache(offset->start, SZ_4);
+ if (!phys_ptr) {
+ pr_err("%s: Failed to ioremap address: %x\n",
+ __func__, offset_addr);
+ return -ENODEV;
+ }
+ offset_addr = readl_relaxed(phys_ptr);
+ iounmap(phys_ptr);
+ }
+
+ pdata->phys_addr_base = res->start + offset_addr;
+ pdata->phys_size = resource_size(res);
+
+
+ dent = debugfs_create_file("rpm_stats", 0444, NULL,
+ pdata, &msm_rpmstats_fops);
+ if (!dent) {
+ pr_err("%s: ERROR rpm_stats debugfs_create_file fail\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ msm_rpmstats_create_sysfs(pdata);
+
+ platform_set_drvdata(pdev, dent);
+ return 0;
+}
+
+static int msm_rpmstats_remove(struct platform_device *pdev)
+{
+ struct dentry *dent;
+
+ dent = platform_get_drvdata(pdev);
+ debugfs_remove(dent);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id rpm_stats_table[] = {
+ { .compatible = "qcom,rpm-stats" },
+ { },
+};
+
+static struct platform_driver msm_rpmstats_driver = {
+ .probe = msm_rpmstats_probe,
+ .remove = msm_rpmstats_remove,
+ .driver = {
+ .name = "msm_rpm_stat",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_stats_table,
+ },
+};
+builtin_platform_driver(msm_rpmstats_driver);
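
For orientation, a minimal sketch (an assumption, not part of this patch) of the shared-memory record layout implied by the offsetof()-based reads in msm_rpmstats_copy_stats(); only the three 64-bit fields are named in this hunk, and the two leading words are assumed from typical RPM stats layouts:

	struct msm_rpm_stats_data {
		u32 stat_type;		/* assumed: four-char low-power mode tag */
		u32 count;		/* assumed: times the mode was entered */
		u64 last_entered_at;	/* read via msm_rpmstats_read_quad_register() */
		u64 last_exited_at;
		u64 accumulated;
	};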
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index aeecf29..b9070bd 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -12,6 +12,7 @@
*/
#include <linux/atomic.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -37,14 +38,13 @@
.msg = { 0 }, \
.msg.state = s, \
.msg.is_complete = true, \
- .msg.payload = &name.cmd, \
- .msg.num_payload = 1, \
- .cmd = { 0 }, \
+ .msg.payload = name.cmd, \
+ .msg.num_payload = 0, \
+ .cmd = { { 0 } }, \
.waitq = q, \
.wait_count = c, \
.rc = rc, \
.bit = -1, \
- .free_cmd = NULL, \
}
struct rpmh_req {
@@ -56,13 +56,11 @@
struct rpmh_msg {
struct tcs_mbox_msg msg;
- /* A single command for our use here */
- struct tcs_cmd cmd;
+ struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
wait_queue_head_t *waitq;
atomic_t *wait_count;
struct rpmh_client *rc;
int bit;
- void *free_cmd;
int err; /* relay error from mbox for sync calls */
};
@@ -73,6 +71,7 @@
struct rpmh_msg *msg_pool;
DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
bool dirty;
+ bool in_solver_mode;
};
struct rpmh_client {
@@ -91,8 +90,9 @@
struct rpmh_mbox *rpm = rc->rpmh;
struct rpmh_msg *msg = NULL;
int pos;
+ unsigned long flags;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
pos = find_first_zero_bit(rpm->fast_req, RPMH_MAX_FAST_RES);
if (pos != RPMH_MAX_FAST_RES) {
bitmap_set(rpm->fast_req, pos, 1);
@@ -101,7 +101,7 @@
msg->bit = pos;
msg->rc = rc;
}
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return msg;
}
@@ -111,7 +111,7 @@
struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
atomic_dec(rpm_msg->wait_count);
- wake_up_interruptible(rpm_msg->waitq);
+ wake_up(rpm_msg->waitq);
}
static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
@@ -120,7 +120,7 @@
struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
atomic_t *wc = rpm_msg->wait_count;
wait_queue_head_t *waitq = rpm_msg->waitq;
- void *free = rpm_msg->free_cmd;
+ unsigned long flags;
rpm_msg->err = r;
@@ -139,7 +139,7 @@
/*
* Copy the child object pointers before freeing up the parent,
* This way even if the parent (rpm_msg) object gets reused, we
- * can free up the child objects (free_cmd and wq/wc) parallely.
+ * can free up the child objects (wq/wc) in parallel.
* If you free up the children before the parent, then we run
* into an issue that the stack allocated parent object may be
* invalid before we can check the ->bit value.
@@ -147,18 +147,15 @@
/* If we allocated the pool, set it as available */
if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
}
- /* Nobody should be needing the request anymore */
- kfree(free);
-
/* Signal the blocking thread we are done */
if (waitq) {
atomic_dec(wc);
- wake_up_interruptible(waitq);
+ wake_up(waitq);
}
}
@@ -181,8 +178,9 @@
{
struct rpmh_req *req;
struct rpmh_mbox *rpm = rc->rpmh;
+ unsigned long flags;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
req = __find_req(rc, cmd->addr);
if (req)
goto existing;
@@ -217,7 +215,7 @@
unlock:
rpm->dirty = true;
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return req;
}
@@ -254,6 +252,9 @@
ret = mbox_send_message(rc->chan, &rpm_msg->msg);
if (ret > 0)
ret = 0;
+ } else {
+ /* Clean up our call by spoofing tx_done */
+ rpmh_tx_done(&rc->client, &rpm_msg->msg, ret);
}
return ret;
@@ -285,10 +286,10 @@
if (!rpm_msg)
return -ENOMEM;
- rpm_msg->cmd.addr = addr;
- rpm_msg->cmd.data = data;
+ rpm_msg->cmd[0].addr = addr;
+ rpm_msg->cmd[0].data = data;
- rpm_msg->msg.payload = &rpm_msg->cmd;
+ rpm_msg->msg.payload = rpm_msg->cmd;
rpm_msg->msg.num_payload = 1;
return __rpmh_write(rc, state, rpm_msg);
@@ -325,45 +326,37 @@
if (rpmh_standalone)
return 0;
- rpm_msg.cmd.addr = addr;
- rpm_msg.cmd.data = data;
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_payload = 1;
ret = __rpmh_write(rc, state, &rpm_msg);
if (ret < 0)
return ret;
- ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
- if (ret)
- return ret;
+ wait_event(waitq, atomic_read(&wait_count) == 0);
return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write_single);
struct rpmh_msg *__get_rpmh_msg_async(struct rpmh_client *rc,
- enum rpmh_state state, struct tcs_cmd *cmd, int n, bool fast)
+ enum rpmh_state state, struct tcs_cmd *cmd, int n)
{
struct rpmh_msg *rpm_msg;
- struct tcs_cmd *tcs_cmd;
if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
return ERR_PTR(-EINVAL);
- tcs_cmd = kcalloc(n, sizeof(*cmd), fast ? GFP_ATOMIC : GFP_KERNEL);
- if (!tcs_cmd)
- return ERR_PTR(-ENOMEM);
- memcpy(tcs_cmd, cmd, n * sizeof(*tcs_cmd));
-
rpm_msg = get_msg_from_pool(rc);
- if (!rpm_msg) {
- kfree(tcs_cmd);
+ if (!rpm_msg)
return ERR_PTR(-ENOMEM);
- }
+
+ memcpy(rpm_msg->cmd, cmd, n * sizeof(*cmd));
rpm_msg->msg.state = state;
- rpm_msg->msg.payload = tcs_cmd;
+ rpm_msg->msg.payload = rpm_msg->cmd;
rpm_msg->msg.num_payload = n;
- rpm_msg->free_cmd = tcs_cmd;
return rpm_msg;
}
@@ -389,8 +382,7 @@
if (rpmh_standalone)
return 0;
- rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n, true);
-
+ rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n);
if (IS_ERR(rpm_msg))
return PTR_ERR(rpm_msg);
@@ -430,17 +422,15 @@
if (rpmh_standalone)
return 0;
- rpm_msg.msg.payload = cmd;
+ memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
rpm_msg.msg.num_payload = n;
ret = __rpmh_write(rc, state, &rpm_msg);
- if (ret < 0)
- return ret;
-
- ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
if (ret)
return ret;
+ wait_event(waitq, atomic_read(&wait_count) == 0);
+
return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);
@@ -470,10 +460,21 @@
int count = 0;
int ret, i, j, k;
bool complete_set;
+ unsigned long flags;
+ struct rpmh_mbox *rpm;
if (rpmh_standalone)
return 0;
+ /* Do not allow setting wake votes when in solver mode */
+ rpm = rc->rpmh;
+ spin_lock_irqsave(&rpm->lock, flags);
+ if (rpm->in_solver_mode && state == RPMH_WAKE_ONLY_STATE) {
+ spin_unlock_irqrestore(&rpm->lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&rpm->lock, flags);
+
while (n[count++])
;
count--;
@@ -502,11 +503,9 @@
/* Create async request batches */
for (i = 0; i < count; i++) {
- rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i], false);
+ rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
if (IS_ERR_OR_NULL(rpm_msg[i]))
return PTR_ERR(rpm_msg[i]);
- rpm_msg[i]->waitq = &waitq;
- rpm_msg[i]->wait_count = &wait_count;
cmd += n[i];
}
@@ -515,16 +514,18 @@
might_sleep();
atomic_set(&wait_count, count);
for (i = 0; i < count; i++) {
+ rpm_msg[i]->waitq = &waitq;
+ rpm_msg[i]->wait_count = &wait_count;
/* Bypass caching and write to mailbox directly */
ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
if (ret < 0)
return ret;
}
- return wait_event_interruptible(waitq,
- atomic_read(&wait_count) == 0);
+ wait_event(waitq, atomic_read(&wait_count) == 0);
} else {
/* Send Sleep requests to the controller, expect no response */
for (i = 0; i < count; i++) {
+ rpm_msg[i]->waitq = NULL;
ret = mbox_send_controller_data(rc->chan,
&rpm_msg[i]->msg);
/* Clean up our call by spoofing tx_done */
@@ -532,10 +533,49 @@
}
return 0;
}
+
+ return 0;
}
EXPORT_SYMBOL(rpmh_write_passthru);
/**
+ * rpmh_mode_solver_set: Indicate that the RSC controller hardware has
+ * been configured to be in solver mode
+ *
+ * @rc: The RPMH handle
+ * @enable: Boolean value indicating if the controller is in solver mode.
+ *
+ * When solver mode is enabled, the passthru API will reject wake votes;
+ * only sleep and active votes can still be sent.
+ */
+int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
+{
+ struct rpmh_mbox *rpm;
+ unsigned long flags;
+
+ if (IS_ERR_OR_NULL(rc))
+ return -EINVAL;
+
+ if (rpmh_standalone)
+ return 0;
+
+ rpm = rc->rpmh;
+ do {
+ spin_lock_irqsave(&rpm->lock, flags);
+ if (mbox_controller_is_idle(rc->chan)) {
+ rpm->in_solver_mode = enable;
+ spin_unlock_irqrestore(&rpm->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&rpm->lock, flags);
+ udelay(10);
+ } while (1);
+
+ return 0;
+}
+EXPORT_SYMBOL(rpmh_mode_solver_set);
+
+/**
* rpmh_write_control: Write async control commands to the controller
*
* @rc: The RPMH handle obtained from rpmh_get_dev_channel
@@ -551,15 +591,16 @@
{
DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
- if (IS_ERR_OR_NULL(rc))
+ if (IS_ERR_OR_NULL(rc) || n > MAX_RPMH_PAYLOAD)
return -EINVAL;
if (rpmh_standalone)
return 0;
- rpm_msg.msg.payload = cmd;
+ memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
rpm_msg.msg.num_payload = n;
rpm_msg.msg.is_control = true;
+ rpm_msg.msg.is_complete = false;
return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
@@ -578,6 +619,7 @@
{
DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
struct rpmh_mbox *rpm;
+ unsigned long flags;
if (IS_ERR_OR_NULL(rc))
return -EINVAL;
@@ -587,10 +629,11 @@
rpm = rc->rpmh;
rpm_msg.msg.invalidate = true;
+ rpm_msg.msg.is_complete = false;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
rpm->dirty = true;
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
@@ -621,8 +664,9 @@
if (rpmh_standalone)
return 0;
- rpm_msg.cmd.addr = addr;
- rpm_msg.cmd.data = 0;
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = 0;
+ rpm_msg.msg.num_payload = 1;
rpm_msg.msg.is_read = true;
@@ -631,12 +675,10 @@
return ret;
/* Wait until the response is received from RPMH */
- ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
- if (ret)
- return ret;
+ wait_event(waitq, atomic_read(&wait_count) == 0);
/* Read the data back from the tcs_mbox_msg structure */
- *resp = rpm_msg.cmd.data;
+ *resp = rpm_msg.cmd[0].data;
return rpm_msg.err;
}
@@ -655,8 +697,10 @@
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.is_complete = (state == RPMH_WAKE_ONLY_STATE);
- rpm_msg.cmd.addr = addr;
- rpm_msg.cmd.data = data;
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_payload = 1;
+ rpm_msg.msg.is_complete = false;
return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
@@ -677,6 +721,7 @@
struct rpmh_req *p;
struct rpmh_mbox *rpm = rc->rpmh;
int ret;
+ unsigned long flags;
if (IS_ERR_OR_NULL(rc))
return -EINVAL;
@@ -687,13 +732,13 @@
if (!mbox_controller_is_idle(rc->chan))
return -EBUSY;
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
if (!rpm->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return 0;
}
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
/*
* Nobody else should be calling this function other than sleep,
@@ -714,9 +759,9 @@
return ret;
}
- spin_lock(&rpm->lock);
+ spin_lock_irqsave(&rpm->lock, flags);
rpm->dirty = false;
- spin_unlock(&rpm->lock);
+ spin_unlock_irqrestore(&rpm->lock, flags);
return 0;
}
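
A hypothetical caller sketch for the new solver-mode API (illustrative only; enter_solver() and the surrounding driver are assumptions): rpmh_mode_solver_set() spins until the mailbox controller reports idle before toggling the flag, after which wake votes through the passthru path return -EIO.

	static int enter_solver(struct rpmh_client *rc)
	{
		int ret;

		/* Blocks (polling at 10 us intervals) until the TCSes are idle */
		ret = rpmh_mode_solver_set(rc, true);
		if (ret)
			return ret;

		/* ... hardware-specific solver programming goes here ... */

		/* Leaving solver mode re-enables wake votes via passthru */
		return rpmh_mode_solver_set(rc, false);
	}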
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index b2627f2..f1e7347 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -365,28 +365,19 @@
int source_nelems, int *dest_vmids,
int *dest_perms, int dest_nelems)
{
- struct sg_table *table;
+ struct sg_table table;
int ret;
- table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
- ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ ret = sg_alloc_table(&table, 1, GFP_KERNEL);
if (ret)
- goto err1;
+ return ret;
- sg_set_page(table->sgl, phys_to_page(addr), size, 0);
+ sg_set_page(table.sgl, phys_to_page(addr), size, 0);
- ret = hyp_assign_table(table, source_vm_list, source_nelems, dest_vmids,
- dest_perms, dest_nelems);
- if (ret)
- goto err2;
+ ret = hyp_assign_table(&table, source_vm_list, source_nelems,
+ dest_vmids, dest_perms, dest_nelems);
- return ret;
-err2:
- sg_free_table(table);
-err1:
- kfree(table);
+ sg_free_table(&table);
return ret;
}
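
A hypothetical usage sketch of the single-buffer assign helper reworked above (the hunk header omits the function name; hyp_assign_phys() and the VMID/PERM constants are taken from the msm secure_buffer API, and the concrete values are illustrative only):

	int source_vm[] = { VMID_HLOS };
	int dest_vm[] = { VMID_CP_PIXEL };
	int dest_perm[] = { PERM_READ | PERM_WRITE };
	int ret;

	/* Hand one physically contiguous 1 MB buffer from HLOS to the CP VM */
	ret = hyp_assign_phys(paddr, SZ_1M, source_vm, 1,
			      dest_vm, dest_perm, 1);
	if (ret)
		pr_err("hyp assign failed: %d\n", ret);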
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index b40d678..6a54048 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -24,7 +24,6 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
-#include <linux/debugfs.h>
#include <soc/qcom/msm_qmi_interface.h>
#include <soc/qcom/service-locator.h>
@@ -441,140 +440,3 @@
return 0;
}
EXPORT_SYMBOL(find_subsys);
-
-static struct pd_qmi_client_data test_data;
-
-static int servloc_test_pdr_cb(struct notifier_block *this,
- unsigned long opcode, void *ptr)
-{
- int i, rc = 0;
- char subsys[QMI_SERVREG_LOC_NAME_LENGTH_V01];
- struct pd_qmi_client_data *return_data;
-
- return_data = (struct pd_qmi_client_data *)ptr;
-
- if (opcode) {
- pr_err("%s: Failed to get process domain!, opcode = %lu\n",
- __func__, opcode);
- return -EIO;
- }
-
- pr_err("Service Name: %s\tTotal Domains: %d\n",
- return_data->service_name, return_data->total_domains);
-
- for (i = 0; i < return_data->total_domains; i++) {
- pr_err("Instance ID: %d\t ",
- return_data->domain_list[i].instance_id);
- pr_err("Domain Name: %s\n",
- return_data->domain_list[i].name);
- rc = find_subsys(return_data->domain_list[i].name,
- subsys);
- if (rc < 0)
- pr_err("No valid subsys found for %s!\n",
- return_data->domain_list[i].name);
- else
- pr_err("Subsys: %s\n", subsys);
- }
- return 0;
-}
-
-static struct notifier_block pdr_service_nb = {
- .notifier_call = servloc_test_pdr_cb,
-};
-
-static ssize_t servloc_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- int rc = 0;
- char *node_name = filp->private_data;
-
- if (!strcmp(node_name, "test_servloc_get"))
- rc = get_service_location(test_data.client_name,
- test_data.service_name, &pdr_service_nb);
-
- return rc;
-}
-
-static ssize_t servloc_write(struct file *fp, const char __user *buf,
- size_t count, loff_t *unused)
-{
- char *node_name = fp->private_data;
-
- if (!buf)
- return -EIO;
- if (!strcmp(node_name, "service_name")) {
- snprintf(test_data.service_name, sizeof(test_data.service_name),
- "%.*s", (int) min((size_t)count - 1,
- (sizeof(test_data.service_name) - 1)), buf);
- } else {
- snprintf(test_data.client_name, sizeof(test_data.client_name),
- "%.*s", (int) min((size_t)count - 1,
- (sizeof(test_data.client_name) - 1)), buf);
- }
- return count;
-}
-
-static const struct file_operations servloc_fops = {
- .open = simple_open,
- .read = servloc_read,
- .write = servloc_write,
-};
-
-static struct dentry *servloc_base_dir;
-static struct dentry *test_servloc_file;
-
-static int __init servloc_debugfs_init(void)
-{
- servloc_base_dir = debugfs_create_dir("test_servloc", NULL);
- return !servloc_base_dir ? -ENOMEM : 0;
-}
-
-static void servloc_debugfs_exit(void)
-{
- debugfs_remove_recursive(servloc_base_dir);
-}
-
-static int servloc_debugfs_add(void)
-{
- int rc;
-
- if (!servloc_base_dir)
- return -ENOMEM;
-
- test_servloc_file = debugfs_create_file("client_name",
- 0644, servloc_base_dir,
- "client_name", &servloc_fops);
- rc = !test_servloc_file ? -ENOMEM : 0;
-
- if (rc == 0) {
- test_servloc_file = debugfs_create_file("service_name",
- 0644, servloc_base_dir,
- "service_name", &servloc_fops);
- rc = !test_servloc_file ? -ENOMEM : 0;
- }
-
- if (rc == 0) {
- test_servloc_file = debugfs_create_file("test_servloc_get",
- 0644, servloc_base_dir,
- "test_servloc_get", &servloc_fops);
- rc = !test_servloc_file ? -ENOMEM : 0;
- }
- return rc;
-}
-
-static int __init service_locator_init(void)
-{
- pr_debug("service_locator_status = %d\n", locator_status);
- if (servloc_debugfs_init())
- pr_err("Could not create test_servloc base directory!");
- if (servloc_debugfs_add())
- pr_err("Could not create test_servloc node entries!");
- return 0;
-}
-
-static void __exit service_locator_exit(void)
-{
- servloc_debugfs_exit();
-}
-module_init(service_locator_init);
-module_exit(service_locator_exit);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index fca1c68..68592fe 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/err.h>
-#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <soc/qcom/subsystem_restart.h>
@@ -104,6 +103,7 @@
struct work_struct svc_exit;
struct work_struct svc_rcv_msg;
struct work_struct ind_ack;
+ struct work_struct qmi_handle_free;
struct workqueue_struct *svc_event_wq;
struct qmi_handle *clnt_handle;
struct notifier_block notifier;
@@ -123,6 +123,18 @@
static void root_service_service_arrive(struct work_struct *work);
static void root_service_exit_work(struct work_struct *work);
+static void free_qmi_handle(struct work_struct *work)
+{
+ struct qmi_client_info *data = container_of(work,
+ struct qmi_client_info, qmi_handle_free);
+
+ mutex_lock(&qmi_client_release_lock);
+ data->service_connected = false;
+ qmi_handle_destroy(data->clnt_handle);
+ data->clnt_handle = NULL;
+ mutex_unlock(&qmi_client_release_lock);
+}
+
static struct service_notif_info *_find_service_info(const char *service_path)
{
struct service_notif_info *service_notif;
@@ -426,11 +438,7 @@
* Destroy client handle and try connecting when
* service comes up again.
*/
- mutex_lock(&qmi_client_release_lock);
- data->service_connected = false;
- qmi_handle_destroy(data->clnt_handle);
- data->clnt_handle = NULL;
- mutex_unlock(&qmi_client_release_lock);
+ queue_work(data->svc_event_wq, &data->qmi_handle_free);
}
static void root_service_exit_work(struct work_struct *work)
@@ -486,7 +494,7 @@
info->subsys_state = ROOT_PD_SHUTDOWN;
break;
}
- queue_work(info->svc_event_wq, &info->svc_exit);
+ root_service_service_exit(info, info->subsys_state);
break;
default:
break;
@@ -561,6 +569,7 @@
INIT_WORK(&qmi_data->svc_exit, root_service_exit_work);
INIT_WORK(&qmi_data->svc_rcv_msg, root_service_clnt_recv_msg);
INIT_WORK(&qmi_data->ind_ack, send_ind_ack);
+ INIT_WORK(&qmi_data->qmi_handle_free, free_qmi_handle);
*curr_state = service_notif->curr_state =
SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01;
@@ -742,179 +751,3 @@
&service_notif->service_notif_rcvr_list, nb);
}
EXPORT_SYMBOL(service_notif_unregister_notifier);
-
-struct service_notifier_test_data {
- char service_path[MAX_STRING_LEN];
- int instance_id;
- struct notifier_block nb;
- void *service_notif_handle;
-};
-
-static struct service_notifier_test_data test_data;
-
-static void print_service_provider_state(int notification, char *type)
-{
- if (notification == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
- pr_info("%s: Service %s down!\n", type, test_data.service_path);
- else if (notification == SERVREG_NOTIF_SERVICE_STATE_UP_V01)
- pr_info("%s: Service %s up!\n", type, test_data.service_path);
- else if (notification == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01)
- pr_info("%s: Service %s state uninit!\n", type,
- test_data.service_path);
- else
- pr_info("%s: Service %s state Unknown 0x%x!\n", type,
- test_data.service_path, notification);
-}
-
-static int nb_callback(struct notifier_block *nb,
- unsigned long notification,
- void *data)
-{
- print_service_provider_state((int)notification, "Notification:");
- return 0;
-}
-
-static ssize_t show_service_path(struct seq_file *f, void *unused)
-{
- if (test_data.service_notif_handle)
- seq_printf(f, "Service Path: %s\n", test_data.service_path);
- else
- seq_puts(f, "No existing notifier\n");
- return 0;
-}
-
-
-static ssize_t set_service_notifier_register(struct file *fp,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- int curr_state = INT_MAX, rc;
-
- if (!buf)
- return -EIO;
- if (test_data.service_notif_handle) {
- service_notif_unregister_notifier(
- test_data.service_notif_handle,
- &test_data.nb);
- test_data.service_notif_handle = NULL;
- pr_info("Unregistering existing notifier for %s\n",
- test_data.service_path);
- }
- rc = simple_write_to_buffer(test_data.service_path, MAX_STRING_LEN,
- ppos, buf, count - 1);
- if (rc != count - 1) {
- pr_err("Unable to read data into kernel buffer\n");
- goto err;
- }
- test_data.nb.notifier_call = nb_callback;
- test_data.service_notif_handle = service_notif_register_notifier(
- test_data.service_path,
- test_data.instance_id, &test_data.nb,
- &curr_state);
- if (!IS_ERR(test_data.service_notif_handle)) {
- pr_info("Notifier Registered for service %s\n",
- test_data.service_path);
- print_service_provider_state(curr_state, "Initial State");
- return count;
- }
-err:
- test_data.service_notif_handle = NULL;
- pr_err("Unable to register notifier for %s\n", test_data.service_path);
- return -EIO;
-}
-
-static int open_service_notifier_register(struct inode *inode, struct file *f)
-{
- return single_open(f, (void *) show_service_path,
- inode->i_private);
-}
-
-static const struct file_operations service_notifier_register_fops = {
- .open = open_service_notifier_register,
- .read = seq_read,
- .write = set_service_notifier_register,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static ssize_t show_service_notifier_id(struct seq_file *f, void *unused)
-{
- seq_printf(f, "Service instance ID: %d\n", test_data.instance_id);
- return 0;
-}
-
-static ssize_t set_service_notifier_id(struct file *fp,
- const char __user *buf,
- size_t count, loff_t *unused)
-{
- int val, rc;
- char kbuf[MAX_STRING_LEN];
-
- if (count > MAX_STRING_LEN) {
- rc = -EIO;
- goto err;
- }
- rc = copy_from_user(kbuf, buf, count);
- if (rc != 0) {
- rc = -EFAULT;
- goto err;
- }
-
- kbuf[count - 1] = '\0';
- rc = kstrtoint(kbuf, 0, &val);
- if (rc < 0)
- goto err;
-
- test_data.instance_id = val;
- return count;
-err:
- pr_err("Invalid input parameters: rc = %d\n", rc);
- return rc;
-}
-
-static int open_service_notifier_id(struct inode *inode, struct file *f)
-{
- return single_open(f, (void *) show_service_notifier_id,
- inode->i_private);
-}
-
-static const struct file_operations service_notifier_id_fops = {
- .open = open_service_notifier_id,
- .read = seq_read,
- .write = set_service_notifier_id,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static struct dentry *service_notifier_dir;
-static struct dentry *service_path_file;
-static struct dentry *service_id_file;
-
-static int __init service_notifier_init(void)
-{
- service_notifier_dir = debugfs_create_dir("service_notifier", NULL);
- if (service_notifier_dir) {
- service_path_file = debugfs_create_file("service_path",
- 0644, service_notifier_dir, NULL,
- &service_notifier_register_fops);
- if (!service_path_file)
- goto err;
- service_id_file = debugfs_create_file("service_id",
- 0644, service_notifier_dir, NULL,
- &service_notifier_id_fops);
- if (!service_id_file)
- goto err;
- }
- return 0;
-err:
- debugfs_remove_recursive(service_notifier_dir);
- return 0;
-}
-
-static void __exit service_notifier_exit(void)
-{
- debugfs_remove_recursive(service_notifier_dir);
- test_data.nb.notifier_call = nb_callback;
-}
-module_init(service_notifier_init);
-module_exit(service_notifier_exit);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 55fa5d2..119ede3 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -550,6 +550,9 @@
/* Bat ID */
[328] = {MSM_CPU_SDM830, "SDM830"},
+ /* sdxpoorwills ID */
+ [334] = {SDX_CPU_SDXPOORWILLS, "SDXPOORWILLS"},
+
/* Uninitialized IDs are not known to run Linux.
* MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
* considered as unknown CPU.
@@ -1256,6 +1259,10 @@
dummy_socinfo.id = 328;
strlcpy(dummy_socinfo.build_id, "sdm830 - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_sdxpoorwills()) {
+ dummy_socinfo.id = 334;
+ strlcpy(dummy_socinfo.build_id, "sdxpoorwills - ",
+ sizeof(dummy_socinfo.build_id));
}
strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
new file mode 100644
index 0000000..c70ef3a
--- /dev/null
+++ b/drivers/soc/qcom/spss_utils.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Secure-Processor-SubSystem (SPSS) utilities.
+ *
+ * This driver provides utilities for the Secure Processor (SP).
+ *
+ * The SP daemon needs to load different SPSS images based on:
+ *
+ * 1. Test/Production key used to sign the SPSS image (read fuses).
+ * 2. SPSS HW version (selected via Device Tree).
+ *
+ */
+
+#define pr_fmt(fmt) "spss_utils [%s]: " fmt, __func__
+
+#include <linux/kernel.h> /* min() */
+#include <linux/module.h> /* MODULE_LICENSE */
+#include <linux/device.h> /* class_create() */
+#include <linux/slab.h> /* kzalloc() */
+#include <linux/fs.h> /* file_operations */
+#include <linux/cdev.h> /* cdev_add() */
+#include <linux/errno.h> /* EINVAL, ETIMEDOUT */
+#include <linux/printk.h> /* pr_err() */
+#include <linux/bitops.h> /* BIT(x) */
+#include <linux/platform_device.h> /* platform_driver_register() */
+#include <linux/of.h> /* of_property_count_strings() */
+#include <linux/io.h> /* ioremap_nocache() */
+
+#include <soc/qcom/subsystem_restart.h>
+
+/* driver name */
+#define DEVICE_NAME "spss-utils"
+
+enum spss_firmware_type {
+ SPSS_FW_TYPE_DEV = 'd',
+ SPSS_FW_TYPE_TEST = 't',
+ SPSS_FW_TYPE_PROD = 'p',
+};
+
+static enum spss_firmware_type firmware_type = SPSS_FW_TYPE_TEST;
+static const char *dev_firmware_name;
+static const char *test_firmware_name;
+static const char *prod_firmware_name;
+static const char *firmware_name = "NA";
+static struct device *spss_dev;
+static u32 spss_debug_reg_addr; /* SP_SCSR_MBn_SP2CL_GPm(n,m) */
+
+/*==========================================================================*/
+/* Device Sysfs */
+/*==========================================================================*/
+
+static ssize_t firmware_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+
+ if (!dev || !attr || !buf) {
+ pr_err("invalid param.\n");
+ return -EINVAL;
+ }
+
+ if (firmware_name == NULL)
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "unknown");
+ else
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", firmware_name);
+
+ return ret;
+}
+
+static ssize_t firmware_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ pr_err("set firmware name is not allowed.\n");
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(firmware_name, 0444,
+ firmware_name_show, firmware_name_store);
+
+static ssize_t test_fuse_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+
+ if (!dev || !attr || !buf) {
+ pr_err("invalid param.\n");
+ return -EINVAL;
+ }
+
+ switch (firmware_type) {
+ case SPSS_FW_TYPE_DEV:
+ ret = snprintf(buf, PAGE_SIZE, "%s", "dev");
+ break;
+ case SPSS_FW_TYPE_TEST:
+ ret = snprintf(buf, PAGE_SIZE, "%s", "test");
+ break;
+ case SPSS_FW_TYPE_PROD:
+ ret = snprintf(buf, PAGE_SIZE, "%s", "prod");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t test_fuse_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ pr_err("set test fuse state is not allowed.\n");
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(test_fuse_state, 0444,
+ test_fuse_state_show, test_fuse_state_store);
+
+static ssize_t spss_debug_reg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ void __iomem *spss_debug_reg = NULL;
+ u32 val1, val2;
+
+ if (!dev || !attr || !buf) {
+ pr_err("invalid param.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("spss_debug_reg_addr [0x%x].\n", spss_debug_reg_addr);
+
+ spss_debug_reg = ioremap_nocache(spss_debug_reg_addr, sizeof(u32)*2);
+
+ if (!spss_debug_reg) {
+ pr_err("can't map debug reg addr.\n");
+ return -EFAULT;
+ }
+
+ val1 = readl_relaxed(spss_debug_reg);
+ val2 = readl_relaxed(((char *) spss_debug_reg) + sizeof(u32));
+
+ ret = snprintf(buf, PAGE_SIZE, "val1 [0x%x] val2 [0x%x]", val1, val2);
+
+ iounmap(spss_debug_reg);
+
+ return ret;
+}
+
+static ssize_t spss_debug_reg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ pr_err("set debug reg is not allowed.\n");
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(spss_debug_reg, 0444,
+ spss_debug_reg_show, spss_debug_reg_store);
+
+static int spss_create_sysfs(struct device *dev)
+{
+ int ret;
+
+ ret = device_create_file(dev, &dev_attr_firmware_name);
+ if (ret < 0) {
+ pr_err("failed to create sysfs file for firmware_name.\n");
+ return ret;
+ }
+
+ ret = device_create_file(dev, &dev_attr_test_fuse_state);
+ if (ret < 0) {
+ pr_err("failed to create sysfs file for test_fuse_state.\n");
+ device_remove_file(dev, &dev_attr_firmware_name);
+ return ret;
+ }
+
+ ret = device_create_file(dev, &dev_attr_spss_debug_reg);
+ if (ret < 0) {
+ pr_err("failed to create sysfs file for spss_debug_reg.\n");
+ device_remove_file(dev, &dev_attr_firmware_name);
+ device_remove_file(dev, &dev_attr_test_fuse_state);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*==========================================================================*/
+/* Device Tree */
+/*==========================================================================*/
+
+/**
+ * spss_parse_dt() - Parse Device Tree info.
+ */
+static int spss_parse_dt(struct device_node *node)
+{
+ int ret;
+ u32 spss_fuse1_addr = 0;
+ u32 spss_fuse1_bit = 0;
+ u32 spss_fuse1_mask = 0;
+ void __iomem *spss_fuse1_reg = NULL;
+ u32 spss_fuse2_addr = 0;
+ u32 spss_fuse2_bit = 0;
+ u32 spss_fuse2_mask = 0;
+ void __iomem *spss_fuse2_reg = NULL;
+ u32 val1 = 0;
+ u32 val2 = 0;
+
+ ret = of_property_read_string(node, "qcom,spss-dev-firmware-name",
+ &dev_firmware_name);
+ if (ret < 0) {
+ pr_err("can't get dev fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_string(node, "qcom,spss-test-firmware-name",
+ &test_firmware_name);
+ if (ret < 0) {
+ pr_err("can't get test fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_string(node, "qcom,spss-prod-firmware-name",
+ &prod_firmware_name);
+ if (ret < 0) {
+ pr_err("can't get prod fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,spss-fuse1-addr",
+ &spss_fuse1_addr);
+ if (ret < 0) {
+ pr_err("can't get fuse1 addr.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,spss-fuse2-addr",
+ &spss_fuse2_addr);
+ if (ret < 0) {
+ pr_err("can't get fuse2 addr.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,spss-fuse1-bit",
+ &spss_fuse1_bit);
+ if (ret < 0) {
+ pr_err("can't get fuse1 bit.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,spss-fuse2-bit",
+ &spss_fuse2_bit);
+ if (ret < 0) {
+ pr_err("can't get fuse2 bit.\n");
+ return -EFAULT;
+ }
+
+
+ spss_fuse1_mask = BIT(spss_fuse1_bit);
+ spss_fuse2_mask = BIT(spss_fuse2_bit);
+
+ pr_debug("spss fuse1 addr [0x%x] bit [%d] .\n",
+ (int) spss_fuse1_addr, (int) spss_fuse1_bit);
+ pr_debug("spss fuse2 addr [0x%x] bit [%d] .\n",
+ (int) spss_fuse2_addr, (int) spss_fuse2_bit);
+
+ spss_fuse1_reg = ioremap_nocache(spss_fuse1_addr, sizeof(u32));
+
+ if (!spss_fuse1_reg) {
+ pr_err("can't map fuse1 addr.\n");
+ return -EFAULT;
+ }
+
+ spss_fuse2_reg = ioremap_nocache(spss_fuse2_addr, sizeof(u32));
+
+ if (!spss_fuse2_reg) {
+ iounmap(spss_fuse1_reg);
+ pr_err("can't map fuse2 addr.\n");
+ return -EFAULT;
+ }
+
+ val1 = readl_relaxed(spss_fuse1_reg);
+ val2 = readl_relaxed(spss_fuse2_reg);
+
+ pr_debug("spss fuse1 value [0x%08x].\n", (int) val1);
+ pr_debug("spss fuse2 value [0x%08x].\n", (int) val2);
+
+ pr_debug("spss fuse1 mask [0x%08x].\n", (int) spss_fuse1_mask);
+ pr_debug("spss fuse2 mask [0x%08x].\n", (int) spss_fuse2_mask);
+
+ /**
+ * Set firmware_type based on fuses:
+ * SPSS_CONFIG_MODE 11: dev
+ * SPSS_CONFIG_MODE 01 or 10: test
+ * SPSS_CONFIG_MODE 00: prod
+ */
+ if ((val1 & spss_fuse1_mask) && (val2 & spss_fuse2_mask))
+ firmware_type = SPSS_FW_TYPE_DEV;
+ else if ((val1 & spss_fuse1_mask) || (val2 & spss_fuse2_mask))
+ firmware_type = SPSS_FW_TYPE_TEST;
+ else
+ firmware_type = SPSS_FW_TYPE_PROD;
+
+ iounmap(spss_fuse1_reg);
+ iounmap(spss_fuse2_reg);
+
+ ret = of_property_read_u32(node, "qcom,spss-debug-reg-addr",
+ &spss_debug_reg_addr);
+ if (ret < 0) {
+ pr_err("can't get debug regs addr.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * spss_probe() - initialization sequence
+ */
+static int spss_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *np = NULL;
+ struct device *dev = NULL;
+
+ if (!pdev) {
+ pr_err("invalid pdev.\n");
+ return -ENODEV;
+ }
+
+ np = pdev->dev.of_node;
+ if (!np) {
+ pr_err("invalid DT node.\n");
+ return -EINVAL;
+ }
+
+ dev = &pdev->dev;
+ spss_dev = dev;
+
+ platform_set_drvdata(pdev, dev);
+
+ ret = spss_parse_dt(np);
+ if (ret < 0) {
+ pr_err("fail to parse device tree.\n");
+ return -EFAULT;
+ }
+
+ switch (firmware_type) {
+ case SPSS_FW_TYPE_DEV:
+ firmware_name = dev_firmware_name;
+ break;
+ case SPSS_FW_TYPE_TEST:
+ firmware_name = test_firmware_name;
+ break;
+ case SPSS_FW_TYPE_PROD:
+ firmware_name = prod_firmware_name;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = subsystem_set_fwname("spss", firmware_name);
+ if (ret < 0) {
+ pr_err("fail to set fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = spss_create_sysfs(dev);
+ if (ret < 0) {
+ pr_err("fail to create sysfs.\n");
+ return -EFAULT;
+ }
+
+ pr_info("Initialization completed ok, firmware_name [%s].\n",
+ firmware_name);
+
+ return 0;
+}
+
+static const struct of_device_id spss_match_table[] = {
+ { .compatible = "qcom,spss-utils", },
+ { },
+};
+
+static struct platform_driver spss_driver = {
+ .probe = spss_probe,
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spss_match_table),
+ },
+};
+
+/*==========================================================================*/
+/* Driver Init/Exit */
+/*==========================================================================*/
+static int __init spss_init(void)
+{
+ int ret = 0;
+
+ pr_info("spss-utils driver Ver 2.0 30-Mar-2017.\n");
+
+ ret = platform_driver_register(&spss_driver);
+ if (ret)
+ pr_err("register platform driver failed, ret [%d]\n", ret);
+
+ return ret;
+}
+late_initcall(spss_init); /* start after PIL driver */
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Secure Processor Utilities");
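
For quick reference, the fuse decode implemented by spss_parse_dt() summarized as a truth table (bit values are (val & mask) != 0 for fuse1/fuse2):

	/*
	 *  fuse1  fuse2   firmware_type          DT property used
	 *    1      1     SPSS_FW_TYPE_DEV   ->  qcom,spss-dev-firmware-name
	 *    1      0     SPSS_FW_TYPE_TEST  ->  qcom,spss-test-firmware-name
	 *    0      1     SPSS_FW_TYPE_TEST  ->  qcom,spss-test-firmware-name
	 *    0      0     SPSS_FW_TYPE_PROD  ->  qcom,spss-prod-firmware-name
	 */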
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index b5e3814..6ff39de 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -536,6 +536,17 @@
if (d->subsys_desc.no_auth)
return 0;
+ if (d->bus_client) {
+ rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+ if (rc) {
+ dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+ rc);
+ return rc;
+ }
+ } else
+ WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+ d->subsys_desc.name);
+
rc = enable_regulators(d, pil->dev, d->proxy_regs,
d->proxy_reg_count, false);
if (rc)
@@ -546,20 +557,8 @@
if (rc)
goto err_clks;
- if (d->bus_client) {
- rc = msm_bus_scale_client_update_request(d->bus_client, 1);
- if (rc) {
- dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
- rc);
- goto err_bw;
- }
- } else
- WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
- d->subsys_desc.name);
-
return 0;
-err_bw:
- disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
err_clks:
disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
@@ -573,15 +572,15 @@
if (d->subsys_desc.no_auth)
return;
+ disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
+ disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
+
if (d->bus_client)
msm_bus_scale_client_update_request(d->bus_client, 0);
else
WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
d->subsys_desc.name);
-
- disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
-
- disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
}
static int pil_init_image_trusted(struct pil_desc *pil,
@@ -693,6 +692,10 @@
desc.args[0] = proc = d->pas_id;
desc.arginfo = SCM_ARGS(1);
+ rc = scm_pas_enable_bw();
+ if (rc)
+ return rc;
+
rc = enable_regulators(d, pil->dev, d->regs, d->reg_count, false);
if (rc)
return rc;
@@ -701,10 +704,6 @@
if (rc)
goto err_clks;
- rc = scm_pas_enable_bw();
- if (rc)
- goto err_reset;
-
if (!is_scm_armv8()) {
rc = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
sizeof(proc), &scm_ret, sizeof(scm_ret));
@@ -739,10 +738,21 @@
desc.args[0] = proc = d->pas_id;
desc.arginfo = SCM_ARGS(1);
+ if (d->bus_client) {
+ rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+ if (rc) {
+ dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+ rc);
+ return rc;
+ }
+ } else
+ WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+ d->subsys_desc.name);
+
rc = enable_regulators(d, pil->dev, d->proxy_regs,
d->proxy_reg_count, true);
if (rc)
- return rc;
+ goto err_regulators;
rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
d->proxy_clk_count);
@@ -760,6 +770,11 @@
disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+ if (d->bus_client)
+ msm_bus_scale_client_update_request(d->bus_client, 0);
+ else
+ WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+ d->subsys_desc.name);
if (rc)
return rc;
@@ -768,8 +783,15 @@
disable_regulators(d, d->regs, d->reg_count, false);
return scm_ret;
+
err_clks:
disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+err_regulators:
+ if (d->bus_client)
+ msm_bus_scale_client_update_request(d->bus_client, 0);
+ else
+ WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+ d->subsys_desc.name);
return rc;
}
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index c846d26..e7c2bb2 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -28,7 +28,6 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/idr.h>
-#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>
#include <linux/cdev.h>
@@ -149,7 +148,6 @@
* @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.)
* @restart_order: order of other devices this devices restarts with
* @crash_count: number of times the device has crashed
- * @dentry: debugfs directory for this device
* @do_ramdump_on_put: ramdump on subsystem_put() if true
* @err_ready: completion variable to record error ready from subsystem
* @crashed: indicates if subsystem has crashed
@@ -171,9 +169,6 @@
int restart_level;
int crash_count;
struct subsys_soc_restart_order *restart_order;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *dentry;
-#endif
bool do_ramdump_on_put;
struct cdev char_dev;
dev_t dev_no;
@@ -354,10 +349,11 @@
__ATTR_NULL,
};
-static struct bus_type subsys_bus_type = {
+struct bus_type subsys_bus_type = {
.name = "msm_subsys",
.dev_attrs = subsys_attrs,
};
+EXPORT_SYMBOL(subsys_bus_type);
static DEFINE_IDA(subsys_ida);
@@ -1172,87 +1168,6 @@
notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
}
-#ifdef CONFIG_DEBUG_FS
-static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- int r;
- char buf[40];
- struct subsys_device *subsys = filp->private_data;
-
- r = snprintf(buf, sizeof(buf), "%d\n", subsys->count);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t subsys_debugfs_write(struct file *filp,
- const char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- struct subsys_device *subsys = filp->private_data;
- char buf[10];
- char *cmp;
-
- cnt = min(cnt, sizeof(buf) - 1);
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = '\0';
- cmp = strstrip(buf);
-
- if (!strcmp(cmp, "restart")) {
- if (subsystem_restart_dev(subsys))
- return -EIO;
- } else if (!strcmp(cmp, "get")) {
- if (subsystem_get(subsys->desc->name))
- return -EIO;
- } else if (!strcmp(cmp, "put")) {
- subsystem_put(subsys);
- } else {
- return -EINVAL;
- }
-
- return cnt;
-}
-
-static const struct file_operations subsys_debugfs_fops = {
- .open = simple_open,
- .read = subsys_debugfs_read,
- .write = subsys_debugfs_write,
-};
-
-static struct dentry *subsys_base_dir;
-
-static int __init subsys_debugfs_init(void)
-{
- subsys_base_dir = debugfs_create_dir("msm_subsys", NULL);
- return !subsys_base_dir ? -ENOMEM : 0;
-}
-
-static void subsys_debugfs_exit(void)
-{
- debugfs_remove_recursive(subsys_base_dir);
-}
-
-static int subsys_debugfs_add(struct subsys_device *subsys)
-{
- if (!subsys_base_dir)
- return -ENOMEM;
-
- subsys->dentry = debugfs_create_file(subsys->desc->name,
- 0644, subsys_base_dir,
- subsys, &subsys_debugfs_fops);
- return !subsys->dentry ? -ENOMEM : 0;
-}
-
-static void subsys_debugfs_remove(struct subsys_device *subsys)
-{
- debugfs_remove(subsys->dentry);
-}
-#else
-static int __init subsys_debugfs_init(void) { return 0; };
-static void subsys_debugfs_exit(void) { }
-static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; }
-static void subsys_debugfs_remove(struct subsys_device *subsys) { }
-#endif
-
static int subsys_device_open(struct inode *inode, struct file *file)
{
struct subsys_device *device, *subsys_dev = 0;
@@ -1690,17 +1605,8 @@
mutex_init(&subsys->track.lock);
- ret = subsys_debugfs_add(subsys);
- if (ret) {
- ida_simple_remove(&subsys_ida, subsys->id);
- wakeup_source_trash(&subsys->ssr_wlock);
- kfree(subsys);
- return ERR_PTR(ret);
- }
-
ret = device_register(&subsys->dev);
if (ret) {
- subsys_debugfs_remove(subsys);
put_device(&subsys->dev);
return ERR_PTR(ret);
}
@@ -1761,7 +1667,6 @@
if (ofnode)
subsys_remove_restart_order(ofnode);
err_register:
- subsys_debugfs_remove(subsys);
device_unregister(&subsys->dev);
return ERR_PTR(ret);
}
@@ -1790,7 +1695,6 @@
WARN_ON(subsys->count);
device_unregister(&subsys->dev);
mutex_unlock(&subsys->track.lock);
- subsys_debugfs_remove(subsys);
subsys_char_device_remove(subsys);
sysmon_notifier_unregister(subsys->desc);
if (subsys->desc->edge)
@@ -1830,9 +1734,6 @@
ret = bus_register(&subsys_bus_type);
if (ret)
goto err_bus;
- ret = subsys_debugfs_init();
- if (ret)
- goto err_debugfs;
char_class = class_create(THIS_MODULE, "subsys");
if (IS_ERR(char_class)) {
@@ -1851,8 +1752,6 @@
err_soc:
class_destroy(char_class);
err_class:
- subsys_debugfs_exit();
-err_debugfs:
bus_unregister(&subsys_bus_type);
err_bus:
destroy_workqueue(ssr_wq);
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 2855a15..d8c5a8f 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -56,9 +56,13 @@
* Set up the wake up value offset from the current time.
* Convert us to ns to allow division by the 19.2 MHz tick timer.
*/
- sleep_val *= NSEC_PER_USEC;
- do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
- sleep_val += arch_counter_get_cntvct();
+ if (sleep_val) {
+ sleep_val *= NSEC_PER_USEC;
+ do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
+ sleep_val += arch_counter_get_cntvct();
+ } else {
+ sleep_val = ~0ULL;
+ }
return setup_wakeup(sleep_val);
}
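
A worked numeric check of the conversion above, assuming ARCH_TIMER_HZ is the 19.2 MHz arch timer (19200000 Hz):

	/*
	 * sleep_val = 1000 us
	 *   *= NSEC_PER_USEC                  -> 1,000,000 ns
	 *   do_div by NSEC_PER_SEC/19200000   -> / 52 ns-per-tick = ~19230 ticks
	 *   += arch_counter_get_cntvct()      -> absolute wakeup tick
	 * A sleep_val of 0 now means "no timed wakeup", so ~0ULL
	 * (the farthest possible tick) is programmed instead.
	 */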
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index f3d6209..7a784aa 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,7 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/watchdog.h>
+#include <linux/dma-mapping.h>
#define MODULE_NAME "msm_watchdog"
#define WDT0_ACCSCSSNBARK_INT 0
@@ -49,6 +50,7 @@
#define SCM_SET_REGSAVE_CMD 0x2
#define SCM_SVC_SEC_WDOG_DIS 0x7
#define MAX_CPU_CTX_SIZE 2048
+#define MAX_CPU_SCANDUMP_SIZE 0x10000
static struct msm_watchdog_data *wdog_data;
@@ -557,6 +559,49 @@
return;
}
+static void configure_scandump(struct msm_watchdog_data *wdog_dd)
+{
+ int ret;
+ struct msm_dump_entry dump_entry;
+ struct msm_dump_data *cpu_data;
+ int cpu;
+ static dma_addr_t dump_addr;
+ static void *dump_vaddr;
+
+ for_each_cpu(cpu, cpu_present_mask) {
+ cpu_data = devm_kzalloc(wdog_dd->dev,
+ sizeof(struct msm_dump_data),
+ GFP_KERNEL);
+ if (!cpu_data)
+ continue;
+
+ dump_vaddr = (void *) dma_alloc_coherent(wdog_dd->dev,
+ MAX_CPU_SCANDUMP_SIZE,
+ &dump_addr,
+ GFP_KERNEL);
+ if (!dump_vaddr) {
+ dev_err(wdog_dd->dev, "Couldn't get memory for dump\n");
+ continue;
+ }
+ memset(dump_vaddr, 0x0, MAX_CPU_SCANDUMP_SIZE);
+
+ cpu_data->addr = dump_addr;
+ cpu_data->len = MAX_CPU_SCANDUMP_SIZE;
+ dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu;
+ dump_entry.addr = virt_to_phys(cpu_data);
+ ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+ &dump_entry);
+ if (ret) {
+ dev_err(wdog_dd->dev, "Dump setup failed, id = %d\n",
+ MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu);
+ dma_free_coherent(wdog_dd->dev, MAX_CPU_SCANDUMP_SIZE,
+ dump_vaddr,
+ dump_addr);
+ devm_kfree(wdog_dd->dev, cpu_data);
+ }
+ }
+}
+
static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
{
int error = 0;
@@ -617,6 +662,7 @@
delay_time = msecs_to_jiffies(wdog_dd->pet_time);
wdog_dd->min_slack_ticks = UINT_MAX;
wdog_dd->min_slack_ns = ULLONG_MAX;
+ configure_scandump(wdog_dd);
configure_bark_dump(wdog_dd);
timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
diff --git a/drivers/soundwire/swr-wcd-ctrl.c b/drivers/soundwire/swr-wcd-ctrl.c
index 7e33e8b..ce2a367 100644
--- a/drivers/soundwire/swr-wcd-ctrl.c
+++ b/drivers/soundwire/swr-wcd-ctrl.c
@@ -524,7 +524,7 @@
{
struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
int ret = 0;
- int val = 0;
+ int val;
u8 *reg_val = (u8 *)buf;
if (!swrm) {
@@ -538,7 +538,9 @@
else
val = swrm->read(swrm->handle, reg_addr);
- *reg_val = (u8)val;
+ if (!ret)
+ *reg_val = (u8)val;
+
pm_runtime_mark_last_busy(&swrm->pdev->dev);
return ret;
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 4c86197..db12900 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
@@ -119,30 +120,30 @@
int div = 0;
int idx;
struct se_geni_rsc *rsc = &mas->spi_rsc;
- int ret = 0;
u32 clk_sel = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
u32 m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
+ int ret;
clk_sel &= ~CLK_SEL_MSK;
m_clk_cfg &= ~CLK_DIV_MSK;
idx = get_sclk(speed_hz, &sclk_freq);
- if (idx < 0) {
- ret = -EINVAL;
- goto spi_clk_cfg_exit;
- }
- div = (sclk_freq / (SPI_OVERSAMPLING / speed_hz));
+ if (idx < 0)
+ return -EINVAL;
+
+ div = ((sclk_freq / SPI_OVERSAMPLING) / speed_hz);
+ if (!div)
+ return -EINVAL;
clk_sel |= (idx & CLK_SEL_MSK);
m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
ret = clk_set_rate(rsc->se_clk, sclk_freq);
if (ret)
- goto spi_clk_cfg_exit;
+ return ret;
geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
-spi_clk_cfg_exit:
- return ret;
+ return 0;
}
static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
@@ -195,7 +196,8 @@
ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
if (ret) {
- dev_err(&spi_mas->dev, "Err setting clks ret %d\n", ret);
+ dev_err(&spi_mas->dev, "Err setting clks ret(%d) for %d\n",
+ ret, mas->cur_speed_hz);
goto prepare_message_exit;
}
spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
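
Why the reordered division matters, with illustrative numbers (SPI_OVERSAMPLING == 16 is an assumption for this sketch):

	/*
	 * old: sclk_freq / (SPI_OVERSAMPLING / speed_hz)
	 *      speed_hz = 1000000 -> (16 / 1000000) == 0 -> divide-by-zero
	 *
	 * new: (sclk_freq / SPI_OVERSAMPLING) / speed_hz
	 *      sclk_freq = 19200000 -> (19200000 / 16) / 1000000 == 1,
	 *      and a zero divider is now rejected with -EINVAL instead of
	 *      being programmed into GENI_SER_M_CLK_CFG.
	 */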
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 0d3b70b..633632a 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -24,4 +24,14 @@
This is required for communicating with Qualcomm PMICs and
other devices that have the SPMI interface.
+config SPMI_MSM_PMIC_ARB_DEBUG
+ tristate "QTI SPMI Debug Controller (PMIC Arbiter)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter debug interface on Qualcomm Technologies,
+ Inc. (QTI) MSM family processors. This feature is available on chips
+ with PMIC arbiter version 5 and above.
+
endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index fc75104..4f20815 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_SPMI) += spmi.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
+obj-$(CONFIG_SPMI_MSM_PMIC_ARB_DEBUG) += spmi-pmic-arb-debug.o
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
new file mode 100644
index 0000000..c5a31a9
--- /dev/null
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+
+/* PMIC Arbiter debug register offsets */
+#define PMIC_ARB_DEBUG_CMD0 0x00
+#define PMIC_ARB_DEBUG_CMD1 0x04
+#define PMIC_ARB_DEBUG_CMD2 0x08
+#define PMIC_ARB_DEBUG_CMD3 0x0C
+#define PMIC_ARB_DEBUG_STATUS 0x14
+#define PMIC_ARB_DEBUG_WDATA(n) (0x18 + 4 * (n))
+#define PMIC_ARB_DEBUG_RDATA(n) (0x38 + 4 * (n))
+
+/* Transaction status flag bits */
+enum pmic_arb_chnl_status {
+ PMIC_ARB_STATUS_DONE = BIT(0),
+ PMIC_ARB_STATUS_FAILURE = BIT(1),
+ PMIC_ARB_STATUS_DENIED = BIT(2),
+ PMIC_ARB_STATUS_DROPPED = BIT(3),
+};
+
+/* Command Opcodes */
+enum pmic_arb_cmd_op_code {
+ PMIC_ARB_OP_EXT_WRITEL = 0,
+ PMIC_ARB_OP_EXT_READL = 1,
+ PMIC_ARB_OP_EXT_WRITE = 2,
+ PMIC_ARB_OP_RESET = 3,
+ PMIC_ARB_OP_SLEEP = 4,
+ PMIC_ARB_OP_SHUTDOWN = 5,
+ PMIC_ARB_OP_WAKEUP = 6,
+ PMIC_ARB_OP_AUTHENTICATE = 7,
+ PMIC_ARB_OP_MSTR_READ = 8,
+ PMIC_ARB_OP_MSTR_WRITE = 9,
+ PMIC_ARB_OP_EXT_READ = 13,
+ PMIC_ARB_OP_WRITE = 14,
+ PMIC_ARB_OP_READ = 15,
+ PMIC_ARB_OP_ZERO_WRITE = 16,
+};
+
+#define PMIC_ARB_TIMEOUT_US 100
+#define PMIC_ARB_MAX_TRANS_BYTES 8
+#define PMIC_ARB_MAX_SID 0xF
+
+/**
+ * spmi_pmic_arb_debug - SPMI PMIC Arbiter debug object
+ *
+ * @addr: base address of SPMI PMIC arbiter debug module
+ * @lock: lock to synchronize accesses.
+ */
+struct spmi_pmic_arb_debug {
+ void __iomem *addr;
+ raw_spinlock_t lock;
+};
+
+static inline void pmic_arb_debug_write(struct spmi_pmic_arb_debug *pa,
+ u32 offset, u32 val)
+{
+ writel_relaxed(val, pa->addr + offset);
+}
+
+static inline u32 pmic_arb_debug_read(struct spmi_pmic_arb_debug *pa,
+ u32 offset)
+{
+ return readl_relaxed(pa->addr + offset);
+}
+
+/* pa->lock must be held by the caller. */
+static int pmic_arb_debug_wait_for_done(struct spmi_controller *ctrl)
+{
+ struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+ u32 status = 0;
+ u32 timeout = PMIC_ARB_TIMEOUT_US;
+
+ while (timeout--) {
+ status = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_STATUS);
+
+ if (status & PMIC_ARB_STATUS_DONE) {
+ if (status & PMIC_ARB_STATUS_DENIED) {
+ dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n",
+ __func__, status);
+ return -EPERM;
+ }
+
+ if (status & PMIC_ARB_STATUS_FAILURE) {
+ dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+
+ if (status & PMIC_ARB_STATUS_DROPPED) {
+ dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+
+ return 0;
+ }
+ udelay(1);
+ }
+
+ dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n", __func__, status);
+ return -ETIMEDOUT;
+}
+
+/* pa->lock must be held by the caller. */
+static int pmic_arb_debug_issue_command(struct spmi_controller *ctrl, u8 opc,
+ u8 sid, u16 addr, size_t len)
+{
+ struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+ u16 pid = (addr >> 8) & 0xFF;
+ u16 offset = addr & 0xFF;
+ u8 byte_count = len - 1;
+
+ if (byte_count >= PMIC_ARB_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev, "pmic-arb supports 1 to %d bytes per transaction, but %zu requested",
+ PMIC_ARB_MAX_TRANS_BYTES, len);
+ return -EINVAL;
+ }
+
+ if (sid > PMIC_ARB_MAX_SID) {
+ dev_err(&ctrl->dev, "pmic-arb supports sid 0 to %u, but %u requested",
+ PMIC_ARB_MAX_SID, sid);
+ return -EINVAL;
+ }
+
+ pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD3, offset);
+ pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD2, pid);
+ pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD1, (byte_count << 4) | sid);
+
+ /* Start the transaction */
+ pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD0, opc << 1);
+
+ return pmic_arb_debug_wait_for_done(ctrl);
+}
+
+/* Non-data command */
+static int pmic_arb_debug_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
+
+ /* Check for valid non-data command */
+ if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+ return -EINVAL;
+
+ return -EOPNOTSUPP;
+}
+
+static int pmic_arb_debug_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ int i, rc;
+
+ /* Check the opcode */
+ if (opc >= 0x60 && opc <= 0x7F)
+ opc = PMIC_ARB_OP_READ;
+ else if (opc >= 0x20 && opc <= 0x2F)
+ opc = PMIC_ARB_OP_EXT_READ;
+ else if (opc >= 0x38 && opc <= 0x3F)
+ opc = PMIC_ARB_OP_EXT_READL;
+ else
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+
+ rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
+ if (rc)
+ goto done;
+
+ /* Read data from FIFO */
+ for (i = 0; i < len; i++)
+ buf[i] = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_RDATA(i));
+done:
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ return rc;
+}
+
+static int pmic_arb_debug_write_cmd(struct spmi_controller *ctrl, u8 opc,
+ u8 sid, u16 addr, const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ int i, rc;
+
+ if (len > PMIC_ARB_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev, "pmic-arb supports 1 to %d bytes per transaction, but %zu requested\n",
+ PMIC_ARB_MAX_TRANS_BYTES, len);
+ return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x40 && opc <= 0x5F)
+ opc = PMIC_ARB_OP_WRITE;
+ else if (opc >= 0x00 && opc <= 0x0F)
+ opc = PMIC_ARB_OP_EXT_WRITE;
+ else if (opc >= 0x30 && opc <= 0x37)
+ opc = PMIC_ARB_OP_EXT_WRITEL;
+ else if (opc >= 0x80)
+ opc = PMIC_ARB_OP_ZERO_WRITE;
+ else
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pa->lock, flags);
+
+ /* Write data to FIFO */
+ for (i = 0; i < len; i++)
+ pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_WDATA(i), buf[i]);
+
+ rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
+
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+ return rc;
+}
+
+static int spmi_pmic_arb_debug_probe(struct platform_device *pdev)
+{
+ struct spmi_pmic_arb_debug *pa;
+ struct spmi_controller *ctrl;
+ struct resource *res;
+ int rc;
+ u32 fuse_val, fuse_bit;
+ void __iomem *fuse_addr;
+
+ /* Check if the debug bus is disabled by a fuse. */
+ rc = of_property_read_u32(pdev->dev.of_node, "qcom,fuse-disable-bit",
+ &fuse_bit);
+ if (!rc) {
+ if (fuse_bit > 31) {
+ dev_err(&pdev->dev, "qcom,fuse-disable-bit supports values 0 to 31, but %u specified\n",
+ fuse_bit);
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "fuse");
+ if (!res) {
+ dev_err(&pdev->dev, "fuse address not specified\n");
+ return -EINVAL;
+ }
+
+ fuse_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fuse_addr))
+ return PTR_ERR(fuse_addr);
+
+ fuse_val = readl_relaxed(fuse_addr);
+ devm_iounmap(&pdev->dev, fuse_addr);
+
+ if (fuse_val & BIT(fuse_bit)) {
+ dev_err(&pdev->dev, "SPMI PMIC arbiter debug bus disabled by fuse\n");
+ return -ENODEV;
+ }
+ }
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
+ if (!ctrl)
+ return -ENOMEM;
+
+ pa = spmi_controller_get_drvdata(ctrl);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ if (!res) {
+ dev_err(&pdev->dev, "core address not specified\n");
+ rc = -EINVAL;
+ goto err_put_ctrl;
+ }
+
+ pa->addr = devm_ioremap_resource(&ctrl->dev, res);
+ if (IS_ERR(pa->addr)) {
+ rc = PTR_ERR(pa->addr);
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, ctrl);
+ raw_spin_lock_init(&pa->lock);
+
+ ctrl->cmd = pmic_arb_debug_cmd;
+ ctrl->read_cmd = pmic_arb_debug_read_cmd;
+ ctrl->write_cmd = pmic_arb_debug_write_cmd;
+
+ rc = spmi_controller_add(ctrl);
+ if (rc)
+ goto err_put_ctrl;
+
+ dev_info(&ctrl->dev, "SPMI PMIC arbiter debug bus controller added\n");
+
+ return 0;
+
+err_put_ctrl:
+ spmi_controller_put(ctrl);
+ return rc;
+}
+
+static int spmi_pmic_arb_debug_remove(struct platform_device *pdev)
+{
+ struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+
+ spmi_controller_remove(ctrl);
+ spmi_controller_put(ctrl);
+
+ return 0;
+}
+
+static const struct of_device_id spmi_pmic_arb_debug_match_table[] = {
+ { .compatible = "qcom,spmi-pmic-arb-debug", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spmi_pmic_arb_debug_match_table);
+
+static struct platform_driver spmi_pmic_arb_debug_driver = {
+ .probe = spmi_pmic_arb_debug_probe,
+ .remove = spmi_pmic_arb_debug_remove,
+ .driver = {
+ .name = "spmi_pmic_arb_debug",
+ .of_match_table = spmi_pmic_arb_debug_match_table,
+ },
+};
+
+int __init spmi_pmic_arb_debug_init(void)
+{
+ return platform_driver_register(&spmi_pmic_arb_debug_driver);
+}
+arch_initcall(spmi_pmic_arb_debug_init);
+
+static void __exit spmi_pmic_arb_debug_exit(void)
+{
+ platform_driver_unregister(&spmi_pmic_arb_debug_driver);
+}
+module_exit(spmi_pmic_arb_debug_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_pmic_arb_debug");
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 3a52b29..eb41e84 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -405,6 +405,7 @@
ret = PTR_ERR(vmfile);
goto out;
}
+ vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
}
get_file(asma->file);
@@ -765,10 +766,12 @@
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
+ mutex_unlock(&ashmem_mutex);
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 0efa80b..4a073339a 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
/*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
- /*
* Required for gPXE iSCSI boot client
*/
if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1f38177..da5a5fc 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -735,21 +735,23 @@
{
struct se_cmd *se_cmd = NULL;
int rc;
+ bool op_scsi = false;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
+ op_scsi = true;
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ rc = transport_generic_free_cmd(se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
target_put_sess_cmd(se_cmd);
}
break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096a..6e456de 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@
pr_err("Source se_lun->lun_se_dev does not exist\n");
return -EINVAL;
}
+ if (lun->lun_shutdown) {
+ pr_err("Unable to create mappedlun symlink because"
+ " lun->lun_shutdown=true\n");
+ return -EINVAL;
+ }
se_tpg = lun->lun_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2744251..1949f50 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -640,6 +640,8 @@
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+ lun->lun_shutdown = true;
+
core_clear_lun_from_tpg(lun, tpg);
/*
* Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -661,6 +663,8 @@
}
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_del_rcu(&lun->link);
+
+ lun->lun_shutdown = false;
mutex_unlock(&tpg->tpg_lun_mutex);
percpu_ref_exit(&lun->lun_ref);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 70c143a..1a83456 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -306,24 +306,50 @@
DATA_BLOCK_BITS);
}
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
- struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ bool bidi)
{
+ struct se_cmd *se_cmd = cmd->se_cmd;
int i, block;
int block_remaining = 0;
void *from, *to;
size_t copy_bytes, from_offset;
- struct scatterlist *sg;
+ struct scatterlist *sg, *data_sg;
+ unsigned int data_nents;
+ DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+ bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+ if (!bidi) {
+ data_sg = se_cmd->t_data_sg;
+ data_nents = se_cmd->t_data_nents;
+ } else {
+ uint32_t count;
+
+ /*
+ * For bidi case, the first count blocks are for Data-Out
+ * buffer blocks, and before gathering the Data-In buffer
+ * the Data-Out buffer blocks should be discarded.
+ */
+ count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ while (count--) {
+ block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+ clear_bit(block, bitmap);
+ }
+
+ data_sg = se_cmd->t_bidi_data_sg;
+ data_nents = se_cmd->t_bidi_data_nents;
+ }
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
if (block_remaining == 0) {
- block = find_first_bit(cmd_bitmap,
+ block = find_first_bit(bitmap,
DATA_BLOCK_BITS);
block_remaining = DATA_BLOCK_SIZE;
- clear_bit(block, cmd_bitmap);
+ clear_bit(block, bitmap);
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
@@ -389,6 +415,27 @@
return true;
}
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+ data_length += round_up(se_cmd->t_bidi_data_sg->length,
+ DATA_BLOCK_SIZE);
+ }
+
+ return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+ return data_length / DATA_BLOCK_SIZE;
+}
+
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
@@ -402,7 +449,7 @@
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
- size_t data_length;
+ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -416,8 +463,7 @@
* expensive to tell how many regions are freed in the bitmap
*/
base_command_size = max(offsetof(struct tcmu_cmd_entry,
- req.iov[se_cmd->t_bidi_data_nents +
- se_cmd->t_data_nents]),
+ req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
sizeof(struct tcmu_cmd_entry));
command_size = base_command_size
+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -428,11 +474,6 @@
mb = udev->mb_addr;
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
- data_length = se_cmd->data_length;
- if (se_cmd->se_cmd_flags & SCF_BIDI) {
- BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- data_length += se_cmd->t_bidi_data_sg->length;
- }
if ((command_size > (udev->cmdr_size / 2)) ||
data_length > udev->data_size) {
pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -502,11 +543,14 @@
entry->req.iov_dif_cnt = 0;
/* Handle BIDI commands */
- iov_cnt = 0;
- alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
- entry->req.iov_bidi_cnt = iov_cnt;
-
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ iov_cnt = 0;
+ iov++;
+ alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+ se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+ false);
+ entry->req.iov_bidi_cnt = iov_cnt;
+ }
/* cmd's data_bitmap is what changed in process */
bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
DATA_BLOCK_BITS);
@@ -582,19 +626,11 @@
se_cmd->scsi_sense_length);
free_data_area(udev, cmd);
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
/* Get Data-In buffer before clean up */
- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
- gather_data_area(udev, bitmap,
- se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+ gather_data_area(udev, cmd, true);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
- gather_data_area(udev, bitmap,
- se_cmd->t_data_sg, se_cmd->t_data_nents);
+ gather_data_area(udev, cmd, false);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
free_data_area(udev, cmd);
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 2faed7f..acbd26b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -52,7 +52,7 @@
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
-obj-$(CONFIG_QCOM_TSENS) += qcom/
+obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index a6245d5..37125c0 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -548,11 +548,29 @@
if (cpufreq_device->cpufreq_floor_state == state)
return 0;
- floor_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_floor_state = state;
- cpufreq_device->floor_freq = floor_freq;
- cpufreq_update_policy(cpu);
+ /*
+ * Check if the device has a platform mitigation function that
+ * can handle the CPU freq mitigation, if not, notify cpufreq
+ * framework.
+ */
+ if (cpufreq_device->plat_ops &&
+ cpufreq_device->plat_ops->floor_limit) {
+ /*
+ * Last level is core isolation so use the frequency
+ * of previous state.
+ */
+ if (state == cpufreq_device->max_level)
+ state--;
+ floor_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->floor_freq = floor_freq;
+ cpufreq_device->plat_ops->floor_limit(cpu, floor_freq);
+ } else {
+ floor_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->floor_freq = floor_freq;
+ cpufreq_update_policy(cpu);
+ }
return 0;
}
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 68bd1b56..f2011f6 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -52,7 +52,7 @@
*/
if (count > 0) {
tz->ops->get_trip_type(tz, count - 1, &trip_type);
- trace_thermal_zone_trip(tz, count - 1, trip_type);
+ trace_thermal_zone_trip(tz, count - 1, trip_type, true);
}
return count;
diff --git a/drivers/thermal/gov_low_limits.c b/drivers/thermal/gov_low_limits.c
index cf2dbc4..b75abf7 100644
--- a/drivers/thermal/gov_low_limits.c
+++ b/drivers/thermal/gov_low_limits.c
@@ -67,10 +67,11 @@
if (old_target == THERMAL_NO_TARGET &&
instance->target != THERMAL_NO_TARGET) {
- trace_thermal_zone_trip(tz, trip, trip_type);
+ trace_thermal_zone_trip(tz, trip, trip_type, true);
tz->passive += 1;
} else if (old_target != THERMAL_NO_TARGET &&
instance->target == THERMAL_NO_TARGET) {
+ trace_thermal_zone_trip(tz, trip, trip_type, false);
tz->passive -= 1;
}
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 5b4bb7a..432adbc 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
- * it under the term_tm of the GNU General Public License version 2 and
+ * it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
@@ -24,29 +24,28 @@
LIST_HEAD(tsens_device_list);
-static int tsens_get_temp(struct tsens_sensor *s, int *temp)
+static int tsens_get_temp(void *data, int *temp)
{
+ struct tsens_sensor *s = data;
struct tsens_device *tmdev = s->tmdev;
return tmdev->ops->get_temp(s, temp);
}
-static int tsens_set_trip_temp(struct tsens_sensor *s, int trip, int temp)
+static int tsens_set_trip_temp(void *data, int low_temp, int high_temp)
{
+ struct tsens_sensor *s = data;
struct tsens_device *tmdev = s->tmdev;
- if (tmdev->ops->set_trip_temp)
- return tmdev->ops->set_trip_temp(s, trip, temp);
+ if (tmdev->ops->set_trips)
+ return tmdev->ops->set_trips(s, low_temp, high_temp);
return 0;
}
static int tsens_init(struct tsens_device *tmdev)
{
- if (tmdev->ops->hw_init)
- return tmdev->ops->hw_init(tmdev);
-
- return 0;
+ return tmdev->ops->hw_init(tmdev);
}
static int tsens_register_interrupts(struct tsens_device *tmdev)
@@ -85,19 +84,16 @@
static struct thermal_zone_of_device_ops tsens_tm_thermal_zone_ops = {
.get_temp = tsens_get_temp,
- .set_trip_temp = tsens_set_trip_temp,
+ .set_trips = tsens_set_trip_temp,
};
static int get_device_tree_data(struct platform_device *pdev,
struct tsens_device *tmdev)
{
struct device_node *of_node = pdev->dev.of_node;
- u32 *hw_id, *client_id;
- u32 rc = 0, i, tsens_num_sensors = 0;
- int tsens_len;
const struct of_device_id *id;
const struct tsens_data *data;
- struct resource *res_tsens_mem, *res_mem = NULL;
+ struct resource *res_tsens_mem;
if (!of_match_node(tsens_table, of_node)) {
pr_err("Need to read SoC specific fuse map\n");
@@ -111,16 +107,6 @@
}
data = id->data;
- hw_id = devm_kzalloc(&pdev->dev,
- tsens_num_sensors * sizeof(u32), GFP_KERNEL);
- if (!hw_id)
- return -ENOMEM;
-
- client_id = devm_kzalloc(&pdev->dev,
- tsens_num_sensors * sizeof(u32), GFP_KERNEL);
- if (!client_id)
- return -ENOMEM;
-
tmdev->ops = data->ops;
tmdev->ctrl_data = data;
tmdev->pdev = pdev;
@@ -132,49 +118,32 @@
/* TSENS register region */
res_tsens_mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "tsens_physical");
+ IORESOURCE_MEM, "tsens_srot_physical");
if (!res_tsens_mem) {
pr_err("Could not get tsens physical address resource\n");
return -EINVAL;
}
- tsens_len = res_tsens_mem->end - res_tsens_mem->start + 1;
+ tmdev->tsens_srot_addr = devm_ioremap_resource(&pdev->dev,
+ res_tsens_mem);
+ if (IS_ERR(tmdev->tsens_srot_addr)) {
+ dev_err(&pdev->dev, "Failed to IO map TSENS registers.\n");
+ return PTR_ERR(tmdev->tsens_srot_addr);
+ }
- res_mem = request_mem_region(res_tsens_mem->start,
- tsens_len, res_tsens_mem->name);
- if (!res_mem) {
- pr_err("Request tsens physical memory region failed\n");
+ /* TSENS TM register region */
+ res_tsens_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tsens_tm_physical");
+ if (!res_tsens_mem) {
+ pr_err("Could not get tsens physical address resource\n");
return -EINVAL;
}
- tmdev->tsens_addr = ioremap(res_mem->start, tsens_len);
- if (!tmdev->tsens_addr) {
- pr_err("Failed to IO map TSENS registers.\n");
- return -EINVAL;
- }
-
- rc = of_property_read_u32_array(of_node,
- "qcom,sensor-id", hw_id, tsens_num_sensors);
- if (rc) {
- pr_err("Default sensor id mapping\n");
- for (i = 0; i < tsens_num_sensors; i++)
- tmdev->sensor[i].hw_id = i;
- } else {
- pr_err("Use specified sensor id mapping\n");
- for (i = 0; i < tsens_num_sensors; i++)
- tmdev->sensor[i].hw_id = hw_id[i];
- }
-
- rc = of_property_read_u32_array(of_node,
- "qcom,client-id", client_id, tsens_num_sensors);
- if (rc) {
- for (i = 0; i < tsens_num_sensors; i++)
- tmdev->sensor[i].id = i;
- pr_debug("Default client id mapping\n");
- } else {
- for (i = 0; i < tsens_num_sensors; i++)
- tmdev->sensor[i].id = client_id[i];
- pr_debug("Use specified client id mapping\n");
+ tmdev->tsens_tm_addr = devm_ioremap_resource(&pdev->dev,
+ res_tsens_mem);
+ if (IS_ERR(tmdev->tsens_tm_addr)) {
+ dev_err(&pdev->dev, "Failed to IO map TSENS TM registers.\n");
+ return PTR_ERR(tmdev->tsens_tm_addr);
}
return 0;
@@ -182,20 +151,28 @@
static int tsens_thermal_zone_register(struct tsens_device *tmdev)
{
- int rc = 0, i = 0;
+ int i = 0, sensor_missing = 0;
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < TSENS_MAX_SENSORS; i++) {
tmdev->sensor[i].tmdev = tmdev;
- tmdev->sensor[i].tzd = devm_thermal_zone_of_sensor_register(
- &tmdev->pdev->dev, i, &tmdev->sensor[i],
- &tsens_tm_thermal_zone_ops);
+ tmdev->sensor[i].hw_id = i;
+ tmdev->sensor[i].tzd =
+ devm_thermal_zone_of_sensor_register(
+ &tmdev->pdev->dev, i,
+ &tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
if (IS_ERR(tmdev->sensor[i].tzd)) {
- pr_err("Error registering sensor:%d\n", i);
+ pr_debug("Error registering sensor:%d\n", i);
+ sensor_missing++;
continue;
}
}
- return rc;
+ if (sensor_missing == TSENS_MAX_SENSORS) {
+ pr_err("No TSENS sensors to register?\n");
+ return -ENODEV;
+ }
+
+ return 0;
}
static int tsens_tm_remove(struct platform_device *pdev)
@@ -207,32 +184,19 @@
int tsens_tm_probe(struct platform_device *pdev)
{
- struct device_node *of_node = pdev->dev.of_node;
struct tsens_device *tmdev = NULL;
- u32 tsens_num_sensors = 0;
int rc;
if (!(pdev->dev.of_node))
return -ENODEV;
- rc = of_property_read_u32(of_node,
- "qcom,sensors", &tsens_num_sensors);
- if (rc || (!tsens_num_sensors)) {
- dev_err(&pdev->dev, "missing sensors\n");
- return -ENODEV;
- }
-
tmdev = devm_kzalloc(&pdev->dev,
sizeof(struct tsens_device) +
- tsens_num_sensors *
+ TSENS_MAX_SENSORS *
sizeof(struct tsens_sensor),
GFP_KERNEL);
- if (tmdev == NULL) {
- pr_err("%s: kzalloc() failed.\n", __func__);
+ if (tmdev == NULL)
return -ENOMEM;
- }
-
- tmdev->num_sensors = tsens_num_sensors;
rc = get_device_tree_data(pdev, tmdev);
if (rc) {
@@ -241,8 +205,10 @@
}
rc = tsens_init(tmdev);
- if (rc)
+ if (rc) {
+ pr_err("Error initializing TSENS controller\n");
return rc;
+ }
rc = tsens_thermal_zone_register(tmdev);
if (rc) {
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index b337ad7..f0be6e9 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -140,16 +140,11 @@
mutex_lock(&data->senps->lock);
of_thermal_aggregate_trip_types(tz, GENMASK(THERMAL_TRIP_CRITICAL, 0),
&low, &high);
- if (low == data->senps->trip_low
- && high == data->senps->trip_high)
- goto set_trips_exit;
-
data->senps->trip_low = low;
data->senps->trip_high = high;
ret = data->senps->ops->set_trips(data->senps->sensor_data,
low, high);
-set_trips_exit:
mutex_unlock(&data->senps->lock);
return ret;
}
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index be32e5a..473d15a 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -9,3 +9,24 @@
thermal zone device via the mode file results in disabling the sensor.
Also able to set threshold temperature for both hot and cold and update
when a threshold is reached.
+
+config MSM_BCL_PERIPHERAL_CTL
+ bool "BCL driver to control the PMIC BCL peripheral"
+ depends on SPMI && THERMAL_OF
+ help
+ Say Y here to enable this BCL PMIC peripheral driver. This driver
+ provides routines to configure and monitor the BCL
+ PMIC peripheral. This driver registers the battery current and
+ voltage sensors with the thermal core framework and can take
+ threshold input and notify the thermal core when the threshold is
+ reached.
+
+config QTI_THERMAL_LIMITS_DCVS
+ bool "QTI LMH DCVS Driver"
+ depends on THERMAL_OF
+ help
+ This enables the driver for Limits Management Hardware - DCVS block
+ for the application processors. The h/w block that is available for
+ each cluster can be used to perform quick thermal mitigations by
+ tracking temperatures of the CPUs and taking thermal action in the
+ hardware without s/w intervention.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 2cc2193..d1a53b0 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
+obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
+obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
diff --git a/drivers/thermal/qcom/bcl_peripheral.c b/drivers/thermal/qcom/bcl_peripheral.c
new file mode 100644
index 0000000..75e553f
--- /dev/null
+++ b/drivers/thermal/qcom/bcl_peripheral.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+
+#define BCL_DRIVER_NAME "bcl_peripheral"
+#define BCL_VBAT_INT "bcl-low-vbat"
+#define BCL_VLOW_VBAT_INT "bcl-very-low-vbat"
+#define BCL_CLOW_VBAT_INT "bcl-crit-low-vbat"
+#define BCL_IBAT_INT "bcl-high-ibat"
+#define BCL_VHIGH_IBAT_INT "bcl-very-high-ibat"
+#define BCL_MONITOR_EN 0x46
+#define BCL_VBAT_MIN 0x5C
+#define BCL_IBAT_MAX 0x5D
+#define BCL_MAX_MIN_CLR 0x48
+#define BCL_IBAT_MAX_CLR 3
+#define BCL_VBAT_MIN_CLR 2
+#define BCL_VBAT_ADC_LOW 0x72
+#define BCL_VBAT_COMP_LOW 0x75
+#define BCL_VBAT_COMP_TLOW 0x76
+#define BCL_IBAT_HIGH 0x78
+#define BCL_IBAT_TOO_HIGH 0x79
+#define BCL_LMH_CFG 0xA3
+#define BCL_CFG 0x6A
+#define LMH_INT_POL_HIGH 0x12
+#define LMH_INT_EN 0x15
+#define BCL_VBAT_SCALING 39000
+#define BCL_IBAT_SCALING 80
+#define BCL_LMH_CFG_VAL 0x3
+#define BCL_CFG_VAL 0x81
+#define LMH_INT_VAL 0x7
+#define BCL_READ_RETRY_LIMIT 3
+#define VAL_CP_REG_BUF_LEN 3
+#define VAL_REG_BUF_OFFSET 0
+#define VAL_CP_REG_BUF_OFFSET 2
+#define BCL_STD_VBAT_NR 9
+#define BCL_VBAT_NO_READING 127
+
+enum bcl_dev_type {
+ BCL_HIGH_IBAT,
+ BCL_VHIGH_IBAT,
+ BCL_LOW_VBAT,
+ BCL_VLOW_VBAT,
+ BCL_CLOW_VBAT,
+ BCL_SOC_MONITOR,
+ BCL_TYPE_MAX,
+};
+
+struct bcl_peripheral_data {
+ int irq_num;
+ long int trip_temp;
+ int trip_val;
+ int last_val;
+ struct mutex state_trans_lock;
+ bool irq_enabled;
+ struct thermal_zone_of_device_ops ops;
+ struct thermal_zone_device *tz_dev;
+};
+
+struct bcl_device {
+ struct regmap *regmap;
+ uint16_t fg_bcl_addr;
+ uint16_t fg_lmh_addr;
+ struct notifier_block psy_nb;
+ struct work_struct soc_eval_work;
+ struct bcl_peripheral_data param[BCL_TYPE_MAX];
+};
+
+static struct bcl_device *bcl_perph;
+static int vbat_low[BCL_STD_VBAT_NR] = {
+ 2400, 2500, 2600, 2700, 2800, 2900,
+ 3000, 3100, 3200};
+
+static int bcl_read_multi_register(int16_t reg_offset, uint8_t *data, int len)
+{
+ int ret = 0;
+
+ if (!bcl_perph) {
+ pr_err("BCL device not initialized\n");
+ return -EINVAL;
+ }
+ ret = regmap_bulk_read(bcl_perph->regmap,
+ (bcl_perph->fg_bcl_addr + reg_offset),
+ data, len);
+ if (ret < 0) {
+ pr_err("Error reading register %d. err:%d", reg_offset, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bcl_write_general_register(int16_t reg_offset,
+ uint16_t base, uint8_t data)
+{
+ int ret = 0;
+ uint8_t *write_buf = &data;
+
+ if (!bcl_perph) {
+ pr_err("BCL device not initialized\n");
+ return -EINVAL;
+ }
+ ret = regmap_write(bcl_perph->regmap, (base + reg_offset), *write_buf);
+ if (ret < 0) {
+ pr_err("Error reading register %d. err:%d", reg_offset, ret);
+ return ret;
+ }
+ pr_debug("wrote 0x%02x to 0x%04x\n", data, base + reg_offset);
+
+ return ret;
+}
+
+static int bcl_write_register(int16_t reg_offset, uint8_t data)
+{
+ return bcl_write_general_register(reg_offset,
+ bcl_perph->fg_bcl_addr, data);
+}
+
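+/*
+ * ADC conversions: one VBAT code corresponds to BCL_VBAT_SCALING uV
+ * (i.e. 39 mV) and one IBAT code to BCL_IBAT_SCALING mA.
+ */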
+static void convert_vbat_to_adc_val(int *val)
+{
+ *val = (*val * 1000) / BCL_VBAT_SCALING;
+}
+
+static void convert_adc_to_vbat_val(int *val)
+{
+ *val = *val * BCL_VBAT_SCALING / 1000;
+}
+
+static void convert_ibat_to_adc_val(int *val)
+{
+ *val = *val / BCL_IBAT_SCALING;
+}
+
+static void convert_adc_to_ibat_val(int *val)
+{
+ *val = *val * BCL_IBAT_SCALING;
+}
+
+static int bcl_set_ibat(void *data, int low, int high)
+{
+ int ret = 0, ibat_ua, thresh_value;
+ int8_t val = 0;
+ int16_t addr;
+ struct bcl_peripheral_data *bat_data =
+ (struct bcl_peripheral_data *)data;
+
+ thresh_value = high;
+ if (bat_data->trip_temp == thresh_value)
+ return 0;
+
+ mutex_lock(&bat_data->state_trans_lock);
+ if (bat_data->irq_num && bat_data->irq_enabled) {
+ disable_irq_nosync(bat_data->irq_num);
+ bat_data->irq_enabled = false;
+ }
+ if (thresh_value == INT_MAX) {
+ bat_data->trip_temp = thresh_value;
+ goto set_trip_exit;
+ }
+
+ ibat_ua = thresh_value;
+ convert_ibat_to_adc_val(&thresh_value);
+ val = (int8_t)thresh_value;
+ if (&bcl_perph->param[BCL_HIGH_IBAT] == bat_data) {
+ addr = BCL_IBAT_HIGH;
+ pr_debug("ibat high threshold:%d mA ADC:0x%02x\n",
+ ibat_ua, val);
+ } else if (&bcl_perph->param[BCL_VHIGH_IBAT] == bat_data) {
+ addr = BCL_IBAT_TOO_HIGH;
+ pr_debug("ibat too high threshold:%d mA ADC:0x%02x\n",
+ ibat_ua, val);
+ } else {
+ goto set_trip_exit;
+ }
+ ret = bcl_write_register(addr, val);
+ if (ret) {
+ pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+ goto set_trip_exit;
+ }
+ bat_data->trip_temp = ibat_ua;
+
+ if (bat_data->irq_num && !bat_data->irq_enabled) {
+ enable_irq(bat_data->irq_num);
+ bat_data->irq_enabled = true;
+ }
+
+set_trip_exit:
+ mutex_unlock(&bat_data->state_trans_lock);
+
+ return ret;
+}
+
+static int bcl_set_vbat(void *data, int low, int high)
+{
+ int ret = 0, vbat_uv, vbat_idx, thresh_value;
+ int8_t val = 0;
+ struct bcl_peripheral_data *bat_data =
+ (struct bcl_peripheral_data *)data;
+ uint16_t addr;
+
+ thresh_value = low;
+ if (bat_data->trip_temp == thresh_value)
+ return 0;
+
+ mutex_lock(&bat_data->state_trans_lock);
+
+ if (bat_data->irq_num && bat_data->irq_enabled) {
+ disable_irq_nosync(bat_data->irq_num);
+ bat_data->irq_enabled = false;
+ }
+ if (thresh_value == INT_MIN) {
+ bat_data->trip_temp = thresh_value;
+ goto set_trip_exit;
+ }
+ vbat_uv = thresh_value;
+ convert_vbat_to_adc_val(&thresh_value);
+ val = (int8_t)thresh_value;
+ /*
+ * very low and critical low trip can support only standard
+ * trip thresholds
+ */
+ if (&bcl_perph->param[BCL_LOW_VBAT] == bat_data) {
+ addr = BCL_VBAT_ADC_LOW;
+ pr_debug("vbat low threshold:%d mv ADC:0x%02x\n",
+ vbat_uv, val);
+ } else if (&bcl_perph->param[BCL_VLOW_VBAT] == bat_data) {
+ /*
+ * Scan the standard voltage table, sorted in ascending order
+ * and find the closest threshold that is lower or equal to
+ * the requested value. Passive trip supports thresholds
+ * indexed from 1...BCL_STD_VBAT_NR in the voltage table.
+ */
+ for (vbat_idx = 2; vbat_idx < BCL_STD_VBAT_NR;
+ vbat_idx++) {
+ if (vbat_uv >= vbat_low[vbat_idx])
+ continue;
+ break;
+ }
+ addr = BCL_VBAT_COMP_LOW;
+ val = vbat_idx - 2;
+ vbat_uv = vbat_low[vbat_idx - 1];
+ pr_debug("vbat too low threshold:%d mv ADC:0x%02x\n",
+ vbat_uv, val);
+ } else if (&bcl_perph->param[BCL_CLOW_VBAT] == bat_data) {
+ /* Hot trip supports thresholds indexed from
+ * 0...BCL_STD_VBAT_NR-1 in the voltage table.
+ */
+ for (vbat_idx = 1; vbat_idx < (BCL_STD_VBAT_NR - 1);
+ vbat_idx++) {
+ if (vbat_uv >= vbat_low[vbat_idx])
+ continue;
+ break;
+ }
+ addr = BCL_VBAT_COMP_TLOW;
+ val = vbat_idx - 1;
+ vbat_uv = vbat_low[vbat_idx - 1];
+ pr_debug("vbat critic low threshold:%d mv ADC:0x%02x\n",
+ vbat_uv, val);
+ } else {
+ goto set_trip_exit;
+ }
+
+ ret = bcl_write_register(addr, val);
+ if (ret) {
+ pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+ goto set_trip_exit;
+ }
+ bat_data->trip_temp = vbat_uv;
+ if (bat_data->irq_num && !bat_data->irq_enabled) {
+ enable_irq(bat_data->irq_num);
+ bat_data->irq_enabled = true;
+ }
+
+set_trip_exit:
+ mutex_unlock(&bat_data->state_trans_lock);
+ return ret;
+}
+
+static int bcl_clear_vbat_min(void)
+{
+ int ret = 0;
+
+ ret = bcl_write_register(BCL_MAX_MIN_CLR,
+ BIT(BCL_VBAT_MIN_CLR));
+ if (ret)
+ pr_err("Error in clearing vbat min reg. err:%d", ret);
+
+ return ret;
+}
+
+static int bcl_clear_ibat_max(void)
+{
+ int ret = 0;
+
+ ret = bcl_write_register(BCL_MAX_MIN_CLR,
+ BIT(BCL_IBAT_MAX_CLR));
+ if (ret)
+ pr_err("Error in clearing ibat max reg. err:%d", ret);
+
+ return ret;
+}
+
+static int bcl_read_ibat(void *data, int *adc_value)
+{
+ int ret = 0, timeout = 0;
+ int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+ struct bcl_peripheral_data *bat_data =
+ (struct bcl_peripheral_data *)data;
+
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
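+ /*
+ * Read the latched max ibat value and its copy until both
+ * registers agree, to avoid using a value that was updated
+ * mid-read.
+ */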
+ do {
+ ret = bcl_read_multi_register(BCL_IBAT_MAX, val,
+ VAL_CP_REG_BUF_LEN);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ goto bcl_read_exit;
+ }
+ } while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+ && timeout++ < BCL_READ_RETRY_LIMIT);
+ if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+ ret = -ENODEV;
+ *adc_value = bat_data->last_val;
+ goto bcl_read_exit;
+ }
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ if (*adc_value == 0) {
+ /*
+ * The sensor can sometimes read a value of 0 on
+ * consecutive reads
+ */
+ *adc_value = bat_data->last_val;
+ } else {
+ convert_adc_to_ibat_val(adc_value);
+ bat_data->last_val = *adc_value;
+ }
+ pr_debug("ibat:%d mA\n", bat_data->last_val);
+
+bcl_read_exit:
+ return ret;
+}
+
+static int bcl_read_ibat_and_clear(void *data, int *adc_value)
+{
+ int ret = 0;
+
+ ret = bcl_read_ibat(data, adc_value);
+ if (ret)
+ return ret;
+ return bcl_clear_ibat_max();
+}
+
+static int bcl_read_vbat(void *data, int *adc_value)
+{
+ int ret = 0, timeout = 0;
+ int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+ struct bcl_peripheral_data *bat_data =
+ (struct bcl_peripheral_data *)data;
+
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
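+ /* As with ibat, re-read until the min vbat value and its copy agree */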
+ do {
+ ret = bcl_read_multi_register(BCL_VBAT_MIN, val,
+ VAL_CP_REG_BUF_LEN);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ goto bcl_read_exit;
+ }
+ } while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+ && timeout++ < BCL_READ_RETRY_LIMIT);
+ if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+ ret = -ENODEV;
+ goto bcl_read_exit;
+ }
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ if (*adc_value == BCL_VBAT_NO_READING) {
+ *adc_value = bat_data->last_val;
+ } else {
+ convert_adc_to_vbat_val(adc_value);
+ bat_data->last_val = *adc_value;
+ }
+ pr_debug("vbat:%d mv\n", bat_data->last_val);
+
+bcl_read_exit:
+ return ret;
+}
+
+static int bcl_read_vbat_and_clear(void *data, int *adc_value)
+{
+ int ret;
+
+ ret = bcl_read_vbat(data, adc_value);
+ if (ret)
+ return ret;
+ return bcl_clear_vbat_min();
+}
+
+static irqreturn_t bcl_handle_ibat(int irq, void *data)
+{
+ struct bcl_peripheral_data *perph_data =
+ (struct bcl_peripheral_data *)data;
+
+ mutex_lock(&perph_data->state_trans_lock);
+ if (!perph_data->irq_enabled) {
+ WARN_ON(1);
+ disable_irq_nosync(irq);
+ perph_data->irq_enabled = false;
+ goto exit_intr;
+ }
+ mutex_unlock(&perph_data->state_trans_lock);
+ of_thermal_handle_trip(perph_data->tz_dev);
+
+ return IRQ_HANDLED;
+
+exit_intr:
+ mutex_unlock(&perph_data->state_trans_lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcl_handle_vbat(int irq, void *data)
+{
+ struct bcl_peripheral_data *perph_data =
+ (struct bcl_peripheral_data *)data;
+
+ mutex_lock(&perph_data->state_trans_lock);
+ if (!perph_data->irq_enabled) {
+ WARN_ON(1);
+ disable_irq_nosync(irq);
+ perph_data->irq_enabled = false;
+ goto exit_intr;
+ }
+ mutex_unlock(&perph_data->state_trans_lock);
+ of_thermal_handle_trip(perph_data->tz_dev);
+
+ return IRQ_HANDLED;
+
+exit_intr:
+ mutex_unlock(&perph_data->state_trans_lock);
+ return IRQ_HANDLED;
+}
+
+static int bcl_get_devicetree_data(struct platform_device *pdev)
+{
+ int ret = 0;
+ const __be32 *prop = NULL;
+ struct device_node *dev_node = pdev->dev.of_node;
+
+ prop = of_get_address(dev_node, 0, NULL, NULL);
+ if (prop) {
+ bcl_perph->fg_bcl_addr = be32_to_cpu(*prop);
+ pr_debug("fg_user_adc@%04x\n", bcl_perph->fg_bcl_addr);
+ } else {
+ dev_err(&pdev->dev, "No fg_user_adc registers found\n");
+ return -ENODEV;
+ }
+
+ prop = of_get_address(dev_node, 1, NULL, NULL);
+ if (prop) {
+ bcl_perph->fg_lmh_addr = be32_to_cpu(*prop);
+ pr_debug("fg_lmh@%04x\n", bcl_perph->fg_lmh_addr);
+ } else {
+ dev_err(&pdev->dev, "No fg_lmh registers found\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
+static int bcl_set_soc(void *data, int low, int high)
+{
+ struct bcl_peripheral_data *bat_data =
+ (struct bcl_peripheral_data *)data;
+
+ if (low == bat_data->trip_temp)
+ return 0;
+
+ mutex_lock(&bat_data->state_trans_lock);
+ pr_debug("low soc threshold:%d\n", low);
+ bat_data->trip_temp = low;
+ if (low == INT_MIN) {
+ bat_data->irq_enabled = false;
+ goto unlock_and_exit;
+ }
+ bat_data->irq_enabled = true;
+ schedule_work(&bcl_perph->soc_eval_work);
+
+unlock_and_exit:
+ mutex_unlock(&bat_data->state_trans_lock);
+ return 0;
+}
+
+static int bcl_read_soc(void *data, int *val)
+{
+ static struct power_supply *batt_psy;
+ union power_supply_propval ret = {0,};
+ int err = 0;
+
+ *val = 100;
+ if (!batt_psy)
+ batt_psy = power_supply_get_by_name("battery");
+ if (batt_psy) {
+ err = power_supply_get_property(batt_psy,
+ POWER_SUPPLY_PROP_CAPACITY, &ret);
+ if (err) {
+ pr_err("battery percentage read error:%d\n",
+ err);
+ return err;
+ }
+ *val = ret.intval;
+ }
+ pr_debug("soc:%d\n", *val);
+
+ return err;
+}
+
+static void bcl_evaluate_soc(struct work_struct *work)
+{
+ int battery_percentage;
+ struct bcl_peripheral_data *perph_data =
+ &bcl_perph->param[BCL_SOC_MONITOR];
+
+ if (bcl_read_soc((void *)perph_data, &battery_percentage))
+ return;
+
+ mutex_lock(&perph_data->state_trans_lock);
+ if (!perph_data->irq_enabled)
+ goto eval_exit;
+ if (battery_percentage > perph_data->trip_temp)
+ goto eval_exit;
+
+ perph_data->trip_val = battery_percentage;
+ mutex_unlock(&perph_data->state_trans_lock);
+ of_thermal_handle_trip(perph_data->tz_dev);
+
+ return;
+eval_exit:
+ mutex_unlock(&perph_data->state_trans_lock);
+}
+
+static int battery_supply_callback(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct power_supply *psy = data;
+
+ if (strcmp(psy->desc->name, "battery"))
+ return NOTIFY_OK;
+ schedule_work(&bcl_perph->soc_eval_work);
+
+ return NOTIFY_OK;
+}
+
+static void bcl_fetch_trip(struct platform_device *pdev, const char *int_name,
+ struct bcl_peripheral_data *data,
+ irqreturn_t (*handle)(int, void *))
+{
+ int ret = 0, irq_num = 0;
+
+ /*
+ * Allow flexibility for the HLOS to set the trip temperature for
+ * all the thresholds but handle the interrupt for only one vbat
+ * and ibat interrupt. The LMH-DCVSh will handle and mitigate the
+ * rest of the ibat/vbat interrupts.
+ */
+ if (!handle) {
+ mutex_lock(&data->state_trans_lock);
+ data->irq_num = 0;
+ data->irq_enabled = false;
+ mutex_unlock(&data->state_trans_lock);
+ return;
+ }
+
+ irq_num = platform_get_irq_byname(pdev, int_name);
+ if (irq_num) {
+ mutex_lock(&data->state_trans_lock);
+ ret = devm_request_threaded_irq(&pdev->dev,
+ irq_num, NULL, handle,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ int_name, data);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Error requesting trip irq. err:%d",
+ ret);
+ mutex_unlock(&data->state_trans_lock);
+ return;
+ }
+ disable_irq_nosync(irq_num);
+ data->irq_num = irq_num;
+ data->irq_enabled = false;
+ mutex_unlock(&data->state_trans_lock);
+ }
+}
+
+static void bcl_probe_soc(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct bcl_peripheral_data *soc_data;
+
+ soc_data = &bcl_perph->param[BCL_SOC_MONITOR];
+ mutex_init(&soc_data->state_trans_lock);
+ soc_data->ops.get_temp = bcl_read_soc;
+ soc_data->ops.set_trips = bcl_set_soc;
+ INIT_WORK(&bcl_perph->soc_eval_work, bcl_evaluate_soc);
+ bcl_perph->psy_nb.notifier_call = battery_supply_callback;
+ ret = power_supply_reg_notifier(&bcl_perph->psy_nb);
+ if (ret < 0) {
+ pr_err("Unable to register soc notifier. err:%d\n", ret);
+ return;
+ }
+ soc_data->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+ BCL_SOC_MONITOR, soc_data, &soc_data->ops);
+ if (IS_ERR(soc_data->tz_dev)) {
+ pr_err("vbat register failed. err:%ld\n",
+ PTR_ERR(soc_data->tz_dev));
+ return;
+ }
+ thermal_zone_device_update(soc_data->tz_dev, THERMAL_DEVICE_UP);
+ schedule_work(&bcl_perph->soc_eval_work);
+}
+
+static void bcl_vbat_init(struct platform_device *pdev,
+ struct bcl_peripheral_data *vbat, enum bcl_dev_type type)
+{
+ mutex_init(&vbat->state_trans_lock);
+ switch (type) {
+ case BCL_LOW_VBAT:
+ bcl_fetch_trip(pdev, BCL_VBAT_INT, vbat, bcl_handle_vbat);
+ break;
+ case BCL_VLOW_VBAT:
+ bcl_fetch_trip(pdev, BCL_VLOW_VBAT_INT, vbat, NULL);
+ break;
+ case BCL_CLOW_VBAT:
+ bcl_fetch_trip(pdev, BCL_CLOW_VBAT_INT, vbat, NULL);
+ break;
+ default:
+ return;
+ }
+ vbat->ops.get_temp = bcl_read_vbat_and_clear;
+ vbat->ops.set_trips = bcl_set_vbat;
+ vbat->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+ type, vbat, &vbat->ops);
+ if (IS_ERR(vbat->tz_dev)) {
+ pr_err("vbat register failed. err:%ld\n",
+ PTR_ERR(vbat->tz_dev));
+ return;
+ }
+ thermal_zone_device_update(vbat->tz_dev, THERMAL_DEVICE_UP);
+}
+
+static void bcl_probe_vbat(struct platform_device *pdev)
+{
+ bcl_vbat_init(pdev, &bcl_perph->param[BCL_LOW_VBAT], BCL_LOW_VBAT);
+ bcl_vbat_init(pdev, &bcl_perph->param[BCL_VLOW_VBAT], BCL_VLOW_VBAT);
+ bcl_vbat_init(pdev, &bcl_perph->param[BCL_CLOW_VBAT], BCL_CLOW_VBAT);
+}
+
+static void bcl_ibat_init(struct platform_device *pdev,
+ struct bcl_peripheral_data *ibat, enum bcl_dev_type type)
+{
+ mutex_init(&ibat->state_trans_lock);
+ if (type == BCL_HIGH_IBAT)
+ bcl_fetch_trip(pdev, BCL_IBAT_INT, ibat, bcl_handle_ibat);
+ else
+ bcl_fetch_trip(pdev, BCL_VHIGH_IBAT_INT, ibat, NULL);
+ ibat->ops.get_temp = bcl_read_ibat_and_clear;
+ ibat->ops.set_trips = bcl_set_ibat;
+ ibat->tz_dev = thermal_zone_of_sensor_register(&pdev->dev,
+ type, ibat, &ibat->ops);
+ if (IS_ERR(ibat->tz_dev)) {
+ pr_err("ibat register failed. err:%ld\n",
+ PTR_ERR(ibat->tz_dev));
+ return;
+ }
+ thermal_zone_device_update(ibat->tz_dev, THERMAL_DEVICE_UP);
+}
+
+static void bcl_probe_ibat(struct platform_device *pdev)
+{
+ bcl_ibat_init(pdev, &bcl_perph->param[BCL_HIGH_IBAT], BCL_HIGH_IBAT);
+ bcl_ibat_init(pdev, &bcl_perph->param[BCL_VHIGH_IBAT], BCL_VHIGH_IBAT);
+}
+
+static void bcl_configure_lmh_peripheral(void)
+{
+ bcl_write_register(BCL_LMH_CFG, BCL_LMH_CFG_VAL);
+ bcl_write_register(BCL_CFG, BCL_CFG_VAL);
+ bcl_write_general_register(LMH_INT_POL_HIGH,
+ bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+ bcl_write_general_register(LMH_INT_EN,
+ bcl_perph->fg_lmh_addr, LMH_INT_VAL);
+}
+
+static int bcl_remove(struct platform_device *pdev)
+{
+ int i = 0;
+
+ for (; i < BCL_TYPE_MAX; i++) {
+ if (!bcl_perph->param[i].tz_dev)
+ continue;
+ if (i == BCL_SOC_MONITOR) {
+ power_supply_unreg_notifier(&bcl_perph->psy_nb);
+ flush_work(&bcl_perph->soc_eval_work);
+ }
+ thermal_zone_of_sensor_unregister(&pdev->dev,
+ bcl_perph->param[i].tz_dev);
+ }
+ bcl_perph = NULL;
+
+ return 0;
+}
+
+static int bcl_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ bcl_perph = devm_kzalloc(&pdev->dev, sizeof(*bcl_perph), GFP_KERNEL);
+ if (!bcl_perph)
+ return -ENOMEM;
+
+ bcl_perph->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!bcl_perph->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ ret = bcl_get_devicetree_data(pdev);
+ if (ret)
+ return ret;
+ bcl_probe_ibat(pdev);
+ bcl_probe_vbat(pdev);
+ bcl_probe_soc(pdev);
+ bcl_configure_lmh_peripheral();
+
+ dev_set_drvdata(&pdev->dev, bcl_perph);
+ ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
+ if (ret) {
+ pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+ goto bcl_probe_exit;
+ }
+
+ return 0;
+
+bcl_probe_exit:
+ bcl_remove(pdev);
+ return ret;
+}
+
+static const struct of_device_id bcl_match[] = {
+ {
+ .compatible = "qcom,msm-bcl-lmh",
+ },
+ {},
+};
+
+static struct platform_driver bcl_driver = {
+ .probe = bcl_probe,
+ .remove = bcl_remove,
+ .driver = {
+ .name = BCL_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = bcl_match,
+ },
+};
+
+builtin_platform_driver(bcl_driver);
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
new file mode 100644
index 0000000..bfaf7c7
--- /dev/null
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -0,0 +1,567 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/pm_opp.h>
+#include <linux/cpu_cooling.h>
+#include <linux/atomic.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+
+#include <soc/qcom/scm.h>
+
+#include "../thermal_core.h"
+
+#define LIMITS_DCVSH 0x10
+#define LIMITS_PROFILE_CHANGE 0x01
+#define LIMITS_NODE_DCVS 0x44435653
+
+#define LIMITS_SUB_FN_THERMAL 0x54484D4C
+#define LIMITS_SUB_FN_CRNT 0x43524E54
+#define LIMITS_SUB_FN_REL 0x52454C00
+#define LIMITS_SUB_FN_BCL 0x42434C00
+#define LIMITS_SUB_FN_GENERAL 0x47454E00
+
+#define LIMITS_ALGO_MODE_ENABLE 0x454E424C
+
+#define LIMITS_HI_THRESHOLD 0x48494748
+#define LIMITS_LOW_THRESHOLD 0x4C4F5700
+#define LIMITS_ARM_THRESHOLD 0x41524D00
+
+#define LIMITS_CLUSTER_0 0x6370302D
+#define LIMITS_CLUSTER_1 0x6370312D
+
+#define LIMITS_DOMAIN_MAX 0x444D4158
+#define LIMITS_DOMAIN_MIN 0x444D494E
+
+#define LIMITS_TEMP_DEFAULT 75000
+#define LIMITS_TEMP_HIGH_THRESH_MAX 120000
+#define LIMITS_LOW_THRESHOLD_OFFSET 500
+#define LIMITS_POLLING_DELAY_MS 10
+#define LIMITS_CLUSTER_0_REQ 0x179C1B04
+#define LIMITS_CLUSTER_1_REQ 0x179C3B04
+#define LIMITS_CLUSTER_0_INT_CLR 0x179CE808
+#define LIMITS_CLUSTER_1_INT_CLR 0x179CC808
+#define LIMITS_CLUSTER_0_MIN_FREQ 0x17D78BC0
+#define LIMITS_CLUSTER_1_MIN_FREQ 0x17D70BC0
+#define dcvsh_get_frequency(_val, _max) do { \
+ _max = (_val) & 0x3FF; \
+ _max *= 19200; \
+} while (0)
+#define FREQ_KHZ_TO_HZ(_val) ((_val) * 1000)
+#define FREQ_HZ_TO_KHZ(_val) ((_val) / 1000)
+
+enum lmh_hw_trips {
+ LIMITS_TRIP_ARM,
+ LIMITS_TRIP_HI,
+ LIMITS_TRIP_MAX,
+};
+
+struct __limits_cdev_data {
+ struct thermal_cooling_device *cdev;
+ u32 max_freq;
+ u32 min_freq;
+};
+
+struct limits_dcvs_hw {
+ char sensor_name[THERMAL_NAME_LENGTH];
+ uint32_t affinity;
+ uint32_t temp_limits[LIMITS_TRIP_MAX];
+ int irq_num;
+ void *osm_hw_reg;
+ void *int_clr_reg;
+ void *min_freq_reg;
+ cpumask_t core_map;
+ struct timer_list poll_timer;
+ unsigned long max_freq;
+ unsigned long min_freq;
+ unsigned long hw_freq_limit;
+ struct list_head list;
+ atomic_t is_irq_enabled;
+ struct mutex access_lock;
+ struct __limits_cdev_data *cdev_data;
+};
+
+LIST_HEAD(lmh_dcvs_hw_list);
+
+static int limits_dcvs_get_freq_limits(uint32_t cpu, unsigned long *max_freq,
+ unsigned long *min_freq)
+{
+ unsigned long freq_ceil = UINT_MAX, freq_floor = 0;
+ struct device *cpu_dev = NULL;
+ int ret = 0;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("Error in get CPU%d device\n", cpu);
+ return -ENODEV;
+ }
+
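+ /*
+ * freq_ceil starts at UINT_MAX so the floor lookup returns the
+ * highest OPP; freq_floor starts at 0 so the ceil lookup returns
+ * the lowest OPP.
+ */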
+ rcu_read_lock();
+ dev_pm_opp_find_freq_floor(cpu_dev, &freq_ceil);
+ dev_pm_opp_find_freq_ceil(cpu_dev, &freq_floor);
+ rcu_read_unlock();
+
+ *max_freq = freq_ceil / 1000;
+ *min_freq = freq_floor / 1000;
+
+ return ret;
+}
+
+static unsigned long limits_mitigation_notify(struct limits_dcvs_hw *hw)
+{
+ uint32_t val = 0;
+ struct device *cpu_dev = NULL;
+ unsigned long freq_val, max_limit = 0;
+ struct dev_pm_opp *opp_entry;
+
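+ /* Read the current mitigation frequency limit from the OSM register */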
+ val = readl_relaxed(hw->osm_hw_reg);
+ dcvsh_get_frequency(val, max_limit);
+ cpu_dev = get_cpu_device(cpumask_first(&hw->core_map));
+ if (!cpu_dev) {
+ pr_err("Error in get CPU%d device\n",
+ cpumask_first(&hw->core_map));
+ goto notify_exit;
+ }
+
+ freq_val = FREQ_KHZ_TO_HZ(max_limit);
+ rcu_read_lock();
+ opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+ /*
+ * Hardware mitigation frequency can be lower than the lowest
+ * possible CPU frequency. In that case freq floor call will
+ * fail with -ERANGE and we need to match to the lowest
+ * frequency using freq_ceil.
+ */
+ if (IS_ERR(opp_entry) && PTR_ERR(opp_entry) == -ERANGE) {
+ opp_entry = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_val);
+ if (IS_ERR(opp_entry))
+ dev_err(cpu_dev, "frequency:%lu. opp error:%ld\n",
+ freq_val, PTR_ERR(opp_entry));
+ }
+ rcu_read_unlock();
+ max_limit = FREQ_HZ_TO_KHZ(freq_val);
+
+ sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
+
+notify_exit:
+ hw->hw_freq_limit = max_limit;
+ return max_limit;
+}
+
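+/*
+ * Poll the hardware frequency limit while mitigation is in effect. Once
+ * the limit is restored to the maximum frequency, clear the interrupt
+ * status, re-enable the interrupt and stop polling.
+ */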
+static void limits_dcvs_poll(unsigned long data)
+{
+ unsigned long max_limit = 0;
+ struct limits_dcvs_hw *hw = (struct limits_dcvs_hw *)data;
+
+ if (hw->max_freq == UINT_MAX)
+ limits_dcvs_get_freq_limits(cpumask_first(&hw->core_map),
+ &hw->max_freq, &hw->min_freq);
+ max_limit = limits_mitigation_notify(hw);
+ if (max_limit >= hw->max_freq) {
+ del_timer(&hw->poll_timer);
+ writel_relaxed(0xFF, hw->int_clr_reg);
+ atomic_set(&hw->is_irq_enabled, 1);
+ enable_irq(hw->irq_num);
+ } else {
+ mod_timer(&hw->poll_timer, jiffies + msecs_to_jiffies(
+ LIMITS_POLLING_DELAY_MS));
+ }
+}
+
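+/*
+ * On the first mitigation interrupt, disable the IRQ and switch to
+ * polling until the hardware limit returns to maximum.
+ */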
+static void lmh_dcvs_notify(struct limits_dcvs_hw *hw)
+{
+ if (atomic_dec_and_test(&hw->is_irq_enabled)) {
+ disable_irq_nosync(hw->irq_num);
+ limits_mitigation_notify(hw);
+ mod_timer(&hw->poll_timer, jiffies + msecs_to_jiffies(
+ LIMITS_POLLING_DELAY_MS));
+ }
+}
+
+static irqreturn_t lmh_dcvs_handle_isr(int irq, void *data)
+{
+ struct limits_dcvs_hw *hw = data;
+
+ lmh_dcvs_notify(hw);
+
+ return IRQ_HANDLED;
+}
+
+static int limits_dcvs_write(uint32_t node_id, uint32_t fn,
+ uint32_t setting, uint32_t val)
+{
+ int ret;
+ struct scm_desc desc_arg;
+ uint32_t *payload = NULL;
+
+ payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+ if (!payload)
+ return -ENOMEM;
+
+ payload[0] = fn; /* algorithm */
+ payload[1] = 0; /* unused sub-algorithm */
+ payload[2] = setting;
+ payload[3] = 1; /* number of values */
+ payload[4] = val;
+
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = sizeof(uint32_t) * 5;
+ desc_arg.args[2] = LIMITS_NODE_DCVS;
+ desc_arg.args[3] = node_id;
+ desc_arg.args[4] = 0; /* version */
+ desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
+ SCM_VAL, SCM_VAL);
+
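+ /* Flush the payload so the secure world reads up-to-date values */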
+ dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_DCVSH), &desc_arg);
+
+ kfree(payload);
+
+ return ret;
+}
+
+static int lmh_get_temp(void *data, int *val)
+{
+ /*
+ * LMH DCVSh hardware doesn't support temperature read.
+ * return a default value for the thermal core to aggregate
+ * the thresholds
+ */
+ *val = LIMITS_TEMP_DEFAULT;
+
+ return 0;
+}
+
+static int lmh_set_trips(void *data, int low, int high)
+{
+ struct limits_dcvs_hw *hw = (struct limits_dcvs_hw *)data;
+ int ret = 0;
+
+ if (high >= LIMITS_TEMP_HIGH_THRESH_MAX || low < 0) {
+ pr_err("Value out of range low:%d high:%d\n",
+ low, high);
+ return -EINVAL;
+ }
+
+ /* Sanity check limits before writing to the hardware */
+ if (low >= high)
+ return -EINVAL;
+
+ hw->temp_limits[LIMITS_TRIP_HI] = (uint32_t)high;
+ hw->temp_limits[LIMITS_TRIP_ARM] = (uint32_t)low;
+
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+ LIMITS_ARM_THRESHOLD, low);
+ if (ret)
+ return ret;
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+ LIMITS_HI_THRESHOLD, high);
+ if (ret)
+ return ret;
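+ /*
+ * Program the mitigation release threshold just below the high
+ * trip, which gives the release path some hysteresis.
+ */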
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+ LIMITS_LOW_THRESHOLD,
+ high - LIMITS_LOW_THRESHOLD_OFFSET);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static struct thermal_zone_of_device_ops limits_sensor_ops = {
+ .get_temp = lmh_get_temp,
+ .set_trips = lmh_set_trips,
+};
+
+static struct limits_dcvs_hw *get_dcvsh_hw_from_cpu(int cpu)
+{
+ struct limits_dcvs_hw *hw;
+
+ list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
+ if (cpumask_test_cpu(cpu, &hw->core_map))
+ return hw;
+ }
+
+ return NULL;
+}
+
+static int enable_lmh(void)
+{
+ int ret = 0;
+ struct scm_desc desc_arg;
+
+ desc_arg.args[0] = 1;
+ desc_arg.arginfo = SCM_ARGS(1, SCM_VAL);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_PROFILE_CHANGE),
+ &desc_arg);
+ if (ret) {
+ pr_err("Error switching profile:[1]. err:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int lmh_set_max_limit(int cpu, u32 freq)
+{
+ struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+ int ret = 0, cpu_idx, idx = 0;
+ u32 max_freq = U32_MAX;
+
+ if (!hw)
+ return -EINVAL;
+
+ mutex_lock(&hw->access_lock);
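+ /*
+ * Aggregate the per-CPU ceiling requests; the cluster is capped at
+ * the lowest requested maximum frequency.
+ */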
+ for_each_cpu(cpu_idx, &hw->core_map) {
+ if (cpu_idx == cpu)
+ hw->cdev_data[idx].max_freq = freq;
+ if (max_freq > hw->cdev_data[idx].max_freq)
+ max_freq = hw->cdev_data[idx].max_freq;
+ idx++;
+ }
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_GENERAL,
+ LIMITS_DOMAIN_MAX, max_freq);
+ mutex_unlock(&hw->access_lock);
+
+ return ret;
+}
+
+static int lmh_set_min_limit(int cpu, u32 freq)
+{
+ struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+ int cpu_idx, idx = 0;
+ u32 min_freq = 0;
+
+ if (!hw)
+ return -EINVAL;
+
+ mutex_lock(&hw->access_lock);
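+ /*
+ * Aggregate the per-CPU floor requests and assert the min-freq
+ * request register only when the highest requested floor differs
+ * from the default minimum.
+ */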
+ for_each_cpu(cpu_idx, &hw->core_map) {
+ if (cpu_idx == cpu)
+ hw->cdev_data[idx].min_freq = freq;
+ if (min_freq < hw->cdev_data[idx].min_freq)
+ min_freq = hw->cdev_data[idx].min_freq;
+ idx++;
+ }
+ if (min_freq != hw->min_freq)
+ writel_relaxed(0x01, hw->min_freq_reg);
+ else
+ writel_relaxed(0x00, hw->min_freq_reg);
+ mutex_unlock(&hw->access_lock);
+
+ return 0;
+}
+
+static struct cpu_cooling_ops cd_ops = {
+ .ceil_limit = lmh_set_max_limit,
+ .floor_limit = lmh_set_min_limit,
+};
+
+static int limits_dcvs_probe(struct platform_device *pdev)
+{
+ int ret;
+ int affinity = -1;
+ struct limits_dcvs_hw *hw;
+ struct thermal_zone_device *tzdev;
+ struct device_node *dn = pdev->dev.of_node;
+ struct device_node *cpu_node, *lmh_node;
+ uint32_t request_reg, clear_reg, min_reg;
+ unsigned long max_freq, min_freq;
+ int cpu, idx;
+ cpumask_t mask = { CPU_BITS_NONE };
+
+ for_each_possible_cpu(cpu) {
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ continue;
+ lmh_node = of_parse_phandle(cpu_node, "qcom,lmh-dcvs", 0);
+ if (lmh_node == dn) {
+ /* set the cpumask */
+ cpumask_set_cpu(cpu, &mask);
+ }
+ of_node_put(cpu_node);
+ of_node_put(lmh_node);
+ }
+
+ /*
+ * Return an error if none of the CPUs has a
+ * reference to our LMH node
+ */
+ if (cpumask_empty(&mask))
+ return -EINVAL;
+
+ ret = limits_dcvs_get_freq_limits(cpumask_first(&mask), &max_freq,
+ &min_freq);
+ if (ret)
+ return ret;
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+ hw->cdev_data = devm_kcalloc(&pdev->dev, cpumask_weight(&mask),
+ sizeof(*hw->cdev_data),
+ GFP_KERNEL);
+ if (!hw->cdev_data)
+ return -ENOMEM;
+
+ cpumask_copy(&hw->core_map, &mask);
+ ret = of_property_read_u32(dn, "qcom,affinity", &affinity);
+ if (ret)
+ return -ENODEV;
+ switch (affinity) {
+ case 0:
+ hw->affinity = LIMITS_CLUSTER_0;
+ break;
+ case 1:
+ hw->affinity = LIMITS_CLUSTER_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Enable the thermal algorithm early */
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+ LIMITS_ALGO_MODE_ENABLE, 1);
+ if (ret)
+ return ret;
+ /* Enable the LMH outer loop algorithm */
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_CRNT,
+ LIMITS_ALGO_MODE_ENABLE, 1);
+ if (ret)
+ return ret;
+ /* Enable the Reliability algorithm */
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_REL,
+ LIMITS_ALGO_MODE_ENABLE, 1);
+ if (ret)
+ return ret;
+ /* Enable the BCL algorithm */
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_BCL,
+ LIMITS_ALGO_MODE_ENABLE, 1);
+ if (ret)
+ return ret;
+ ret = enable_lmh();
+ if (ret)
+ return ret;
+
+ /*
+ * Set up a virtual thermal zone for each LMH-DCVS hardware block.
+ * The sensor does not take actual temperature readings, but it
+ * does support setting thresholds for trips.
+ * Register with the thermal framework, so we have the ability
+ * to set low/high thresholds.
+ */
+ hw->temp_limits[LIMITS_TRIP_HI] = INT_MAX;
+ hw->temp_limits[LIMITS_TRIP_ARM] = 0;
+ hw->hw_freq_limit = hw->max_freq = max_freq;
+ hw->min_freq = min_freq;
+ snprintf(hw->sensor_name, sizeof(hw->sensor_name), "limits_sensor-%02d",
+ affinity);
+ tzdev = thermal_zone_of_sensor_register(&pdev->dev, 0, hw,
+ &limits_sensor_ops);
+ if (IS_ERR_OR_NULL(tzdev))
+ return PTR_ERR(tzdev);
+
+ /* Setup cooling devices to request mitigation states */
+ mutex_init(&hw->access_lock);
+ idx = 0;
+ for_each_cpu(cpu, &hw->core_map) {
+ cpumask_t cpu_mask = { CPU_BITS_NONE };
+
+ cpumask_set_cpu(cpu, &cpu_mask);
+ hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
+ &cpu_mask, &cd_ops);
+ if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev))
+ return PTR_ERR(hw->cdev_data[idx].cdev);
+ hw->cdev_data[idx].max_freq = U32_MAX;
+ hw->cdev_data[idx].min_freq = 0;
+ idx++;
+ }
+
+ switch (affinity) {
+ case 0:
+ request_reg = LIMITS_CLUSTER_0_REQ;
+ clear_reg = LIMITS_CLUSTER_0_INT_CLR;
+ min_reg = LIMITS_CLUSTER_0_MIN_FREQ;
+ break;
+ case 1:
+ request_reg = LIMITS_CLUSTER_1_REQ;
+ clear_reg = LIMITS_CLUSTER_1_INT_CLR;
+ min_reg = LIMITS_CLUSTER_1_MIN_FREQ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
+ if (!hw->osm_hw_reg) {
+ pr_err("register remap failed\n");
+ return -ENOMEM;
+ }
+ hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
+ if (!hw->int_clr_reg) {
+ pr_err("interrupt clear reg remap failed\n");
+ return -ENOMEM;
+ }
+ hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
+ if (!hw->min_freq_reg) {
+ pr_err("min frequency enable register remap failed\n");
+ return -ENOMEM;
+ }
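+ /* Deferrable timer used to poll the hardware frequency limit (see limits_dcvs_poll). */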
+ init_timer_deferrable(&hw->poll_timer);
+ hw->poll_timer.data = (unsigned long)hw;
+ hw->poll_timer.function = limits_dcvs_poll;
+
+ hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
+ if (hw->irq_num < 0) {
+ ret = hw->irq_num;
+ pr_err("Error getting IRQ number. err:%d\n", ret);
+ return ret;
+ }
+ atomic_set(&hw->is_irq_enabled, 1);
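+ /* Threaded handler only; IRQF_NO_SUSPEND keeps the mitigation interrupt armed in suspend. */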
+ ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
+ lmh_dcvs_handle_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT
+ | IRQF_NO_SUSPEND, hw->sensor_name, hw);
+ if (ret) {
+ pr_err("Error registering for irq. err:%d\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&hw->list);
+ list_add(&hw->list, &lmh_dcvs_hw_list);
+
+ return ret;
+}
+
+static const struct of_device_id limits_dcvs_match[] = {
+ { .compatible = "qcom,msm-hw-limits", },
+ {},
+};
+
+static struct platform_driver limits_dcvs_driver = {
+ .probe = limits_dcvs_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = limits_dcvs_match,
+ },
+};
+builtin_platform_driver(limits_dcvs_driver);
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index e86a297..09c95e5 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -28,6 +28,8 @@
#include <linux/thermal.h>
#include <linux/qpnp/qpnp-adc.h>
+#include "thermal_core.h"
+
#define QPNP_TM_DRIVER_NAME "qcom,qpnp-temp-alarm"
enum qpnp_tm_registers {
@@ -97,7 +99,6 @@
unsigned int subtype;
enum qpnp_tm_adc_type adc_type;
int temperature;
- enum thermal_device_mode mode;
unsigned int thresh;
unsigned int clock_rate;
unsigned int stage;
@@ -105,18 +106,12 @@
int irq;
enum qpnp_vadc_channels adc_channel;
u16 base_addr;
- bool allow_software_override;
struct qpnp_vadc_chip *vadc_dev;
};
/* Delay between TEMP_STAT IRQ going high and status value changing in ms. */
#define STATUS_REGISTER_DELAY_MS 40
-enum pmic_thermal_override_mode {
- SOFTWARE_OVERRIDE_DISABLED = 0,
- SOFTWARE_OVERRIDE_ENABLED,
-};
-
/* This array maps from GEN2 alarm state to GEN1 alarm stage */
const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
@@ -156,28 +151,6 @@
return rc;
}
-
-static inline int qpnp_tm_shutdown_override(struct qpnp_tm_chip *chip,
- enum pmic_thermal_override_mode mode)
-{
- int rc = 0;
- u8 reg;
-
- if (chip->allow_software_override) {
- reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
- reg |= (chip->clock_rate << SHUTDOWN_CTRL1_CLK_RATE_SHIFT)
- & SHUTDOWN_CTRL1_CLK_RATE_MASK;
-
- if (mode == SOFTWARE_OVERRIDE_ENABLED)
- reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2
- | SHUTDOWN_CTRL1_OVERRIDE_STAGE3;
-
- rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, ®, 1);
- }
-
- return rc;
-}
-
static int qpnp_tm_update_temp(struct qpnp_tm_chip *chip)
{
struct qpnp_vadc_result adc_result;
@@ -274,10 +247,9 @@
return 0;
}
-static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
- int *temperature)
+static int qpnp_tz_get_temp_no_adc(void *data, int *temperature)
{
- struct qpnp_tm_chip *chip = thermal->devdata;
+ struct qpnp_tm_chip *chip = (struct qpnp_tm_chip *)data;
int rc;
if (!temperature)
@@ -292,10 +264,9 @@
return 0;
}
-static int qpnp_tz_get_temp_qpnp_adc(struct thermal_zone_device *thermal,
- int *temperature)
+static int qpnp_tz_get_temp_qpnp_adc(void *data, int *temperature)
{
- struct qpnp_tm_chip *chip = thermal->devdata;
+ struct qpnp_tm_chip *chip = (struct qpnp_tm_chip *)data;
int rc;
if (!temperature)
@@ -314,121 +285,12 @@
return 0;
}
-static int qpnp_tz_get_mode(struct thermal_zone_device *thermal,
- enum thermal_device_mode *mode)
-{
- struct qpnp_tm_chip *chip = thermal->devdata;
-
- if (!mode)
- return -EINVAL;
-
- *mode = chip->mode;
-
- return 0;
-}
-
-static int qpnp_tz_set_mode(struct thermal_zone_device *thermal,
- enum thermal_device_mode mode)
-{
- struct qpnp_tm_chip *chip = thermal->devdata;
- int rc = 0;
-
- if (mode != chip->mode) {
- if (mode == THERMAL_DEVICE_ENABLED)
- rc = qpnp_tm_shutdown_override(chip,
- SOFTWARE_OVERRIDE_ENABLED);
- else
- rc = qpnp_tm_shutdown_override(chip,
- SOFTWARE_OVERRIDE_DISABLED);
-
- chip->mode = mode;
- }
-
- return rc;
-}
-
-static int qpnp_tz_get_trip_type(struct thermal_zone_device *thermal,
- int trip, enum thermal_trip_type *type)
-{
- if (trip < 0 || !type)
- return -EINVAL;
-
- switch (trip) {
- case TRIP_STAGE3:
- *type = THERMAL_TRIP_CRITICAL;
- break;
- case TRIP_STAGE2:
- *type = THERMAL_TRIP_HOT;
- break;
- case TRIP_STAGE1:
- *type = THERMAL_TRIP_HOT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, int *temperature)
-{
- struct qpnp_tm_chip *chip = thermal->devdata;
- int thresh_temperature;
-
- if (trip < 0 || !temperature)
- return -EINVAL;
-
- thresh_temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN;
-
- switch (trip) {
- case TRIP_STAGE3:
- thresh_temperature += 2 * TEMP_STAGE_STEP;
- break;
- case TRIP_STAGE2:
- thresh_temperature += TEMP_STAGE_STEP;
- break;
- case TRIP_STAGE1:
- break;
- default:
- return -EINVAL;
- }
-
- *temperature = thresh_temperature;
-
- return 0;
-}
-
-static int qpnp_tz_get_crit_temp(struct thermal_zone_device *thermal,
- int *temperature)
-{
- struct qpnp_tm_chip *chip = thermal->devdata;
-
- if (!temperature)
- return -EINVAL;
-
- *temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
- 2 * TEMP_STAGE_STEP;
-
- return 0;
-}
-
-static struct thermal_zone_device_ops qpnp_thermal_zone_ops_no_adc = {
+static struct thermal_zone_of_device_ops qpnp_thermal_zone_ops_no_adc = {
.get_temp = qpnp_tz_get_temp_no_adc,
- .get_mode = qpnp_tz_get_mode,
- .set_mode = qpnp_tz_set_mode,
- .get_trip_type = qpnp_tz_get_trip_type,
- .get_trip_temp = qpnp_tz_get_trip_temp,
- .get_crit_temp = qpnp_tz_get_crit_temp,
};
-static struct thermal_zone_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
+static struct thermal_zone_of_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
.get_temp = qpnp_tz_get_temp_qpnp_adc,
- .get_mode = qpnp_tz_get_mode,
- .set_mode = qpnp_tz_set_mode,
- .get_trip_type = qpnp_tz_get_trip_type,
- .get_trip_temp = qpnp_tz_get_trip_temp,
- .get_crit_temp = qpnp_tz_get_crit_temp,
};
static void qpnp_tm_work(struct work_struct *work)
@@ -474,11 +336,7 @@
chip->tm_name, stage_new, chip->stage,
chip->thresh, chip->temperature);
- thermal_zone_device_update(chip->tz_dev,
- THERMAL_EVENT_UNSPECIFIED);
-
- /* Notify user space */
- sysfs_notify(&chip->tz_dev->device.kobj, NULL, "type");
+ of_thermal_handle_trip(chip->tz_dev);
}
bail:
@@ -539,7 +397,7 @@
struct device_node *node;
unsigned int base;
struct qpnp_tm_chip *chip;
- struct thermal_zone_device_ops *tz_ops;
+ struct thermal_zone_of_device_ops *tz_ops;
char *tm_name;
u32 default_temperature;
int rc = 0;
@@ -640,9 +498,6 @@
else
tz_ops = &qpnp_thermal_zone_ops_no_adc;
- chip->allow_software_override
- = of_property_read_bool(node, "qcom,allow-override");
-
default_temperature = DEFAULT_NO_ADC_TEMP;
rc = of_property_read_u32(node, "qcom,default-temp",
&default_temperature);
@@ -686,18 +541,8 @@
}
}
- /* Start in HW control; switch to SW control when user changes mode. */
- chip->mode = THERMAL_DEVICE_DISABLED;
- rc = qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
- if (rc) {
- dev_err(&pdev->dev,
- "%s: qpnp_tm_shutdown_override() failed, rc=%d\n",
- __func__, rc);
- goto err_cancel_work;
- }
-
- chip->tz_dev = thermal_zone_device_register(tm_name, TRIP_NUM, 0, chip,
- tz_ops, NULL, 0, 0);
+ chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+ tz_ops);
if (chip->tz_dev == NULL) {
dev_err(&pdev->dev,
"%s: thermal_zone_device_register() failed.\n",
@@ -717,7 +562,7 @@
return 0;
err_free_tz:
- thermal_zone_device_unregister(chip->tz_dev);
+ thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
err_cancel_work:
cancel_delayed_work_sync(&chip->irq_work);
kfree(chip->tm_name);
@@ -731,10 +576,9 @@
{
struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
+ thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
dev_set_drvdata(&pdev->dev, NULL);
- thermal_zone_device_unregister(chip->tz_dev);
kfree(chip->tm_name);
- qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
free_irq(chip->irq, chip);
cancel_delayed_work_sync(&chip->irq_work);
kfree(chip);
@@ -742,38 +586,6 @@
return 0;
}
-#ifdef CONFIG_PM
-static int qpnp_tm_suspend(struct device *dev)
-{
- struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
-
- /* Clear override bits in suspend to allow hardware control */
- qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
-
- return 0;
-}
-
-static int qpnp_tm_resume(struct device *dev)
-{
- struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
-
- /* Override hardware actions so software can control */
- if (chip->mode == THERMAL_DEVICE_ENABLED)
- qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_ENABLED);
-
- return 0;
-}
-
-static const struct dev_pm_ops qpnp_tm_pm_ops = {
- .suspend = qpnp_tm_suspend,
- .resume = qpnp_tm_resume,
-};
-
-#define QPNP_TM_PM_OPS (&qpnp_tm_pm_ops)
-#else
-#define QPNP_TM_PM_OPS NULL
-#endif
-
static const struct of_device_id qpnp_tm_match_table[] = {
{ .compatible = QPNP_TM_DRIVER_NAME, },
{}
@@ -789,7 +601,6 @@
.name = QPNP_TM_DRIVER_NAME,
.of_match_table = qpnp_tm_match_table,
.owner = THIS_MODULE,
- .pm = QPNP_TM_PM_OPS,
},
.probe = qpnp_tm_probe,
.remove = qpnp_tm_remove,
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 4fa7f82..ecfc4ef 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -169,12 +169,10 @@
*/
if (tz->temperature >= trip_temp ||
(tz->temperature >= hyst_temp &&
- old_target != THERMAL_NO_TARGET)) {
+ old_target != THERMAL_NO_TARGET))
throttle = true;
- trace_thermal_zone_trip(tz, trip, trip_type);
- } else {
+ else
throttle = false;
- }
instance->target = get_target_state(instance, trend, throttle);
dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
@@ -185,12 +183,15 @@
/* Activate a passive thermal instance */
if (old_target == THERMAL_NO_TARGET &&
- instance->target != THERMAL_NO_TARGET)
+ instance->target != THERMAL_NO_TARGET) {
update_passive_instance(tz, trip_type, 1);
+ trace_thermal_zone_trip(tz, trip, trip_type, true);
/* Deactivate a passive thermal instance */
- else if (old_target != THERMAL_NO_TARGET &&
- instance->target == THERMAL_NO_TARGET)
+ } else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET) {
update_passive_instance(tz, trip_type, -1);
+ trace_thermal_zone_trip(tz, trip, trip_type, false);
+ }
instance->initialized = true;
mutex_lock(&instance->cdev->lock);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 7b45b9a..b137c4e 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -444,7 +444,7 @@
if (trip_temp <= 0 || tz->temperature < trip_temp)
return;
- trace_thermal_zone_trip(tz, trip, trip_type);
+ trace_thermal_zone_trip(tz, trip, trip_type, true);
if (tz->ops->notify)
tz->ops->notify(tz, trip, trip_type);
@@ -553,10 +553,6 @@
high = trip_temp;
}
- /* No need to change trip points */
- if (tz->prev_low_trip == low && tz->prev_high_trip == high)
- goto exit;
-
tz->prev_low_trip = low;
tz->prev_high_trip = high;
@@ -815,8 +811,7 @@
*/
ret = tz->ops->set_trip_hyst(tz, trip, temperature);
- if (!ret)
- thermal_zone_set_trips(tz);
+ thermal_zone_set_trips(tz);
return ret ? ret : count;
}
@@ -1290,6 +1285,30 @@
thermal_cooling_device_min_state_store);
static ssize_t
+thermal_cooling_device_lower_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_instance *instance;
+
+ instance =
+ container_of(attr, struct thermal_instance, lower_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", instance->lower);
+}
+
+static ssize_t
+thermal_cooling_device_upper_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_instance *instance;
+
+ instance =
+ container_of(attr, struct thermal_instance, upper_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", instance->upper);
+}
+
+static ssize_t
thermal_cooling_device_trip_point_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1443,6 +1462,26 @@
if (result)
goto remove_symbol_link;
+ snprintf(dev->upper_attr_name, THERMAL_NAME_LENGTH,
+ "cdev%d_upper_limit", dev->id);
+ sysfs_attr_init(&dev->upper_attr.attr);
+ dev->upper_attr.attr.name = dev->upper_attr_name;
+ dev->upper_attr.attr.mode = 0444;
+ dev->upper_attr.show = thermal_cooling_device_upper_limit_show;
+ result = device_create_file(&tz->device, &dev->upper_attr);
+ if (result)
+ goto remove_trip_file;
+
+ snprintf(dev->lower_attr_name, THERMAL_NAME_LENGTH,
+ "cdev%d_lower_limit", dev->id);
+ sysfs_attr_init(&dev->lower_attr.attr);
+ dev->lower_attr.attr.name = dev->lower_attr_name;
+ dev->lower_attr.attr.mode = 0444;
+ dev->lower_attr.show = thermal_cooling_device_lower_limit_show;
+ result = device_create_file(&tz->device, &dev->lower_attr);
+ if (result)
+ goto remove_upper_file;
+
sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
sysfs_attr_init(&dev->weight_attr.attr);
dev->weight_attr.attr.name = dev->weight_attr_name;
@@ -1451,7 +1490,7 @@
dev->weight_attr.store = thermal_cooling_device_weight_store;
result = device_create_file(&tz->device, &dev->weight_attr);
if (result)
- goto remove_trip_file;
+ goto remove_lower_file;
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
@@ -1472,6 +1511,10 @@
return 0;
device_remove_file(&tz->device, &dev->weight_attr);
+remove_lower_file:
+ device_remove_file(&tz->device, &dev->lower_attr);
+remove_upper_file:
+ device_remove_file(&tz->device, &dev->upper_attr);
remove_trip_file:
device_remove_file(&tz->device, &dev->attr);
remove_symbol_link:
@@ -1521,6 +1564,8 @@
return -ENODEV;
unbind:
+ device_remove_file(&tz->device, &pos->lower_attr);
+ device_remove_file(&tz->device, &pos->upper_attr);
device_remove_file(&tz->device, &pos->weight_attr);
device_remove_file(&tz->device, &pos->attr);
sysfs_remove_link(&tz->device.kobj, pos->name);
@@ -1768,7 +1813,7 @@
cdev->ops->set_min_state(cdev, min_target);
cdev->updated = true;
mutex_unlock(&cdev->lock);
- trace_cdev_update(cdev, current_target);
+ trace_cdev_update(cdev, current_target, min_target);
dev_dbg(&cdev->device, "set to state %lu min state %lu\n",
current_target, min_target);
}
@@ -2443,7 +2488,7 @@
return result;
}
-static void __exit thermal_exit(void)
+static void thermal_exit(void)
{
unregister_pm_notifier(&thermal_pm_nb);
of_thermal_destroy_zones();
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index eca8c3c..567f630 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -49,6 +49,10 @@
struct device_attribute attr;
char weight_attr_name[THERMAL_NAME_LENGTH];
struct device_attribute weight_attr;
+ char upper_attr_name[THERMAL_NAME_LENGTH];
+ struct device_attribute upper_attr;
+ char lower_attr_name[THERMAL_NAME_LENGTH];
+ struct device_attribute lower_attr;
struct list_head tz_node; /* node in tz->thermal_instances */
struct list_head cdev_node; /* node in cdev->thermal_instances */
unsigned int weight; /* The weight of the cooling device */
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
index d965a5c..7cd8c86 100644
--- a/drivers/thermal/tsens-dbg.c
+++ b/drivers/thermal/tsens-dbg.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
- * it under the term_tm of the GNU General Public License version 2 and
+ * it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 45d244e..3b9b01a 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -1,14 +1,14 @@
-/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
*/
#ifndef __QCOM_TSENS_H__
#define __QCOM_TSENS_H__
@@ -23,9 +23,9 @@
#define DEBUG_SIZE 10
#define TSENS_MAX_SENSORS 16
-#define TSENS_CONTROLLER_ID(n) ((n) + 0x1000)
+#define TSENS_CONTROLLER_ID(n) (n)
#define TSENS_CTRL_ADDR(n) (n)
-#define TSENS_TM_SN_STATUS(n) ((n) + 0x10a0)
+#define TSENS_TM_SN_STATUS(n) ((n) + 0xa0)
enum tsens_dbg_type {
TSENS_DBG_POLL,
@@ -87,7 +87,7 @@
struct tsens_ops {
int (*hw_init)(struct tsens_device *);
int (*get_temp)(struct tsens_sensor *, int *);
- int (*set_trip_temp)(struct tsens_sensor *, int, int);
+ int (*set_trips)(struct tsens_sensor *, int, int);
int (*interrupts_reg)(struct tsens_device *);
int (*dbg)(struct tsens_device *, u32, u32, int *);
};
@@ -121,7 +121,8 @@
u32 num_sensors;
struct regmap *map;
struct regmap_field *status_field;
- void *tsens_addr;
+ void __iomem *tsens_srot_addr;
+ void __iomem *tsens_tm_addr;
const struct tsens_ops *ops;
struct tsens_dbg_context tsens_dbg;
spinlock_t tsens_crit_lock;
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 0f59dc5..13b183d 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
- * it under the term_tm of the GNU General Public License version 2 and
+ * it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
@@ -18,19 +18,20 @@
#include <linux/of.h>
#include <linux/vmalloc.h>
#include "tsens.h"
+#include "thermal_core.h"
#define TSENS_DRIVER_NAME "msm-tsens"
-#define TSENS_TM_INT_EN(n) ((n) + 0x1004)
-#define TSENS_TM_CRITICAL_INT_STATUS(n) ((n) + 0x1014)
-#define TSENS_TM_CRITICAL_INT_CLEAR(n) ((n) + 0x1018)
-#define TSENS_TM_CRITICAL_INT_MASK(n) ((n) + 0x101c)
+#define TSENS_TM_INT_EN(n) ((n) + 0x4)
+#define TSENS_TM_CRITICAL_INT_STATUS(n) ((n) + 0x14)
+#define TSENS_TM_CRITICAL_INT_CLEAR(n) ((n) + 0x18)
+#define TSENS_TM_CRITICAL_INT_MASK(n) ((n) + 0x1c)
#define TSENS_TM_CRITICAL_WD_BARK BIT(31)
#define TSENS_TM_CRITICAL_CYCLE_MONITOR BIT(30)
#define TSENS_TM_CRITICAL_INT_EN BIT(2)
#define TSENS_TM_UPPER_INT_EN BIT(1)
#define TSENS_TM_LOWER_INT_EN BIT(0)
-#define TSENS_TM_SN_UPPER_LOWER_THRESHOLD(n) ((n) + 0x1020)
+#define TSENS_TM_SN_UPPER_LOWER_THRESHOLD(n) ((n) + 0x20)
#define TSENS_TM_SN_ADDR_OFFSET 0x4
#define TSENS_TM_UPPER_THRESHOLD_SET(n) ((n) << 12)
#define TSENS_TM_UPPER_THRESHOLD_VALUE_SHIFT(n) ((n) >> 12)
@@ -39,13 +40,13 @@
#define TSENS_TM_UPPER_THRESHOLD_MASK 0xfff000
#define TSENS_TM_LOWER_THRESHOLD_MASK 0xfff
#define TSENS_TM_UPPER_THRESHOLD_SHIFT 12
-#define TSENS_TM_SN_CRITICAL_THRESHOLD(n) ((n) + 0x1060)
+#define TSENS_TM_SN_CRITICAL_THRESHOLD(n) ((n) + 0x60)
#define TSENS_STATUS_ADDR_OFFSET 2
#define TSENS_TM_UPPER_INT_MASK(n) (((n) & 0xffff0000) >> 16)
#define TSENS_TM_LOWER_INT_MASK(n) ((n) & 0xffff)
-#define TSENS_TM_UPPER_LOWER_INT_STATUS(n) ((n) + 0x1008)
-#define TSENS_TM_UPPER_LOWER_INT_CLEAR(n) ((n) + 0x100c)
-#define TSENS_TM_UPPER_LOWER_INT_MASK(n) ((n) + 0x1010)
+#define TSENS_TM_UPPER_LOWER_INT_STATUS(n) ((n) + 0x8)
+#define TSENS_TM_UPPER_LOWER_INT_CLEAR(n) ((n) + 0xc)
+#define TSENS_TM_UPPER_LOWER_INT_MASK(n) ((n) + 0x10)
#define TSENS_TM_UPPER_INT_SET(n) (1 << (n + 16))
#define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK 0xfff
#define TSENS_TM_SN_STATUS_VALID_BIT BIT(21)
@@ -55,6 +56,7 @@
#define TSENS_TM_SN_LAST_TEMP_MASK 0xfff
#define TSENS_TM_CODE_BIT_MASK 0xfff
#define TSENS_TM_CODE_SIGN_BIT 0x800
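+/* TSENS reports temperature in 0.1 degC units; scale by 100 for millidegrees C. */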
+#define TSENS_TM_SCALE_DECI_MILLIDEG 100
#define TSENS_EN BIT(0)
@@ -67,7 +69,7 @@
last_temp |= code_mask;
}
- *temp = last_temp * 100;
+ *temp = last_temp * TSENS_TM_SCALE_DECI_MILLIDEG;
}
static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
@@ -81,7 +83,7 @@
return -EINVAL;
tmdev = sensor->tmdev;
- sensor_addr = TSENS_TM_SN_STATUS(tmdev->tsens_addr);
+ sensor_addr = TSENS_TM_SN_STATUS(tmdev->tsens_tm_addr);
code = readl_relaxed_no_log(sensor_addr +
(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
@@ -130,7 +132,6 @@
{
struct tsens_device *tmdev = NULL;
unsigned int reg_cntl, mask;
- unsigned long flags;
int rc = 0;
/* clear the interrupt and unmask */
@@ -141,56 +142,57 @@
if (!tmdev)
return -EINVAL;
- spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+
mask = (tm_sensor->hw_id);
switch (trip) {
case THERMAL_TRIP_CRITICAL:
tmdev->sensor[tm_sensor->hw_id].
thr_state.crit_th_state = mode;
reg_cntl = readl_relaxed(TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_addr));
+ (tmdev->tsens_tm_addr));
if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
writel_relaxed(reg_cntl | (1 << mask),
(TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_addr)));
+ (tmdev->tsens_tm_addr)));
else
writel_relaxed(reg_cntl & ~(1 << mask),
(TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_addr)));
+ (tmdev->tsens_tm_addr)));
break;
- case THERMAL_TRIP_ACTIVE:
+ case THERMAL_TRIP_CONFIGURABLE_HI:
tmdev->sensor[tm_sensor->hw_id].
thr_state.high_th_state = mode;
reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
- (tmdev->tsens_addr));
+ (tmdev->tsens_tm_addr));
if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
writel_relaxed(reg_cntl |
(TSENS_TM_UPPER_INT_SET(mask)),
(TSENS_TM_UPPER_LOWER_INT_MASK
- (tmdev->tsens_addr)));
+ (tmdev->tsens_tm_addr)));
else
writel_relaxed(reg_cntl &
~(TSENS_TM_UPPER_INT_SET(mask)),
(TSENS_TM_UPPER_LOWER_INT_MASK
- (tmdev->tsens_addr)));
+ (tmdev->tsens_tm_addr)));
break;
- case THERMAL_TRIP_PASSIVE:
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
tmdev->sensor[tm_sensor->hw_id].
thr_state.low_th_state = mode;
reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
- (tmdev->tsens_addr));
+ (tmdev->tsens_tm_addr));
if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
writel_relaxed(reg_cntl | (1 << mask),
- (TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+ (TSENS_TM_UPPER_LOWER_INT_MASK
+ (tmdev->tsens_tm_addr)));
else
writel_relaxed(reg_cntl & ~(1 << mask),
- (TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+ (TSENS_TM_UPPER_LOWER_INT_MASK
+ (tmdev->tsens_tm_addr)));
break;
default:
rc = -EINVAL;
}
- spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
/* Activate and enable the respective trip threshold setting */
mb();
@@ -198,14 +200,14 @@
}
static int tsens2xxx_set_trip_temp(struct tsens_sensor *tm_sensor,
- int trip, int temp)
+ int low_temp, int high_temp)
{
unsigned int reg_cntl;
unsigned long flags;
struct tsens_device *tmdev = NULL;
int rc = 0;
- if (!tm_sensor || trip < 0)
+ if (!tm_sensor)
return -EINVAL;
tmdev = tm_sensor->tmdev;
@@ -213,56 +215,81 @@
return -EINVAL;
spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
- switch (trip) {
- case THERMAL_TRIP_CRITICAL:
+
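+ /* A high of INT_MAX or a low of INT_MIN means that trip is not requested. */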
+ if (high_temp != INT_MAX) {
tmdev->sensor[tm_sensor->hw_id].
- thr_state.crit_temp = temp;
- temp &= TSENS_TM_SN_CRITICAL_THRESHOLD_MASK;
- writel_relaxed(temp,
- (TSENS_TM_SN_CRITICAL_THRESHOLD(tmdev->tsens_addr) +
- (tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
- break;
- case THERMAL_TRIP_ACTIVE:
- tmdev->sensor[tm_sensor->hw_id].
- thr_state.high_temp = temp;
+ thr_state.high_temp = high_temp;
reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
- (tmdev->tsens_addr)) +
+ (tmdev->tsens_tm_addr)) +
(tm_sensor->hw_id *
TSENS_TM_SN_ADDR_OFFSET));
- temp = TSENS_TM_UPPER_THRESHOLD_SET(temp);
- temp &= TSENS_TM_UPPER_THRESHOLD_MASK;
+ high_temp /= TSENS_TM_SCALE_DECI_MILLIDEG;
+ high_temp = TSENS_TM_UPPER_THRESHOLD_SET(high_temp);
+ high_temp &= TSENS_TM_UPPER_THRESHOLD_MASK;
reg_cntl &= ~TSENS_TM_UPPER_THRESHOLD_MASK;
- writel_relaxed(reg_cntl | temp,
- (TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+ writel_relaxed(reg_cntl | high_temp,
+ (TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+ (tmdev->tsens_tm_addr) +
(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
- break;
- case THERMAL_TRIP_PASSIVE:
- tmdev->sensor[tm_sensor->hw_id].
- thr_state.low_temp = temp;
- reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
- (tmdev->tsens_addr)) +
- (tm_sensor->hw_id *
- TSENS_TM_SN_ADDR_OFFSET));
- temp &= TSENS_TM_LOWER_THRESHOLD_MASK;
- reg_cntl &= ~TSENS_TM_LOWER_THRESHOLD_MASK;
- writel_relaxed(reg_cntl | temp,
- (TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
- (tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
- break;
- default:
- pr_err("Invalid trip to TSENS: %d\n", trip);
- rc = -EINVAL;
}
- spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+ if (low_temp != INT_MIN) {
+ tmdev->sensor[tm_sensor->hw_id].
+ thr_state.low_temp = low_temp;
+ reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+ (tmdev->tsens_tm_addr)) +
+ (tm_sensor->hw_id *
+ TSENS_TM_SN_ADDR_OFFSET));
+ low_temp /= TSENS_TM_SCALE_DECI_MILLIDEG;
+ low_temp &= TSENS_TM_LOWER_THRESHOLD_MASK;
+ reg_cntl &= ~TSENS_TM_LOWER_THRESHOLD_MASK;
+ writel_relaxed(reg_cntl | low_temp,
+ (TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+ (tmdev->tsens_tm_addr) +
+ (tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+ }
+
/* Set trip temperature thresholds */
mb();
- rc = tsens_tm_activate_trip_type(tm_sensor, trip,
+ if (high_temp != INT_MAX) {
+ rc = tsens_tm_activate_trip_type(tm_sensor,
+ THERMAL_TRIP_CONFIGURABLE_HI,
THERMAL_TRIP_ACTIVATION_ENABLED);
- if (rc)
- pr_err("Error during trip activation :%d\n", rc);
+ if (rc) {
+ pr_err("trip high enable error :%d\n", rc);
+ goto fail;
+ }
+ } else {
+ rc = tsens_tm_activate_trip_type(tm_sensor,
+ THERMAL_TRIP_CONFIGURABLE_HI,
+ THERMAL_TRIP_ACTIVATION_DISABLED);
+ if (rc) {
+ pr_err("trip high disable error :%d\n", rc);
+ goto fail;
+ }
+ }
+ if (low_temp != INT_MIN) {
+ rc = tsens_tm_activate_trip_type(tm_sensor,
+ THERMAL_TRIP_CONFIGURABLE_LOW,
+ THERMAL_TRIP_ACTIVATION_ENABLED);
+ if (rc) {
+ pr_err("trip low enable activation error :%d\n", rc);
+ goto fail;
+ }
+ } else {
+ rc = tsens_tm_activate_trip_type(tm_sensor,
+ THERMAL_TRIP_CONFIGURABLE_LOW,
+ THERMAL_TRIP_ACTIVATION_DISABLED);
+ if (rc) {
+ pr_err("trip low disable error :%d\n", rc);
+ goto fail;
+ }
+ }
+
+fail:
+ spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
return rc;
}
@@ -277,13 +304,13 @@
void __iomem *wd_critical_addr;
int wd_mask;
- sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+ sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_tm_addr);
sensor_int_mask_addr =
- TSENS_TM_CRITICAL_INT_MASK(tm->tsens_addr);
+ TSENS_TM_CRITICAL_INT_MASK(tm->tsens_tm_addr);
sensor_critical_addr =
- TSENS_TM_SN_CRITICAL_THRESHOLD(tm->tsens_addr);
+ TSENS_TM_SN_CRITICAL_THRESHOLD(tm->tsens_tm_addr);
wd_critical_addr =
- TSENS_TM_CRITICAL_INT_STATUS(tm->tsens_addr);
+ TSENS_TM_CRITICAL_INT_STATUS(tm->tsens_tm_addr);
if (tm->ctrl_data->wd_bark) {
wd_mask = readl_relaxed(wd_critical_addr);
@@ -294,19 +321,22 @@
*/
writel_relaxed(wd_mask | TSENS_TM_CRITICAL_WD_BARK,
(TSENS_TM_CRITICAL_INT_CLEAR
- (tm->tsens_addr)));
+ (tm->tsens_tm_addr)));
writel_relaxed(wd_mask & ~(TSENS_TM_CRITICAL_WD_BARK),
(TSENS_TM_CRITICAL_INT_CLEAR
- (tm->tsens_addr)));
+ (tm->tsens_tm_addr)));
tm->tsens_dbg.tsens_critical_wd_cnt++;
return IRQ_HANDLED;
}
}
- for (i = 0; i < tm->num_sensors; i++) {
+ for (i = 0; i < TSENS_MAX_SENSORS; i++) {
int int_mask, int_mask_val;
u32 addr_offset;
+ if (IS_ERR(tm->sensor[i].tzd))
+ continue;
+
spin_lock_irqsave(&tm->tsens_crit_lock, flags);
addr_offset = tm->sensor[i].hw_id *
TSENS_TM_SN_ADDR_OFFSET;
@@ -320,13 +350,14 @@
/* Mask the corresponding interrupt for the sensors */
writel_relaxed(int_mask | int_mask_val,
TSENS_TM_CRITICAL_INT_MASK(
- tm->tsens_addr));
+ tm->tsens_tm_addr));
/* Clear the corresponding sensors interrupt */
writel_relaxed(int_mask_val,
- TSENS_TM_CRITICAL_INT_CLEAR(tm->tsens_addr));
+ TSENS_TM_CRITICAL_INT_CLEAR
+ (tm->tsens_tm_addr));
writel_relaxed(0,
TSENS_TM_CRITICAL_INT_CLEAR(
- tm->tsens_addr));
+ tm->tsens_tm_addr));
tm->sensor[i].thr_state.
crit_th_state = THERMAL_DEVICE_DISABLED;
}
@@ -342,22 +373,31 @@
static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
{
struct tsens_device *tm = data;
- unsigned int i, status, threshold;
+ unsigned int i, status, threshold;
+ int temp;
unsigned long flags;
void __iomem *sensor_status_addr;
void __iomem *sensor_int_mask_addr;
void __iomem *sensor_upper_lower_addr;
u32 addr_offset = 0;
- sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+ sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_tm_addr);
sensor_int_mask_addr =
- TSENS_TM_UPPER_LOWER_INT_MASK(tm->tsens_addr);
+ TSENS_TM_UPPER_LOWER_INT_MASK(tm->tsens_tm_addr);
sensor_upper_lower_addr =
- TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tm->tsens_addr);
+ TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tm->tsens_tm_addr);
- for (i = 0; i < tm->num_sensors; i++) {
+ for (i = 0; i < TSENS_MAX_SENSORS; i++) {
bool upper_thr = false, lower_thr = false;
- int int_mask, int_mask_val = 0;
+ int int_mask, int_mask_val = 0, rc;
+
+ if (IS_ERR(tm->sensor[i].tzd))
+ continue;
+
+ rc = tsens2xxx_get_temp(&tm->sensor[i], &temp);
+ if (rc) {
+ pr_debug("Error:%d reading temp sensor:%d\n", rc, i);
+ continue;
+ }
spin_lock_irqsave(&tm->tsens_upp_low_lock, flags);
addr_offset = tm->sensor[i].hw_id *
@@ -376,17 +416,28 @@
/* Mask the corresponding interrupt for the sensors */
writel_relaxed(int_mask | int_mask_val,
TSENS_TM_UPPER_LOWER_INT_MASK(
- tm->tsens_addr));
+ tm->tsens_tm_addr));
/* Clear the corresponding sensors interrupt */
writel_relaxed(int_mask_val,
TSENS_TM_UPPER_LOWER_INT_CLEAR(
- tm->tsens_addr));
+ tm->tsens_tm_addr));
writel_relaxed(0,
TSENS_TM_UPPER_LOWER_INT_CLEAR(
- tm->tsens_addr));
- upper_thr = true;
- tm->sensor[i].thr_state.
+ tm->tsens_tm_addr));
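+ /* Current temp is still below the upper threshold; re-arm instead of notifying. */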
+ if (TSENS_TM_UPPER_THRESHOLD_VALUE(threshold) >
+ (temp/TSENS_TM_SCALE_DECI_MILLIDEG)) {
+ pr_debug("Re-arm high threshold\n");
+ rc = tsens_tm_activate_trip_type(
+ &tm->sensor[i],
+ THERMAL_TRIP_CONFIGURABLE_HI,
+ THERMAL_TRIP_ACTIVATION_ENABLED);
+ if (rc)
+ pr_err("high rearm failed:%d\n", rc);
+ } else {
+ upper_thr = true;
+ tm->sensor[i].thr_state.
high_th_state = THERMAL_DEVICE_DISABLED;
+ }
}
if ((status & TSENS_TM_SN_STATUS_LOWER_STATUS) &&
@@ -397,32 +448,36 @@
/* Mask the corresponding interrupt for the sensors */
writel_relaxed(int_mask | int_mask_val,
TSENS_TM_UPPER_LOWER_INT_MASK(
- tm->tsens_addr));
+ tm->tsens_tm_addr));
/* Clear the corresponding sensors interrupt */
writel_relaxed(int_mask_val,
TSENS_TM_UPPER_LOWER_INT_CLEAR(
- tm->tsens_addr));
+ tm->tsens_tm_addr));
writel_relaxed(0,
TSENS_TM_UPPER_LOWER_INT_CLEAR(
- tm->tsens_addr));
- lower_thr = true;
- tm->sensor[i].thr_state.
+ tm->tsens_tm_addr));
+ if (TSENS_TM_LOWER_THRESHOLD_VALUE(threshold)
+ < (temp/TSENS_TM_SCALE_DECI_MILLIDEG)) {
+ pr_debug("Re-arm low threshold\n");
+ rc = tsens_tm_activate_trip_type(
+ &tm->sensor[i],
+ THERMAL_TRIP_CONFIGURABLE_LOW,
+ THERMAL_TRIP_ACTIVATION_ENABLED);
+ if (rc)
+ pr_err("low rearm failed:%d\n", rc);
+ } else {
+ lower_thr = true;
+ tm->sensor[i].thr_state.
low_th_state = THERMAL_DEVICE_DISABLED;
+ }
}
spin_unlock_irqrestore(&tm->tsens_upp_low_lock, flags);
if (upper_thr || lower_thr) {
- int temp;
- enum thermal_trip_type trip =
- THERMAL_TRIP_CONFIGURABLE_LOW;
-
- if (upper_thr)
- trip = THERMAL_TRIP_CONFIGURABLE_HI;
- tsens2xxx_get_temp(&tm->sensor[i], &temp);
/* Use id for multiple controllers */
pr_debug("sensor:%d trigger temp (%d degC)\n",
- tm->sensor[i].hw_id,
- (status & TSENS_TM_SN_LAST_TEMP_MASK));
+ tm->sensor[i].hw_id, temp);
+ of_thermal_handle_trip(tm->sensor[i].tzd);
}
}
@@ -442,7 +497,7 @@
unsigned int srot_val;
int crit_mask;
- srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_addr + 0x4);
+ srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
srot_val = readl_relaxed(srot_addr);
if (!(srot_val & TSENS_EN)) {
pr_err("TSENS device is not enabled\n");
@@ -451,18 +506,18 @@
if (tmdev->ctrl_data->cycle_monitor) {
sensor_int_mask_addr =
- TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_addr);
+ TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
crit_mask = readl_relaxed(sensor_int_mask_addr);
writel_relaxed(
crit_mask | tmdev->ctrl_data->cycle_compltn_monitor_val,
(TSENS_TM_CRITICAL_INT_MASK
- (tmdev->tsens_addr)));
+ (tmdev->tsens_tm_addr)));
/*Update critical cycle monitoring*/
mb();
}
writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
- TSENS_TM_INT_EN(tmdev->tsens_addr));
+ TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
spin_lock_init(&tmdev->tsens_crit_lock);
spin_lock_init(&tmdev->tsens_upp_low_lock);
@@ -513,7 +568,7 @@
static const struct tsens_ops ops_tsens2xxx = {
.hw_init = tsens2xxx_hw_init,
.get_temp = tsens2xxx_get_temp,
- .set_trip_temp = tsens2xxx_set_trip_temp,
+ .set_trips = tsens2xxx_set_trip_temp,
.interrupts_reg = tsens2xxx_register_interrupts,
.dbg = tsens2xxx_dbg,
};
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 61ad6c3..f4eb807 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1075,15 +1075,15 @@
}
static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
-static const u8 am4372_habit = UART_ERRATA_CLOCK_DISABLE;
+static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
static const struct of_device_id omap8250_dt_ids[] = {
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
{ .compatible = "ti,omap4-uart" },
{ .compatible = "ti,am3352-uart", .data = &am3352_habit, },
- { .compatible = "ti,am4372-uart", .data = &am4372_habit, },
- { .compatible = "ti,dra742-uart", .data = &am4372_habit, },
+ { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
+ { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
{},
};
MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
@@ -1218,9 +1218,6 @@
priv->omap8250_dma.rx_size = RX_TRIGGER;
priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;
-
- if (of_machine_is_compatible("ti,am33xx"))
- priv->habit |= OMAP_DMA_TX_KICK;
/*
* pause is currently not supported at least on omap-sdma
* and edma on most earlier kernels.
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a9ded51..bac9975 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1079,6 +1079,12 @@
hardware.
The driver supports console and High speed UART functions.
+config SERIAL_MSM_GENI_CONSOLE
+ tristate "MSM on-chip GENI HW based console support"
+ depends on SERIAL_MSM_GENI=y
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+
config SERIAL_MSM_CONSOLE
bool "MSM serial console support"
depends on SERIAL_MSM=y
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index fabbe76..4d079cd 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1938,6 +1938,11 @@
atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
+ /*
+ * in uart_flush_buffer(), the xmit circular buffer has just
+ * been cleared, so we have to reset tx_len accordingly.
+ */
+ atmel_port->tx_len = 0;
}
/*
@@ -2471,6 +2476,9 @@
pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ /* Make sure that tx path is actually able to send characters */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+
uart_console_write(port, s, count, atmel_console_putchar);
/*
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 4d4fdf4..3fec1d7 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -17,15 +17,19 @@
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/io.h>
+#include <linux/ipc_logging.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
/* UART specific GENI registers */
#define SE_UART_LOOPBACK_CFG (0x22C)
@@ -38,7 +42,7 @@
#define SE_UART_RX_STALE_CNT (0x294)
#define SE_UART_TX_PARITY_CFG (0x2A4)
#define SE_UART_RX_PARITY_CFG (0x2A8)
-#define SE_UART_MANUAL_RFT (0x2AC)
+#define SE_UART_MANUAL_RFR (0x2AC)
/* SE_UART_LOOPBACK_CFG */
#define NO_LOOPBACK (0)
@@ -81,6 +85,11 @@
#define PAR_SPACE (0x10)
#define PAR_MARK (0x11)
+/* SE_UART_MANUAL_RFR register fields */
+#define UART_MANUAL_RFR_EN (BIT(31))
+#define UART_RFR_NOT_READY (BIT(1))
+#define UART_RFR_READY (BIT(0))
+
/* UART M_CMD OP codes */
#define UART_START_TX (0x1)
#define UART_START_BREAK (0x4)
@@ -91,9 +100,26 @@
#define UART_OVERSAMPLING (32)
#define STALE_TIMEOUT (16)
+#define DEFAULT_BITS_PER_CHAR (10)
#define GENI_UART_NR_PORTS (15)
-#define DEF_FIFO_DEPTH_WORDS (64)
+#define GENI_UART_CONS_PORTS (1)
+#define DEF_FIFO_DEPTH_WORDS (16)
+#define DEF_TX_WM (2)
#define DEF_FIFO_WIDTH_BITS (32)
+#define UART_CORE2X_VOTE (10000)
+#define DEFAULT_SE_CLK (19200000)
+#define DEFAULT_BUS_WIDTH (4)
+
+#define WAKEBYTE_TIMEOUT_MSEC (2000)
+#define IPC_LOG_PWR_PAGES (2)
+#define IPC_LOG_MISC_PAGES (2)
+#define IPC_LOG_TX_RX_PAGES (3)
+#define DATA_BYTES_PER_LINE (32)
+
+#define IPC_LOG_MSG(ctx, x...) do { \
+ if (ctx) \
+ ipc_log_string(ctx, x); \
+} while (0)
struct msm_geni_serial_port {
struct uart_port uport;
@@ -114,6 +140,14 @@
unsigned int rx_last);
struct se_geni_rsc serial_rsc;
int loopback;
+ int wakeup_irq;
+ unsigned char wakeup_byte;
+ struct wakeup_source geni_wake;
+ void *ipc_log_tx;
+ void *ipc_log_rx;
+ void *ipc_log_pwr;
+ void *ipc_log_misc;
+ unsigned int cur_baud;
};
static const struct uart_ops msm_geni_serial_pops;
@@ -127,12 +161,16 @@
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
unsigned int rx_last);
+static unsigned int msm_geni_serial_tx_empty(struct uart_port *port);
+static int msm_geni_serial_power_on(struct uart_port *uport);
+static void msm_geni_serial_power_off(struct uart_port *uport);
static atomic_t uart_line_id = ATOMIC_INIT(0);
#define GET_DEV_PORT(uport) \
container_of(uport, struct msm_geni_serial_port, uport)
+static struct msm_geni_serial_port msm_geni_console_port;
static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS];
static void msm_geni_serial_config_port(struct uart_port *uport, int cfg_flags)
@@ -167,22 +205,172 @@
static DEVICE_ATTR(loopback, 0644, msm_geni_serial_loopback_show,
msm_geni_serial_loopback_store);
-static void msm_geni_serial_set_mctrl(struct uart_port *port,
+static void dump_ipc(void *ipc_ctx, char *prefix, char *string,
+ u64 addr, int size)
+{
+ char buf[DATA_BYTES_PER_LINE * 3 + 1]; /* "xx " per byte plus NUL */
+ int len = 0;
+
+ if (!ipc_ctx)
+ return;
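+ /* Hex-dump at most one line of payload into the IPC log, tagged with address and size. */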
+ len = min(size, DATA_BYTES_PER_LINE);
+ hex_dump_to_buffer(string, len, DATA_BYTES_PER_LINE, 1, buf,
+ sizeof(buf), false);
+ ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+ (unsigned int)addr, size, buf);
+}
+
+static void check_tx_active(struct uart_port *uport)
+{
+ u32 geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+
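+ /* Busy-wait until the primary (M) sequencer finishes its current command. */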
+ while ((geni_status & M_GENI_CMD_ACTIVE)) {
+ cpu_relax();
+ geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ }
+}
+
+static int vote_clock_on(struct uart_port *uport)
+{
+ int ret = 0;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+ if (!pm_runtime_enabled(uport->dev)) {
+ dev_err(uport->dev, "RPM not available. Can't enable clocks\n");
+ ret = -EPERM;
+ return ret;
+ }
+ ret = msm_geni_serial_power_on(uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to vote clock on\n");
+ return ret;
+ }
+ __pm_relax(&port->geni_wake);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
+ return 0;
+}
+
+static int vote_clock_off(struct uart_port *uport)
+{
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+ int ret = 0;
+
+ if (!pm_runtime_enabled(uport->dev)) {
+ dev_err(uport->dev, "RPM not available. Can't disable clocks\n");
+ ret = -EPERM;
+ return ret;
+ }
+ /* Block till any ongoing Tx goes out. */
+ check_tx_active(uport);
+ msm_geni_serial_power_off(uport);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
+ return 0;
+}
+
+static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOIOCTLCMD;
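+ /* Power-management ioctls: userspace votes the serial clocks on/off and queries activity. */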
+
+ switch (cmd) {
+ case TIOCPMGET: {
+ ret = vote_clock_on(uport);
+ break;
+ }
+ case TIOCPMPUT: {
+ ret = vote_clock_off(uport);
+ break;
+ }
+ case TIOCPMACT: {
+ ret = !pm_runtime_status_suspended(uport->dev);
+ break;
+ }
+ default:
+ break;
+ }
+ return ret;
+}
+
+static void msm_geni_serial_break_ctl(struct uart_port *uport, int ctl)
+{
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ dev_err(uport->dev, "%s: Device suspended, vote clocks on.\n",
+ __func__);
+ return;
+ }
+
+ if (ctl) {
+ check_tx_active(uport);
+ geni_setup_m_cmd(uport->membase, UART_START_BREAK, 0);
+ } else {
+ geni_setup_m_cmd(uport->membase, UART_STOP_BREAK, 0);
+ }
+ /* Ensure break start/stop command is set up before returning. */
+ mb();
+}
+
+static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport)
+{
+ u32 geni_ios = 0;
+ unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
+
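+ /* While suspended the serial engine is inaccessible; report modem lines as asserted. */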
+ if (pm_runtime_status_suspended(uport->dev))
+ return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+
+ geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
+ if (!(geni_ios & IO2_DATA_IN))
+ mctrl |= TIOCM_CTS;
+
+ return mctrl;
+}
+
+static void msm_geni_cons_set_mctrl(struct uart_port *uport,
unsigned int mctrl)
{
}
+static void msm_geni_serial_set_mctrl(struct uart_port *uport,
+ unsigned int mctrl)
+{
+ u32 uart_manual_rfr = 0;
+
+ if (pm_runtime_status_suspended(uport->dev)) {
+ dev_info(uport->dev, "%s: Device suspended, vote clocks on\n",
+ __func__);
+ return;
+ }
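+ /* A cleared RTS maps to manual RFR "not ready"; otherwise hand flow control back to hardware. */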
+ if (!(mctrl & TIOCM_RTS))
+ uart_manual_rfr |= (UART_MANUAL_RFR_EN | UART_RFR_NOT_READY);
+ geni_write_reg_nolog(uart_manual_rfr, uport->membase,
+ SE_UART_MANUAL_RFR);
+ /* Write to flow control must complete before returning to the client. */
+ mb();
+}
+
static const char *msm_geni_serial_get_type(struct uart_port *uport)
{
return "MSM";
}
-static struct msm_geni_serial_port *get_port_from_line(int line)
+static struct msm_geni_serial_port *get_port_from_line(int line,
+ bool is_console)
{
- if ((line < 0) || (line >= GENI_UART_NR_PORTS))
- return ERR_PTR(-ENXIO);
+ struct msm_geni_serial_port *port = NULL;
- return &msm_geni_serial_ports[line];
+ if (is_console) {
+ if ((line < 0) || (line >= GENI_UART_CONS_PORTS))
+ return ERR_PTR(-ENXIO);
+ port = &msm_geni_console_port;
+ } else {
+ if ((line < 0) || (line >= GENI_UART_NR_PORTS))
+ return ERR_PTR(-ENXIO);
+ port = &msm_geni_serial_ports[line];
+ }
+
+ return port;
}
static int msm_geni_serial_power_on(struct uart_port *uport)
@@ -193,26 +381,46 @@
if (ret < 0) {
dev_err(uport->dev, "%s: Failed (%d)", __func__, ret);
pm_runtime_put_noidle(uport->dev);
+ pm_runtime_set_suspended(uport->dev);
+ return ret;
}
- return ret;
+ return 0;
}
static void msm_geni_serial_power_off(struct uart_port *uport)
{
- pm_runtime_mark_last_busy(uport->dev);
- pm_runtime_put_autosuspend(uport->dev);
+ pm_runtime_put_sync(uport->dev);
}
static int msm_geni_serial_poll_bit(struct uart_port *uport,
- int offset, int bit_field)
+ int offset, int bit_field, bool set)
{
int iter = 0;
unsigned int reg;
bool met = false;
+ struct msm_geni_serial_port *port = NULL;
+ bool cond = false;
+ unsigned int baud = 115200;
+ unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS;
+ unsigned long total_iter = 0;
- while (iter < 100) {
- reg = geni_read_reg(uport->membase, offset);
- if (reg & bit_field) {
+
+ if (uport->private_data) {
+ port = GET_DEV_PORT(uport);
+ baud = (port->cur_baud ? port->cur_baud : 115200);
+ fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+ }
+ /*
+ * Total polling iterations, based on a FIFO's worth of bytes to
+ * be sent at the current baud rate. Add a little fluff to the wait.
+ */
+ total_iter = ((fifo_bits * USEC_PER_SEC) / baud);
+ total_iter += 50;
+
+ while (iter < total_iter) {
+ reg = geni_read_reg_nolog(uport->membase, offset);
+ cond = reg & bit_field;
+ if (cond == set) {
met = true;
break;
}
@@ -223,10 +431,13 @@
}
static void msm_geni_serial_setup_tx(struct uart_port *uport,
- unsigned int xmit_size)
+ unsigned int xmit_size)
{
- geni_write_reg(xmit_size, uport->membase, SE_UART_TX_TRANS_LEN);
- geni_setup_m_cmd(uport->membase, UART_START_TX, 0);
+ u32 m_cmd = 0;
+
+ geni_write_reg_nolog(xmit_size, uport->membase, SE_UART_TX_TRANS_LEN);
+ m_cmd |= (UART_START_TX << M_OPCODE_SHFT);
+ geni_write_reg_nolog(m_cmd, uport->membase, SE_GENI_M_CMD0);
/*
* Writes to enable the primary sequencer should go through before
* exiting this function.
@@ -240,19 +451,34 @@
unsigned int irq_clear = M_CMD_DONE_EN;
done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_DONE_EN);
+ M_CMD_DONE_EN, true);
if (!done) {
- geni_cancel_m_cmd(uport->membase);
+ geni_write_reg_nolog(M_GENI_CMD_CANCEL, uport->membase,
+ SE_GENI_S_CMD_CTRL_REG);
irq_clear |= M_CMD_CANCEL_EN;
if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN)) {
- geni_abort_m_cmd(uport->membase);
+ M_CMD_CANCEL_EN, true)) {
+ geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase,
+ SE_GENI_M_CMD_CTRL_REG);
irq_clear |= M_CMD_ABORT_EN;
msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN);
+ M_CMD_ABORT_EN, true);
}
}
- geni_write_reg(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
+}
+
+static void msm_geni_serial_abort_rx(struct uart_port *uport)
+{
+ unsigned int irq_clear = S_CMD_DONE_EN;
+
+ geni_abort_s_cmd(uport->membase);
+ /* Ensure this goes through before polling. */
+ mb();
+ irq_clear |= S_CMD_ABORT_EN;
+ msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_ABORT, false);
+ geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -263,18 +489,22 @@
unsigned int s_irq_status;
if (!(msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_SEC_IRQ_EN))) {
+ M_SEC_IRQ_EN, true))) {
dev_err(uport->dev, "%s: Failed waiting for SE\n", __func__);
return -ENXIO;
}
- m_irq_status = geni_read_reg(uport->membase, SE_GENI_M_IRQ_STATUS);
- s_irq_status = geni_read_reg(uport->membase, SE_GENI_S_IRQ_STATUS);
- geni_write_reg(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
- geni_write_reg(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+ m_irq_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_STATUS);
+ s_irq_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_STATUS);
+ geni_write_reg_nolog(m_irq_status, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg_nolog(s_irq_status, uport->membase,
+ SE_GENI_S_IRQ_CLEAR);
if (!(msm_geni_serial_poll_bit(uport, SE_GENI_RX_FIFO_STATUS,
- RX_FIFO_WC_MSK))) {
+ RX_FIFO_WC_MSK, true))) {
dev_err(uport->dev, "%s: Failed waiting for Rx\n", __func__);
return -ENXIO;
}
@@ -284,7 +514,7 @@
* getting valid RX fifo status.
*/
mb();
- rx_fifo = geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+ rx_fifo = geni_read_reg_nolog(uport->membase, SE_GENI_RX_FIFOn);
rx_fifo &= 0xFF;
return rx_fifo;
}
@@ -295,15 +525,14 @@
int b = (int) c;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- se_config_packing(uport->membase, 8, 1, false);
- geni_write_reg(port->tx_wm, uport->membase,
+ geni_write_reg_nolog(port->tx_wm, uport->membase,
SE_GENI_TX_WATERMARK_REG);
msm_geni_serial_setup_tx(uport, 1);
if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_TX_FIFO_WATERMARK_EN))
+ M_TX_FIFO_WATERMARK_EN, true))
WARN_ON(1);
- geni_write_reg(b, uport->membase, SE_GENI_TX_FIFOn);
- geni_write_reg(M_TX_FIFO_WATERMARK_EN, uport->membase,
+ geni_write_reg_nolog(b, uport->membase, SE_GENI_TX_FIFOn);
+ geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
SE_GENI_M_IRQ_CLEAR);
/*
* Ensure FIFO write goes through before polling for status.
@@ -313,9 +542,10 @@
}
#endif
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
static void msm_geni_serial_wr_char(struct uart_port *uport, int ch)
{
- geni_write_reg(ch, uport->membase, SE_GENI_TX_FIFOn);
+ geni_write_reg_nolog(ch, uport->membase, SE_GENI_TX_FIFOn);
/*
* Ensure FIFO write clear goes through before
* next iteration.
@@ -328,10 +558,11 @@
__msm_geni_serial_console_write(struct uart_port *uport, const char *s,
unsigned int count)
{
- struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
int new_line = 0;
int i;
int bytes_to_send = count;
+ int fifo_depth = DEF_FIFO_DEPTH_WORDS;
+ int tx_wm = DEF_TX_WM;
for (i = 0; i < count; i++) {
if (s[i] == '\n')
@@ -339,26 +570,32 @@
}
bytes_to_send += new_line;
- se_config_packing(uport->membase, 8, 1, false);
- geni_write_reg(port->tx_wm, uport->membase,
+ geni_write_reg_nolog(tx_wm, uport->membase,
SE_GENI_TX_WATERMARK_REG);
msm_geni_serial_setup_tx(uport, bytes_to_send);
i = 0;
while (i < count) {
u32 chars_to_write = 0;
- u32 avail_fifo_bytes = (port->tx_fifo_depth - port->tx_wm);
-
+ u32 avail_fifo_bytes = (fifo_depth - tx_wm);
+ /*
+ * If the WM bit is never set, the Tx state machine is not in
+ * a valid state; break out and cancel/abort any existing
+ * command. Unfortunately the current data being written is
+ * lost.
+ */
while (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_TX_FIFO_WATERMARK_EN))
- cpu_relax();
+ M_TX_FIFO_WATERMARK_EN, true))
+ break;
chars_to_write = min((unsigned int)(count - i),
avail_fifo_bytes);
if ((chars_to_write << 1) > avail_fifo_bytes)
chars_to_write = (avail_fifo_bytes >> 1);
uart_console_write(uport, (s + i), chars_to_write,
msm_geni_serial_wr_char);
- geni_write_reg(M_TX_FIFO_WATERMARK_EN, uport->membase,
+ geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
SE_GENI_M_IRQ_CLEAR);
+ /* Ensure this goes through before polling for WM IRQ again. */
+ mb();
i += chars_to_write;
}
msm_geni_serial_poll_cancel_tx(uport);
@@ -372,7 +609,7 @@
WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
- port = get_port_from_line(co->index);
+ port = get_port_from_line(co->index, true);
if (IS_ERR_OR_NULL(port)) {
pr_err("%s:Invalid line %d\n", __func__, co->index);
return;
@@ -384,98 +621,6 @@
spin_unlock(&uport->lock);
}
-static void msm_geni_serial_start_tx(struct uart_port *uport)
-{
- unsigned int geni_m_irq_en;
- struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-
- geni_m_irq_en = geni_read_reg(uport->membase, SE_GENI_M_IRQ_EN);
- geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
-
- se_config_packing(uport->membase, 8, 4, false);
- geni_write_reg(port->tx_wm, uport->membase, SE_GENI_TX_WATERMARK_REG);
- geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
- /* Geni command setup/irq enables should complete before returning.*/
- mb();
-}
-
-static void msm_geni_serial_stop_tx(struct uart_port *uport)
-{
- unsigned int geni_m_irq_en;
- unsigned int geni_status;
-
- geni_m_irq_en = geni_read_reg(uport->membase, SE_GENI_M_IRQ_EN);
- geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
- geni_write_reg(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
- geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
-
- geni_status = geni_read_reg(uport->membase,
- SE_GENI_STATUS);
- /* Possible stop tx is called multiple times. */
- if (!(geni_status & M_GENI_CMD_ACTIVE))
- return;
-
- geni_cancel_m_cmd(uport->membase);
- if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN)) {
- geni_abort_m_cmd(uport->membase);
- msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN);
- geni_write_reg(M_CMD_ABORT_EN, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
- }
- geni_write_reg(M_CMD_CANCEL_EN, uport, SE_GENI_M_IRQ_CLEAR);
-}
-
-static void msm_geni_serial_start_rx(struct uart_port *uport)
-{
- unsigned int geni_s_irq_en;
- unsigned int geni_m_irq_en;
-
- geni_s_irq_en = geni_read_reg(uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_m_irq_en = geni_read_reg(uport->membase,
- SE_GENI_M_IRQ_EN);
- geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
- geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
-
- geni_setup_s_cmd(uport->membase, UART_START_READ, 0);
- geni_write_reg(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
- geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
- /*
- * Ensure the writes to the secondary sequencer and interrupt enables
- * go through.
- */
- mb();
-}
-
-static void msm_geni_serial_stop_rx(struct uart_port *uport)
-{
- unsigned int geni_s_irq_en;
- unsigned int geni_m_irq_en;
- unsigned int geni_status;
-
- geni_s_irq_en = geni_read_reg(uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_m_irq_en = geni_read_reg(uport->membase,
- SE_GENI_M_IRQ_EN);
- geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
- geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
-
- geni_write_reg(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
- geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
-
- geni_status = geni_read_reg(uport->membase, SE_GENI_STATUS);
- /* Possible stop rx is called multiple times. */
- if (!(geni_status & S_GENI_CMD_ACTIVE))
- return;
- geni_write_reg(S_GENI_CMD_CANCEL, uport->membase,
- SE_GENI_S_CMD_CTRL_REG);
- if (!msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
- S_CMD_CANCEL_EN))
- WARN_ON(1);
-}
-
static int handle_rx_console(struct uart_port *uport,
unsigned int rx_fifo_wc,
unsigned int rx_last_byte_valid,
@@ -487,12 +632,11 @@
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
tport = &uport->state->port;
-
for (i = 0; i < rx_fifo_wc; i++) {
int bytes = 4;
*(msm_port->rx_fifo) =
- geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+ geni_read_reg_nolog(uport->membase, SE_GENI_RX_FIFOn);
rx_char = (unsigned char *)msm_port->rx_fifo;
if (i == (rx_fifo_wc - 1)) {
@@ -512,6 +656,135 @@
tty_flip_buffer_push(tport);
return 0;
}
+#else
+static int handle_rx_console(struct uart_port *uport,
+ unsigned int rx_fifo_wc,
+ unsigned int rx_last_byte_valid,
+ unsigned int rx_last)
+{
+ return -EPERM;
+}
+
+#endif /* defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) */
+
+static void msm_geni_serial_start_tx(struct uart_port *uport)
+{
+ unsigned int geni_m_irq_en;
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
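+	/*
+	 * A previous TX command is still draining the FIFO; its watermark
+	 * interrupt will complete it, so do not queue another one.
+	 */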
+ if (!msm_geni_serial_tx_empty(uport))
+ return;
+
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+		dev_err(uport->dev, "%s: Device is suspended\n", __func__);
+ return;
+ }
+
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+ geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
+
+ geni_write_reg_nolog(msm_port->tx_wm, uport->membase,
+ SE_GENI_TX_WATERMARK_REG);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+ /* Geni command setup/irq enables should complete before returning.*/
+ mb();
+ IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
+}
+
+static void msm_geni_serial_stop_tx(struct uart_port *uport)
+{
+ unsigned int geni_m_irq_en;
+ unsigned int geni_status;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ return;
+
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+ geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
+ geni_write_reg_nolog(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+
+ geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ /* Possible stop tx is called multiple times. */
+ if (!(geni_status & M_GENI_CMD_ACTIVE))
+ return;
+
+ geni_cancel_m_cmd(uport->membase);
+ if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_abort_m_cmd(uport->membase);
+ msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+ }
+	geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
+ IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
+}
+
+static void msm_geni_serial_start_rx(struct uart_port *uport)
+{
+ unsigned int geni_s_irq_en;
+ unsigned int geni_m_irq_en;
+ unsigned long cfg0, cfg1;
+ unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
+ unsigned int geni_status;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ return;
+
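+	/* Abort any stale RX command before programming a fresh UART_START_READ. */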
+ geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+ if (geni_status & S_GENI_CMD_ACTIVE)
+ msm_geni_serial_abort_rx(uport);
+ geni_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_EN);
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
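+	/* Pack four 8-bit characters into each 32-bit RX FIFO word. */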
+ se_get_packing_config(8, 4, false, &cfg0, &cfg1);
+ geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_RX_PACKING_CFG0);
+ geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_RX_PACKING_CFG1);
+ geni_write_reg_nolog(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
+ geni_setup_s_cmd(uport->membase, UART_START_READ, 0);
+ geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+ /*
+ * Ensure the writes to the secondary sequencer and interrupt enables
+ * go through.
+ */
+ mb();
+ IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
+}
+
+static void msm_geni_serial_stop_rx(struct uart_port *uport)
+{
+ unsigned int geni_s_irq_en;
+ unsigned int geni_m_irq_en;
+ unsigned int geni_status;
+
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ return;
+
+ geni_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_EN);
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+ geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+
+ geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+
+ geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+ /* Possible stop rx is called multiple times. */
+ if (!(geni_status & S_GENI_CMD_ACTIVE))
+ return;
+ msm_geni_serial_abort_rx(uport);
+}
static int handle_rx_hs(struct uart_port *uport,
unsigned int rx_fifo_wc,
@@ -541,6 +814,8 @@
}
uport->icount.rx += ret;
tty_flip_buffer_push(tport);
+ dump_ipc(msm_port->ipc_log_rx, "Rx", (char *)msm_port->rx_fifo, 0,
+ rx_bytes);
return ret;
}
@@ -552,17 +827,17 @@
unsigned int rx_last_byte_valid = 0;
unsigned int rx_last = 0;
struct tty_port *tport;
- struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
tport = &uport->state->port;
- rx_fifo_status = geni_read_reg(uport->membase,
+ rx_fifo_status = geni_read_reg_nolog(uport->membase,
SE_GENI_RX_FIFO_STATUS);
rx_fifo_wc = rx_fifo_status & RX_FIFO_WC_MSK;
rx_last_byte_valid = ((rx_fifo_status & RX_LAST_BYTE_VALID_MSK) >>
RX_LAST_BYTE_VALID_SHFT);
rx_last = rx_fifo_status & RX_LAST;
if (rx_fifo_wc)
- msm_port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
+ port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
rx_last);
return ret;
}
@@ -577,9 +852,10 @@
int i = 0;
unsigned int tx_fifo_status;
unsigned int xmit_size;
- unsigned int fifo_width_bytes = msm_port->tx_fifo_width >> 3;
+ unsigned int fifo_width_bytes =
+ (uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
- tx_fifo_status = geni_read_reg(uport->membase,
+ tx_fifo_status = geni_read_reg_nolog(uport->membase,
SE_GENI_TX_FIFO_STATUS);
if (uart_circ_empty(xmit) && !tx_fifo_status) {
msm_geni_serial_stop_tx(uport);
@@ -600,6 +876,8 @@
msm_geni_serial_setup_tx(uport, xmit_size);
bytes_remaining = xmit_size;
+ dump_ipc(msm_port->ipc_log_tx, "Tx", (char *)&xmit->buf[xmit->tail], 0,
+ xmit_size);
while (i < xmit_size) {
unsigned int tx_bytes;
unsigned int buf = 0;
@@ -610,7 +888,7 @@
for (c = 0; c < tx_bytes ; c++)
buf |= (xmit->buf[xmit->tail + c] << (c * 8));
- geni_write_reg(buf, uport->membase, SE_GENI_TX_FIFOn);
+ geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
xmit->tail = (xmit->tail + tx_bytes) & (UART_XMIT_SIZE - 1);
i += tx_bytes;
uport->icount.tx += tx_bytes;
@@ -619,6 +897,8 @@
wmb();
}
msm_geni_serial_poll_cancel_tx(uport);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
exit_handle_tx:
return ret;
}
@@ -631,10 +911,18 @@
unsigned long flags;
spin_lock_irqsave(&uport->lock, flags);
- m_irq_status = geni_read_reg(uport->membase, SE_GENI_M_IRQ_STATUS);
- s_irq_status = geni_read_reg(uport->membase, SE_GENI_S_IRQ_STATUS);
- geni_write_reg(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
- geni_write_reg(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+ if (uart_console(uport) && uport->suspended)
+ goto exit_geni_serial_isr;
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ goto exit_geni_serial_isr;
+ m_irq_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_STATUS);
+ s_irq_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_STATUS);
+ geni_write_reg_nolog(m_irq_status, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg_nolog(s_irq_status, uport->membase,
+ SE_GENI_S_IRQ_CLEAR);
if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
WARN_ON(1);
@@ -654,6 +942,28 @@
return IRQ_HANDLED;
}
+static irqreturn_t msm_geni_wakeup_isr(int isr, void *dev)
+{
+ struct uart_port *uport = dev;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+ struct tty_struct *tty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uport->lock, flags);
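+	/* Push the byte that caused the wake into the TTY so it is not lost. */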
+ if (port->wakeup_byte) {
+ tty = uport->state->port.tty;
+ tty_insert_flip_char(tty->port, port->wakeup_byte, TTY_NORMAL);
+ IPC_LOG_MSG(port->ipc_log_rx, "%s: Inject 0x%x\n",
+ __func__, port->wakeup_byte);
+ tty_flip_buffer_push(tty->port);
+ }
+ __pm_wakeup_event(&port->geni_wake, WAKEBYTE_TIMEOUT_MSEC);
+ IPC_LOG_MSG(port->ipc_log_misc, "%s:Holding Wake Lock for %d ms\n",
+ __func__, WAKEBYTE_TIMEOUT_MSEC);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ return IRQ_HANDLED;
+}
+
static int get_tx_fifo_size(struct msm_geni_serial_port *port)
{
struct uart_port *uport;
@@ -708,33 +1018,89 @@
msm_geni_serial_stop_rx(uport);
disable_irq(uport->irq);
free_irq(uport->irq, msm_port);
- if (uart_console(uport))
+ if (uart_console(uport)) {
se_geni_resources_off(&msm_port->serial_rsc);
- else
+ } else {
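+		/* Release the wake IRQ and wakeup source before powering off. */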
+ if (msm_port->wakeup_irq > 0) {
+ disable_irq(msm_port->wakeup_irq);
+ free_irq(msm_port->wakeup_irq, msm_port);
+ }
+ __pm_relax(&msm_port->geni_wake);
msm_geni_serial_power_off(uport);
+ }
+ IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
}
static int msm_geni_serial_port_setup(struct uart_port *uport)
{
int ret = 0;
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ unsigned long cfg0, cfg1;
- /* For now only assume FIFO mode. */
- msm_port->xfer_mode = FIFO_MODE;
+
set_rfr_wm(msm_port);
- ret = geni_se_init(uport->membase, msm_port->xfer_mode,
+ if (!uart_console(uport)) {
+ /* For now only assume FIFO mode. */
+ msm_port->xfer_mode = FIFO_MODE;
+ ret = geni_se_init(uport->membase, msm_port->xfer_mode,
msm_port->rx_wm, msm_port->rx_rfr);
- if (ret) {
- dev_err(uport->dev, "%s: Fail\n", __func__);
- goto exit_portsetup;
+ if (ret) {
+ dev_err(uport->dev, "%s: Fail\n", __func__);
+ goto exit_portsetup;
+ }
+ se_get_packing_config(8, 4, false, &cfg0, &cfg1);
+ geni_write_reg_nolog(cfg0, uport->membase,
+ SE_GENI_TX_PACKING_CFG0);
+ geni_write_reg_nolog(cfg1, uport->membase,
+ SE_GENI_TX_PACKING_CFG1);
}
-
msm_port->port_setup = true;
/*
* Ensure Port setup related IO completes before returning to
* framework.
*/
mb();
+ if (!uart_console(uport)) {
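+		/* Lazily create per-port IPC log contexts (rx/tx/pwr/misc). */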
+ char name[30];
+
+ memset(name, 0, sizeof(name));
+ if (!msm_port->ipc_log_rx) {
+ scnprintf(name, sizeof(name), "%s%s",
+ dev_name(uport->dev), "_rx");
+ msm_port->ipc_log_rx = ipc_log_context_create(
+ IPC_LOG_TX_RX_PAGES, name, 0);
+ if (!msm_port->ipc_log_rx)
+ dev_info(uport->dev, "Err in Rx IPC Log\n");
+ }
+ memset(name, 0, sizeof(name));
+ if (!msm_port->ipc_log_tx) {
+ scnprintf(name, sizeof(name), "%s%s",
+ dev_name(uport->dev), "_tx");
+ msm_port->ipc_log_tx = ipc_log_context_create(
+ IPC_LOG_TX_RX_PAGES, name, 0);
+ if (!msm_port->ipc_log_tx)
+ dev_info(uport->dev, "Err in Tx IPC Log\n");
+ }
+ memset(name, 0, sizeof(name));
+ if (!msm_port->ipc_log_pwr) {
+ scnprintf(name, sizeof(name), "%s%s",
+ dev_name(uport->dev), "_pwr");
+ msm_port->ipc_log_pwr = ipc_log_context_create(
+ IPC_LOG_PWR_PAGES, name, 0);
+ if (!msm_port->ipc_log_pwr)
+ dev_info(uport->dev, "Err in Pwr IPC Log\n");
+ }
+ memset(name, 0, sizeof(name));
+ if (!msm_port->ipc_log_misc) {
+ scnprintf(name, sizeof(name), "%s%s",
+ dev_name(uport->dev), "_misc");
+ msm_port->ipc_log_misc = ipc_log_context_create(
+ IPC_LOG_MISC_PAGES, name, 0);
+ if (!msm_port->ipc_log_misc)
+ dev_info(uport->dev, "Err in Misc IPC Log\n");
+ }
+
+ }
exit_portsetup:
return ret;
}
@@ -755,12 +1121,36 @@
goto exit_startup;
}
+ if (msm_port->wakeup_irq > 0) {
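+		/*
+		 * Optional wake IRQ on the RX pad; keep it disabled until
+		 * runtime suspend arms it.
+		 */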
+ ret = request_threaded_irq(msm_port->wakeup_irq, NULL,
+ msm_geni_wakeup_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hs_uart_wakeup", uport);
+ if (unlikely(ret)) {
+ dev_err(uport->dev, "%s:Failed to get WakeIRQ ret%d\n",
+ __func__, ret);
+ goto exit_startup;
+ }
+ disable_irq(msm_port->wakeup_irq);
+ }
+
if (likely(!uart_console(uport))) {
ret = msm_geni_serial_power_on(&msm_port->uport);
if (ret)
goto exit_startup;
}
+	if (unlikely(get_se_proto(uport->membase) != UART)) {
+		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
+				 __func__, get_se_proto(uport->membase));
+		ret = -ENXIO;
+		disable_irq(uport->irq);
+		free_irq(uport->irq, msm_port);
+		goto exit_startup;
+	}
+
if (!msm_port->port_setup) {
if (msm_geni_serial_port_setup(uport))
goto exit_startup;
@@ -773,14 +1163,15 @@
* before returning to the framework.
*/
mb();
+ IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
exit_startup:
return ret;
}
-static int get_dfs_index(unsigned long clk_freq, unsigned long *ser_clk)
+static int get_clk_cfg(unsigned long clk_freq, unsigned long *ser_clk)
{
- unsigned long root_freq[] = {19200000, 7372800, 64000000,
- 96000000, 100000000, 102400000, 128000000};
+ unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 32000000, 48000000, 64000000, 80000000, 96000000, 100000000};
int i;
int match = -1;
@@ -795,25 +1186,33 @@
}
if (match != -1)
*ser_clk = root_freq[match];
+ else
+		pr_err("No matching clk for clk_freq %lu\n", clk_freq);
return match;
}
static void geni_serial_write_term_regs(struct uart_port *uport, u32 loopback,
u32 tx_trans_cfg, u32 tx_parity_cfg, u32 rx_trans_cfg,
u32 rx_parity_cfg, u32 bits_per_char, u32 stop_bit_len,
- u32 rxstale, u32 s_clk_cfg)
+ u32 s_clk_cfg)
{
- geni_write_reg(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
- geni_write_reg(tx_trans_cfg, uport->membase, SE_UART_TX_TRANS_CFG);
- geni_write_reg(tx_parity_cfg, uport->membase, SE_UART_TX_PARITY_CFG);
- geni_write_reg(rx_trans_cfg, uport->membase, SE_UART_RX_TRANS_CFG);
- geni_write_reg(rx_parity_cfg, uport->membase, SE_UART_RX_PARITY_CFG);
- geni_write_reg(bits_per_char, uport->membase, SE_UART_TX_WORD_LEN);
- geni_write_reg(bits_per_char, uport->membase, SE_UART_RX_WORD_LEN);
- geni_write_reg(stop_bit_len, uport->membase, SE_UART_TX_STOP_BIT_LEN);
- geni_write_reg(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
- geni_write_reg(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
- geni_write_reg(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
+ geni_write_reg_nolog(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
+ geni_write_reg_nolog(tx_trans_cfg, uport->membase,
+ SE_UART_TX_TRANS_CFG);
+ geni_write_reg_nolog(tx_parity_cfg, uport->membase,
+ SE_UART_TX_PARITY_CFG);
+ geni_write_reg_nolog(rx_trans_cfg, uport->membase,
+ SE_UART_RX_TRANS_CFG);
+ geni_write_reg_nolog(rx_parity_cfg, uport->membase,
+ SE_UART_RX_PARITY_CFG);
+ geni_write_reg_nolog(bits_per_char, uport->membase,
+ SE_UART_TX_WORD_LEN);
+ geni_write_reg_nolog(bits_per_char, uport->membase,
+ SE_UART_RX_WORD_LEN);
+ geni_write_reg_nolog(stop_bit_len, uport->membase,
+ SE_UART_TX_STOP_BIT_LEN);
+ geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
+ geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
}
static int get_clk_div_rate(unsigned int baud, unsigned long *desired_clk_rate)
@@ -823,8 +1222,8 @@
int clk_div = 0;
*desired_clk_rate = baud * UART_OVERSAMPLING;
- dfs_index = get_dfs_index(*desired_clk_rate, &ser_clk);
- if (dfs_index < 1) {
+ dfs_index = get_clk_cfg(*desired_clk_rate, &ser_clk);
+ if (dfs_index < 0) {
pr_err("%s: Can't find matching DFS entry for baud %d\n",
__func__, baud);
clk_div = -EINVAL;
@@ -832,6 +1231,7 @@
}
clk_div = ser_clk / *desired_clk_rate;
+ *desired_clk_rate = ser_clk;
exit_get_clk_div_rate:
return clk_div;
}
@@ -846,7 +1246,6 @@
unsigned int rx_trans_cfg;
unsigned int rx_parity_cfg;
unsigned int stop_bit_len;
- unsigned int rxstale;
unsigned int clk_div;
unsigned long ser_clk_cfg = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
@@ -854,6 +1253,7 @@
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+ port->cur_baud = baud;
clk_div = get_clk_div_rate(baud, &clk_rate);
if (clk_div <= 0)
goto exit_set_termios;
@@ -864,10 +1264,14 @@
ser_clk_cfg |= (clk_div << CLK_DIV_SHFT);
/* parity */
- tx_trans_cfg = geni_read_reg(uport->membase, SE_UART_TX_TRANS_CFG);
- tx_parity_cfg = geni_read_reg(uport->membase, SE_UART_TX_PARITY_CFG);
- rx_trans_cfg = geni_read_reg(uport->membase, SE_UART_RX_TRANS_CFG);
- rx_parity_cfg = geni_read_reg(uport->membase, SE_UART_RX_PARITY_CFG);
+ tx_trans_cfg = geni_read_reg_nolog(uport->membase,
+ SE_UART_TX_TRANS_CFG);
+ tx_parity_cfg = geni_read_reg_nolog(uport->membase,
+ SE_UART_TX_PARITY_CFG);
+ rx_trans_cfg = geni_read_reg_nolog(uport->membase,
+ SE_UART_RX_TRANS_CFG);
+ rx_parity_cfg = geni_read_reg_nolog(uport->membase,
+ SE_UART_RX_PARITY_CFG);
if (termios->c_cflag & PARENB) {
tx_trans_cfg |= UART_TX_PAR_EN;
rx_trans_cfg |= UART_RX_PAR_EN;
@@ -907,8 +1311,6 @@
break;
}
- /* stale timer, set this to 16 characters. */
- rxstale = bits_per_char * STALE_TIMEOUT;
/* stop bits */
if (termios->c_cflag & CSTOPB)
@@ -925,24 +1327,33 @@
geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
- stop_bit_len, rxstale, ser_clk_cfg);
+ stop_bit_len, ser_clk_cfg);
+ IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
+	IPC_LOG_MSG(port->ipc_log_misc, "Tx: trans_cfg %d parity %d\n",
+						tx_trans_cfg, tx_parity_cfg);
+	IPC_LOG_MSG(port->ipc_log_misc, "Rx: trans_cfg %d parity %d\n",
+						rx_trans_cfg, rx_parity_cfg);
+	IPC_LOG_MSG(port->ipc_log_misc, "BitsChar %d stop bit %d\n",
+						bits_per_char, stop_bit_len);
exit_set_termios:
return;
}
-static unsigned int msm_geni_serial_tx_empty(struct uart_port *port)
+static unsigned int msm_geni_serial_tx_empty(struct uart_port *uport)
{
unsigned int tx_fifo_status;
unsigned int is_tx_empty = 1;
- tx_fifo_status = geni_read_reg(port->membase, SE_GENI_TX_FIFO_STATUS);
+ tx_fifo_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_TX_FIFO_STATUS);
if (tx_fifo_status)
is_tx_empty = 0;
return is_tx_empty;
}
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
static int __init msm_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
@@ -952,11 +1363,12 @@
int parity = 'n';
int flow = 'n';
int ret = 0;
+ unsigned long cfg0, cfg1;
if (unlikely(co->index >= GENI_UART_NR_PORTS || co->index < 0))
return -ENXIO;
- dev_port = get_port_from_line(co->index);
+ dev_port = get_port_from_line(co->index, true);
if (IS_ERR_OR_NULL(dev_port)) {
ret = PTR_ERR(dev_port);
pr_err("Invalid line %d(%d)\n", co->index, ret);
@@ -979,19 +1391,129 @@
if (!dev_port->port_setup)
msm_geni_serial_port_setup(uport);
+ /*
+ * Make an unconditional cancel on the main sequencer to reset
+ * it else we could end up in data loss scenarios.
+ */
+ msm_geni_serial_poll_cancel_tx(uport);
+ se_get_packing_config(8, 1, false, &cfg0, &cfg1);
+ geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0);
+ geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_TX_PACKING_CFG1);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(uport, co, baud, parity, bits, flow);
}
-static void msm_geni_serial_debug_init(struct uart_port *uport)
+static void
+msm_geni_serial_early_console_write(struct console *con, const char *s,
+ unsigned int n)
{
- struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ struct earlycon_device *dev = con->data;
- msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL);
- if (IS_ERR_OR_NULL(msm_port->dbg))
- dev_err(uport->dev, "Failed to create dbg dir\n");
+ __msm_geni_serial_console_write(&dev->port, s, n);
+}
+
+static int __init
+msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ const char *opt)
+{
+ struct uart_port *uport = &dev->port;
+ int ret = 0;
+ u32 tx_trans_cfg = 0;
+ u32 tx_parity_cfg = 0;
+ u32 rx_trans_cfg = 0;
+ u32 rx_parity_cfg = 0;
+ u32 stop_bit = 0;
+ u32 rx_stale = 0;
+ u32 bits_per_char = 0;
+ u32 s_clk_cfg = 0;
+ u32 baud = 115200;
+	int clk_div;
+ unsigned long clk_rate;
+ unsigned long cfg0, cfg1;
+
+ if (!uport->membase) {
+ ret = -ENOMEM;
+ goto exit_geni_serial_earlyconsetup;
+ }
+
+ if (get_se_proto(uport->membase) != UART) {
+ ret = -ENXIO;
+ goto exit_geni_serial_earlyconsetup;
+ }
+
+ geni_se_init(uport->membase, FIFO_MODE, (DEF_FIFO_DEPTH_WORDS >> 1),
+ (DEF_FIFO_DEPTH_WORDS - 2));
+ /*
+ * Ignore Flow control.
+ * Disable Tx Parity.
+ * Don't check Parity during Rx.
+ * Disable Rx Parity.
+ * n = 8.
+ * Stop bit = 0.
+ * Stale timeout in bit-time (3 chars worth).
+ */
+ tx_trans_cfg |= UART_CTS_MASK;
+ tx_parity_cfg = 0;
+ rx_trans_cfg = 0;
+ rx_parity_cfg = 0;
+ bits_per_char = 0x8;
+ stop_bit = 0;
+ rx_stale = 0x18;
+ clk_div = get_clk_div_rate(baud, &clk_rate);
+ if (clk_div <= 0) {
+ ret = -EINVAL;
+ goto exit_geni_serial_earlyconsetup;
+ }
+
+ s_clk_cfg |= SER_CLK_EN;
+ s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
+
+ /*
+ * Make an unconditional cancel on the main sequencer to reset
+ * it else we could end up in data loss scenarios.
+ */
+ msm_geni_serial_poll_cancel_tx(uport);
+ se_get_packing_config(8, 1, false, &cfg0, &cfg1);
+ geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0);
+ geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_TX_PACKING_CFG1);
+ geni_write_reg_nolog(tx_trans_cfg, uport->membase,
+ SE_UART_TX_TRANS_CFG);
+ geni_write_reg_nolog(tx_parity_cfg, uport->membase,
+ SE_UART_TX_PARITY_CFG);
+ geni_write_reg_nolog(rx_trans_cfg, uport->membase,
+ SE_UART_RX_TRANS_CFG);
+ geni_write_reg_nolog(rx_parity_cfg, uport->membase,
+ SE_UART_RX_PARITY_CFG);
+ geni_write_reg_nolog(bits_per_char, uport->membase,
+ SE_UART_TX_WORD_LEN);
+ geni_write_reg_nolog(bits_per_char, uport->membase,
+ SE_UART_RX_WORD_LEN);
+ geni_write_reg_nolog(stop_bit, uport->membase, SE_UART_TX_STOP_BIT_LEN);
+ geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
+ geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
+
+ dev->con->write = msm_geni_serial_early_console_write;
+ dev->con->setup = NULL;
+ /*
+ * Ensure that the early console setup completes before
+ * returning.
+ */
+ mb();
+exit_geni_serial_earlyconsetup:
+ return ret;
+}
+OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-uart",
+ msm_geni_serial_earlycon_setup);
+
+static int console_register(struct uart_driver *drv)
+{
+ return uart_register_driver(drv);
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+ uart_unregister_driver(drv);
}
static struct console cons_ops = {
@@ -1004,6 +1526,50 @@
.data = &msm_geni_console_driver,
};
+static struct uart_driver msm_geni_console_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_geni_console",
+ .dev_name = "ttyMSM",
+ .nr = GENI_UART_NR_PORTS,
+ .cons = &cons_ops,
+};
+#else
+static int console_register(struct uart_driver *drv)
+{
+ return 0;
+}
+
+static void console_unregister(struct uart_driver *drv)
+{
+}
+#endif /* defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) */
+
+static void msm_geni_serial_debug_init(struct uart_port *uport)
+{
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+ msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL);
+ if (IS_ERR_OR_NULL(msm_port->dbg))
+ dev_err(uport->dev, "Failed to create dbg dir\n");
+}
+
+static const struct uart_ops msm_geni_console_pops = {
+ .tx_empty = msm_geni_serial_tx_empty,
+ .stop_tx = msm_geni_serial_stop_tx,
+ .start_tx = msm_geni_serial_start_tx,
+ .stop_rx = msm_geni_serial_stop_rx,
+ .set_termios = msm_geni_serial_set_termios,
+ .startup = msm_geni_serial_startup,
+ .config_port = msm_geni_serial_config_port,
+ .shutdown = msm_geni_serial_shutdown,
+ .type = msm_geni_serial_get_type,
+ .set_mctrl = msm_geni_cons_set_mctrl,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = msm_geni_serial_get_char,
+ .poll_put_char = msm_geni_serial_poll_put_char,
+#endif
+};
+
static const struct uart_ops msm_geni_serial_pops = {
.tx_empty = msm_geni_serial_tx_empty,
.stop_tx = msm_geni_serial_stop_tx,
@@ -1015,15 +1581,17 @@
.shutdown = msm_geni_serial_shutdown,
.type = msm_geni_serial_get_type,
.set_mctrl = msm_geni_serial_set_mctrl,
-#ifdef CONFIG_CONSOLE_POLL
- .poll_get_char = msm_geni_serial_get_char,
- .poll_put_char = msm_geni_serial_poll_put_char,
-#endif
+ .get_mctrl = msm_geni_serial_get_mctrl,
+ .break_ctl = msm_geni_serial_break_ctl,
+ .flush_buffer = NULL,
+ .ioctl = msm_geni_serial_ioctl,
};
static const struct of_device_id msm_geni_device_tbl[] = {
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
{ .compatible = "qcom,msm-geni-console",
.data = (void *)&msm_geni_console_driver},
+#endif
{ .compatible = "qcom,msm-geni-serial-hs",
.data = (void *)&msm_geni_serial_hs_driver},
{},
@@ -1038,17 +1606,7 @@
struct resource *res;
struct uart_driver *drv;
const struct of_device_id *id;
-
- if (pdev->dev.of_node)
- line = of_alias_get_id(pdev->dev.of_node, "serial");
- else
- line = pdev->id;
-
- if (line < 0)
- line = atomic_inc_return(&uart_line_id) - 1;
-
- if ((line < 0) || (line >= GENI_UART_NR_PORTS))
- return -ENXIO;
+ bool is_console = false;
id = of_match_device(msm_geni_device_tbl, &pdev->dev);
if (id) {
@@ -1059,7 +1617,22 @@
return -ENODEV;
}
- dev_port = get_port_from_line(line);
+ if (pdev->dev.of_node) {
+ if (drv->cons)
+ line = of_alias_get_id(pdev->dev.of_node, "serial");
+ else
+ line = of_alias_get_id(pdev->dev.of_node, "hsuart");
+ } else {
+ line = pdev->id;
+ }
+
+ if (line < 0)
+ line = atomic_inc_return(&uart_line_id) - 1;
+
+ if ((line < 0) || (line >= GENI_UART_NR_PORTS))
+ return -ENXIO;
+ is_console = (drv->cons ? true : false);
+ dev_port = get_port_from_line(line, is_console);
if (IS_ERR_OR_NULL(dev_port)) {
ret = PTR_ERR(dev_port);
dev_err(&pdev->dev, "Invalid line %d(%d)\n",
@@ -1076,6 +1649,29 @@
}
uport->dev = &pdev->dev;
+
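+	/* Optional bus-scaling vote to keep the SE core path clocked. */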
+ if (!(of_property_read_u32(pdev->dev.of_node, "qcom,bus-mas",
+ &dev_port->serial_rsc.bus_mas))) {
+ dev_port->serial_rsc.bus_bw =
+ msm_bus_scale_register(
+ dev_port->serial_rsc.bus_mas,
+ MSM_BUS_SLAVE_EBI_CH0,
+ (char *)dev_name(&pdev->dev),
+ false);
+ if (IS_ERR_OR_NULL(dev_port->serial_rsc.bus_bw)) {
+ ret = PTR_ERR(dev_port->serial_rsc.bus_bw);
+ goto exit_geni_serial_probe;
+ }
+ dev_port->serial_rsc.ab = UART_CORE2X_VOTE;
+ dev_port->serial_rsc.ib = DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH;
+ } else {
+ dev_info(&pdev->dev, "No bus master specified\n");
+ }
+
+ if (of_property_read_u8(pdev->dev.of_node, "qcom,wakeup-byte",
+ &dev_port->wakeup_byte))
+ dev_info(&pdev->dev, "No Wakeup byte specified\n");
+
dev_port->serial_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
if (IS_ERR(dev_port->serial_rsc.se_clk)) {
ret = PTR_ERR(dev_port->serial_rsc.se_clk);
@@ -1136,9 +1732,7 @@
goto exit_geni_serial_probe;
}
- /* Default core clk to 115200 Baud */
- clk_set_rate(dev_port->serial_rsc.se_clk, (115200 * UART_OVERSAMPLING));
- uport->uartclk = clk_get_rate(dev_port->serial_rsc.se_clk);
+ wakeup_source_init(&dev_port->geni_wake, dev_name(&pdev->dev));
dev_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
dev_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
dev_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
@@ -1152,9 +1746,14 @@
goto exit_geni_serial_probe;
}
+	/* Optionally use the Rx pin as a wakeup irq. */
+	dev_port->wakeup_irq = platform_get_irq(pdev, 1);
+	if (dev_port->wakeup_irq < 0 && !is_console)
+ dev_info(&pdev->dev, "No wakeup IRQ configured\n");
+
uport->private_data = (void *)drv;
platform_set_drvdata(pdev, dev_port);
- if (drv->cons) {
+ if (is_console) {
dev_port->handle_rx = handle_rx_console;
dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(u32),
GFP_KERNEL);
@@ -1163,13 +1762,11 @@
dev_port->rx_fifo = devm_kzalloc(uport->dev,
sizeof(dev_port->rx_fifo_depth * sizeof(u32)),
GFP_KERNEL);
- pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
- pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n",
- line, uport->fifosize, (drv->cons ? 1 : 0));
+ line, uport->fifosize, is_console);
device_create_file(uport->dev, &dev_attr_loopback);
msm_geni_serial_debug_init(uport);
dev_port->port_setup = false;
@@ -1185,112 +1782,50 @@
struct uart_driver *drv =
(struct uart_driver *)port->uport.private_data;
+ wakeup_source_trash(&port->geni_wake);
uart_remove_one_port(drv, &port->uport);
+ msm_bus_scale_unregister(port->serial_rsc.bus_bw);
return 0;
}
-static void
-msm_geni_serial_early_console_write(struct console *con, const char *s,
- unsigned int n)
-{
- struct earlycon_device *dev = con->data;
-
- __msm_geni_serial_console_write(&dev->port, s, n);
-}
-
-static int __init
-msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
- const char *opt)
-{
- struct uart_port *uport = &dev->port;
- int ret = 0;
- struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
- u32 tx_trans_cfg = 0;
- u32 tx_parity_cfg = 0;
- u32 rx_trans_cfg = 0;
- u32 rx_parity_cfg = 0;
- u32 stop_bit = 0;
- u32 rx_stale = 0;
- u32 bits_per_char = 0;
- u32 s_clk_cfg = 0;
- u32 baud = 115200;
- u32 clk_div;
- unsigned long clk_rate;
-
- if (!uport->membase) {
- ret = -ENOMEM;
- goto exit_geni_serial_earlyconsetup;
- }
-
- if (get_se_proto(uport->membase) != UART) {
- ret = -ENXIO;
- goto exit_geni_serial_earlyconsetup;
- }
-
- msm_port->xfer_mode = FIFO_MODE;
- set_rfr_wm(msm_port);
- msm_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
- msm_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
- msm_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
- geni_se_init(uport->membase, msm_port->xfer_mode, msm_port->rx_wm,
- msm_port->rx_rfr);
- /*
- * Ignore Flow control.
- * Disable Tx Parity.
- * Don't check Parity during Rx.
- * Disable Rx Parity.
- * n = 8.
- * Stop bit = 0.
- * Stale timeout in bit-time (3 chars worth).
- */
- tx_trans_cfg |= UART_CTS_MASK;
- tx_parity_cfg = 0;
- rx_trans_cfg = 0;
- rx_parity_cfg = 0;
- bits_per_char = 0x8;
- stop_bit = 0;
- rx_stale = 0x18;
- clk_div = get_clk_div_rate(baud, &clk_rate);
- if (clk_div <= 0) {
- ret = -EINVAL;
- goto exit_geni_serial_earlyconsetup;
- }
-
- s_clk_cfg |= SER_CLK_EN;
- s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
-
- geni_serial_write_term_regs(uport, 0, tx_trans_cfg,
- tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
- stop_bit, rx_stale, s_clk_cfg);
-
- dev->con->write = msm_geni_serial_early_console_write;
- dev->con->setup = NULL;
- /*
- * Ensure that the early console setup completes before
- * returning.
- */
- mb();
-exit_geni_serial_earlyconsetup:
- return ret;
-}
-OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-uart",
- msm_geni_serial_earlycon_setup);
#ifdef CONFIG_PM
static int msm_geni_serial_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+ int ret = 0;
- return se_geni_resources_off(&port->serial_rsc);
+ ret = se_geni_resources_off(&port->serial_rsc);
+ if (ret) {
+ dev_err(dev, "%s: Error ret %d\n", __func__, ret);
+ goto exit_runtime_suspend;
+ }
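+	/* Resources are off; arm the wake IRQ so RX activity can resume us. */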
+ if (port->wakeup_irq > 0)
+ enable_irq(port->wakeup_irq);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s: Current usage count %d\n", __func__,
+ atomic_read(&dev->power.usage_count));
+exit_runtime_suspend:
+ return ret;
}
static int msm_geni_serial_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+ int ret = 0;
- return se_geni_resources_on(&port->serial_rsc);
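+	/* Disarm the wake IRQ first; normal RX takes over once resources are on. */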
+ if (port->wakeup_irq > 0)
+ disable_irq(port->wakeup_irq);
+ ret = se_geni_resources_on(&port->serial_rsc);
+ if (ret) {
+ dev_err(dev, "%s: Error ret %d\n", __func__, ret);
+ goto exit_runtime_resume;
+ }
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s: Current usage count %d\n", __func__,
+ atomic_read(&dev->power.usage_count));
+exit_runtime_resume:
+ return ret;
}
static int msm_geni_serial_sys_suspend_noirq(struct device *dev)
@@ -1317,9 +1852,11 @@
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_port *uport = &port->uport;
- if (uart_console(uport))
+ if (uart_console(uport)) {
+ se_geni_resources_on(&port->serial_rsc);
uart_resume_port((struct uart_driver *)uport->private_data,
uport);
+ }
return 0;
}
#else
@@ -1366,13 +1903,6 @@
},
};
-static struct uart_driver msm_geni_console_driver = {
- .owner = THIS_MODULE,
- .driver_name = "msm_geni_console",
- .dev_name = "ttyMSM",
- .nr = GENI_UART_NR_PORTS,
- .cons = &cons_ops,
-};
static struct uart_driver msm_geni_serial_hs_driver = {
.owner = THIS_MODULE,
@@ -1393,7 +1923,14 @@
msm_geni_serial_ports[i].uport.line = i;
}
- ret = uart_register_driver(&msm_geni_console_driver);
+ for (i = 0; i < GENI_UART_CONS_PORTS; i++) {
+ msm_geni_console_port.uport.iotype = UPIO_MEM;
+ msm_geni_console_port.uport.ops = &msm_geni_console_pops;
+ msm_geni_console_port.uport.flags = UPF_BOOT_AUTOCONF;
+ msm_geni_console_port.uport.line = i;
+ }
+
+ ret = console_register(&msm_geni_console_driver);
if (ret)
return ret;
@@ -1405,7 +1942,7 @@
ret = platform_driver_register(&msm_geni_serial_platform_driver);
if (ret) {
- uart_unregister_driver(&msm_geni_console_driver);
+ console_unregister(&msm_geni_console_driver);
uart_unregister_driver(&msm_geni_serial_hs_driver);
return ret;
}
@@ -1419,7 +1956,7 @@
{
platform_driver_unregister(&msm_geni_serial_platform_driver);
uart_unregister_driver(&msm_geni_serial_hs_driver);
- uart_unregister_driver(&msm_geni_console_driver);
+ console_unregister(&msm_geni_console_driver);
}
module_exit(msm_geni_serial_exit);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 770454e..07390f8 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1085,7 +1085,7 @@
AUART_LINECTRL_BAUD_DIV_MAX);
baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
- div = u->uartclk * 32 / baud;
+ div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
}
ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 52c98ce..ee15d7d 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -155,4 +155,11 @@
If you compile this as a module, it will be called uio_mf624.
+config UIO_MSM_SHAREDMEM
+ bool "MSM shared memory driver"
+ default n
+ help
+	  Provides clients with their respective allotted shared memory
+	  addresses, which are used as transport buffers.
+
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index 8560dad..2282a69 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -9,3 +9,4 @@
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) += msm_sharedmem/
diff --git a/drivers/uio/msm_sharedmem/Makefile b/drivers/uio/msm_sharedmem/Makefile
new file mode 100644
index 0000000..e6b8570
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_UIO_MSM_SHAREDMEM) := \
+ msm_sharedmem.o \
+ remote_filesystem_access_v01.o \
+	sharedmem_qmi.o
diff --git a/drivers/uio/msm_sharedmem/msm_sharedmem.c b/drivers/uio/msm_sharedmem/msm_sharedmem.c
new file mode 100644
index 0000000..b25f55a
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/msm_sharedmem.c
@@ -0,0 +1,240 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define DRIVER_NAME "msm_sharedmem"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+
+#include <linux/uio_driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+
+#include <soc/qcom/secure_buffer.h>
+
+#include "sharedmem_qmi.h"
+
+#define CLIENT_ID_PROP "qcom,client-id"
+
+#define MPSS_RMTS_CLIENT_ID 1
+
+static int uio_get_mem_index(struct uio_info *info, struct vm_area_struct *vma)
+{
+ if (vma->vm_pgoff >= MAX_UIO_MAPS)
+ return -EINVAL;
+
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -EINVAL;
+
+ return (int)vma->vm_pgoff;
+}
+
+static int sharedmem_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int result;
+ struct uio_mem *mem;
+ int mem_index = uio_get_mem_index(info, vma);
+
+ if (mem_index < 0) {
+ pr_err("mem_index is invalid errno %d\n", mem_index);
+ return mem_index;
+ }
+
+ mem = info->mem + mem_index;
+
+ if (vma->vm_end - vma->vm_start > mem->size) {
+ pr_err("vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
+ vma->vm_end, vma->vm_start,
+ (vma->vm_end - vma->vm_start), &mem->size);
+ return -EINVAL;
+ }
+ pr_debug("Attempting to setup mmap.\n");
+
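+	/* Map the shared buffer write-combined rather than cached. */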
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ result = remap_pfn_range(vma,
+ vma->vm_start,
+ mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (result != 0)
+ pr_err("mmap Failed with errno %d\n", result);
+ else
+ pr_debug("mmap success\n");
+
+ return result;
+}
+
+/* Setup the shared ram permissions.
+ * This function currently supports the mpss client only.
+ */
+static void setup_shared_ram_perms(u32 client_id, phys_addr_t addr, u32 size)
+{
+ int ret;
+ u32 source_vmlist[1] = {VMID_HLOS};
+ int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
+ int dest_perms[2] = {PERM_READ|PERM_WRITE,
+ PERM_READ|PERM_WRITE};
+
+ if (client_id != MPSS_RMTS_CLIENT_ID)
+ return;
+
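+	/* Re-assign the buffer so both HLOS and the modem (MSS MSA) get R/W access. */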
+ ret = hyp_assign_phys(addr, size, source_vmlist, 1, dest_vmids,
+ dest_perms, 2);
+ if (ret != 0) {
+ if (ret == -EINVAL)
+ pr_warn("hyp_assign_phys is not supported!");
+ else
+			pr_err("hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
+ &addr, size, ret);
+ }
+}
+
+static int msm_sharedmem_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct uio_info *info = NULL;
+ struct resource *clnt_res = NULL;
+ u32 client_id = ((u32)~0U);
+ u32 shared_mem_size = 0;
+ void *shared_mem = NULL;
+	phys_addr_t shared_mem_physical = 0;
+ bool is_addr_dynamic = false;
+ struct sharemem_qmi_entry qmi_entry;
+
+ /* Get the addresses from platform-data */
+ if (!pdev->dev.of_node) {
+ pr_err("Node not found\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!clnt_res) {
+ pr_err("resource not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, CLIENT_ID_PROP,
+ &client_id);
+ if (ret) {
+ client_id = ((u32)~0U);
+ pr_warn("qcom,client-id property not found\n");
+ }
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ shared_mem_size = resource_size(clnt_res);
+	shared_mem_physical = clnt_res->start;
+
+ if (shared_mem_size == 0) {
+ pr_err("Shared memory size is zero\n");
+ return -EINVAL;
+ }
+
+	if (shared_mem_physical == 0) {
+ is_addr_dynamic = true;
+ shared_mem = dma_alloc_coherent(&pdev->dev, shared_mem_size,
+			&shared_mem_physical, GFP_KERNEL);
+ if (shared_mem == NULL) {
+ pr_err("Shared mem alloc client=%s, size=%u\n",
+ clnt_res->name, shared_mem_size);
+ return -ENOMEM;
+ }
+ }
+
+ /* Set up the permissions for the shared ram that was allocated. */
+	setup_shared_ram_perms(client_id, shared_mem_physical, shared_mem_size);
+
+ /* Setup device */
+ info->mmap = sharedmem_mmap; /* Custom mmap function. */
+ info->name = clnt_res->name;
+ info->version = "1.0";
+	info->mem[0].addr = shared_mem_physical;
+ info->mem[0].size = shared_mem_size;
+ info->mem[0].memtype = UIO_MEM_PHYS;
+
+ ret = uio_register_device(&pdev->dev, info);
+ if (ret) {
+ pr_err("uio register failed ret=%d\n", ret);
+ goto out;
+ }
+ dev_set_drvdata(&pdev->dev, info);
+
+ qmi_entry.client_id = client_id;
+ qmi_entry.client_name = info->name;
+ qmi_entry.address = info->mem[0].addr;
+ qmi_entry.size = info->mem[0].size;
+ qmi_entry.is_addr_dynamic = is_addr_dynamic;
+
+ sharedmem_qmi_add_entry(&qmi_entry);
+ pr_info("Device created for client '%s'\n", clnt_res->name);
+out:
+ return ret;
+}
+
+static int msm_sharedmem_remove(struct platform_device *pdev)
+{
+ struct uio_info *info = dev_get_drvdata(&pdev->dev);
+
+ uio_unregister_device(info);
+
+ return 0;
+}
+
+static const struct of_device_id msm_sharedmem_of_match[] = {
+ {.compatible = "qcom,sharedmem-uio",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, msm_sharedmem_of_match);
+
+static struct platform_driver msm_sharedmem_driver = {
+ .probe = msm_sharedmem_probe,
+ .remove = msm_sharedmem_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_sharedmem_of_match,
+ },
+};
+
+
+static int __init msm_sharedmem_init(void)
+{
+ int result;
+
+ result = sharedmem_qmi_init();
+ if (result < 0) {
+ pr_err("sharedmem_qmi_init failed result = %d\n", result);
+ return result;
+ }
+
+ result = platform_driver_register(&msm_sharedmem_driver);
+ if (result != 0) {
+ pr_err("Platform driver registration failed\n");
+ return result;
+ }
+ return 0;
+}
+
+static void __exit msm_sharedmem_exit(void)
+{
+ platform_driver_unregister(&msm_sharedmem_driver);
+ sharedmem_qmi_exit();
+}
+
+module_init(msm_sharedmem_init);
+module_exit(msm_sharedmem_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c
new file mode 100644
index 0000000..b04c913
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.c
@@ -0,0 +1,80 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "remote_filesystem_access_v01.h"
+
+struct elem_info rfsa_get_buff_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct rfsa_get_buff_addr_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct rfsa_get_buff_addr_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info rfsa_get_buff_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+ address_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct rfsa_get_buff_addr_resp_msg_v01,
+ address),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+ },
+};
+
diff --git a/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h
new file mode 100644
index 0000000..7ea8ce6
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/remote_filesystem_access_v01.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __REMOTE_FILESYSTEM_ACCESS_V01_H__
+#define __REMOTE_FILESYSTEM_ACCESS_V01_H__
+
+#define RFSA_SERVICE_ID_V01 0x1C
+#define RFSA_SERVICE_VERS_V01 0x01
+
+#define QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01 0x0023
+#define QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01 0x0023
+
+#define RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01 14
+#define RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01 18
+
+extern struct elem_info rfsa_get_buff_addr_req_msg_v01_ei[];
+extern struct elem_info rfsa_get_buff_addr_resp_msg_v01_ei[];
+
+struct rfsa_get_buff_addr_req_msg_v01 {
+ uint32_t client_id;
+ uint32_t size;
+};
+
+struct rfsa_get_buff_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t address_valid;
+ uint64_t address;
+};
+
+#endif /* __REMOTE_FILESYSTEM_ACCESS_V01_H__ */
diff --git a/drivers/uio/msm_sharedmem/sharedmem_qmi.c b/drivers/uio/msm_sharedmem/sharedmem_qmi.c
new file mode 100644
index 0000000..fd95dee
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/sharedmem_qmi.c
@@ -0,0 +1,453 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRIVER_NAME "msm_sharedmem"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "sharedmem_qmi.h"
+#include "remote_filesystem_access_v01.h"
+
+#define RFSA_SERVICE_INSTANCE_NUM 1
+#define SHARED_ADDR_ENTRY_NAME_MAX_LEN 10
+
+struct shared_addr_entry {
+ u32 id;
+ u64 address;
+ u32 size;
+ u64 request_count;
+ bool is_addr_dynamic;
+ char name[SHARED_ADDR_ENTRY_NAME_MAX_LEN + 1];
+};
+
+struct shared_addr_list {
+ struct list_head node;
+ struct shared_addr_entry entry;
+};
+
+static struct shared_addr_list list;
+
+static struct qmi_handle *sharedmem_qmi_svc_handle;
+static void sharedmem_qmi_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, sharedmem_qmi_svc_recv_msg);
+static struct workqueue_struct *sharedmem_qmi_svc_workqueue;
+static struct dentry *dir_ent;
+
+static u32 rfsa_count;
+static u32 rmts_count;
+
+static DECLARE_RWSEM(sharedmem_list_lock); /* declare list lock semaphore */
+
+static struct work_struct sharedmem_qmi_init_work;
+
+static struct msg_desc rfsa_get_buffer_addr_req_desc = {
+ .max_msg_len = RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01,
+ .msg_id = QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01,
+ .ei_array = rfsa_get_buff_addr_req_msg_v01_ei,
+};
+
+static struct msg_desc rfsa_get_buffer_addr_resp_desc = {
+ .max_msg_len = RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01,
+ .msg_id = QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01,
+ .ei_array = rfsa_get_buff_addr_resp_msg_v01_ei,
+};
+
+void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry)
+{
+ struct shared_addr_list *list_entry;
+
+ list_entry = kzalloc(sizeof(*list_entry), GFP_KERNEL);
+
+ /* If we cannot add the entry log the failure and bail */
+ if (list_entry == NULL) {
+ pr_err("Alloc of new list entry failed\n");
+ return;
+ }
+
+ /* Copy as much of the client name that can fit in the entry. */
+ strlcpy(list_entry->entry.name, qmi_entry->client_name,
+ sizeof(list_entry->entry.name));
+
+ /* Setup the rest of the entry. */
+ list_entry->entry.id = qmi_entry->client_id;
+ list_entry->entry.address = qmi_entry->address;
+ list_entry->entry.size = qmi_entry->size;
+ list_entry->entry.is_addr_dynamic = qmi_entry->is_addr_dynamic;
+ list_entry->entry.request_count = 0;
+
+ down_write(&sharedmem_list_lock);
+ list_add_tail(&(list_entry->node), &(list.node));
+ up_write(&sharedmem_list_lock);
+ pr_debug("Added new entry to list\n");
+
+}
+
+static int get_buffer_for_client(u32 id, u32 size, u64 *address)
+{
+ int result = -ENOENT;
+ int client_found = 0;
+ struct list_head *curr_node;
+ struct shared_addr_list *list_entry;
+
+ if (size == 0)
+ return -ENOMEM;
+
+ down_read(&sharedmem_list_lock);
+
+ list_for_each(curr_node, &list.node) {
+ list_entry = list_entry(curr_node, struct shared_addr_list,
+ node);
+ if (list_entry->entry.id == id) {
+ if (list_entry->entry.size >= size) {
+ *address = list_entry->entry.address;
+ list_entry->entry.request_count++;
+ result = 0;
+ } else {
+ pr_err("Shared mem req too large for id=%u\n",
+ id);
+ result = -ENOMEM;
+ }
+ client_found = 1;
+ break;
+ }
+ }
+
+ up_read(&sharedmem_list_lock);
+
+ if (client_found != 1) {
+ pr_err("Unknown client id %u\n", id);
+ result = -ENOENT;
+ }
+ return result;
+}
+
+static int sharedmem_qmi_get_buffer(void *conn_h, void *req_handle, void *req)
+{
+ struct rfsa_get_buff_addr_req_msg_v01 *get_buffer_req;
+ struct rfsa_get_buff_addr_resp_msg_v01 get_buffer_resp;
+ int result;
+ u64 address = 0;
+
+ get_buffer_req = (struct rfsa_get_buff_addr_req_msg_v01 *)req;
+ pr_debug("req->client_id = 0x%X and req->size = %d\n",
+ get_buffer_req->client_id, get_buffer_req->size);
+
+ result = get_buffer_for_client(get_buffer_req->client_id,
+ get_buffer_req->size, &address);
+ if (result != 0)
+ return result;
+
+ if (address == 0) {
+ pr_err("Entry found for client id= 0x%X but address is zero\n",
+ get_buffer_req->client_id);
+ return -ENOMEM;
+ }
+
+ memset(&get_buffer_resp, 0, sizeof(get_buffer_resp));
+ get_buffer_resp.address_valid = 1;
+ get_buffer_resp.address = address;
+ get_buffer_resp.resp.result = QMI_RESULT_SUCCESS_V01;
+
+ result = qmi_send_resp_from_cb(sharedmem_qmi_svc_handle, conn_h,
+ req_handle,
+ &rfsa_get_buffer_addr_resp_desc,
+ &get_buffer_resp,
+ sizeof(get_buffer_resp));
+ return result;
+}
+
+
+static int sharedmem_qmi_connect_cb(struct qmi_handle *handle, void *conn_h)
+{
+ if (sharedmem_qmi_svc_handle != handle || !conn_h)
+ return -EINVAL;
+ return 0;
+}
+
+static int sharedmem_qmi_disconnect_cb(struct qmi_handle *handle, void *conn_h)
+{
+ if (sharedmem_qmi_svc_handle != handle || !conn_h)
+ return -EINVAL;
+ return 0;
+}
+
+static int sharedmem_qmi_req_desc_cb(unsigned int msg_id,
+ struct msg_desc **req_desc)
+{
+ int rc;
+
+ switch (msg_id) {
+ case QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01:
+ *req_desc = &rfsa_get_buffer_addr_req_desc;
+ rc = sizeof(struct rfsa_get_buff_addr_req_msg_v01);
+ break;
+
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+ return rc;
+}
+
+static int sharedmem_qmi_req_cb(struct qmi_handle *handle, void *conn_h,
+ void *req_handle, unsigned int msg_id,
+ void *req)
+{
+ int rc = -ENOTSUPP;
+
+ if (sharedmem_qmi_svc_handle != handle || !conn_h)
+ return -EINVAL;
+
+ if (msg_id == QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01)
+ rc = sharedmem_qmi_get_buffer(conn_h, req_handle, req);
+
+ return rc;
+}
+
+#define DEBUG_BUF_SIZE (2048)
+static char *debug_buffer;
+static u32 debug_data_size;
+static struct mutex dbg_buf_lock; /* mutex for debug_buffer */
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *file_pos)
+{
+ return simple_read_from_buffer(buf, count, file_pos, debug_buffer,
+ debug_data_size);
+}
+
+static u32 fill_debug_info(char *buffer, u32 buffer_size)
+{
+ u32 size = 0;
+ struct list_head *curr_node;
+ struct shared_addr_list *list_entry;
+
+ memset(buffer, 0, buffer_size);
+ size += scnprintf(buffer + size, buffer_size - size, "\n");
+
+ down_read(&sharedmem_list_lock);
+ list_for_each(curr_node, &list.node) {
+ list_entry = list_entry(curr_node, struct shared_addr_list,
+ node);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "Client_name: %s\n", list_entry->entry.name);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "Client_id: 0x%08X\n", list_entry->entry.id);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "Buffer Size: 0x%08X (%d)\n",
+ list_entry->entry.size,
+ list_entry->entry.size);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "Address: 0x%016llX\n",
+ list_entry->entry.address);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "Address Allocation: %s\n",
+ (list_entry->entry.is_addr_dynamic ?
+ "Dynamic" : "Static"));
+ size += scnprintf(buffer + size, buffer_size - size,
+ "Request count: %llu\n",
+ list_entry->entry.request_count);
+ size += scnprintf(buffer + size, buffer_size - size, "\n\n");
+ }
+ up_read(&sharedmem_list_lock);
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "RFSA server start count = %u\n", rfsa_count);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "RMTS server start count = %u\n", rmts_count);
+
+ size += scnprintf(buffer + size, buffer_size - size, "\n");
+ return size;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ u32 buffer_size;
+
+ mutex_lock(&dbg_buf_lock);
+ if (debug_buffer != NULL) {
+ mutex_unlock(&dbg_buf_lock);
+ return -EBUSY;
+ }
+ buffer_size = DEBUG_BUF_SIZE;
+ debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (debug_buffer == NULL) {
+ mutex_unlock(&dbg_buf_lock);
+ return -ENOMEM;
+ }
+ debug_data_size = fill_debug_info(debug_buffer, buffer_size);
+ mutex_unlock(&dbg_buf_lock);
+ return 0;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ mutex_lock(&dbg_buf_lock);
+ kfree(debug_buffer);
+ debug_buffer = NULL;
+ debug_data_size = 0;
+ mutex_unlock(&dbg_buf_lock);
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+ .release = debug_close,
+};
+
+static int rfsa_increment(void *data, u64 val)
+{
+ if (rfsa_count != ~0)
+ rfsa_count++;
+ return 0;
+}
+
+static int rmts_increment(void *data, u64 val)
+{
+ if (rmts_count != ~0)
+ rmts_count++;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rfsa_fops, NULL, rfsa_increment, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(rmts_fops, NULL, rmts_increment, "%llu\n");
+
+static void debugfs_init(void)
+{
+ struct dentry *f_ent;
+
+ mutex_init(&dbg_buf_lock);
+ dir_ent = debugfs_create_dir("rmt_storage", NULL);
+ if (IS_ERR(dir_ent)) {
+ pr_err("Failed to create debug_fs directory\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("info", 0400, dir_ent, NULL, &debug_ops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs info file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("rfsa", 0200, dir_ent, NULL, &rfsa_fops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs rfsa file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("rmts", 0200, dir_ent, NULL, &rmts_fops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs rmts file\n");
+ return;
+ }
+}
+
+static void debugfs_exit(void)
+{
+ debugfs_remove_recursive(dir_ent);
+ mutex_destroy(&dbg_buf_lock);
+}
+
+static void sharedmem_qmi_svc_recv_msg(struct work_struct *work)
+{
+ int rc;
+
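+	/* Drain all queued messages; qmi_recv_msg() returns -ENOMSG when empty. */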
+ do {
+ pr_debug("Notified about a Receive Event\n");
+ } while ((rc = qmi_recv_msg(sharedmem_qmi_svc_handle)) == 0);
+
+ if (rc != -ENOMSG)
+ pr_err("Error receiving message\n");
+}
+
+static void sharedmem_qmi_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *priv)
+{
+ switch (event) {
+ case QMI_RECV_MSG:
+ queue_delayed_work(sharedmem_qmi_svc_workqueue,
+ &work_recv_msg, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct qmi_svc_ops_options sharedmem_qmi_ops_options = {
+ .version = 1,
+ .service_id = RFSA_SERVICE_ID_V01,
+ .service_vers = RFSA_SERVICE_VERS_V01,
+ .service_ins = RFSA_SERVICE_INSTANCE_NUM,
+ .connect_cb = sharedmem_qmi_connect_cb,
+ .disconnect_cb = sharedmem_qmi_disconnect_cb,
+ .req_desc_cb = sharedmem_qmi_req_desc_cb,
+ .req_cb = sharedmem_qmi_req_cb,
+};
+
+
+static void sharedmem_register_qmi(void)
+{
+ int rc;
+
+ sharedmem_qmi_svc_workqueue =
+ create_singlethread_workqueue("sharedmem_qmi_work");
+ if (!sharedmem_qmi_svc_workqueue)
+ return;
+
+ sharedmem_qmi_svc_handle = qmi_handle_create(sharedmem_qmi_notify,
+ NULL);
+ if (!sharedmem_qmi_svc_handle) {
+ pr_err("Creating sharedmem_qmi qmi handle failed\n");
+ destroy_workqueue(sharedmem_qmi_svc_workqueue);
+ return;
+ }
+ rc = qmi_svc_register(sharedmem_qmi_svc_handle,
+ &sharedmem_qmi_ops_options);
+ if (rc < 0) {
+ pr_err("Registering sharedmem_qmi failed %d\n", rc);
+ qmi_handle_destroy(sharedmem_qmi_svc_handle);
+ destroy_workqueue(sharedmem_qmi_svc_workqueue);
+ return;
+ }
+ pr_info("qmi init successful\n");
+}
+
+static void sharedmem_qmi_init_worker(struct work_struct *work)
+{
+ sharedmem_register_qmi();
+ debugfs_init();
+}
+
+int sharedmem_qmi_init(void)
+{
+ INIT_LIST_HEAD(&list.node);
+ INIT_WORK(&sharedmem_qmi_init_work, sharedmem_qmi_init_worker);
+ schedule_work(&sharedmem_qmi_init_work);
+ return 0;
+}
+
+void sharedmem_qmi_exit(void)
+{
+ qmi_svc_unregister(sharedmem_qmi_svc_handle);
+ flush_workqueue(sharedmem_qmi_svc_workqueue);
+ qmi_handle_destroy(sharedmem_qmi_svc_handle);
+ destroy_workqueue(sharedmem_qmi_svc_workqueue);
+ debugfs_exit();
+}
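
For reference, a minimal sketch of the debugfs counter pattern used above; the names here (demo_*) are illustrative, not part of the driver. It creates a write-only file whose writes bump a saturating counter via DEFINE_SIMPLE_ATTRIBUTE, exactly the shape of the rfsa/rmts files:

	#include <linux/debugfs.h>
	#include <linux/fs.h>

	static u32 demo_count;
	static struct dentry *demo_dir;

	/* set() callback: each write to the file increments the counter,
	 * saturating at UINT_MAX instead of wrapping */
	static int demo_increment(void *data, u64 val)
	{
		if (demo_count != ~0)
			demo_count++;
		return 0;
	}
	DEFINE_SIMPLE_ATTRIBUTE(demo_fops, NULL, demo_increment, "%llu\n");

	static int __init demo_init(void)
	{
		demo_dir = debugfs_create_dir("demo", NULL);
		if (IS_ERR_OR_NULL(demo_dir))
			return -ENOMEM;
		debugfs_create_file("hits", 0200, demo_dir, NULL, &demo_fops);
		return 0;
	}
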
diff --git a/drivers/uio/msm_sharedmem/sharedmem_qmi.h b/drivers/uio/msm_sharedmem/sharedmem_qmi.h
new file mode 100644
index 0000000..7353916
--- /dev/null
+++ b/drivers/uio/msm_sharedmem/sharedmem_qmi.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SHAREDMEM_QMI_H__
+#define __SHAREDMEM_QMI_H__
+
+#include <linux/module.h>
+
+struct sharemem_qmi_entry {
+ const char *client_name;
+ u32 client_id;
+ u64 address;
+ u32 size;
+ bool is_addr_dynamic;
+};
+
+int sharedmem_qmi_init(void);
+
+void sharedmem_qmi_exit(void);
+
+void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry);
+
+#endif /* __SHAREDMEM_QMI_H__ */
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 3889809..37591a4 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -24,7 +24,6 @@
switch (event) {
case CI_HDRC_CONTROLLER_RESET_EVENT:
dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
- writel(0, USB_AHBBURST);
/* use AHB transactor, allow posted data writes */
writel(0x8, USB_AHBMODE);
usb_phy_init(ci->usb_phy);
@@ -47,7 +46,8 @@
.name = "ci_hdrc_msm",
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REGS_SHARED |
- CI_HDRC_DISABLE_STREAMING,
+ CI_HDRC_DISABLE_STREAMING |
+ CI_HDRC_OVERRIDE_AHB_BURST,
.notify_event = ci_hdrc_msm_notify_event,
};
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 53d730e..ff45ebf 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -520,8 +520,10 @@
*/
tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
tbuf = kzalloc(tbuf_size, GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ status = -ENOMEM;
+ goto err_alloc;
+ }
bufp = tbuf;
@@ -734,6 +736,7 @@
}
kfree(tbuf);
+ err_alloc:
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
@@ -1433,7 +1436,7 @@
{
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_SETUP_MAP_SINGLE))
- dma_unmap_single(hcd->self.controller,
+ dma_unmap_single(hcd->self.sysdev,
urb->setup_dma,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c3d249f..edb7a9a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2661,8 +2661,15 @@
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 33e3d9f..3e459b0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1064,56 +1064,17 @@
int dwc3_core_pre_init(struct dwc3 *dwc)
{
- int ret;
+ int ret = 0;
dwc3_cache_hwparams(dwc);
-
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
-
if (!dwc->ev_buf) {
ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
- goto err1;
}
}
- ret = dwc3_core_init(dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to initialize core\n");
- goto err2;
- }
-
- ret = phy_power_on(dwc->usb2_generic_phy);
- if (ret < 0)
- goto err3;
-
- ret = phy_power_on(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err4;
-
- ret = dwc3_event_buffers_setup(dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to setup event buffers\n");
- goto err5;
- }
-
- return ret;
-
-err5:
- phy_power_off(dwc->usb3_generic_phy);
-err4:
- phy_power_off(dwc->usb2_generic_phy);
-err3:
- dwc3_core_exit(dwc);
-err2:
- dwc3_free_event_buffers(dwc);
-err1:
- dwc3_ulpi_exit(dwc);
-err0:
return ret;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index f786947..a159011 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -195,8 +195,6 @@
unsigned int max_power;
bool charging_disabled;
enum usb_otg_state otg_state;
- struct work_struct bus_vote_w;
- unsigned int bus_vote;
u32 bus_perf_client;
struct msm_bus_scale_pdata *bus_scale_table;
struct power_supply *usb_psy;
@@ -810,7 +808,8 @@
/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
- DWC3_DEPCMD);
+ DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
+
ch_info->depcmd_hi_addr = 0;
ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
@@ -1964,17 +1963,6 @@
return 0;
}
-static void dwc3_msm_bus_vote_w(struct work_struct *w)
-{
- struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
- int ret;
-
- ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
- mdwc->bus_vote);
- if (ret)
- dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
-}
-
static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
{
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2135,8 +2123,12 @@
/* Remove bus voting */
if (mdwc->bus_perf_client) {
- mdwc->bus_vote = 0;
- schedule_work(&mdwc->bus_vote_w);
+ dbg_event(0xFF, "bus_devote_start", 0);
+ ret = msm_bus_scale_client_update_request(
+ mdwc->bus_perf_client, 0);
+ dbg_event(0xFF, "bus_devote_finish", 0);
+ if (ret)
+ dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
}
/*
@@ -2190,8 +2182,12 @@
/* Enable bus voting */
if (mdwc->bus_perf_client) {
- mdwc->bus_vote = 1;
- schedule_work(&mdwc->bus_vote_w);
+ dbg_event(0xFF, "bus_vote_start", 1);
+ ret = msm_bus_scale_client_update_request(
+ mdwc->bus_perf_client, 1);
+ dbg_event(0xFF, "bus_vote_finish", 1);
+ if (ret)
+ dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
}
/* Vote for TCXO while waking up USB HSPHY */
@@ -2371,9 +2367,47 @@
{
struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ union extcon_property_value val;
+ unsigned int extcon_id;
+ struct extcon_dev *edev = NULL;
+ int ret = 0;
dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
+ if (mdwc->vbus_active) {
+ edev = mdwc->extcon_vbus;
+ extcon_id = EXTCON_USB;
+ } else if (mdwc->id_state == DWC3_ID_GROUND) {
+ edev = mdwc->extcon_id;
+ extcon_id = EXTCON_USB_HOST;
+ }
+
+ /* Check speed and Type-C polarity values in order to configure PHY */
+ if (edev && extcon_get_state(edev, extcon_id)) {
+ ret = extcon_get_property(edev, extcon_id,
+ EXTCON_PROP_USB_SS, &val);
+
+ /* Use default dwc->maximum_speed if speed isn't reported */
+ if (!ret)
+ dwc->maximum_speed = (val.intval == 0) ?
+ USB_SPEED_HIGH : USB_SPEED_SUPER;
+
+ if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
+
+ dbg_event(0xFF, "speed", dwc->maximum_speed);
+
+ ret = extcon_get_property(edev, extcon_id,
+ EXTCON_PROP_USB_TYPEC_POLARITY, &val);
+ if (ret)
+ mdwc->typec_orientation = ORIENTATION_NONE;
+ else
+ mdwc->typec_orientation = val.intval ?
+ ORIENTATION_CC2 : ORIENTATION_CC1;
+
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+ }
+
/*
* exit LPM first to meet resume timeline from device side.
* resume_pending flag would prevent calling
@@ -2617,45 +2651,18 @@
{
struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
- struct extcon_dev *edev = ptr;
enum dwc3_id_state id;
- int cc_state;
- int speed;
-
- if (!edev) {
- dev_err(mdwc->dev, "%s: edev null\n", __func__);
- goto done;
- }
id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
- cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
- if (cc_state < 0)
- mdwc->typec_orientation = ORIENTATION_NONE;
- else
- mdwc->typec_orientation =
- cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
-
- dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
-
- speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
- /* Use default dwc->maximum_speed if extcon doesn't report speed. */
- if (speed >= 0)
- dwc->maximum_speed =
- (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
-
- if (dwc->maximum_speed > dwc->max_hw_supp_speed)
- dwc->maximum_speed = dwc->max_hw_supp_speed;
-
if (mdwc->id_state != id) {
mdwc->id_state = id;
dbg_event(0xFF, "id_state", mdwc->id_state);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
-done:
return NOTIFY_DONE;
}
@@ -2664,44 +2671,19 @@
{
struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
- struct extcon_dev *edev = ptr;
- int cc_state;
- int speed;
-
- if (!edev) {
- dev_err(mdwc->dev, "%s: edev null\n", __func__);
- goto done;
- }
dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
if (mdwc->vbus_active == event)
return NOTIFY_DONE;
- cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
- if (cc_state < 0)
- mdwc->typec_orientation = ORIENTATION_NONE;
- else
- mdwc->typec_orientation =
- cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
-
- dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
-
- speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
- /* Use default dwc->maximum_speed if extcon doesn't report speed. */
- if (speed >= 0)
- dwc->maximum_speed =
- (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
-
- if (dwc->maximum_speed > dwc->max_hw_supp_speed)
- dwc->maximum_speed = dwc->max_hw_supp_speed;
-
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart)
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
-done:
+
return NOTIFY_DONE;
}
+
/*
* Handle EUD based soft detach/attach event, and force USB high speed mode
* functionality on receiving soft attach event.
@@ -2717,12 +2699,6 @@
{
struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
- struct extcon_dev *edev = ptr;
-
- if (!edev) {
- dev_err(mdwc->dev, "%s: edev null\n", __func__);
- goto done;
- }
dbg_event(0xFF, "EUD_NB", event);
dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
@@ -2735,7 +2711,7 @@
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart)
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
-done:
+
return NOTIFY_DONE;
}
@@ -2822,7 +2798,7 @@
static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
{
struct device_node *node = mdwc->dev->of_node;
- int atomic_ctx = 1;
+ int atomic_ctx = 1, s1_bypass;
int ret;
if (!of_property_read_bool(node, "iommus"))
@@ -2846,6 +2822,15 @@
goto release_mapping;
}
+ s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
+ ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret) {
+ dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
+ s1_bypass, ret);
+ goto release_mapping;
+ }
+
ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
if (ret) {
dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
@@ -2962,7 +2947,6 @@
INIT_LIST_HEAD(&mdwc->req_complete_list);
INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
- INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
@@ -3267,10 +3251,10 @@
}
/* Update initial VBUS/ID state from extcon */
- if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
+ if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
EXTCON_USB))
dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
- else if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
+ else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
EXTCON_USB_HOST))
dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
else if (!pval.intval) {
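
The resume path above now pulls the speed and Type-C polarity from extcon properties instead of the removed EXTCON_USB_SPEED/EXTCON_USB_CC cables. A minimal sketch of that query pattern, with illustrative names (the property capabilities must have been registered by the supplier, as policy_engine.c does further below):

	#include <linux/extcon.h>

	static void demo_read_cable_props(struct extcon_dev *edev)
	{
		union extcon_property_value val;

		if (!extcon_get_state(edev, EXTCON_USB))
			return;

		/* 0 -> high-speed cable, nonzero -> SuperSpeed-capable */
		if (!extcon_get_property(edev, EXTCON_USB,
					 EXTCON_PROP_USB_SS, &val))
			pr_info("SS capable: %d\n", val.intval);

		/* nonzero -> CC2 orientation */
		if (!extcon_get_property(edev, EXTCON_USB,
					 EXTCON_PROP_USB_TYPEC_POLARITY, &val))
			pr_info("polarity: %s\n", val.intval ? "CC2" : "CC1");
	}
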
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a5d3209f..56df0f6 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -284,6 +284,7 @@
int status)
{
struct dwc3 *dwc = dep->dwc;
+ unsigned int unmap_after_complete = false;
req->started = false;
list_del(&req->list);
@@ -292,11 +293,19 @@
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number <= 1)
+ /*
+ * NOTICE we don't want to unmap before calling ->complete() if we're
+ * dealing with a bounced ep0 request. If we unmap it here, we would end
+ * up overwriting the contents of req->buf and this could confuse the
+ * gadget driver.
+ */
+ if (dwc->ep0_bounced && dep->number <= 1) {
dwc->ep0_bounced = false;
-
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
+ unmap_after_complete = true;
+ } else {
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
+ }
trace_dwc3_gadget_giveback(req);
@@ -304,8 +313,9 @@
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
- if (dep->number > 1)
- pm_runtime_put(dwc->dev);
+ if (unmap_after_complete)
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
}
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
@@ -1225,7 +1235,6 @@
return ret;
}
- pm_runtime_get(dwc->dev);
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->direction = dep->direction;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index e52bf45..e9e8f46 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -21,11 +21,12 @@
int dwc3_host_init(struct dwc3 *dwc)
{
- struct property_entry props[2];
+ struct property_entry props[3];
struct platform_device *xhci;
int ret, irq;
struct resource *res;
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int prop_idx = 0;
irq = platform_get_irq_byname(dwc3_pdev, "host");
if (irq == -EPROBE_DEFER)
@@ -85,8 +86,22 @@
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
- if (dwc->usb3_lpm_capable) {
- props[0].name = "usb3-lpm-capable";
+ if (dwc->usb3_lpm_capable)
+ props[prop_idx++].name = "usb3-lpm-capable";
+
+ /**
+ * WORKAROUND: dwc3 revisions <=3.00a have a limitation
+ * where Port Disable command doesn't work.
+ *
+ * The suggested workaround is that we avoid Port Disable
+ * completely.
+ *
+ * The following flag tells XHCI to do just that.
+ */
+ if (dwc->revision <= DWC3_REVISION_300A)
+ props[prop_idx++].name = "quirk-broken-port-ped";
+
+ if (prop_idx) {
ret = platform_device_add_properties(xhci, props);
if (ret) {
dev_err(dwc->dev, "failed to add properties to xHCI\n");
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index daca68b..46df732 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -611,8 +611,7 @@
{
struct acc_dev *dev = fp->private_data;
struct usb_request *req;
- ssize_t r = count;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
pr_debug("acc_read(%zu)\n", count);
@@ -633,6 +632,8 @@
goto done;
}
+ len = ALIGN(count, dev->ep_out->maxpacket);
+
if (dev->rx_done) {
// last req cancelled. try to get it.
req = dev->rx_req[0];
@@ -642,7 +643,7 @@
requeue_req:
/* queue a request */
req = dev->rx_req[0];
- req->length = count;
+ req->length = len;
dev->rx_done = 0;
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
@@ -941,6 +942,8 @@
memset(dev->serial, 0, sizeof(dev->serial));
dev->start_requested = 0;
dev->audio_mode = 0;
+ strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE);
+ strlcpy(dev->model, "Android", ACC_STRING_SIZE);
}
}
@@ -1251,13 +1254,13 @@
INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
INIT_WORK(&dev->hid_work, acc_hid_work);
- /* _acc_dev must be set before calling usb_gadget_register_driver */
- _acc_dev = dev;
-
ret = misc_register(&acc_device);
if (ret)
goto err;
+ /* _acc_dev must be set before calling usb_gadget_register_driver */
+ _acc_dev = dev;
+
return 0;
err:
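
The read path above now rounds the OUT request length up to a wMaxPacketSize multiple before queuing. A short sketch of the rule (illustrative names):

	#include <linux/kernel.h>
	#include <linux/usb/gadget.h>

	/* OUT requests that are not a packet multiple can trigger overflow
	 * ("babble") errors on some UDCs, so size the request up front and
	 * copy back only the bytes the host actually sent (req->actual). */
	static int demo_queue_out(struct usb_ep *ep_out,
				  struct usb_request *req, size_t count)
	{
		req->length = ALIGN(count, ep_out->maxpacket);
		return usb_ep_queue(ep_out, req, GFP_KERNEL);
	}
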
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index db7903d..a2a9185 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -989,6 +989,7 @@
struct device *create_function_device(char *name);
+#define AUDIO_SOURCE_DEV_NAME_LENGTH 20
static struct usb_function_instance *audio_source_alloc_inst(void)
{
struct audio_source_instance *fi_audio;
@@ -997,6 +998,8 @@
struct device *dev;
void *err_ptr;
int err = 0;
+ char device_name[AUDIO_SOURCE_DEV_NAME_LENGTH];
+ static u8 count;
fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
if (!fi_audio)
@@ -1014,7 +1017,11 @@
config_group_init_type_name(&fi_audio->func_inst.group, "",
&audio_source_func_type);
- dev = create_function_device("f_audio_source");
+
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ "f_audio_source%d", count++);
+
+ dev = create_function_device(device_name);
if (IS_ERR(dev)) {
err_ptr = dev;
diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c
index 51e6104..e908ecf 100644
--- a/drivers/usb/gadget/function/f_diag.c
+++ b/drivers/usb/gadget/function/f_diag.c
@@ -307,21 +307,8 @@
ctxt->dpkts_tolaptop_pending--;
- if (!req->status) {
- if ((req->length >= ep->maxpacket) &&
- ((req->length % ep->maxpacket) == 0)) {
- ctxt->dpkts_tolaptop_pending++;
- req->length = 0;
- d_req->actual = req->actual;
- d_req->status = req->status;
- /* Queue zero length packet */
- if (!usb_ep_queue(ctxt->in, req, GFP_ATOMIC))
- return;
- ctxt->dpkts_tolaptop_pending--;
- } else {
- ctxt->dpkts_tolaptop++;
- }
- }
+ if (!req->status)
+ ctxt->dpkts_tolaptop++;
spin_lock_irqsave(&ctxt->lock, flags);
list_add_tail(&req->list, &ctxt->write_pool);
@@ -481,6 +468,7 @@
goto fail;
kmemleak_not_leak(req);
req->complete = diag_write_complete;
+ req->zero = true;
list_add_tail(&req->list, &ctxt->write_pool);
}
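
Setting req->zero replaces the hand-rolled ZLP logic deleted above: the UDC core appends a zero-length packet whenever an IN transfer is an exact multiple of maxpacket. A sketch of the allocation side (names illustrative):

	#include <linux/usb/gadget.h>

	static struct usb_request *demo_alloc_in_req(struct usb_ep *in_ep,
			void (*done)(struct usb_ep *, struct usb_request *))
	{
		struct usb_request *req = usb_ep_alloc_request(in_ep, GFP_KERNEL);

		if (req) {
			req->complete = done;
			/* let the UDC terminate maxpacket-multiple writes */
			req->zero = true;
		}
		return req;
	}
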
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index c807b12..12e94d5 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1562,13 +1562,6 @@
event->bNotificationType, req->status);
/* FALLTHROUGH */
case 0:
- /*
- * handle multiple pending resp available
- * notifications by queuing same until we're done,
- * rest of the notification require queuing new
- * request.
- */
- gsi_ctrl_send_notification(gsi);
break;
}
}
@@ -1663,6 +1656,14 @@
gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, 0);
}
+static void gsi_ctrl_send_response_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct f_gsi *gsi = req->context;
+
+ gsi_ctrl_send_notification(gsi);
+}
+
static int
gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
@@ -1749,6 +1750,8 @@
memcpy(req->buf, cpkt->buf, value);
gsi_ctrl_pkt_free(cpkt);
+ req->complete = gsi_ctrl_send_response_complete;
+ req->context = gsi;
log_event_dbg("copied encap_resp %d bytes",
value);
break;
@@ -3047,6 +3050,9 @@
{
struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
+ if (!opts->gsi)
+ return;
+
if (opts->gsi->c_port.ctrl_device.fops)
misc_deregister(&opts->gsi->c_port.ctrl_device);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e55ebcb4..cca261e 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -450,13 +450,23 @@
struct fsg_buffhd *bh = req->context;
if (req->status || req->actual != req->length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, req->length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+ * is set to NULL during ep disable. So, add a check for that case.
+ */
+ if (!common) {
+ bh->inreq_busy = 0;
+ bh->state = BUF_STATE_EMPTY;
+ return;
+ }
+
spin_lock(&common->lock);
bh->inreq_busy = 0;
bh->state = BUF_STATE_EMPTY;
@@ -469,15 +479,24 @@
struct fsg_common *common = ep->driver_data;
struct fsg_buffhd *bh = req->context;
- dump_msg(common, "bulk-out", req->buf, req->actual);
if (req->status || req->actual != bh->bulk_out_intended_length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, bh->bulk_out_intended_length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+ * is set to NULL during ep disable. So, add a check for that case.
+ */
+ if (!common) {
+ bh->outreq_busy = 0;
+ return;
+ }
+
+ dump_msg(common, "bulk-out", req->buf, req->actual);
spin_lock(&common->lock);
bh->outreq_busy = 0;
bh->state = BUF_STATE_FULL;
@@ -2266,6 +2285,8 @@
fsg->bulk_out_enabled = 0;
}
+ /* allow usb LPM after eps are disabled */
+ usb_gadget_autopm_put_async(common->gadget);
common->fsg = NULL;
wake_up(&common->fsg_wait);
}
@@ -2330,6 +2351,10 @@
{
struct fsg_dev *fsg = fsg_from_func(f);
fsg->common->new_fsg = fsg;
+
+ /* prevents usb LPM until thread runs to completion */
+ usb_gadget_autopm_get_async(fsg->common->gadget);
+
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
}
@@ -2472,8 +2497,14 @@
case FSG_STATE_CONFIG_CHANGE:
do_set_interface(common, common->new_fsg);
- if (common->new_fsg)
+ if (common->new_fsg) {
+ /*
+ * make sure the delayed_status flag is updated after set_alt
+ * has returned.
+ */
+ msleep(200);
usb_composite_setup_continue(common->cdev);
+ }
break;
case FSG_STATE_EXIT:
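
A sketch of the defensive completion pattern introduced above; the context struct is hypothetical. ep->driver_data can be cleared by a racing ep disable, so a completion handler must not dereference it unconditionally:

	#include <linux/spinlock.h>
	#include <linux/usb/gadget.h>

	struct demo_ctx {
		spinlock_t lock;
		int inreq_busy;
	};

	static void demo_complete(struct usb_ep *ep, struct usb_request *req)
	{
		struct demo_ctx *ctx = ep->driver_data;

		/* disconnect raced with completion: just drop the state */
		if (!ctx)
			return;

		spin_lock(&ctx->lock);
		ctx->inreq_busy = 0;
		spin_unlock(&ctx->lock);
	}
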
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index fbe6910..217b7ca 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1238,7 +1238,7 @@
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 512;
+ opts->buflen = 1024;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index c6aa884..aaa0fc2 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -587,11 +587,17 @@
struct mtp_dev *dev = fp->private_data;
struct usb_composite_dev *cdev = dev->cdev;
struct usb_request *req;
- ssize_t r = count, xfer, len;
+ ssize_t r = count;
+ unsigned xfer;
int ret = 0;
+ size_t len;
DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
+ len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
+ if (len > MTP_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
/* we will block until we're online */
DBG(cdev, "mtp_read: waiting for online state\n");
ret = wait_event_interruptible(dev->read_wq,
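
Unlike the unconditional ALIGN() used in f_accessory above, usb_ep_align_maybe() only rounds up when the UDC declares quirk_ep_out_aligned_size. A sketch of the bounds check (names and buffer size are assumptions for illustration):

	#include <linux/usb/composite.h>
	#include <linux/usb/gadget.h>

	#define DEMO_BUF_SIZE 16384

	static ssize_t demo_clamp_read_len(struct usb_composite_dev *cdev,
					   struct usb_ep *ep_out, size_t count)
	{
		size_t len = usb_ep_align_maybe(cdev->gadget, ep_out, count);

		/* an aligned length past the buffer cannot be satisfied */
		return len > DEMO_BUF_SIZE ? -EINVAL : len;
	}
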
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 7114784..17f6f60 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -493,11 +493,7 @@
NULL,
NULL);
- status = set_qdss_data_connection(
- qdss->gadget,
- qdss->port.data,
- qdss->port.data->address,
- 0);
+ status = set_qdss_data_connection(qdss, 0);
if (status)
pr_err("qdss_disconnect error");
}
@@ -543,11 +539,7 @@
}
pr_debug("usb_qdss_connect_work\n");
- status = set_qdss_data_connection(
- qdss->gadget,
- qdss->port.data,
- qdss->port.data->address,
- 1);
+ status = set_qdss_data_connection(qdss, 1);
if (status) {
pr_err("set_qdss_data_connection error(%d)", status);
return;
@@ -868,14 +860,9 @@
if (status)
pr_err("%s: uninit_data error\n", __func__);
- status = set_qdss_data_connection(
- gadget,
- qdss->port.data,
- qdss->port.data->address,
- 0);
+ status = set_qdss_data_connection(qdss, 0);
if (status)
pr_err("%s:qdss_disconnect error\n", __func__);
- usb_gadget_restart(gadget);
}
EXPORT_SYMBOL(usb_qdss_close);
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index e673e61..4ba2e9b 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -72,6 +72,5 @@
};
int uninit_data(struct usb_ep *ep);
-int set_qdss_data_connection(struct usb_gadget *gadget,
- struct usb_ep *data_ep, u8 data_addr, int enable);
+int set_qdss_data_connection(struct f_qdss *qdss, int enable);
#endif
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ed93f9d..38d58f3 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -538,14 +538,11 @@
*/
retval = 0;
if (*params->filter) {
- params->state = RNDIS_DATA_INITIALIZED;
- netif_carrier_on(params->dev);
- if (netif_running(params->dev))
- netif_wake_queue(params->dev);
+ pr_debug("%s(): disable flow control\n", __func__);
+ rndis_flow_control(params, false);
} else {
- params->state = RNDIS_INITIALIZED;
- netif_carrier_off(params->dev);
- netif_stop_queue(params->dev);
+ pr_err("%s(): enable flow control\n", __func__);
+ rndis_flow_control(params, true);
}
break;
@@ -690,12 +687,6 @@
{
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
- u8 *xbuf;
- u32 length;
-
- /* drain the response queue */
- while ((xbuf = rndis_get_next_response(params, &length)))
- rndis_free_response(params, xbuf);
r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
if (!r)
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
index c781d85..06eecd1 100644
--- a/drivers/usb/gadget/function/u_qdss.c
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -40,19 +40,25 @@
}
static int init_data(struct usb_ep *ep);
-int set_qdss_data_connection(struct usb_gadget *gadget,
- struct usb_ep *data_ep, u8 data_addr, int enable)
+int set_qdss_data_connection(struct f_qdss *qdss, int enable)
{
enum usb_ctrl usb_bam_type;
int res = 0;
int idx;
- struct f_qdss *qdss = data_ep->driver_data;
- struct usb_qdss_bam_connect_info bam_info = qdss->bam_info;
+ struct usb_qdss_bam_connect_info bam_info;
+ struct usb_gadget *gadget;
pr_debug("set_qdss_data_connection\n");
+ if (!qdss) {
+ pr_err("%s: qdss ptr is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ gadget = qdss->gadget;
usb_bam_type = usb_bam_get_bam_type(gadget->name);
+ bam_info = qdss->bam_info;
/* There is only one qdss pipe, so the pipe number can be set to 0 */
idx = usb_bam_get_connection_idx(usb_bam_type, QDSS_P_BAM,
PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE, 0);
@@ -67,14 +73,16 @@
kzalloc(sizeof(struct sps_mem_buffer), GFP_KERNEL);
if (!bam_info.data_fifo) {
pr_err("qdss_data_connection: memory alloc failed\n");
+ usb_bam_free_fifos(usb_bam_type, idx);
return -ENOMEM;
}
get_bam2bam_connection_info(usb_bam_type, idx,
&bam_info.usb_bam_pipe_idx,
NULL, bam_info.data_fifo, NULL);
- alloc_sps_req(data_ep);
- msm_data_fifo_config(data_ep, bam_info.data_fifo->phys_base,
+ alloc_sps_req(qdss->port.data);
+ msm_data_fifo_config(qdss->port.data,
+ bam_info.data_fifo->phys_base,
bam_info.data_fifo->size,
bam_info.usb_bam_pipe_idx);
init_data(qdss->port.data);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 7558021..47b2817 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -407,11 +407,20 @@
return -ENOMEM;
}
- xhci_queue_stop_endpoint(xhci, command, slot_id, i,
- suspend);
+
+ ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+ i, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto err_cmd_queue;
+ }
}
}
- xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto err_cmd_queue;
+ }
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -422,6 +431,8 @@
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
}
+
+err_cmd_queue:
xhci_free_command(xhci, cmd);
return ret;
}
@@ -458,6 +469,12 @@
return;
}
+ if (xhci->quirks & XHCI_BROKEN_PORT_PED) {
+ xhci_dbg(xhci,
+ "Broken Port Enabled/Disabled, ignoring port disable request.\n");
+ return;
+ }
+
/* Write 1 to disable the port */
writel(port_status | PORT_PE, addr);
port_status = readl(addr);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 374750f..b59efd2 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1507,6 +1507,11 @@
/* Some devices get this wrong */
if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
max_packet = 512;
+
+ if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_FULL
+ && max_packet < 8)
+ max_packet = 8;
+
/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
avg_trb_len = 8;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6ee10df..a0bc61f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -200,6 +200,8 @@
struct clk *clk;
int ret;
int irq;
+ u32 temp, imod;
+ unsigned long flags;
if (usb_disabled())
return -ENODEV;
@@ -308,6 +310,12 @@
if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
+ if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
+ if (device_property_read_u32(sysdev, "snps,xhci-imod-value", &imod))
+ imod = 0;
+
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
@@ -324,6 +332,8 @@
if (ret)
goto disable_usb_phy;
+ device_wakeup_enable(&hcd->self.root_hub->dev);
+
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
@@ -331,6 +341,20 @@
if (ret)
goto dealloc_usb2_hcd;
+ device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
+
+ /* override imod interval if specified */
+ if (imod) {
+ imod &= ER_IRQ_INTERVAL_MASK;
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = readl_relaxed(&xhci->ir_set->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= imod;
+ writel_relaxed(temp, &xhci->ir_set->irq_control);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ dev_dbg(&pdev->dev, "%s: imod set to %u\n", __func__, imod);
+ }
+
ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
if (ret)
dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7e9c07..5d434e0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -675,7 +675,7 @@
void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_td *td)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_segment *seg = td->bounce_seg;
struct urb *urb = td->urb;
@@ -3153,7 +3153,7 @@
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
u32 *trb_buff_len, struct xhci_segment *seg)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index e6e985d..ec9ff3e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -113,12 +113,21 @@
ret = xhci_handshake(&xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
- if (!ret) {
+ if (!ret)
xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- } else
+ else
xhci_warn(xhci, "Host not halted after %u microseconds.\n",
XHCI_MAX_HALT_USEC);
+
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
+ if (delayed_work_pending(&xhci->cmd_timer)) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Cleanup command queue");
+ cancel_delayed_work(&xhci->cmd_timer);
+ xhci_cleanup_command_queue(xhci);
+ }
+
return ret;
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 0fe91df..6012da3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1665,6 +1665,9 @@
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
#define XHCI_MISSING_CAS (1 << 24)
+/* For controller with a broken Port Disable implementation */
+#define XHCI_BROKEN_PORT_PED (1 << 25)
+
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index da08047..3ee2938 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -373,8 +373,6 @@
static const unsigned int usbpd_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
- EXTCON_USB_CC,
- EXTCON_USB_SPEED,
EXTCON_NONE,
};
@@ -397,32 +395,43 @@
static inline void stop_usb_host(struct usbpd *pd)
{
- extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
+ extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
}
static inline void start_usb_host(struct usbpd *pd, bool ss)
{
enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+ union extcon_property_value val;
- extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
- cc == ORIENTATION_CC2);
- extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, ss);
- extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
+ val.intval = (cc == ORIENTATION_CC2);
+ extcon_set_property(pd->extcon, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY, val);
+
+ val.intval = ss;
+ extcon_set_property(pd->extcon, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS, val);
+
+ extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 1);
}
static inline void stop_usb_peripheral(struct usbpd *pd)
{
- extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+ extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
}
static inline void start_usb_peripheral(struct usbpd *pd)
{
enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+ union extcon_property_value val;
- extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
- cc == ORIENTATION_CC2);
- extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, 1);
- extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
+ val.intval = (cc == ORIENTATION_CC2);
+ extcon_set_property(pd->extcon, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY, val);
+
+ val.intval = 1;
+ extcon_set_property(pd->extcon, EXTCON_USB, EXTCON_PROP_USB_SS, val);
+
+ extcon_set_state_sync(pd->extcon, EXTCON_USB, 1);
}
static int set_power_role(struct usbpd *pd, enum power_role pr)
@@ -1417,6 +1426,7 @@
}
pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ dual_role_instance_changed(pd->dual_role);
}
@@ -1817,7 +1827,7 @@
regulator_disable(pd->vbus);
if (pd->current_dr != DR_DFP) {
- extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+ extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
pd->current_dr = DR_DFP;
pd_phy_update_roles(pd->current_dr, pd->current_pr);
}
@@ -2656,11 +2666,17 @@
static int usbpd_dr_prop_writeable(struct dual_role_phy_instance *dual_role,
enum dual_role_property prop)
{
+ struct usbpd *pd = dual_role_get_drvdata(dual_role);
+
switch (prop) {
case DUAL_ROLE_PROP_MODE:
+ return 1;
case DUAL_ROLE_PROP_DR:
case DUAL_ROLE_PROP_PR:
- return 1;
+ if (pd)
+ return pd->current_state == PE_SNK_READY ||
+ pd->current_state == PE_SRC_READY;
+ break;
default:
break;
}
@@ -3207,6 +3223,16 @@
goto put_psy;
}
+ /* Support reporting polarity and speed via properties */
+ extcon_set_property_capability(pd->extcon, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(pd->extcon, EXTCON_USB,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS);
+
pd->vbus = devm_regulator_get(parent, "vbus");
if (IS_ERR(pd->vbus)) {
ret = PTR_ERR(pd->vbus);
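
The supplier side mirrors the consumer changes in dwc3-msm.c above: properties are set before the cable state so a notified consumer sees consistent values. A sketch with illustrative names (the property capabilities must be registered first, as the probe code above does):

	#include <linux/extcon.h>

	static void demo_start_host(struct extcon_dev *edev, bool cc2, bool ss)
	{
		union extcon_property_value val;

		val.intval = cc2;
		extcon_set_property(edev, EXTCON_USB_HOST,
				    EXTCON_PROP_USB_TYPEC_POLARITY, val);
		val.intval = ss;
		extcon_set_property(edev, EXTCON_USB_HOST,
				    EXTCON_PROP_USB_SS, val);
		/* notify consumers only after the properties are in place */
		extcon_set_state_sync(edev, EXTCON_USB_HOST, 1);
	}
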
diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c
index 51fcb54..9ef8895 100644
--- a/drivers/usb/phy/class-dual-role.c
+++ b/drivers/usb/phy/class-dual-role.c
@@ -70,15 +70,7 @@
return ret;
}
-static void dual_role_changed_work(struct work_struct *work)
-{
- struct dual_role_phy_instance *dual_role =
- container_of(work, struct dual_role_phy_instance,
- changed_work);
-
- dev_dbg(&dual_role->dev, "%s\n", __func__);
- kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
-}
+static void dual_role_changed_work(struct work_struct *work);
void dual_role_instance_changed(struct dual_role_phy_instance *dual_role)
{
@@ -505,6 +497,17 @@
return ret;
}
+static void dual_role_changed_work(struct work_struct *work)
+{
+ struct dual_role_phy_instance *dual_role =
+ container_of(work, struct dual_role_phy_instance,
+ changed_work);
+
+ dev_dbg(&dual_role->dev, "%s\n", __func__);
+ sysfs_update_group(&dual_role->dev.kobj, &dual_role_attr_group);
+ kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
+}
+
/******************* Module Init ***********************************/
static int __init dual_role_class_init(void)
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index c59e33f..4f0a455 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -50,6 +50,15 @@
#define QUSB2PHY_PORT_TUNE1 0x23c
#define QUSB2PHY_TEST1 0x24C
+#define QUSB2PHY_PLL_CORE_INPUT_OVERRIDE 0x0a8
+#define CORE_PLL_RATE BIT(0)
+#define CORE_PLL_RATE_MUX BIT(1)
+#define CORE_PLL_EN BIT(2)
+#define CORE_PLL_EN_MUX BIT(3)
+#define CORE_PLL_EN_FROM_RESET BIT(4)
+#define CORE_RESET BIT(5)
+#define CORE_RESET_MUX BIT(6)
+
#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */
@@ -330,22 +339,30 @@
}
}
+static void qusb_phy_reset(struct qusb_phy *qphy)
+{
+ int ret;
+
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "%s: phy_reset assert failed\n",
+ __func__);
+ usleep_range(100, 150);
+
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "%s: phy_reset deassert failed\n",
+ __func__);
+}
+
static void qusb_phy_host_init(struct usb_phy *phy)
{
u8 reg;
- int ret;
struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
dev_dbg(phy->dev, "%s\n", __func__);
- /* Perform phy reset */
- ret = reset_control_assert(qphy->phy_reset);
- if (ret)
- dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
- usleep_range(100, 150);
- ret = reset_control_deassert(qphy->phy_reset);
- dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
-
+ qusb_phy_reset(qphy);
qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
qphy->host_init_seq_len, 0);
@@ -377,15 +394,7 @@
qusb_phy_enable_clocks(qphy, true);
- /* Perform phy reset */
- ret = reset_control_assert(qphy->phy_reset);
- if (ret)
- dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
- usleep_range(100, 150);
- ret = reset_control_deassert(qphy->phy_reset);
- if (ret)
- dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
-
+ qusb_phy_reset(qphy);
if (qphy->emulation) {
if (qphy->emu_init_seq)
qusb_phy_write_seq(qphy->emu_phy_base + 0x8000,
@@ -537,6 +546,11 @@
writel_relaxed(intr_mask,
qphy->base + QUSB2PHY_INTR_CTRL);
+ /* hold core PLL into reset */
+ writel_relaxed(CORE_PLL_EN_FROM_RESET |
+ CORE_RESET | CORE_RESET_MUX,
+ qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE);
+
/* enable phy auto-resume */
writel_relaxed(0x91,
qphy->base + QUSB2PHY_TEST1);
@@ -555,14 +569,7 @@
/* Disable all interrupts */
writel_relaxed(0x00,
qphy->base + QUSB2PHY_INTR_CTRL);
-
- /* Put PHY into non-driving mode */
- writel_relaxed(0x23,
- qphy->base + QUSB2PHY_PWR_CTRL1);
-
- /* Makes sure that above write goes through */
- wmb();
-
+ qusb_phy_reset(qphy);
qusb_phy_enable_clocks(qphy, false);
qusb_phy_enable_power(qphy, false, true);
}
@@ -576,6 +583,10 @@
writel_relaxed(0x00,
qphy->base + QUSB2PHY_INTR_CTRL);
+ /* bring core PLL out of reset */
+ writel_relaxed(CORE_PLL_EN_FROM_RESET,
+ qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE);
+
/* Makes sure that above write goes through */
wmb();
} else { /* Cable connect case */
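
The repeated assert/delay/deassert sequence is now factored into qusb_phy_reset(); the general shape of such a helper, sketched with generic names:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/reset.h>

	static void demo_block_reset(struct device *dev, struct reset_control *rc)
	{
		if (reset_control_assert(rc))
			dev_err(dev, "reset assert failed\n");

		/* hold the block in reset long enough to take effect */
		usleep_range(100, 150);

		if (reset_control_deassert(rc))
			dev_err(dev, "reset deassert failed\n");
	}
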
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index ee521a0..8bdd9fd 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -46,13 +46,39 @@
#define ALFPS_DTCT_EN BIT(1)
#define ARCVR_DTCT_EVENT_SEL BIT(4)
-/* PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL bits */
+/*
+ * register bits
+ * PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL - for QMP USB PHY
+ * USB3_DP_COM_PHY_MODE_CTRL - for QMP USB DP Combo PHY
+ */
/* 0 - selects Lane A. 1 - selects Lane B */
#define SW_PORTSELECT BIT(0)
/* port select mux: 1 - sw control. 0 - HW control */
#define SW_PORTSELECT_MX BIT(1)
+/* USB3_DP_PHY_USB3_DP_COM_SWI_CTRL bits */
+
+/* LANE related register read/write with USB3 */
+#define USB3_SWI_ACT_ACCESS_EN BIT(0)
+/* LANE related register read/write with DP */
+#define DP_SWI_ACT_ACCESS_EN BIT(1)
+
+/* USB3_DP_COM_RESET_OVRD_CTRL bits */
+
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* USB3_DP_COM_PHY_MODE_CTRL bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
enum qmp_phy_rev_reg {
USB3_PHY_PCS_STATUS,
USB3_PHY_AUTONOMOUS_MODE_CTRL,
@@ -60,6 +86,17 @@
USB3_PHY_POWER_DOWN_CONTROL,
USB3_PHY_SW_RESET,
USB3_PHY_START,
+
+ /* USB DP Combo PHY related */
+ USB3_DP_DP_PHY_PD_CTL,
+ USB3_DP_COM_POWER_DOWN_CTRL,
+ USB3_DP_COM_SW_RESET,
+ USB3_DP_COM_RESET_OVRD_CTRL,
+ USB3_DP_COM_PHY_MODE_CTRL,
+ USB3_DP_COM_TYPEC_CTRL,
+ USB3_DP_COM_SWI_CTRL,
+ USB3_PCS_MISC_CLAMP_ENABLE,
+ /* TypeC port select configuration (optional) */
USB3_PHY_PCS_MISC_TYPEC_CTRL,
USB3_PHY_REG_MAX,
};
@@ -89,6 +126,7 @@
struct clk *pipe_clk;
struct reset_control *phy_reset;
struct reset_control *phy_phy_reset;
+ struct reset_control *global_phy_reset;
bool power_enabled;
bool clk_enabled;
bool cable_connected;
@@ -111,10 +149,14 @@
{
.compatible = "qcom,usb-ssphy-qmp-v2",
},
+ {
+ .compatible = "qcom,usb-ssphy-qmp-dp-combo",
+ },
{ },
};
MODULE_DEVICE_TABLE(of, msm_usb_id_table);
+static void usb_qmp_powerup_phy(struct msm_ssphy_qmp *phy);
static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on);
static inline char *get_cable_status_str(struct msm_ssphy_qmp *phy)
@@ -132,6 +174,21 @@
phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
}
+static void msm_ssusb_qmp_clamp_enable(struct msm_ssphy_qmp *phy, bool val)
+{
+ switch (phy->phy.type) {
+ case USB_PHY_TYPE_USB3_DP:
+ writel_relaxed(!val, phy->base +
+ phy->phy_reg[USB3_PCS_MISC_CLAMP_ENABLE]);
+ break;
+ case USB_PHY_TYPE_USB3:
+ writel_relaxed(!!val, phy->vls_clamp_reg);
+ break;
+ default:
+ break;
+ }
+}
+
static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
int enable)
{
@@ -152,11 +209,9 @@
val &= ~ARCVR_DTCT_EVENT_SEL;
writeb_relaxed(val, phy->base + autonomous_mode_offset);
}
-
- /* clamp phy level shifter to perform autonomous detection */
- writel_relaxed(0x1, phy->vls_clamp_reg);
+ msm_ssusb_qmp_clamp_enable(phy, true);
} else {
- writel_relaxed(0x0, phy->vls_clamp_reg);
+ msm_ssusb_qmp_clamp_enable(phy, false);
writeb_relaxed(0, phy->base + autonomous_mode_offset);
msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
}
@@ -273,12 +328,95 @@
return 0;
}
+static void usb_qmp_update_portselect_phymode(struct msm_ssphy_qmp *phy)
+{
+ int val;
+
+ /* perform lane selection */
+ val = -EINVAL;
+ if (phy->phy.flags & PHY_LANE_A)
+ val = SW_PORTSELECT_MX;
+ else if (phy->phy.flags & PHY_LANE_B)
+ val = SW_PORTSELECT | SW_PORTSELECT_MX;
+
+ /* PHY must be powered up before updating portselect and phymode. */
+ usb_qmp_powerup_phy(phy);
+
+ switch (phy->phy.type) {
+ case USB_PHY_TYPE_USB3_DP:
+ /* override hardware control for reset of qmp phy */
+ writel_relaxed(SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET,
+ phy->base + phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+
+ /* update port select */
+ if (val > 0) {
+ dev_err(phy->phy.dev,
+ "USB DP QMP PHY: Update TYPEC CTRL(%d)\n", val);
+ writel_relaxed(val, phy->base +
+ phy->phy_reg[USB3_DP_COM_TYPEC_CTRL]);
+ }
+
+ writel_relaxed(USB3_MODE | DP_MODE,
+ phy->base + phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ writel_relaxed(0x00,
+ phy->base + phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+ break;
+ case USB_PHY_TYPE_USB3:
+ if (val > 0) {
+ dev_err(phy->phy.dev,
+ "USB QMP PHY: Update TYPEC CTRL(%d)\n", val);
+ writel_relaxed(val, phy->base +
+ phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
+ }
+ break;
+ default:
+ dev_err(phy->phy.dev, "portselect: Unknown USB QMP PHY type\n");
+ break;
+ }
+
+ /* Make sure the above selection and reset sequence has completed */
+ mb();
+}
+
+static void usb_qmp_powerup_phy(struct msm_ssphy_qmp *phy)
+{
+ switch (phy->phy.type) {
+ case USB_PHY_TYPE_USB3_DP:
+ /* power up USB3 and DP common logic block */
+ writel_relaxed(0x01,
+ phy->base + phy->phy_reg[USB3_DP_COM_POWER_DOWN_CTRL]);
+
+ /*
+ * Don't write 0x0 to DP_COM_SW_RESET here as portselect and
+ * phymode operation needs DP_COM_SW_RESET as 0x1.
+ * msm_ssphy_qmp_init() writes 0x0 to DP_COM_SW_RESET before
+ * initializing PHY.
+ */
+
+ /* intentional fall-through */
+ case USB_PHY_TYPE_USB3:
+ /* power up USB3 PHY */
+ writel_relaxed(0x01,
+ phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+ break;
+ default:
+ dev_err(phy->phy.dev, "phy_powerup: Unknown USB QMP PHY type\n");
+ break;
+ }
+
+ /* Make sure that above write completed to power up PHY */
+ mb();
+}
+
/* SSPHY Initialization */
static int msm_ssphy_qmp_init(struct usb_phy *uphy)
{
struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
phy);
- int ret, val;
+ int ret;
unsigned int init_timeout_usec = INIT_MAX_TIME_USEC;
const struct qmp_reg_val *reg = NULL;
@@ -297,11 +435,11 @@
msm_ssphy_qmp_enable_clks(phy, true);
- writel_relaxed(0x01,
- phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+ /* select appropriate port select and PHY mode if applicable */
+ usb_qmp_update_portselect_phymode(phy);
- /* Make sure that above write completed to get PHY into POWER DOWN */
- mb();
+ /* power up PHY */
+ usb_qmp_powerup_phy(phy);
reg = (struct qmp_reg_val *)phy->qmp_phy_init_seq;
@@ -312,20 +450,15 @@
return ret;
}
- /* perform lane selection */
- val = -EINVAL;
- if (phy->phy.flags & PHY_LANE_A)
- val = SW_PORTSELECT_MX;
+ /* perform software reset of PHY common logic */
+ if (phy->phy.type == USB_PHY_TYPE_USB3_DP)
+ writel_relaxed(0x00,
+ phy->base + phy->phy_reg[USB3_DP_COM_SW_RESET]);
- if (phy->phy.flags & PHY_LANE_B)
- val = SW_PORTSELECT | SW_PORTSELECT_MX;
-
- if (val > 0)
- writel_relaxed(val,
- phy->base + phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
-
- writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
+ /* perform software reset of PCS/Serdes */
writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_SW_RESET]);
+ /* start PCS/Serdes to operation mode */
+ writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
/* Make sure above write completed to bring PHY out of reset */
mb();
@@ -350,6 +483,41 @@
return 0;
}
+static int msm_ssphy_qmp_dp_combo_reset(struct usb_phy *uphy)
+{
+ struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+ phy);
+ int ret = 0;
+
+ dev_dbg(uphy->dev, "Global reset of QMP DP combo phy\n");
+ /* Assert global PHY reset */
+ ret = reset_control_assert(phy->global_phy_reset);
+ if (ret) {
+ dev_err(uphy->dev, "global_phy_reset assert failed\n");
+ goto exit;
+ }
+
+ /* Assert QMP USB PHY reset */
+ ret = reset_control_assert(phy->phy_reset);
+ if (ret) {
+ dev_err(uphy->dev, "phy_reset assert failed\n");
+ goto exit;
+ }
+
+ /* De-Assert QMP USB PHY reset */
+ ret = reset_control_deassert(phy->phy_reset);
+ if (ret)
+ dev_err(uphy->dev, "phy_reset deassert failed\n");
+
+ /* De-Assert global PHY reset */
+ ret = reset_control_deassert(phy->global_phy_reset);
+ if (ret)
+ dev_err(uphy->dev, "global_phy_reset deassert failed\n");
+
+exit:
+ return ret;
+}
+
static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
{
struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
@@ -623,6 +791,11 @@
if (!phy)
return -ENOMEM;
+ phy->phy.type = USB_PHY_TYPE_USB3;
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,usb-ssphy-qmp-dp-combo"))
+ phy->phy.type = USB_PHY_TYPE_USB3_DP;
+
ret = msm_ssphy_qmp_get_clks(phy, dev);
if (ret)
goto err;
@@ -634,11 +807,22 @@
goto err;
}
- phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
- if (IS_ERR(phy->phy_phy_reset)) {
- ret = PTR_ERR(phy->phy_phy_reset);
- dev_dbg(dev, "failed to get phy_phy_reset\n");
- goto err;
+ if (phy->phy.type == USB_PHY_TYPE_USB3_DP) {
+ phy->global_phy_reset = devm_reset_control_get(dev,
+ "global_phy_reset");
+ if (IS_ERR(phy->global_phy_reset)) {
+ ret = PTR_ERR(phy->global_phy_reset);
+ dev_dbg(dev, "failed to get global_phy_reset\n");
+ goto err;
+ }
+ } else {
+ phy->phy_phy_reset = devm_reset_control_get(dev,
+ "phy_phy_reset");
+ if (IS_ERR(phy->phy_phy_reset)) {
+ ret = PTR_ERR(phy->phy_phy_reset);
+ dev_dbg(dev, "failed to get phy_phy_reset\n");
+ goto err;
+ }
}
of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
@@ -673,22 +857,25 @@
dev_err(dev, "failed getting qmp_phy_base\n");
return -ENODEV;
}
- phy->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(phy->base)) {
+
+ /*
+ * For USB QMP DP combo PHY, common set of registers shall be accessed
+ * by DP driver as well.
+ */
+ phy->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ /* devm_ioremap_nocache() returns NULL on failure, never ERR_PTR */
+ if (!phy->base) {
+ ret = -ENOMEM;
goto err;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "vls_clamp_reg");
- if (!res) {
- dev_err(dev, "failed getting vls_clamp_reg\n");
- return -ENODEV;
- }
- phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(phy->vls_clamp_reg)) {
- dev_err(dev, "couldn't find vls_clamp_reg address.\n");
- return PTR_ERR(phy->vls_clamp_reg);
+ if (phy->phy.type == USB_PHY_TYPE_USB3) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "vls_clamp_reg");
+ phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->vls_clamp_reg)) {
+ dev_err(dev, "couldn't find vls_clamp_reg address.\n");
+ return PTR_ERR(phy->vls_clamp_reg);
+ }
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -787,8 +974,11 @@
phy->phy.set_suspend = msm_ssphy_qmp_set_suspend;
phy->phy.notify_connect = msm_ssphy_qmp_notify_connect;
phy->phy.notify_disconnect = msm_ssphy_qmp_notify_disconnect;
- phy->phy.reset = msm_ssphy_qmp_reset;
- phy->phy.type = USB_PHY_TYPE_USB3;
+
+ if (phy->phy.type == USB_PHY_TYPE_USB3_DP)
+ phy->phy.reset = msm_ssphy_qmp_dp_combo_reset;
+ else
+ phy->phy.reset = msm_ssphy_qmp_reset;
ret = usb_add_phy_dev(&phy->phy);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 16cc183..9129f6c 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2071,6 +2071,20 @@
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/*
+ * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de>
+ * The INIC-3619 bridge is used in the StarTech SLSODDU33B
+ * SATA-USB enclosure for slimline optical drives.
+ *
+ * The quirk enables MakeMKV to properly exchange keys with
+ * an installed BD drive.
+ */
+UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209,
+ "Initio Corporation",
+ "INIC-3619",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Reported by Qinglin Ye <yestyle@gmail.com> */
UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
"Kingston",
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 37a37c4..6f2e729 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
#include <linux/efi.h>
#include <linux/errno.h>
#include <linux/fb.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <video/vga.h>
@@ -118,6 +119,8 @@
return false;
}
+static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
+
static int efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
@@ -127,7 +130,7 @@
unsigned int size_total;
char *option = NULL;
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
if (fb_get_options("efifb", &option))
@@ -327,3 +330,64 @@
};
builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+ u16 word;
+
+ pci_bar_found = true;
+
+ pci_read_config_word(dev, PCI_COMMAND, &word);
+ if (!(word & PCI_COMMAND_MEMORY)) {
+ pci_dev_disabled = true;
+ dev_err(&dev->dev,
+ "BAR %d: assigned to efifb but device is disabled!\n",
+ idx);
+ return;
+ }
+
+ if (pci_claim_resource(dev, idx)) {
+ pci_dev_disabled = true;
+ dev_err(&dev->dev,
+ "BAR %d: failed to claim resource for efifb!\n", idx);
+ return;
+ }
+
+ dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+ u64 base = screen_info.lfb_base;
+ u64 size = screen_info.lfb_size;
+ int i;
+
+ if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)screen_info.ext_lfb_base << 32;
+
+ if (!base)
+ return;
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ struct resource *res = &dev->resource[i];
+
+ if (!(res->flags & IORESOURCE_MEM))
+ continue;
+
+ if (res->start <= base && res->end >= base + size - 1) {
+ claim_efifb_bar(dev, i);
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+ 16, efifb_fixup_resources);
+
+#endif
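
The fixup above claims a BAR only if it fully covers the framebuffer range. The containment test, pulled out as a sketch (illustrative helper name, standard linux/ioport.h types):

	#include <linux/ioport.h>

	static bool demo_res_covers(const struct resource *res, u64 base, u64 size)
	{
		return (res->flags & IORESOURCE_MEM) &&
		       res->start <= base &&
		       res->end >= base + size - 1;
	}
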
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d51..ea2f19f 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -644,7 +644,6 @@
break;
case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
@@ -655,7 +654,8 @@
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);
if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 59e9576..c5a567a 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -46,6 +46,7 @@
#define S3C2410_WTCON 0x00
#define S3C2410_WTDAT 0x04
#define S3C2410_WTCNT 0x08
+#define S3C2410_WTCLRINT 0x0c
#define S3C2410_WTCNT_MAXCNT 0xffff
@@ -72,6 +73,7 @@
#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c
#define QUIRK_HAS_PMU_CONFIG (1 << 0)
#define QUIRK_HAS_RST_STAT (1 << 1)
+#define QUIRK_HAS_WTCLRINT_REG (1 << 2)
/* These quirks require that we have a PMU register map */
#define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \
@@ -143,13 +145,18 @@
};
#ifdef CONFIG_OF
+static const struct s3c2410_wdt_variant drv_data_s3c6410 = {
+ .quirks = QUIRK_HAS_WTCLRINT_REG,
+};
+
static const struct s3c2410_wdt_variant drv_data_exynos5250 = {
.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
.mask_bit = 20,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 20,
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
+ | QUIRK_HAS_WTCLRINT_REG,
};
static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
@@ -158,7 +165,8 @@
.mask_bit = 0,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 9,
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
+ | QUIRK_HAS_WTCLRINT_REG,
};
static const struct s3c2410_wdt_variant drv_data_exynos7 = {
@@ -167,12 +175,15 @@
.mask_bit = 23,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 23, /* A57 WDTRESET */
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+ .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
+ | QUIRK_HAS_WTCLRINT_REG,
};
static const struct of_device_id s3c2410_wdt_match[] = {
{ .compatible = "samsung,s3c2410-wdt",
.data = &drv_data_s3c2410 },
+ { .compatible = "samsung,s3c6410-wdt",
+ .data = &drv_data_s3c6410 },
{ .compatible = "samsung,exynos5250-wdt",
.data = &drv_data_exynos5250 },
{ .compatible = "samsung,exynos5420-wdt",
@@ -418,6 +429,10 @@
dev_info(wdt->dev, "watchdog timer expired (irq)\n");
s3c2410wdt_keepalive(&wdt->wdt_device);
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_WTCLRINT_REG)
+ writel(0x1, wdt->reg_base + S3C2410_WTCLRINT);
+
return IRQ_HANDLED;
}
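
The new quirk bit keeps the WTCLRINT write confined to SoCs that actually have
the register; older variants such as the s3c2410 are never touched. A small
user-space model of the quirk-gated register write (the register layout and
values are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define QUIRK_HAS_WTCLRINT_REG (1 << 2)

    struct wdt_variant { unsigned int quirks; };

    /* Model of the interrupt handler: the WTCLRINT write happens only on
     * variants whose quirk mask advertises the register. */
    static void wdt_irq(const struct wdt_variant *v, volatile uint32_t *regs)
    {
        /* keepalive omitted */
        if (v->quirks & QUIRK_HAS_WTCLRINT_REG)
            regs[0x0c / 4] = 0x1;   /* clear the interrupt, as in the patch */
    }

    int main(void)
    {
        uint32_t fake_regs[16] = { 0 };
        struct wdt_variant s3c2410 = { .quirks = 0 };
        struct wdt_variant s3c6410 = { .quirks = QUIRK_HAS_WTCLRINT_REG };

        wdt_irq(&s3c2410, fake_regs);   /* leaves fake_regs untouched */
        wdt_irq(&s3c6410, fake_regs);   /* writes the clear bit */
        printf("WTCLRINT = %u\n", fake_regs[3]);
        return 0;
    }
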
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 12f2252..953275b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2080,11 +2080,6 @@
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
- if (ia_valid & ATTR_MODE) {
- err = posix_acl_chmod(inode, attr->ia_mode);
- if (err)
- goto out_put;
- }
if (mask) {
req->r_inode = inode;
@@ -2098,13 +2093,11 @@
ceph_cap_string(dirtied), mask);
ceph_mdsc_put_request(req);
- if (mask & CEPH_SETATTR_SIZE)
+ ceph_free_cap_flush(prealloc_cf);
+
+ if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
__ceph_do_pending_vmtruncate(inode);
- ceph_free_cap_flush(prealloc_cf);
- return err;
-out_put:
- ceph_mdsc_put_request(req);
- ceph_free_cap_flush(prealloc_cf);
+
return err;
}
@@ -2123,7 +2116,12 @@
if (err != 0)
return err;
- return __ceph_setattr(inode, attr);
+ err = __ceph_setattr(inode, attr);
+
+ if (err >= 0 && (attr->ia_valid & ATTR_MODE))
+ err = posix_acl_chmod(inode, attr->ia_mode);
+
+ return err;
}
/*
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 203287f..94661cf 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -930,7 +930,6 @@
bool use_persistent:1; /* use persistent instead of durable handles */
#ifdef CONFIG_CIFS_SMB2
bool print:1; /* set if connection to printer share */
- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1cd0e2e..3925758 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2597,7 +2597,7 @@
wdata->credits = credits;
if (!wdata->cfile->invalidHandle ||
- !cifs_reopen_file(wdata->cfile, false))
+ !(rc = cifs_reopen_file(wdata->cfile, false)))
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
if (rc) {
@@ -3002,7 +3002,7 @@
rdata->credits = credits;
if (!rdata->cfile->invalidHandle ||
- !cifs_reopen_file(rdata->cfile, true))
+ !(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
error:
if (rc) {
@@ -3577,7 +3577,7 @@
}
if (!rdata->cfile->invalidHandle ||
- !cifs_reopen_file(rdata->cfile, true))
+ !(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
if (rc) {
add_credits_and_wake_if(server, rdata->credits, 0);
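
All three hunks apply the same one-line fix: the result of cifs_reopen_file()
is now captured in rc inside the condition, so a failed reopen takes the error
path instead of silently skipping the async request. A toy model of the
pattern (the error value is arbitrary):

    #include <stdio.h>

    static int reopen(int fail) { return fail ? -5 /* -EIO */ : 0; }

    int main(void)
    {
        int rc = 0, handle_invalid = 1, fail = 1;

        /* Old form: the reopen result was tested but never stored, so on
         * failure rc stayed 0 and the caller never saw the error. */
        if (!handle_invalid || !reopen(fail))
            rc = 0; /* would issue the async write */
        printf("old rc = %d (error lost)\n", rc);

        /* Patched form: capture the result in the same condition. */
        if (!handle_invalid || !(rc = reopen(fail)))
            rc = 0; /* would issue the async write */
        printf("new rc = %d (error propagated)\n", rc);
        return 0;
    }
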
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index fc537c2..87b87e0 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1015,6 +1015,15 @@
return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
}
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+ if (server->tcpStatus == CifsGood)
+ return true;
+
+ return false;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@
.get_dfs_refer = CIFSGetDFSRefer,
.qfs_tcon = cifs_qfs_tcon,
.is_path_accessible = cifs_is_path_accessible,
+ .can_echo = cifs_can_echo,
.query_path_info = cifs_query_path_info,
.query_file_info = cifs_query_file_info,
.get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 8745722..8021853 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1084,9 +1084,6 @@
else
return -EIO;
- if (tcon && tcon->bad_network_name)
- return -ENOENT;
-
if ((tcon && tcon->seal) &&
((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
cifs_dbg(VFS, "encryption requested but no server support");
@@ -1104,6 +1101,10 @@
return -EINVAL;
}
+ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+ if (tcon)
+ tcon->tid = 0;
+
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
@@ -1184,8 +1185,6 @@
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
- if (tcon)
- tcon->bad_network_name = true;
}
goto tcon_exit;
}
@@ -1983,6 +1982,9 @@
struct cifs_tcon *tcon, *tcon2;
struct list_head tmp_list;
int tcon_exist = false;
+ int rc;
+ int resched = false;
+
/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
mutex_lock(&server->reconnect_mutex);
@@ -2010,13 +2012,18 @@
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
- if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+ if (!rc)
cifs_reopen_persistent_handles(tcon);
+ else
+ resched = true;
list_del_init(&tcon->rlist);
cifs_put_tcon(tcon);
}
cifs_dbg(FYI, "Reconnecting tcons finished\n");
+ if (resched)
+ queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
mutex_unlock(&server->reconnect_mutex);
/* now we can safely release srv struct */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a826864..3cb7fa2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -72,10 +72,9 @@
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
- EXT4_INODE_SIZE(inode->i_sb) -
- offset);
}
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) - offset);
}
return csum;
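
Moving the tail checksum out of the inner branch ensures the bytes after the
checksum fields are always folded in, relying on the property that feeding a
rolling checksum consecutive slices equals feeding it the whole buffer. A toy
demonstration with a stand-in checksum (ext4_chksum() actually wraps crc32c;
the function below is only a model):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy rolling checksum: feeding a buffer in consecutive slices must
     * equal feeding it whole. */
    static uint32_t toy_chksum(uint32_t csum, const uint8_t *p, size_t len)
    {
        while (len--)
            csum = csum * 31 + *p++;
        return csum;
    }

    int main(void)
    {
        uint8_t raw[32];
        size_t offset = 20;     /* e.g. just past a checksum field */
        uint32_t whole = 0, split = 0;
        size_t i;

        for (i = 0; i < sizeof(raw); i++)
            raw[i] = (uint8_t)i;

        whole = toy_chksum(whole, raw, sizeof(raw));
        split = toy_chksum(split, raw, offset);
        /* the fixed code always folds in the tail, whatever offset is */
        split = toy_chksum(split, raw + offset, sizeof(raw) - offset);

        printf("whole=%u split=%u equal=%d\n", whole, split, whole == split);
        return 0;
    }
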
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1536aeb..4e894d3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2532,17 +2532,14 @@
}
nfs4_stateid_copy(&stateid, &delegation->stateid);
- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
+ !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+ &delegation->flags)) {
rcu_read_unlock();
nfs_finish_clear_delegation_stateid(state, &stateid);
return;
}
- if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
- rcu_read_unlock();
- return;
- }
-
cred = get_rpccred(delegation->cred);
rcu_read_unlock();
status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index dba2ff8..4523346 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -358,6 +358,8 @@
{
unsigned int len, v, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
+ struct kvec *head = rqstp->rq_arg.head;
+ struct kvec *tail = rqstp->rq_arg.tail;
p = decode_fh(p, &args->fh);
if (!p)
@@ -367,6 +369,8 @@
args->count = ntohl(*p++);
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
+ if ((void *)p > head->iov_base + head->iov_len)
+ return 0;
/*
* The count must equal the amount of data passed.
*/
@@ -377,9 +381,8 @@
* Check to make sure that we got the right number of
* bytes.
*/
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
- + rqstp->rq_arg.tail[0].iov_len - hdr;
+ hdr = (void*)p - head->iov_base;
+ dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
@@ -396,7 +399,7 @@
len = args->len = max_blocksize;
}
rqstp->rq_vec[0].iov_base = (void*)p;
- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
@@ -471,6 +474,8 @@
/* first copy and check from the first page */
old = (char*)p;
vec = &rqstp->rq_arg.head[0];
+ if ((void *)old > vec->iov_base + vec->iov_len)
+ return 0;
avail = vec->iov_len - (old - (char*)vec->iov_base);
while (len && avail && *old) {
*new++ = *old++;
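
The added comparisons stop the decoder as soon as its cursor has run past the
head kvec, before any decoded length field is trusted. A user-space model of
the check (buffer sizes are arbitrary):

    #include <stddef.h>
    #include <stdio.h>

    struct kvec { void *iov_base; size_t iov_len; };

    /* Model of the added sanity check: after decoding the fixed fields,
     * the cursor p must still lie inside the head buffer, or the request
     * is malformed and decoding stops (returning 0, like the nfsd
     * decoders). */
    static int decode_fields(struct kvec *head, size_t fixed_bytes)
    {
        char *p = (char *)head->iov_base + fixed_bytes; /* cursor */

        if (p > (char *)head->iov_base + head->iov_len)
            return 0;   /* decode failure: fields ran off the buffer */
        return 1;
    }

    int main(void)
    {
        char buf[64];
        struct kvec head = { buf, sizeof(buf) };

        printf("%d\n", decode_fields(&head, 32));  /* 1: in bounds */
        printf("%d\n", decode_fields(&head, 80));  /* 0: past the head kvec */
        return 0;
    }
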
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 010aff5..536009e 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -790,6 +790,7 @@
{ nfserr_serverfault, -ESERVERFAULT },
{ nfserr_serverfault, -ENFILE },
{ nfserr_io, -EUCLEAN },
+ { nfserr_perm, -ENOKEY },
};
int i;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index a2b65fc..1645b97 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -733,6 +733,37 @@
return nfserr;
}
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has an argument
+ * and a reply that can both be larger than a page. The xdr code has
+ * taken advantage of this assumption to be sloppy about bounds checking in
+ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+ struct svc_procedure *proc)
+{
+ /*
+ * The ACL code has more careful bounds-checking and is not
+ * susceptible to this problem:
+ */
+ if (rqstp->rq_prog != NFS_PROGRAM)
+ return false;
+ /*
+ * Ditto NFSv4 (which can in theory have argument and reply both
+ * more than a page):
+ */
+ if (rqstp->rq_vers >= 4)
+ return false;
+ /* The reply will be small, we're OK: */
+ if (proc->pc_xdrressize > 0 &&
+ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+ return false;
+
+ return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
@@ -745,6 +776,11 @@
rqstp->rq_vers, rqstp->rq_proc);
proc = rqstp->rq_procinfo;
+ if (nfs_request_too_big(rqstp, proc)) {
+ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+ *statp = rpc_garbage_args;
+ return 1;
+ }
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
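
pc_xdrressize is measured in 4-byte XDR quads, so XDR_QUADLEN(PAGE_SIZE) is
1024 on a 4 KiB-page system: a procedure whose declared reply stays under that
(e.g. WRITE) may legitimately carry a page-crossing argument, while everything
else must not. A worked model of the decision (procedure sizes below are
hypothetical):

    #include <stdio.h>

    /* XDR lengths are counted in 4-byte quads; this mirrors XDR_QUADLEN()
     * from include/linux/sunrpc/xdr.h. */
    #define XDR_QUADLEN(l)  (((l) + 3) >> 2)
    #define PAGE_SIZE       4096u

    /* Model of nfs_request_too_big(): a procedure with a small declared
     * reply (WRITE) may carry a large argument; one with a large reply
     * (READ) must keep its argument within one page. */
    static int too_big(unsigned int xdrressize, unsigned int arg_len)
    {
        if (xdrressize > 0 && xdrressize < XDR_QUADLEN(PAGE_SIZE))
            return 0;               /* reply is small: large arg is fine */
        return arg_len > PAGE_SIZE; /* reply may be big: bound the arg */
    }

    int main(void)
    {
        printf("quads per page = %u\n", XDR_QUADLEN(PAGE_SIZE)); /* 1024 */
        printf("WRITE 64k arg: %d\n", too_big(21, 65536));       /* 0 */
        printf("READ 64k arg:  %d\n", too_big(1050, 65536));     /* 1 */
        return 0;
    }
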
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 41b468a..de07ff6 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -280,6 +280,7 @@
struct nfsd_writeargs *args)
{
unsigned int len, hdr, dlen;
+ struct kvec *head = rqstp->rq_arg.head;
int v;
p = decode_fh(p, &args->fh);
@@ -300,9 +301,10 @@
* Check to make sure that we got the right number of
* bytes.
*/
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
- - hdr;
+ hdr = (void*)p - head->iov_base;
+ if (hdr > head->iov_len)
+ return 0;
+ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
@@ -316,7 +318,7 @@
return 0;
rqstp->rq_vec[0].iov_base = (void*)p;
- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index 516ffb4..fe2cbeb 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -208,14 +208,19 @@
continue;
/*
* Skip ops whose filesystem we don't know about unless
- * it is being mounted.
+ * it is being mounted or unmounted. It is possible for
+ * a filesystem we don't know about to be unmounted if
+ * it fails to mount in the kernel after userspace has
+ * been sent the mount request.
*/
/* XXX: is there a better way to detect this? */
} else if (ret == -1 &&
!(op->upcall.type ==
ORANGEFS_VFS_OP_FS_MOUNT ||
op->upcall.type ==
- ORANGEFS_VFS_OP_GETATTR)) {
+ ORANGEFS_VFS_OP_GETATTR ||
+ op->upcall.type ==
+ ORANGEFS_VFS_OP_FS_UMOUNT)) {
gossip_debug(GOSSIP_DEV_DEBUG,
"orangefs: skipping op tag %llu %s\n",
llu(op->tag), get_opname_string(op));
@@ -402,8 +407,9 @@
/* remove the op from the in progress hash table */
op = orangefs_devreq_remove_op(head.tag);
if (!op) {
- gossip_err("WARNING: No one's waiting for tag %llu\n",
- llu(head.tag));
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "%s: No one's waiting for tag %llu\n",
+ __func__, llu(head.tag));
return ret;
}
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 38887cc..0748a26 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -671,8 +671,10 @@
*/
cdm_element_count =
orangefs_prepare_cdm_array(client_debug_array_string);
- if (cdm_element_count <= 0)
+ if (cdm_element_count <= 0) {
+ kfree(new);
goto out;
+ }
for (i = 0; i < cdm_element_count; i++) {
strlcat(new, "\t", string_size);
@@ -963,13 +965,13 @@
int ret;
ret = copy_from_user(&client_debug_array_string,
- (void __user *)arg,
- ORANGEFS_MAX_DEBUG_STRING_LEN);
+ (void __user *)arg,
+ ORANGEFS_MAX_DEBUG_STRING_LEN);
if (ret != 0) {
pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
__func__);
- return -EIO;
+ return -EFAULT;
}
/*
@@ -984,17 +986,18 @@
*/
client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
'\0';
-
+
pr_info("%s: client debug array string has been received.\n",
__func__);
if (!help_string_initialized) {
/* Build a proper debug help string. */
- if (orangefs_prepare_debugfs_help_string(0)) {
+ ret = orangefs_prepare_debugfs_help_string(0);
+ if (ret) {
gossip_err("%s: no debug help string \n",
__func__);
- return -EIO;
+ return ret;
}
}
@@ -1007,7 +1010,7 @@
help_string_initialized++;
- return ret;
+ return 0;
}
int orangefs_debugfs_new_debug(void __user *arg)
diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
index a3d84ff..f380f9ed 100644
--- a/fs/orangefs/orangefs-dev-proto.h
+++ b/fs/orangefs/orangefs-dev-proto.h
@@ -50,8 +50,7 @@
* Misc constants. Please retain them as multiples of 8!
* Otherwise 32-64 bit interactions will be messed up :)
*/
-#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400
-#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800
+#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800
/*
* The maximum number of directory entries in a single request is 96.
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 3bf803d..45dd8f2 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -249,6 +249,7 @@
char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
struct super_block *sb;
int mount_pending;
+ int no_list;
struct list_head list;
};
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 67c2435..629d8c9 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -263,8 +263,13 @@
if (!new_op)
return -ENOMEM;
new_op->upcall.req.features.features = 0;
- ret = service_operation(new_op, "orangefs_features", 0);
- orangefs_features = new_op->downcall.resp.features.features;
+ ret = service_operation(new_op, "orangefs_features",
+ ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
+ if (!ret)
+ orangefs_features =
+ new_op->downcall.resp.features.features;
+ else
+ orangefs_features = 0;
op_release(new_op);
} else {
orangefs_features = 0;
@@ -488,7 +493,7 @@
if (ret) {
d = ERR_PTR(ret);
- goto free_op;
+ goto free_sb_and_op;
}
/*
@@ -514,6 +519,9 @@
spin_unlock(&orangefs_superblocks_lock);
op_release(new_op);
+ /* The sb is now on the list; kill_sb must remove it. */
+ ORANGEFS_SB(sb)->no_list = 0;
+
if (orangefs_userspace_version >= 20906) {
new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
if (!new_op)
@@ -528,6 +536,10 @@
return dget(sb->s_root);
+free_sb_and_op:
+ /* Will call orangefs_kill_sb with sb not in list. */
+ ORANGEFS_SB(sb)->no_list = 1;
+ deactivate_locked_super(sb);
free_op:
gossip_err("orangefs_mount: mount request failed with %d\n", ret);
if (ret == -EINVAL) {
@@ -553,12 +565,14 @@
*/
orangefs_unmount_sb(sb);
- /* remove the sb from our list of orangefs specific sb's */
-
- spin_lock(&orangefs_superblocks_lock);
- __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */
- ORANGEFS_SB(sb)->list.prev = NULL;
- spin_unlock(&orangefs_superblocks_lock);
+ if (!ORANGEFS_SB(sb)->no_list) {
+ /* remove the sb from our list of orangefs specific sb's */
+ spin_lock(&orangefs_superblocks_lock);
+ /* not list_del_init */
+ __list_del_entry(&ORANGEFS_SB(sb)->list);
+ ORANGEFS_SB(sb)->list.prev = NULL;
+ spin_unlock(&orangefs_superblocks_lock);
+ }
/*
* make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
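
The no_list flag is the handshake between the failed-mount path and
orangefs_kill_sb(): a superblock that was never left on the module's list must
not be unlinked from it a second time. A minimal model of the guard
(structures heavily simplified):

    #include <stdio.h>

    struct sb_priv { int no_list; int on_list; };

    /* Model of kill_sb(): only unlink from the list when mount actually
     * left the sb there; unlinking twice would corrupt the list. */
    static void kill_sb(struct sb_priv *p)
    {
        if (!p->no_list)
            p->on_list = 0;     /* list_del equivalent */
    }

    int main(void)
    {
        struct sb_priv ok = { .no_list = 0, .on_list = 1 };
        struct sb_priv failed = { .no_list = 1, .on_list = 0 };

        kill_sb(&ok);       /* normal unmount: removed from the list */
        kill_sb(&failed);   /* failed mount: never touches the list */
        printf("%d %d\n", ok.on_list, failed.on_list);
        return 0;
    }
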
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 65d28f9..f998332 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -962,7 +962,14 @@
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+ pmd_t pmd = *pmdp;
+
+ /* See comment in change_huge_pmd() */
+ pmdp_invalidate(vma, addr, pmdp);
+ if (pmd_dirty(*pmdp))
+ pmd = pmd_mkdirty(pmd);
+ if (pmd_young(*pmdp))
+ pmd = pmd_mkyoung(pmd);
pmd = pmd_wrprotect(pmd);
pmd = pmd_clear_soft_dirty(pmd);
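
The snapshot/invalidate/re-read sequence closes the window in which hardware
can set the dirty or accessed bit after *pmdp was first loaded; those bits are
merged back before write and soft-dirty are cleared. A simplified
single-threaded model using bit flags (the real race is against the MMU, not
another C statement):

    #include <stdio.h>

    #define PMD_DIRTY  (1u << 0)
    #define PMD_YOUNG  (1u << 1)
    #define PMD_WRITE  (1u << 2)
    #define PMD_SOFTD  (1u << 3)

    /* Model of the clear_soft_dirty_pmd() fix: snapshot the entry,
     * invalidate it, then merge back any dirty/young bits set in between,
     * before clearing write and soft-dirty. */
    static unsigned int clear_soft_dirty(unsigned int *pmdp)
    {
        unsigned int pmd = *pmdp;   /* snapshot */

        *pmdp |= PMD_DIRTY;         /* simulate a racing hardware update;
                                     * pmdp_invalidate() runs here in the
                                     * kernel */

        if (*pmdp & PMD_DIRTY)      /* re-read: don't lose the race window */
            pmd |= PMD_DIRTY;
        if (*pmdp & PMD_YOUNG)
            pmd |= PMD_YOUNG;

        pmd &= ~(PMD_WRITE | PMD_SOFTD);
        return pmd;
    }

    int main(void)
    {
        unsigned int entry = PMD_WRITE | PMD_SOFTD;  /* clean at snapshot */
        printf("dirty preserved? %d\n",
               !!(clear_soft_dirty(&entry) & PMD_DIRTY));
        return 0;
    }
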
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 14747a8..2964527 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -222,7 +222,7 @@
break;
case PERM_ANDROID_PACKAGE_CACHE:
if (info->d_uid != 0)
- gid = multiuser_get_cache_gid(info->d_uid);
+ gid = multiuser_get_ext_cache_gid(info->d_uid);
else
gid = multiuser_get_uid(info->userid, uid);
break;
@@ -252,7 +252,7 @@
goto retry_deleg;
}
if (error)
- pr_err("sdcardfs: Failed to touch up lower fs gid/uid.\n");
+ pr_debug("sdcardfs: Failed to touch up lower fs gid/uid for %s\n", name);
}
sdcardfs_put_lower_path(dentry, &path);
}
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index eee4eb5..1f6921e 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -113,6 +113,10 @@
if (lower_file->f_op->unlocked_ioctl)
err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+ /* some ioctls can change inode attributes (EXT2_IOC_SETFLAGS) */
+ if (!err)
+ sdcardfs_copy_and_fix_attrs(file_inode(file),
+ file_inode(lower_file));
out:
return err;
}
@@ -176,12 +180,6 @@
goto out;
}
saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
- err = do_munmap(current->mm, vma->vm_start,
- vma->vm_end - vma->vm_start);
- if (err) {
- pr_err("sdcardfs: do_munmap failed %d\n", err);
- goto out;
- }
}
/*
@@ -194,6 +192,9 @@
file->f_mapping->a_ops = &sdcardfs_aops; /* set our aops */
if (!SDCARDFS_F(file)->lower_vm_ops) /* save for our ->fault */
SDCARDFS_F(file)->lower_vm_ops = saved_vm_ops;
+ vma->vm_private_data = file;
+ get_file(lower_file);
+ vma->vm_file = lower_file;
out:
return err;
@@ -318,6 +319,75 @@
return err;
}
+/*
+ * Sdcardfs cannot use generic_file_llseek as ->llseek, because it would
+ * only set the offset of the upper file. So we have to implement our
+ * own method to set both the upper and lower file offsets
+ * consistently.
+ */
+static loff_t sdcardfs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ int err;
+ struct file *lower_file;
+
+ err = generic_file_llseek(file, offset, whence);
+ if (err < 0)
+ goto out;
+
+ lower_file = sdcardfs_lower_file(file);
+ err = generic_file_llseek(lower_file, offset, whence);
+
+out:
+ return err;
+}
+
+/*
+ * Sdcardfs read_iter: redirect a modified iocb to the lower read_iter.
+ */
+ssize_t sdcardfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ int err;
+ struct file *file = iocb->ki_filp, *lower_file;
+
+ lower_file = sdcardfs_lower_file(file);
+ if (!lower_file->f_op->read_iter) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ get_file(lower_file); /* prevent lower_file from being released */
+ iocb->ki_filp = lower_file;
+ err = lower_file->f_op->read_iter(iocb, iter);
+ /* TODO: wait for the I/O to finish and update atime, as ecryptfs does? */
+ iocb->ki_filp = file;
+ fput(lower_file);
+out:
+ return err;
+}
+
+/*
+ * Sdcardfs write_iter: redirect a modified iocb to the lower write_iter.
+ */
+ssize_t sdcardfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ int err;
+ struct file *file = iocb->ki_filp, *lower_file;
+
+ lower_file = sdcardfs_lower_file(file);
+ if (!lower_file->f_op->write_iter) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ get_file(lower_file); /* prevent lower_file from being released */
+ iocb->ki_filp = lower_file;
+ err = lower_file->f_op->write_iter(iocb, iter);
+ iocb->ki_filp = file;
+ fput(lower_file);
+out:
+ return err;
+}
+
const struct file_operations sdcardfs_main_fops = {
.llseek = generic_file_llseek,
.read = sdcardfs_read,
@@ -332,11 +402,13 @@
.release = sdcardfs_file_release,
.fsync = sdcardfs_fsync,
.fasync = sdcardfs_fasync,
+ .read_iter = sdcardfs_read_iter,
+ .write_iter = sdcardfs_write_iter,
};
/* trimmed directory options */
const struct file_operations sdcardfs_dir_fops = {
- .llseek = generic_file_llseek,
+ .llseek = sdcardfs_file_llseek,
.read = generic_read_dir,
.iterate = sdcardfs_readdir,
.unlocked_ioctl = sdcardfs_unlocked_ioctl,
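
Both iter helpers above use the same pinning trick: take an extra reference on
the lower file, point iocb->ki_filp at it for the duration of the call, then
restore the upper file and drop the reference, so the lower file cannot be
released mid-I/O. A toy model with explicit refcounts (types simplified, not
the kernel structures):

    #include <stdio.h>

    struct file_model { int refcount; };
    struct kiocb_model { struct file_model *ki_filp; };

    static void get_file_m(struct file_model *f) { f->refcount++; }
    static void fput_m(struct file_model *f)     { f->refcount--; }

    /* Model of sdcardfs_read_iter(): pin, swap, call, restore, unpin. */
    static int redirect_iter(struct kiocb_model *iocb,
                             struct file_model *lower)
    {
        struct file_model *upper = iocb->ki_filp;
        int err;

        get_file_m(lower);
        iocb->ki_filp = lower;
        err = 0;                 /* lower ->read_iter() would run here */
        iocb->ki_filp = upper;   /* restore before returning to the VFS */
        fput_m(lower);
        return err;
    }

    int main(void)
    {
        struct file_model upper = { 1 }, lower = { 1 };
        struct kiocb_model iocb = { &upper };

        redirect_iter(&iocb, &lower);
        printf("iocb back on upper? %d, lower ref balanced? %d\n",
               iocb.ki_filp == &upper, lower.refcount == 1);
        return 0;
    }
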
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index f028bfd..19154b7 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -91,7 +91,9 @@
struct sdcardfs_inode_info *info;
struct inode_data data;
struct inode *inode; /* the new inode to return */
- int err;
+
+ if (!igrab(lower_inode))
+ return ERR_PTR(-ESTALE);
data.id = id;
data.lower_inode = lower_inode;
@@ -106,22 +108,19 @@
sdcardfs_inode_set, /* inode init function */
&data); /* data passed to test+set fxns */
if (!inode) {
- err = -EACCES;
iput(lower_inode);
- return ERR_PTR(err);
+ return ERR_PTR(-ENOMEM);
}
- /* if found a cached inode, then just return it */
- if (!(inode->i_state & I_NEW))
+ /* if we found a cached inode, just return it (after iput of lower_inode) */
+ if (!(inode->i_state & I_NEW)) {
+ iput(lower_inode);
return inode;
+ }
/* initialize new inode */
info = SDCARDFS_I(inode);
inode->i_ino = lower_inode->i_ino;
- if (!igrab(lower_inode)) {
- err = -ESTALE;
- return ERR_PTR(err);
- }
sdcardfs_set_lower_inode(inode, lower_inode);
inode->i_version++;
@@ -164,27 +163,25 @@
}
/*
- * Connect a sdcardfs inode dentry/inode with several lower ones. This is
- * the classic stackable file system "vnode interposition" action.
- *
- * @dentry: sdcardfs's dentry which interposes on lower one
- * @sb: sdcardfs's super_block
- * @lower_path: the lower path (caller does path_get/put)
+ * Helper interpose routine, called directly by ->lookup to handle
+ * spliced dentries.
*/
-int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
- struct path *lower_path, userid_t id)
+static struct dentry *__sdcardfs_interpose(struct dentry *dentry,
+ struct super_block *sb,
+ struct path *lower_path,
+ userid_t id)
{
- int err = 0;
struct inode *inode;
struct inode *lower_inode;
struct super_block *lower_sb;
+ struct dentry *ret_dentry;
lower_inode = d_inode(lower_path->dentry);
lower_sb = sdcardfs_lower_super(sb);
/* check that the lower file system didn't cross a mount point */
if (lower_inode->i_sb != lower_sb) {
- err = -EXDEV;
+ ret_dentry = ERR_PTR(-EXDEV);
goto out;
}
@@ -196,14 +193,32 @@
/* inherit lower inode number for sdcardfs's inode */
inode = sdcardfs_iget(sb, lower_inode, id);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ ret_dentry = ERR_CAST(inode);
goto out;
}
- d_add(dentry, inode);
+ ret_dentry = d_splice_alias(inode, dentry);
+ dentry = ret_dentry ?: dentry;
update_derived_permission_lock(dentry);
out:
- return err;
+ return ret_dentry;
+}
+
+/*
+ * Connect an sdcardfs inode dentry/inode with several lower ones. This is
+ * the classic stackable file system "vnode interposition" action.
+ *
+ * @dentry: sdcardfs's dentry which interposes on lower one
+ * @sb: sdcardfs's super_block
+ * @lower_path: the lower path (caller does path_get/put)
+ */
+int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+ struct path *lower_path, userid_t id)
+{
+ struct dentry *ret_dentry;
+
+ ret_dentry = __sdcardfs_interpose(dentry, sb, lower_path, id);
+ return PTR_ERR(ret_dentry);
}
struct sdcardfs_name_data {
@@ -244,6 +259,7 @@
const struct qstr *name;
struct path lower_path;
struct qstr dname;
+ struct dentry *ret_dentry = NULL;
struct sdcardfs_sb_info *sbi;
sbi = SDCARDFS_SB(dentry->d_sb);
@@ -330,9 +346,13 @@
}
sdcardfs_set_lower_path(dentry, &lower_path);
- err = sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
- if (err) /* path_put underlying path on error */
+ ret_dentry =
+ __sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
+ if (IS_ERR(ret_dentry)) {
+ err = PTR_ERR(ret_dentry);
+ /* path_put underlying path on error */
sdcardfs_put_reset_lower_path(dentry);
+ }
goto out;
}
@@ -372,7 +392,9 @@
err = 0;
out:
- return ERR_PTR(err);
+ if (err)
+ return ERR_PTR(err);
+ return ret_dentry;
}
/*
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 7344635..953d215 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -471,10 +471,15 @@
pr_info("Completed sdcardfs module unload\n");
}
-MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
- " (http://www.fsl.cs.sunysb.edu/)");
-MODULE_DESCRIPTION("Wrapfs " SDCARDFS_VERSION
- " (http://wrapfs.filesystems.org/)");
+/* Original wrapfs authors */
+MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University (http://www.fsl.cs.sunysb.edu/)");
+
+/* Original sdcardfs authors */
+MODULE_AUTHOR("Woojoong Lee, Daeho Jeong, Kitae Lee, Yeongjin Gil System Memory Lab., Samsung Electronics");
+
+/* Current maintainer */
+MODULE_AUTHOR("Daniel Rosenberg, Google");
+MODULE_DESCRIPTION("Sdcardfs " SDCARDFS_VERSION);
MODULE_LICENSE("GPL");
module_init(init_sdcardfs_fs);
diff --git a/fs/sdcardfs/mmap.c b/fs/sdcardfs/mmap.c
index 51266f5..391d2a7 100644
--- a/fs/sdcardfs/mmap.c
+++ b/fs/sdcardfs/mmap.c
@@ -23,60 +23,45 @@
static int sdcardfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int err;
- struct file *file, *lower_file;
+ struct file *file;
const struct vm_operations_struct *lower_vm_ops;
- struct vm_area_struct lower_vma;
- memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
- file = lower_vma.vm_file;
+ file = (struct file *)vma->vm_private_data;
lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
BUG_ON(!lower_vm_ops);
- lower_file = sdcardfs_lower_file(file);
- /*
- * XXX: vm_ops->fault may be called in parallel. Because we have to
- * resort to temporarily changing the vma->vm_file to point to the
- * lower file, a concurrent invocation of sdcardfs_fault could see a
- * different value. In this workaround, we keep a different copy of
- * the vma structure in our stack, so we never expose a different
- * value of the vma->vm_file called to us, even temporarily. A
- * better fix would be to change the calling semantics of ->fault to
- * take an explicit file pointer.
- */
- lower_vma.vm_file = lower_file;
- err = lower_vm_ops->fault(&lower_vma, vmf);
+ err = lower_vm_ops->fault(vma, vmf);
return err;
}
+static void sdcardfs_vm_open(struct vm_area_struct *vma)
+{
+ struct file *file = (struct file *)vma->vm_private_data;
+
+ get_file(file);
+}
+
+static void sdcardfs_vm_close(struct vm_area_struct *vma)
+{
+ struct file *file = (struct file *)vma->vm_private_data;
+
+ fput(file);
+}
+
static int sdcardfs_page_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
int err = 0;
- struct file *file, *lower_file;
+ struct file *file;
const struct vm_operations_struct *lower_vm_ops;
- struct vm_area_struct lower_vma;
- memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
- file = lower_vma.vm_file;
+ file = (struct file *)vma->vm_private_data;
lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
BUG_ON(!lower_vm_ops);
if (!lower_vm_ops->page_mkwrite)
goto out;
- lower_file = sdcardfs_lower_file(file);
- /*
- * XXX: vm_ops->page_mkwrite may be called in parallel.
- * Because we have to resort to temporarily changing the
- * vma->vm_file to point to the lower file, a concurrent
- * invocation of sdcardfs_page_mkwrite could see a different
- * value. In this workaround, we keep a different copy of the
- * vma structure in our stack, so we never expose a different
- * value of the vma->vm_file called to us, even temporarily.
- * A better fix would be to change the calling semantics of
- * ->page_mkwrite to take an explicit file pointer.
- */
- lower_vma.vm_file = lower_file;
- err = lower_vm_ops->page_mkwrite(&lower_vma, vmf);
+ err = lower_vm_ops->page_mkwrite(vma, vmf);
out:
return err;
}
@@ -98,4 +83,6 @@
const struct vm_operations_struct sdcardfs_vm_ops = {
.fault = sdcardfs_fault,
.page_mkwrite = sdcardfs_page_mkwrite,
+ .open = sdcardfs_vm_open,
+ .close = sdcardfs_vm_close,
};
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index 2e89b58..d0c925c 100644
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -23,6 +23,8 @@
#define AID_APP_END 19999 /* last app user */
#define AID_CACHE_GID_START 20000 /* start of gids for apps to mark cached data */
#define AID_EXT_GID_START 30000 /* start of gids for apps to mark external data */
+#define AID_EXT_CACHE_GID_START 40000 /* start of gids for apps to mark external cached data */
+#define AID_EXT_CACHE_GID_END 49999 /* end of gids for apps to mark external cached data */
#define AID_SHARED_GID_START 50000 /* start of gids for apps in each user to share */
typedef uid_t userid_t;
@@ -33,9 +35,9 @@
return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
}
-static inline gid_t multiuser_get_cache_gid(uid_t uid)
+static inline gid_t multiuser_get_ext_cache_gid(uid_t uid)
{
- return uid - AID_APP_START + AID_CACHE_GID_START;
+ return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
}
static inline gid_t multiuser_get_ext_gid(uid_t uid)
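
The mapping simply preserves an app's offset within the app-uid range while
moving it into the new 40000-49999 external-cache window. A worked example,
assuming AID_APP_START is 10000 as defined earlier in this header:

    #include <stdio.h>

    #define AID_APP_START           10000
    #define AID_EXT_CACHE_GID_START 40000

    /* Mirror of multiuser_get_ext_cache_gid(): keep the app's offset from
     * AID_APP_START, rebased into the external-cache gid window. */
    static unsigned int ext_cache_gid(unsigned int uid)
    {
        return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
    }

    int main(void)
    {
        /* app uid 10123 (app #123) -> gid 40123, inside 40000..49999
         * rather than the 20000.. internal cache window */
        printf("%u\n", ext_cache_gid(10123));
        return 0;
    }
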
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213..39c75a8 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
struct kobject *kobj = of->kn->parent->priv;
- size_t len;
+ ssize_t len;
/*
* If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@
if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (len < 0)
+ return len;
if (pos) {
if (len <= pos)
return 0;
len -= pos;
memmove(buf, buf + pos, len);
}
- return min(count, len);
+ return min_t(ssize_t, count, len);
}
/* kernfs write callback for regular sysfs files */
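
The type change matters because a negative return from ->show() stored in a
size_t becomes a huge positive value, so min(count, len) silently returned
count and the error vanished; keeping the value signed lets it propagate. A
short demonstration of the wraparound:

    #include <stdio.h>
    #include <sys/types.h>   /* ssize_t */

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        ssize_t show_ret = -22;     /* ->show() failing with -EINVAL */
        size_t count = 4096;

        /* Old code: -22 stored in a size_t becomes a huge positive
         * number, so min() returns count and the caller never sees the
         * error. */
        size_t len_old = (size_t)show_ret;
        printf("old: min = %zu (error lost)\n", MIN(count, len_old));

        /* Fixed code: keep the value signed and bail out early. */
        ssize_t len_new = show_ret;
        if (len_new < 0)
            printf("new: propagate %zd\n", len_new);
        return 0;
    }
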
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ca16c5d..87ab02e 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -622,6 +622,11 @@
return err;
lock_2_inodes(dir, inode);
+
+ /* Handle the O_TMPFILE corner case: it is allowed to link an O_TMPFILE. */
+ if (inode->i_nlink == 0)
+ ubifs_delete_orphan(c, inode->i_ino);
+
inc_nlink(inode);
ihold(inode);
inode->i_ctime = ubifs_current_time(inode);
@@ -641,6 +646,8 @@
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
drop_nlink(inode);
+ if (inode->i_nlink == 0)
+ ubifs_add_orphan(c, inode->i_ino);
unlock_2_inodes(dir, inode);
ubifs_release_budget(c, &req);
iput(inode);
@@ -1088,9 +1095,6 @@
struct timespec time;
unsigned int uninitialized_var(saved_nlink);
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
/*
* Budget request settings: deletion direntry, new direntry, removing
* the old inode, and changing old and new parent directory inodes.
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index d346d42..33db69b 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
#include "xfs_rmap_btree.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
/*
* Per-AG Block Reservations
@@ -200,22 +201,30 @@
struct xfs_mount *mp = pag->pag_mount;
struct xfs_ag_resv *resv;
int error;
+ xfs_extlen_t reserved;
- resv = xfs_perag_resv(pag, type);
if (used > ask)
ask = used;
- resv->ar_asked = ask;
- resv->ar_reserved = resv->ar_orig_reserved = ask - used;
- mp->m_ag_max_usable -= ask;
+ reserved = ask - used;
- trace_xfs_ag_resv_init(pag, type, ask);
-
- error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
- if (error)
+ error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+ if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
+ xfs_warn(mp,
+"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
+ pag->pag_agno);
+ return error;
+ }
- return error;
+ mp->m_ag_max_usable -= ask;
+
+ resv = xfs_perag_resv(pag, type);
+ resv->ar_asked = ask;
+ resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+ trace_xfs_ag_resv_init(pag, type, ask);
+ return 0;
}
/* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@
xfs_ag_resv_init(
struct xfs_perag *pag)
{
+ struct xfs_mount *mp = pag->pag_mount;
+ xfs_agnumber_t agno = pag->pag_agno;
xfs_extlen_t ask;
xfs_extlen_t used;
int error = 0;
@@ -231,23 +242,45 @@
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(pag->pag_mount,
- pag->pag_agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
+ if (error)
+ goto out;
+
+ error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
ask, used);
- if (error)
- goto out;
+ if (error) {
+ /*
+ * Because we didn't have per-AG reservations when the
+ * finobt feature was added we might not be able to
+ * reserve all needed blocks. Warn and fall back to the
+ * old and potentially buggy code in that case, but
+ * ensure we do have the reservation for the refcountbt.
+ */
+ ask = used = 0;
+
+ mp->m_inotbt_nores = true;
+
+ error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+ &used);
+ if (error)
+ goto out;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+ ask, used);
+ if (error)
+ goto out;
+ }
}
/* Create the AGFL metadata reservation */
if (pag->pag_agfl_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
- &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
@@ -256,9 +289,16 @@
goto out;
}
+#ifdef DEBUG
+ /* need to read in the AGF for the ASSERT below to work */
+ error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+ if (error)
+ return error;
+
ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
pag->pagf_freeblks + pag->pagf_flcount);
+#endif
out:
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index f52fd63..5a508b0 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -769,8 +769,8 @@
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
} else if (dfops->dop_low) {
-try_another_ag:
args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
args.fsbno = *firstblock;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -796,17 +796,19 @@
if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
args.fsbno == NULLFSBLOCK &&
args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- dfops->dop_low = true;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
goto try_another_ag;
}
+ if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+ xfs_iroot_realloc(ip, -1, whichfork);
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return -ENOSPC;
+ }
/*
* Allocation can't fail, the space was reserved.
*/
- ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(*firstblock == NULLFSBLOCK ||
- args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
- (dfops->dop_low &&
- args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
+ args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
ip->i_d.di_nblocks++;
@@ -1278,7 +1280,6 @@
/* REFERENCED */
xfs_extnum_t room; /* number of entries there's room for */
- bno = NULLFSBLOCK;
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
@@ -1291,9 +1292,7 @@
ASSERT(level > 0);
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
bno = be64_to_cpu(*pp);
- ASSERT(bno != NULLFSBLOCK);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+
/*
* Go down the tree until leaf level is reached, following the first
* pointer (leftmost) at each level.
@@ -1955,6 +1954,7 @@
*/
trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
+ xfs_bmbt_set_state(ep, new->br_state);
trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
(*nextents)++;
@@ -2293,6 +2293,7 @@
xfs_bmap_add_extent_unwritten_real(
struct xfs_trans *tp,
xfs_inode_t *ip, /* incore inode pointer */
+ int whichfork,
xfs_extnum_t *idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
@@ -2312,12 +2313,14 @@
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
- struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_mount *mp = ip->i_mount;
*logflagsp = 0;
cur = *curp;
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (whichfork == XFS_COW_FORK)
+ state |= BMAP_COWFORK;
ASSERT(*idx >= 0);
ASSERT(*idx <= xfs_iext_count(ifp));
@@ -2376,7 +2379,7 @@
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (*idx < xfs_iext_count(&ip->i_df) - 1) {
+ if (*idx < xfs_iext_count(ifp) - 1) {
state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
if (isnullstartblock(RIGHT.br_startblock))
@@ -2416,7 +2419,8 @@
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
xfs_iext_remove(ip, *idx + 1, 2, state);
- ip->i_d.di_nextents -= 2;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2459,7 +2463,8 @@
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
xfs_iext_remove(ip, *idx + 1, 1, state);
- ip->i_d.di_nextents--;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2494,7 +2499,8 @@
xfs_bmbt_set_state(ep, newext);
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
xfs_iext_remove(ip, *idx + 1, 1, state);
- ip->i_d.di_nextents--;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2606,7 +2612,8 @@
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
xfs_iext_insert(ip, *idx, 1, new, state);
- ip->i_d.di_nextents++;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2684,7 +2691,8 @@
++*idx;
xfs_iext_insert(ip, *idx, 1, new, state);
- ip->i_d.di_nextents++;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2732,7 +2740,8 @@
++*idx;
xfs_iext_insert(ip, *idx, 2, &r[0], state);
- ip->i_d.di_nextents += 2;
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2786,17 +2795,17 @@
}
/* update reverse mappings */
- error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new);
+ error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
if (error)
goto done;
/* convert to a btree if necessary */
- if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
+ if (xfs_bmap_needs_btree(ip, whichfork)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
- 0, &tmp_logflags, XFS_DATA_FORK);
+ 0, &tmp_logflags, whichfork);
*logflagsp |= tmp_logflags;
if (error)
goto done;
@@ -2808,7 +2817,7 @@
*curp = cur;
}
- xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
+ xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
done:
*logflagsp |= rval;
return error;
@@ -2900,7 +2909,8 @@
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock) +
startblockval(right.br_startblock);
- newlen = xfs_bmap_worst_indlen(ip, temp);
+ newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+ oldlen);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
nullstartblock((int)newlen));
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
@@ -2921,7 +2931,8 @@
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock);
- newlen = xfs_bmap_worst_indlen(ip, temp);
+ newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+ oldlen);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
nullstartblock((int)newlen));
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
@@ -2937,7 +2948,8 @@
temp = new->br_blockcount + right.br_blockcount;
oldlen = startblockval(new->br_startblock) +
startblockval(right.br_startblock);
- newlen = xfs_bmap_worst_indlen(ip, temp);
+ newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+ oldlen);
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
new->br_startoff,
nullstartblock((int)newlen), temp, right.br_state);
@@ -3913,17 +3925,13 @@
* the first block that was allocated.
*/
ASSERT(*ap->firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
- XFS_FSB_TO_AGNO(mp, args.fsbno) ||
- (ap->dfops->dop_low &&
- XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
- XFS_FSB_TO_AGNO(mp, args.fsbno)));
+ XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
+ XFS_FSB_TO_AGNO(mp, args.fsbno));
ap->blkno = args.fsbno;
if (*ap->firstblock == NULLFSBLOCK)
*ap->firstblock = args.fsbno;
- ASSERT(nullfb || fb_agno == args.agno ||
- (ap->dfops->dop_low && fb_agno < args.agno));
+ ASSERT(nullfb || fb_agno <= args.agno);
ap->length = args.len;
if (!(ap->flags & XFS_BMAPI_COWFORK))
ap->ip->i_d.di_nblocks += args.len;
@@ -4249,6 +4257,19 @@
return 0;
}
+/*
+ * Add a delayed allocation extent to an inode. Blocks are reserved from the
+ * global pool and the extent inserted into the inode in-core extent tree.
+ *
+ * On entry, got refers to the first extent beyond the offset of the extent to
+ * allocate or eof is specified if no such extent exists. On return, got refers
+ * to the extent record that was inserted to the inode fork.
+ *
+ * Note that the allocated extent may have been merged with contiguous extents
+ * during insertion into the inode fork. Thus, got does not reflect the current
+ * state of the inode fork on return. If necessary, the caller can use lastx to
+ * look up the updated record in the inode fork.
+ */
int
xfs_bmapi_reserve_delalloc(
struct xfs_inode *ip,
@@ -4335,13 +4356,8 @@
got->br_startblock = nullstartblock(indlen);
got->br_blockcount = alen;
got->br_state = XFS_EXT_NORM;
- xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
- /*
- * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
- * might have merged it into one of the neighbouring ones.
- */
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+ xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
/*
* Tag the inode if blocks were preallocated. Note that COW fork
@@ -4353,10 +4369,6 @@
if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
xfs_inode_set_cowblocks_tag(ip);
- ASSERT(got->br_startoff <= aoff);
- ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
- ASSERT(isnullstartblock(got->br_startblock));
- ASSERT(got->br_state == XFS_EXT_NORM);
return 0;
out_unreserve_blocks:
@@ -4461,10 +4473,16 @@
bma->got.br_state = XFS_EXT_NORM;
/*
- * A wasdelay extent has been initialized, so shouldn't be flagged
- * as unwritten.
+ * In the data fork, a wasdelay extent has been initialized, so
+ * shouldn't be flagged as unwritten.
+ *
+ * For the cow fork, however, we convert delalloc reservations
+ * (extents allocated for speculative preallocation) to
+ * allocated unwritten extents, and only convert the unwritten
+ * extents to real extents when we're about to write the data.
*/
- if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
+ if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
+ (bma->flags & XFS_BMAPI_PREALLOC) &&
xfs_sb_version_hasextflgbit(&mp->m_sb))
bma->got.br_state = XFS_EXT_UNWRITTEN;
@@ -4515,8 +4533,6 @@
(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
return 0;
- ASSERT(whichfork != XFS_COW_FORK);
-
/*
* Modify (by adding) the state flag, if writing.
*/
@@ -4541,8 +4557,8 @@
return error;
}
- error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
- &bma->cur, mval, bma->firstblock, bma->dfops,
+ error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
+ &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
&tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
@@ -4551,8 +4567,12 @@
* in the transaction for the sake of fsync(), even if nothing has
* changed, because fsync() will not force the log for this transaction
* unless it sees the inode pinned.
+ *
+ * Note: If we're only converting cow fork extents, there aren't
+ * any on-disk updates to make, so we don't need to log anything.
*/
- bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
+ if (whichfork != XFS_COW_FORK)
+ bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
if (error)
return error;
@@ -4626,15 +4646,15 @@
ASSERT(*nmap >= 1);
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
- ASSERT(tp != NULL);
+ ASSERT(tp != NULL ||
+ (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
+ (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
ASSERT(len > 0);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
- ASSERT(!(flags & XFS_BMAPI_PREALLOC) || whichfork != XFS_COW_FORK);
- ASSERT(!(flags & XFS_BMAPI_CONVERT) || whichfork != XFS_COW_FORK);
/* zeroing is for currently only for data extents, not metadata */
ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
@@ -4840,13 +4860,9 @@
if (bma.cur) {
if (!error) {
ASSERT(*firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *firstblock) ==
+ XFS_FSB_TO_AGNO(mp, *firstblock) <=
XFS_FSB_TO_AGNO(mp,
- bma.cur->bc_private.b.firstblock) ||
- (dfops->dop_low &&
- XFS_FSB_TO_AGNO(mp, *firstblock) <
- XFS_FSB_TO_AGNO(mp,
- bma.cur->bc_private.b.firstblock)));
+ bma.cur->bc_private.b.firstblock));
*firstblock = bma.cur->bc_private.b.firstblock;
}
xfs_btree_del_cursor(bma.cur,
@@ -4881,34 +4897,59 @@
xfs_filblks_t len2 = *indlen2;
xfs_filblks_t nres = len1 + len2; /* new total res. */
xfs_filblks_t stolen = 0;
+ xfs_filblks_t resfactor;
/*
* Steal as many blocks as we can to try and satisfy the worst case
* indlen for both new extents.
*/
- while (nres > ores && avail) {
- nres--;
- avail--;
- stolen++;
- }
+ if (ores < nres && avail)
+ stolen = XFS_FILBLKS_MIN(nres - ores, avail);
+ ores += stolen;
+
+ /* nothing else to do if we've satisfied the new reservation */
+ if (ores >= nres)
+ return stolen;
/*
- * The only blocks available are those reserved for the original
- * extent and what we can steal from the extent being removed.
- * If this still isn't enough to satisfy the combined
- * requirements for the two new extents, skim blocks off of each
- * of the new reservations until they match what is available.
+ * We can't meet the total required reservation for the two extents.
+ * Calculate the percent of the overall shortage between both extents
+ * and apply this percentage to each of the requested indlen values.
+ * This distributes the shortage fairly and reduces the chances that one
+ * of the two extents is left with nothing when extents are repeatedly
+ * split.
*/
- while (nres > ores) {
- if (len1) {
- len1--;
- nres--;
+ resfactor = (ores * 100);
+ do_div(resfactor, nres);
+ len1 *= resfactor;
+ do_div(len1, 100);
+ len2 *= resfactor;
+ do_div(len2, 100);
+ ASSERT(len1 + len2 <= ores);
+ ASSERT(len1 < *indlen1 && len2 < *indlen2);
+
+ /*
+ * Hand out the remainder to each extent. If one of the two reservations
+ * is zero, we want to make sure that one gets a block first. The loop
+ * below starts with len1, so hand len2 a block right off the bat if it
+ * is zero.
+ */
+ ores -= (len1 + len2);
+ ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
+ if (ores && !len2 && *indlen2) {
+ len2++;
+ ores--;
+ }
+ while (ores) {
+ if (len1 < *indlen1) {
+ len1++;
+ ores--;
}
- if (nres == ores)
+ if (!ores)
break;
- if (len2) {
- len2--;
- nres--;
+ if (len2 < *indlen2) {
+ len2++;
+ ores--;
}
}
@@ -5656,8 +5697,8 @@
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
- &lastx, &cur, &del, firstblock, dfops,
- &logflags);
+ whichfork, &lastx, &cur, &del,
+ firstblock, dfops, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5714,8 +5755,9 @@
prev.br_state = XFS_EXT_UNWRITTEN;
lastx--;
error = xfs_bmap_add_extent_unwritten_real(tp,
- ip, &lastx, &cur, &prev,
- firstblock, dfops, &logflags);
+ ip, whichfork, &lastx, &cur,
+ &prev, firstblock, dfops,
+ &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5723,8 +5765,9 @@
ASSERT(del.br_state == XFS_EXT_NORM);
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
- ip, &lastx, &cur, &del,
- firstblock, dfops, &logflags);
+ ip, whichfork, &lastx, &cur,
+ &del, firstblock, dfops,
+ &logflags);
if (error)
goto error0;
goto nodelete;
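
Of the changes above, the rewritten xfs_bmap_split_indlen() is the most
subtle: instead of draining one extent's reservation first, it scales both
requested indirect reservations by the ores/nres ratio and then round-robins
the remainder, so neither side is starved by repeated splits. A worked example
with made-up block counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t indlen1 = 8, indlen2 = 6;  /* requested reservations */
        uint64_t ores = 10;                 /* blocks actually available */
        uint64_t nres = indlen1 + indlen2;  /* 14: can't be satisfied */
        uint64_t len1, len2, resfactor;

        resfactor = ores * 100 / nres;      /* 71 (percent) */
        len1 = indlen1 * resfactor / 100;   /* 5 */
        len2 = indlen2 * resfactor / 100;   /* 4 */

        ores -= len1 + len2;                /* 1 block left over */
        while (ores) {                      /* round-robin the remainder */
            if (len1 < indlen1 && ores) { len1++; ores--; }
            if (len2 < indlen2 && ores) { len2++; ores--; }
        }

        printf("len1=%llu len2=%llu\n",     /* 6 and 4: both keep a share */
               (unsigned long long)len1, (unsigned long long)len2);
        return 0;
    }
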
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index f76c169..5c39186 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -453,8 +453,8 @@
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
-try_another_ag:
args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
/*
* Make sure there is sufficient room left in the AG to
* complete a full tree split for an extent insert. If
@@ -494,8 +494,8 @@
if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
args.fsbno == NULLFSBLOCK &&
args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- cur->bc_private.b.dfops->dop_low = true;
args.fsbno = cur->bc_private.b.firstblock;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
goto try_another_ag;
}
@@ -512,7 +512,7 @@
goto error0;
cur->bc_private.b.dfops->dop_low = true;
}
- if (args.fsbno == NULLFSBLOCK) {
+ if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
*stat = 0;
return 0;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 21e6a6a..2849d3f 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -810,7 +810,8 @@
xfs_daddr_t d; /* real disk block address */
int error;
- ASSERT(fsbno != NULLFSBLOCK);
+ if (!XFS_FSB_SANITY_CHECK(mp, fsbno))
+ return -EFSCORRUPTED;
d = XFS_FSB_TO_DADDR(mp, fsbno);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
mp->m_bsize, lock, &bp, ops);
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index c2b01d1..3b0fc1a 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -491,7 +491,7 @@
#define XFS_FILBLKS_MAX(a,b) max_t(xfs_filblks_t, (a), (b))
#define XFS_FSB_SANITY_CHECK(mp,fsb) \
- (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
+ (fsb && XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
/*
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index f2dc1a9..1bdf288 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2633,7 +2633,7 @@
/*
* Readahead the dir/attr block.
*/
-xfs_daddr_t
+int
xfs_da_reada_buf(
struct xfs_inode *dp,
xfs_dablk_t bno,
@@ -2664,7 +2664,5 @@
if (mapp != &map)
kmem_free(mapp);
- if (error)
- return -1;
- return mappedbno;
+ return error;
}
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 98c75cb..4e29cb6 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -201,7 +201,7 @@
xfs_dablk_t bno, xfs_daddr_t mappedbno,
struct xfs_buf **bpp, int whichfork,
const struct xfs_buf_ops *ops);
-xfs_daddr_t xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
+int xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
xfs_daddr_t mapped_bno, int whichfork,
const struct xfs_buf_ops *ops);
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 75a5574..bbd1238 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -155,6 +155,42 @@
.verify_write = xfs_dir3_free_write_verify,
};
+/* Everything ok in the free block header? */
+static bool
+xfs_dir3_free_header_check(
+ struct xfs_inode *dp,
+ xfs_dablk_t fbno,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = dp->i_mount;
+ unsigned int firstdb;
+ int maxbests;
+
+ maxbests = dp->d_ops->free_max_bests(mp->m_dir_geo);
+ firstdb = (xfs_dir2_da_to_db(mp->m_dir_geo, fbno) -
+ xfs_dir2_byte_to_db(mp->m_dir_geo, XFS_DIR2_FREE_OFFSET)) *
+ maxbests;
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
+
+ if (be32_to_cpu(hdr3->firstdb) != firstdb)
+ return false;
+ if (be32_to_cpu(hdr3->nvalid) > maxbests)
+ return false;
+ if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
+ return false;
+ } else {
+ struct xfs_dir2_free_hdr *hdr = bp->b_addr;
+
+ if (be32_to_cpu(hdr->firstdb) != firstdb)
+ return false;
+ if (be32_to_cpu(hdr->nvalid) > maxbests)
+ return false;
+ if (be32_to_cpu(hdr->nvalid) < be32_to_cpu(hdr->nused))
+ return false;
+ }
+ return true;
+}
static int
__xfs_dir3_free_read(
@@ -168,11 +204,22 @@
err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
XFS_DATA_FORK, &xfs_dir3_free_buf_ops);
+ if (err || !*bpp)
+ return err;
+
+ /* Check things that we can't do in the verifier. */
+ if (!xfs_dir3_free_header_check(dp, fbno, *bpp)) {
+ xfs_buf_ioerror(*bpp, -EFSCORRUPTED);
+ xfs_verifier_error(*bpp);
+ xfs_trans_brelse(tp, *bpp);
+ return -EFSCORRUPTED;
+ }
/* try read returns without an error or *bpp if it lands in a hole */
- if (!err && tp && *bpp)
+ if (tp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_FREE_BUF);
- return err;
+
+ return 0;
}
int
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index d45c037..a2818f6 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -51,8 +51,7 @@
struct xfs_mount *mp)
{
if (xfs_sb_version_hasalign(&mp->m_sb) &&
- mp->m_sb.sb_inoalignmt >=
- XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+ mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
return mp->m_sb.sb_inoalignmt;
return 1;
}
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 6c6b959..b9c351f 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@
}
STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *start,
union xfs_btree_ptr *new,
- int *stat)
+ int *stat,
+ enum xfs_ag_resv_type resv)
{
xfs_alloc_arg_t args; /* block allocation args */
int error; /* error return value */
@@ -103,6 +104,7 @@
args.maxlen = 1;
args.prod = 1;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ args.resv = resv;
error = xfs_alloc_vextent(&args);
if (error) {
@@ -123,6 +125,27 @@
}
STATIC int
+xfs_inobt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ return __xfs_inobt_alloc_block(cur, start, new, stat,
+ XFS_AG_RESV_METADATA);
+}
+
+STATIC int
xfs_inobt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
@@ -328,7 +351,7 @@
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
- .alloc_block = xfs_inobt_alloc_block,
+ .alloc_block = xfs_finobt_alloc_block,
.free_block = xfs_inobt_free_block,
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
@@ -478,3 +501,64 @@
return 0;
}
#endif /* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+ struct xfs_mount *mp)
+{
+ /* Bail out if we're uninitialized, which can happen in mkfs. */
+ if (mp->m_inobt_mxr[0] == 0)
+ return 0;
+
+ return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+ (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+ XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum,
+ xfs_extlen_t *tree_blocks)
+{
+ struct xfs_buf *agbp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+ if (error)
+ return error;
+
+ cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+ error = xfs_btree_count_blocks(cur, tree_blocks);
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_buf_relse(agbp);
+
+ return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_extlen_t *ask,
+ xfs_extlen_t *used)
+{
+ xfs_extlen_t tree_len = 0;
+ int error;
+
+ if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+ return 0;
+
+ error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+ if (error)
+ return error;
+
+ *ask += xfs_inobt_max_size(mp);
+ *used += tree_len;
+ return 0;
+}
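
xfs_finobt_calc_reserves() accumulates into *ask and *used so callers can sum reservations across several per-AG btrees. The worst-case size comes from xfs_btree_calc_size() using the minimum records per block, so the reserve still covers a maximally sparse tree. A rough standalone sketch of that sizing logic, assuming made-up geometry (the real helper works from the mp->m_inobt_mnr array):

    #include <stdint.h>
    #include <stdio.h>

    /* Worst-case btree block count from minimum records per block, so the
     * reservation is safe even for a maximally sparse tree. */
    static uint64_t btree_calc_size(uint64_t nrecs, unsigned int leaf_minrecs,
                                    unsigned int node_minrecs)
    {
            uint64_t blocks = 0;
            uint64_t level_blocks = (nrecs + leaf_minrecs - 1) / leaf_minrecs;

            blocks += level_blocks;
            while (level_blocks > 1) {   /* interior levels up to a single root */
                    level_blocks = (level_blocks + node_minrecs - 1) / node_minrecs;
                    blocks += level_blocks;
            }
            return blocks;
    }

    int main(void)
    {
            /* e.g. one finobt record per 64-inode chunk across a 1M-inode AG */
            uint64_t nrecs = (1024ULL * 1024) / 64;

            printf("ask (worst case): %llu blocks\n",
                   (unsigned long long)btree_calc_size(nrecs, 125, 250));
            return 0;
    }
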
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453..aa81e2e 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@
#define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_extlen_t *ask, xfs_extlen_t *used);
+
#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 222e103..25c1e07 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -26,6 +26,7 @@
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
+#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
@@ -429,11 +430,13 @@
/* REFERENCED */
int nrecs;
int size;
+ int level;
ifp = XFS_IFORK_PTR(ip, whichfork);
dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
size = XFS_BMAP_BROOT_SPACE(mp, dfp);
nrecs = be16_to_cpu(dfp->bb_numrecs);
+ level = be16_to_cpu(dfp->bb_level);
/*
* blow out if -- fork has less extents than can fit in
@@ -446,7 +449,8 @@
XFS_IFORK_MAXEXT(ip, whichfork) ||
XFS_BMDR_SPACE_CALC(nrecs) >
XFS_DFORK_SIZE(dip, mp, whichfork) ||
- XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
+ XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) ||
+ level == 0 || level > XFS_BTREE_MAXLEVELS) {
xfs_warn(mp, "corrupt inode %Lu (btree).",
(unsigned long long) ip->i_ino);
XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
@@ -497,15 +501,14 @@
* We know that the size is valid (it's checked in iformat_btree)
*/
ifp->if_bytes = ifp->if_real_bytes = 0;
- ifp->if_flags |= XFS_IFEXTENTS;
xfs_iext_add(ifp, 0, nextents);
error = xfs_bmap_read_extents(tp, ip, whichfork);
if (error) {
xfs_iext_destroy(ifp);
- ifp->if_flags &= ~XFS_IFEXTENTS;
return error;
}
xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
+ ifp->if_flags |= XFS_IFEXTENTS;
return 0;
}
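
The reordering above sets XFS_IFEXTENTS only after xfs_bmap_read_extents() succeeds, so a failed read can no longer leave a stale flag claiming the extent list is in core. A toy sketch of the flag-ordering fix, with hypothetical names:

    #include <stdio.h>

    #define IFEXTENTS 0x1                /* illustrative; mirrors XFS_IFEXTENTS */

    static int read_extents(int fail) { return fail ? -117 /* EFSCORRUPTED */ : 0; }

    static int iformat_extents(unsigned int *flags, int fail)
    {
            int error = read_extents(fail);

            if (error)
                    return error;        /* flag is never set on the error path */
            *flags |= IFEXTENTS;         /* set only once the extents are valid */
            return 0;
    }

    int main(void)
    {
            unsigned int flags = 0;

            iformat_extents(&flags, 1);
            printf("after failure: flags=%#x\n", flags);  /* 0 */
            iformat_extents(&flags, 0);
            printf("after success: flags=%#x\n", flags);  /* 0x1 */
            return 0;
    }
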
/*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 06763f5..0457abe 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -279,54 +279,49 @@
struct xfs_ioend *ioend =
container_of(work, struct xfs_ioend, io_work);
struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ xfs_off_t offset = ioend->io_offset;
+ size_t size = ioend->io_size;
int error = ioend->io_bio->bi_error;
/*
- * Set an error if the mount has shut down and proceed with end I/O
- * processing so it can perform whatever cleanups are necessary.
+	 * Just clean up the in-memory structures if the fs has been shut down.
*/
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
error = -EIO;
-
- /*
- * For a CoW extent, we need to move the mapping from the CoW fork
- * to the data fork. If instead an error happened, just dump the
- * new blocks.
- */
- if (ioend->io_type == XFS_IO_COW) {
- if (error)
- goto done;
- if (ioend->io_bio->bi_error) {
- error = xfs_reflink_cancel_cow_range(ip,
- ioend->io_offset, ioend->io_size);
- goto done;
- }
- error = xfs_reflink_end_cow(ip, ioend->io_offset,
- ioend->io_size);
- if (error)
- goto done;
+ goto done;
}
/*
- * For unwritten extents we need to issue transactions to convert a
- * range to normal written extens after the data I/O has finished.
- * Detecting and handling completion IO errors is done individually
- * for each case as different cleanup operations need to be performed
- * on error.
+ * Clean up any COW blocks on an I/O error.
*/
- if (ioend->io_type == XFS_IO_UNWRITTEN) {
- if (error)
- goto done;
- error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
- ioend->io_size);
- } else if (ioend->io_append_trans) {
- error = xfs_setfilesize_ioend(ioend, error);
- } else {
- ASSERT(!xfs_ioend_is_append(ioend) ||
- ioend->io_type == XFS_IO_COW);
+ if (unlikely(error)) {
+ switch (ioend->io_type) {
+ case XFS_IO_COW:
+ xfs_reflink_cancel_cow_range(ip, offset, size, true);
+ break;
+ }
+
+ goto done;
+ }
+
+ /*
+ * Success: commit the COW or unwritten blocks if needed.
+ */
+ switch (ioend->io_type) {
+ case XFS_IO_COW:
+ error = xfs_reflink_end_cow(ip, offset, size);
+ break;
+ case XFS_IO_UNWRITTEN:
+ error = xfs_iomap_write_unwritten(ip, offset, size);
+ break;
+ default:
+ ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+ break;
}
done:
+ if (ioend->io_append_trans)
+ error = xfs_setfilesize_ioend(ioend, error);
xfs_destroy_ioend(ioend, error);
}
@@ -486,6 +481,12 @@
struct xfs_ioend *ioend,
int status)
{
+ /* Convert CoW extents to regular */
+ if (!status && ioend->io_type == XFS_IO_COW) {
+ status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
+ ioend->io_offset, ioend->io_size);
+ }
+
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
ioend->io_type != XFS_IO_UNWRITTEN &&
@@ -1257,44 +1258,6 @@
bh_result->b_size = mapping_size;
}
-/* Bounce unaligned directio writes to the page cache. */
-static int
-xfs_bounce_unaligned_dio_write(
- struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_bmbt_irec irec;
- xfs_fileoff_t delta;
- bool shared;
- bool x;
- int error;
-
- irec = *imap;
- if (offset_fsb > irec.br_startoff) {
- delta = offset_fsb - irec.br_startoff;
- irec.br_blockcount -= delta;
- irec.br_startblock += delta;
- irec.br_startoff = offset_fsb;
- }
- error = xfs_reflink_trim_around_shared(ip, &irec, &shared, &x);
- if (error)
- return error;
-
- /*
- * We're here because we're trying to do a directio write to a
- * region that isn't aligned to a filesystem block. If any part
- * of the extent is shared, fall back to buffered mode to handle
- * the RMW. This is done by returning -EREMCHG ("remote addr
- * changed"), which is caught further up the call stack.
- */
- if (shared) {
- trace_xfs_reflink_bounce_dio_write(ip, imap);
- return -EREMCHG;
- }
- return 0;
-}
-
STATIC int
__xfs_get_blocks(
struct inode *inode,
@@ -1432,13 +1395,6 @@
if (imap.br_startblock != HOLESTARTBLOCK &&
imap.br_startblock != DELAYSTARTBLOCK &&
(create || !ISUNWRITTEN(&imap))) {
- if (create && direct && !is_cow) {
- error = xfs_bounce_unaligned_dio_write(ip, offset_fsb,
- &imap);
- if (error)
- return error;
- }
-
xfs_map_buffer(inode, bh_result, &imap, offset);
if (ISUNWRITTEN(&imap))
set_buffer_unwritten(bh_result);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index efb8ccd..5328ecd 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -917,17 +917,18 @@
*/
int
xfs_free_eofblocks(
- xfs_mount_t *mp,
- xfs_inode_t *ip,
- bool need_iolock)
+ struct xfs_inode *ip)
{
- xfs_trans_t *tp;
- int error;
- xfs_fileoff_t end_fsb;
- xfs_fileoff_t last_fsb;
- xfs_filblks_t map_len;
- int nimaps;
- xfs_bmbt_irec_t imap;
+ struct xfs_trans *tp;
+ int error;
+ xfs_fileoff_t end_fsb;
+ xfs_fileoff_t last_fsb;
+ xfs_filblks_t map_len;
+ int nimaps;
+ struct xfs_bmbt_irec imap;
+ struct xfs_mount *mp = ip->i_mount;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
/*
* Figure out if there are any blocks beyond the end
@@ -944,6 +945,10 @@
error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ /*
+ * If there are blocks after the end of file, truncate the file to its
+ * current size to free them up.
+ */
if (!error && (nimaps != 0) &&
(imap.br_startblock != HOLESTARTBLOCK ||
ip->i_delayed_blks)) {
@@ -954,22 +959,13 @@
if (error)
return error;
- /*
- * There are blocks after the end of file.
- * Free them up now by truncating the file to
- * its current size.
- */
- if (need_iolock) {
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
- return -EAGAIN;
- }
+ /* wait on dio to ensure i_size has settled */
+ inode_dio_wait(VFS_I(ip));
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
&tp);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
- if (need_iolock)
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
@@ -997,8 +993,6 @@
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- if (need_iolock)
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}
return error;
}
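
With the need_iolock parameter gone, xfs_free_eofblocks() now asserts that the caller already holds the IOLOCK, and each call site takes it (trylock on hot paths) around the call. A minimal sketch of that contract, using pthread primitives as stand-ins for xfs_ilock_nowait()/xfs_iunlock():

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;
    static int iolock_held;

    static int free_eofblocks(void)
    {
            assert(iolock_held);         /* mirrors ASSERT(xfs_isilocked(...)) */
            printf("truncating post-EOF blocks\n");
            return 0;
    }

    int main(void)
    {
            if (pthread_mutex_trylock(&iolock) == 0) {   /* xfs_ilock_nowait() */
                    iolock_held = 1;
                    free_eofblocks();
                    iolock_held = 0;
                    pthread_mutex_unlock(&iolock);       /* xfs_iunlock() */
            } else {
                    printf("iolock busy: skip now, retry on final release\n");
            }
            return 0;
    }
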
@@ -1324,8 +1318,16 @@
/*
* Now that we've unmap all full blocks we'll have to zero out any
* partial block at the beginning and/or end. xfs_zero_range is
- * smart enough to skip any holes, including those we just created.
+ * smart enough to skip any holes, including those we just created,
+ * but we must take care not to zero beyond EOF and enlarge i_size.
*/
+
+ if (offset >= XFS_ISIZE(ip))
+ return 0;
+
+ if (offset + len > XFS_ISIZE(ip))
+ len = XFS_ISIZE(ip) - offset;
+
return xfs_zero_range(ip, offset, len, NULL);
}
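
A quick worked example of the clamp added above, with hypothetical sizes: without it, zeroing the tail of a punched range could write past EOF and enlarge i_size.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t isize = 10000, offset = 9500, len = 2048;

            if (offset >= isize)
                    return 0;                    /* whole range is past EOF */
            if (offset + len > isize)
                    len = isize - offset;        /* clamp: never zero past EOF */

            printf("zero [%llu, %llu)\n",        /* prints: zero [9500, 10000) */
                   (unsigned long long)offset,
                   (unsigned long long)(offset + len));
            return 0;
    }
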
@@ -1393,10 +1395,16 @@
xfs_fileoff_t stop_fsb;
xfs_fileoff_t next_fsb;
xfs_fileoff_t shift_fsb;
+ uint resblks;
ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
if (direction == SHIFT_LEFT) {
+ /*
+ * Reserve blocks to cover potential extent merges after left
+ * shift operations.
+ */
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
next_fsb = XFS_B_TO_FSB(mp, offset + len);
stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
} else {
@@ -1404,6 +1412,7 @@
* If right shift, delegate the work of initialization of
* next_fsb to xfs_bmap_shift_extent as it has ilock held.
*/
+ resblks = 0;
next_fsb = NULLFSBLOCK;
stop_fsb = XFS_B_TO_FSB(mp, offset);
}
@@ -1415,7 +1424,7 @@
* into the accessible region of the file.
*/
if (xfs_can_free_eofblocks(ip, true)) {
- error = xfs_free_eofblocks(mp, ip, false);
+ error = xfs_free_eofblocks(ip);
if (error)
return error;
}
@@ -1445,21 +1454,14 @@
}
while (!error && !done) {
- /*
- * We would need to reserve permanent block for transaction.
- * This will come into picture when after shifting extent into
- * hole we found that adjacent extents can be merged which
- * may lead to freeing of a block during record update.
- */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
- XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
+ &tp);
if (error)
break;
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
- ip->i_gdquot, ip->i_pdquot,
- XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
+ ip->i_gdquot, ip->i_pdquot, resblks, 0,
XFS_QMOPT_RES_REGBLKS);
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 68a621a..f100539 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -63,8 +63,7 @@
/* EOF block manipulation functions */
bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
-int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
- bool need_iolock);
+int xfs_free_eofblocks(struct xfs_inode *ip);
int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
struct xfs_swapext *sx);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2975cb2..0306168 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1162,6 +1162,7 @@
*/
bp->b_last_error = 0;
bp->b_retries = 0;
+ bp->b_first_retry_time = 0;
xfs_buf_do_callbacks(bp);
bp->b_fspriv = NULL;
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 162dc18..29c2f99 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -45,18 +45,7 @@
struct rb_node **rbp;
struct rb_node *parent = NULL;
- new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
- if (!new) {
- /*
- * No Memory! Since it is now not possible to track the free
- * block, make this a synchronous transaction to insure that
- * the block is not reused before this transaction commits.
- */
- trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
- xfs_trans_set_sync(tp);
- return;
- }
-
+ new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
new->agno = agno;
new->bno = bno;
new->length = len;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 9a5d64b..1209ad2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -554,6 +554,15 @@
if ((iocb->ki_pos & mp->m_blockmask) ||
((iocb->ki_pos + count) & mp->m_blockmask)) {
unaligned_io = 1;
+
+ /*
+ * We can't properly handle unaligned direct I/O to reflink
+ * files yet, as we can't unshare a partial block.
+ */
+ if (xfs_is_reflink_inode(ip)) {
+ trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
+ return -EREMCHG;
+ }
iolock = XFS_IOLOCK_EXCL;
} else {
iolock = XFS_IOLOCK_SHARED;
@@ -675,8 +684,10 @@
struct xfs_inode *ip = XFS_I(inode);
ssize_t ret;
int enospc = 0;
- int iolock = XFS_IOLOCK_EXCL;
+ int iolock;
+write_retry:
+ iolock = XFS_IOLOCK_EXCL;
xfs_rw_ilock(ip, iolock);
ret = xfs_file_aio_write_checks(iocb, from, &iolock);
@@ -686,7 +697,6 @@
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
-write_retry:
trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
if (likely(ret >= 0))
@@ -702,18 +712,21 @@
* running at the same time.
*/
if (ret == -EDQUOT && !enospc) {
+ xfs_rw_iunlock(ip, iolock);
enospc = xfs_inode_free_quota_eofblocks(ip);
if (enospc)
goto write_retry;
enospc = xfs_inode_free_quota_cowblocks(ip);
if (enospc)
goto write_retry;
+ iolock = 0;
} else if (ret == -ENOSPC && !enospc) {
struct xfs_eofblocks eofb = {0};
enospc = 1;
xfs_flush_inodes(ip->i_mount);
- eofb.eof_scan_owner = ip->i_ino; /* for locking */
+
+ xfs_rw_iunlock(ip, iolock);
eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
xfs_icache_free_eofblocks(ip->i_mount, &eofb);
goto write_retry;
@@ -721,7 +734,8 @@
current->backing_dev_info = NULL;
out:
- xfs_rw_iunlock(ip, iolock);
+ if (iolock)
+ xfs_rw_iunlock(ip, iolock);
return ret;
}
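
The write_retry label now sits above the lock acquisition because the EOF-blocks and CoW-blocks scans take IOLOCKs themselves; the write path must drop its own lock before scanning and retake it on retry. A minimal sketch of that drop-scan-retake shape, with pthread and stub helpers standing in for the XFS locking and scan calls:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;

    static int scan_free_blocks(void) { return 1; }      /* pretend we freed space */
    static int do_write(int attempt) { return attempt ? 9 : -122 /* EDQUOT */; }

    int main(void)
    {
            int ret, attempt = 0, scanned = 0;

    write_retry:
            pthread_mutex_lock(&iolock);                  /* retaken on every pass */
            ret = do_write(attempt++);
            if (ret == -122 && !scanned) {
                    /* Drop our lock first: the scan locks other inodes and must
                     * not deadlock against the one we hold. */
                    pthread_mutex_unlock(&iolock);
                    scanned = scan_free_blocks();
                    if (scanned)
                            goto write_retry;
                    /* lock already dropped; fall through without unlocking */
            } else {
                    pthread_mutex_unlock(&iolock);
            }
            printf("wrote %d bytes after %d attempt(s)\n", ret, attempt);
            return 0;
    }
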
@@ -987,9 +1001,9 @@
*/
mode = xfs_ilock_data_map_shared(ip);
if (ip->i_d.di_nextents > 0)
- xfs_dir3_data_readahead(ip, 0, -1);
+ error = xfs_dir3_data_readahead(ip, 0, -1);
xfs_iunlock(ip, mode);
- return 0;
+ return error;
}
STATIC int
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 29cc988..3fb1f3f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1324,13 +1324,10 @@
int flags,
void *args)
{
- int ret;
+ int ret = 0;
struct xfs_eofblocks *eofb = args;
- bool need_iolock = true;
int match;
- ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
-
if (!xfs_can_free_eofblocks(ip, false)) {
/* inode could be preallocated or append-only */
trace_xfs_inode_free_eofblocks_invalid(ip);
@@ -1358,21 +1355,19 @@
if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
XFS_ISIZE(ip) < eofb->eof_min_file_size)
return 0;
-
- /*
- * A scan owner implies we already hold the iolock. Skip it in
- * xfs_free_eofblocks() to avoid deadlock. This also eliminates
- * the possibility of EAGAIN being returned.
- */
- if (eofb->eof_scan_owner == ip->i_ino)
- need_iolock = false;
}
- ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);
-
- /* don't revisit the inode if we're not waiting */
- if (ret == -EAGAIN && !(flags & SYNC_WAIT))
- ret = 0;
+ /*
+ * If the caller is waiting, return -EAGAIN to keep the background
+ * scanner moving and revisit the inode in a subsequent pass.
+ */
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ if (flags & SYNC_WAIT)
+ ret = -EAGAIN;
+ return ret;
+ }
+ ret = xfs_free_eofblocks(ip);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
}
@@ -1419,15 +1414,10 @@
struct xfs_eofblocks eofb = {0};
struct xfs_dquot *dq;
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-
/*
- * Set the scan owner to avoid a potential livelock. Otherwise, the scan
- * can repeatedly trylock on the inode we're currently processing. We
- * run a sync scan to increase effectiveness and use the union filter to
+ * Run a sync scan to increase effectiveness and use the union filter to
* cover all applicable quotas in a single scan.
*/
- eofb.eof_scan_owner = ip->i_ino;
eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
@@ -1579,12 +1569,9 @@
{
int ret;
struct xfs_eofblocks *eofb = args;
- bool need_iolock = true;
int match;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
-
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
@@ -1617,28 +1604,16 @@
if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
XFS_ISIZE(ip) < eofb->eof_min_file_size)
return 0;
-
- /*
- * A scan owner implies we already hold the iolock. Skip it in
- * xfs_free_eofblocks() to avoid deadlock. This also eliminates
- * the possibility of EAGAIN being returned.
- */
- if (eofb->eof_scan_owner == ip->i_ino)
- need_iolock = false;
}
/* Free the CoW blocks */
- if (need_iolock) {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- }
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+ ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
- if (need_iolock) {
- xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- }
+ xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index a1e02f4..8a7c849 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -27,7 +27,6 @@
kgid_t eof_gid;
prid_t eof_prid;
__u64 eof_min_file_size;
- xfs_ino_t eof_scan_owner;
};
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
@@ -102,7 +101,6 @@
dst->eof_flags = src->eof_flags;
dst->eof_prid = src->eof_prid;
dst->eof_min_file_size = src->eof_min_file_size;
- dst->eof_scan_owner = NULLFSINO;
dst->eof_uid = INVALID_UID;
if (src->eof_flags & XFS_EOF_FLAGS_UID) {
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 512ff13..e50636c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1624,7 +1624,7 @@
/* Remove all pending CoW reservations. */
error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
- last_block);
+ last_block, true);
if (error)
goto out;
@@ -1701,32 +1701,34 @@
if (xfs_can_free_eofblocks(ip, false)) {
/*
- * If we can't get the iolock just skip truncating the blocks
- * past EOF because we could deadlock with the mmap_sem
- * otherwise. We'll get another chance to drop them once the
- * last reference to the inode is dropped, so we'll never leak
- * blocks permanently.
+		 * If the inode is being opened, written and closed frequently
+		 * and we have delayed allocation blocks outstanding (e.g.
+		 * streaming writes from the NFS server), truncating the blocks
+		 * past EOF will cause fragmentation to occur.
*
- * Further, check if the inode is being opened, written and
- * closed frequently and we have delayed allocation blocks
- * outstanding (e.g. streaming writes from the NFS server),
- * truncating the blocks past EOF will cause fragmentation to
- * occur.
- *
- * In this case don't do the truncation, either, but we have to
- * be careful how we detect this case. Blocks beyond EOF show
- * up as i_delayed_blks even when the inode is clean, so we
- * need to truncate them away first before checking for a dirty
- * release. Hence on the first dirty close we will still remove
- * the speculative allocation, but after that we will leave it
- * in place.
+ * In this case don't do the truncation, but we have to be
+ * careful how we detect this case. Blocks beyond EOF show up as
+ * i_delayed_blks even when the inode is clean, so we need to
+ * truncate them away first before checking for a dirty release.
+ * Hence on the first dirty close we will still remove the
+ * speculative allocation, but after that we will leave it in
+ * place.
*/
if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
return 0;
-
- error = xfs_free_eofblocks(mp, ip, true);
- if (error && error != -EAGAIN)
- return error;
+ /*
+ * If we can't get the iolock just skip truncating the blocks
+ * past EOF because we could deadlock with the mmap_sem
+ * otherwise. We'll get another chance to drop them once the
+ * last reference to the inode is dropped, so we'll never leak
+ * blocks permanently.
+ */
+ if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ error = xfs_free_eofblocks(ip);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ if (error)
+ return error;
+ }
/* delalloc blocks after truncation means it really is dirty */
if (ip->i_delayed_blks)
@@ -1801,22 +1803,23 @@
int error;
/*
- * The ifree transaction might need to allocate blocks for record
- * insertion to the finobt. We don't want to fail here at ENOSPC, so
- * allow ifree to dip into the reserved block pool if necessary.
- *
- * Freeing large sets of inodes generally means freeing inode chunks,
- * directory and file data blocks, so this should be relatively safe.
- * Only under severe circumstances should it be possible to free enough
- * inodes to exhaust the reserve block pool via finobt expansion while
- * at the same time not creating free space in the filesystem.
+ * We try to use a per-AG reservation for any block needed by the finobt
+ * tree, but as the finobt feature predates the per-AG reservation
+ * support a degraded file system might not have enough space for the
+ * reservation at mount time. In that case try to dip into the reserved
+ * pool and pray.
*
* Send a warning if the reservation does happen to fail, as the inode
* now remains allocated and sits on the unlinked list until the fs is
* repaired.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
- XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+ if (unlikely(mp->m_inotbt_nores)) {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+ XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+ &tp);
+ } else {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+ }
if (error) {
if (error == -ENOSPC) {
xfs_warn_ratelimited(mp,
@@ -1912,8 +1915,11 @@
* cache. Post-eof blocks must be freed, lest we end up with
* broken free space accounting.
*/
- if (xfs_can_free_eofblocks(ip, true))
- xfs_free_eofblocks(mp, ip, false);
+ if (xfs_can_free_eofblocks(ip, true)) {
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ xfs_free_eofblocks(ip);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ }
return;
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index e888961..3605624 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -637,6 +637,11 @@
goto out_unlock;
}
+ /*
+ * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+ * them out if the write happens to fail.
+ */
+ iomap->flags = IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
if (isnullstartblock(got.br_startblock))
@@ -685,7 +690,7 @@
int nres;
if (whichfork == XFS_COW_FORK)
- flags |= XFS_BMAPI_COWFORK;
+ flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
/*
* Make sure that the dquots are there.
@@ -1061,7 +1066,8 @@
struct xfs_inode *ip,
loff_t offset,
loff_t length,
- ssize_t written)
+ ssize_t written,
+ struct iomap *iomap)
{
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t start_fsb;
@@ -1080,14 +1086,14 @@
end_fsb = XFS_B_TO_FSB(mp, offset + length);
/*
- * Trim back delalloc blocks if we didn't manage to write the whole
- * range reserved.
+ * Trim delalloc blocks if they were allocated by this write and we
+ * didn't manage to write the whole range.
*
* We don't need to care about racing delalloc as we hold i_mutex
* across the reserve/allocate/unreserve calls. If there are delalloc
* blocks in the range, they are ours.
*/
- if (start_fsb < end_fsb) {
+ if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
XFS_FSB_TO_B(mp, end_fsb) - 1);
@@ -1117,7 +1123,7 @@
{
if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
- length, written);
+ length, written, iomap);
return 0;
}
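
The IOMAP_F_NEW gate means a short write only punches out delalloc blocks that this write itself reserved; pre-existing delalloc from earlier writes is left alone. A toy model of the gating — the flag value and helper below are illustrative, not the iomap API:

    #include <stdio.h>

    #define IOMAP_F_NEW 0x01                     /* illustrative flag value */

    static void iomap_end(unsigned int flags, long long start_fsb,
                          long long end_fsb)
    {
            if ((flags & IOMAP_F_NEW) && start_fsb < end_fsb)
                    printf("punch delalloc [%lld, %lld)\n", start_fsb, end_fsb);
            else
                    printf("keep pre-existing delalloc\n");
    }

    int main(void)
    {
            iomap_end(IOMAP_F_NEW, 10, 20);      /* short write over fresh blocks */
            iomap_end(0, 10, 20);                /* short write over older blocks */
            return 0;
    }
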
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b341f10..13796f2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -502,8 +502,7 @@
xfs_set_inoalignment(xfs_mount_t *mp)
{
if (xfs_sb_version_hasalign(&mp->m_sb) &&
- mp->m_sb.sb_inoalignmt >=
- XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+ mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
else
mp->m_inoalign_mask = 0;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 819b80b..1bf878b 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_dmevmask; /* DMI events for this FS */
__uint64_t m_flags; /* global mount flags */
+ bool m_inotbt_nores; /* no per-AG finobt resv. */
int m_ialloc_inos; /* inodes in inode allocation */
int m_ialloc_blks; /* blocks in inode allocation */
int m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 4d3f74e..2252f16 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -82,11 +82,22 @@
* mappings are a reservation against the free space in the filesystem;
* adjacent mappings can also be combined into fewer larger mappings.
*
+ * As an optimization, the CoW extent size hint (cowextsz) creates
+ * outsized aligned delalloc reservations in the hope of landing out of
+ * order nearby CoW writes in a single extent on disk, thereby reducing
+ * fragmentation and improving future performance.
+ *
+ * D: --RRRRRRSSSRRRRRRRR--- (data fork)
+ * C: ------DDDDDDD--------- (CoW fork)
+ *
* When dirty pages are being written out (typically in writepage), the
- * delalloc reservations are converted into real mappings by allocating
- * blocks and replacing the delalloc mapping with real ones. A delalloc
- * mapping can be replaced by several real ones if the free space is
- * fragmented.
+ * delalloc reservations are converted into unwritten mappings by
+ * allocating blocks and replacing the delalloc mapping with real ones.
+ * A delalloc mapping can be replaced by several unwritten ones if the
+ * free space is fragmented.
+ *
+ * D: --RRRRRRSSSRRRRRRRR---
+ * C: ------UUUUUUU---------
*
* We want to adapt the delalloc mechanism for copy-on-write, since the
* write paths are similar. The first two steps (creating the reservation
@@ -101,13 +112,29 @@
* Block-aligned directio writes will use the same mechanism as buffered
* writes.
*
+ * Just prior to submitting the actual disk write requests, we convert
+ * the extents representing the range of the file actually being written
+ * (as opposed to extra pieces created for the cowextsize hint) to real
+ * extents. This will become important in the next step:
+ *
+ * D: --RRRRRRSSSRRRRRRRR---
+ * C: ------UUrrUUU---------
+ *
* CoW remapping must be done after the data block write completes,
* because we don't want to destroy the old data fork map until we're sure
* the new block has been written. Since the new mappings are kept in a
* separate fork, we can simply iterate these mappings to find the ones
* that cover the file blocks that we just CoW'd. For each extent, simply
* unmap the corresponding range in the data fork, map the new range into
- * the data fork, and remove the extent from the CoW fork.
+ * the data fork, and remove the extent from the CoW fork. Because of
+ * the presence of the cowextsize hint, however, we must be careful
+ * only to remap the blocks that we've actually written out -- we must
+ * never remap delalloc reservations nor CoW staging blocks that have
+ * yet to be written. This corresponds exactly to the real extents in
+ * the CoW fork:
+ *
+ * D: --RRRRRRrrSRRRRRRRR---
+ * C: ------UU--UUU---------
*
* Since the remapping operation can be applied to an arbitrary file
* range, we record the need for the remap step as a flag in the ioend
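
The lifecycle this comment describes amounts to four state transitions per CoW extent. A standalone toy model, assuming invented stage names (the real transitions happen via the xfs_reflink_* helpers added below):

    #include <stdio.h>

    enum cow_state { COW_DELALLOC, COW_UNWRITTEN, COW_REAL, COW_REMAPPED };

    static const char *name(enum cow_state s)
    {
            static const char *n[] = { "delalloc", "unwritten", "real", "remapped" };
            return n[s];
    }

    int main(void)
    {
            enum cow_state s;

            s = COW_DELALLOC;    /* reserve: cowextsz-aligned CoW-fork reservation */
            printf("reserve:  %s\n", name(s));
            s = COW_UNWRITTEN;   /* allocate: staging blocks, still unwritten */
            printf("allocate: %s\n", name(s));
            s = COW_REAL;        /* convert: only the range actually submitted */
            printf("convert:  %s\n", name(s));
            s = COW_REMAPPED;    /* end_cow: remap real extents into the data fork */
            printf("end_cow:  %s\n", name(s));
            return 0;
    }
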
@@ -296,6 +323,65 @@
return 0;
}
+/* Convert part of an unwritten CoW extent to a real one. */
+STATIC int
+xfs_reflink_convert_cow_extent(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap,
+ xfs_fileoff_t offset_fsb,
+ xfs_filblks_t count_fsb,
+ struct xfs_defer_ops *dfops)
+{
+ struct xfs_bmbt_irec irec = *imap;
+ xfs_fsblock_t first_block;
+ int nimaps = 1;
+
+ if (imap->br_state == XFS_EXT_NORM)
+ return 0;
+
+ xfs_trim_extent(&irec, offset_fsb, count_fsb);
+ trace_xfs_reflink_convert_cow(ip, &irec);
+ if (irec.br_blockcount == 0)
+ return 0;
+ return xfs_bmapi_write(NULL, ip, irec.br_startoff, irec.br_blockcount,
+ XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, &first_block,
+ 0, &irec, &nimaps, dfops);
+}
+
+/* Convert all of the unwritten CoW extents in a file's range to real ones. */
+int
+xfs_reflink_convert_cow(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t count)
+{
+ struct xfs_bmbt_irec got;
+ struct xfs_defer_ops dfops;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
+ xfs_extnum_t idx;
+ bool found;
+ int error = 0;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ /* Convert all the extents to real from unwritten. */
+ for (found = xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
+ found && got.br_startoff < end_fsb;
+ found = xfs_iext_get_extent(ifp, ++idx, &got)) {
+ error = xfs_reflink_convert_cow_extent(ip, &got, offset_fsb,
+ end_fsb - offset_fsb, &dfops);
+ if (error)
+ break;
+ }
+
+ /* Finish up. */
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
+
/* Allocate all CoW reservations covering a range of blocks in a file. */
static int
__xfs_reflink_allocate_cow(
@@ -328,6 +414,7 @@
goto out_unlock;
ASSERT(nimaps == 1);
+ /* Make sure there's a CoW reservation for it. */
error = xfs_reflink_reserve_cow(ip, &imap, &shared);
if (error)
goto out_trans_cancel;
@@ -337,14 +424,16 @@
goto out_trans_cancel;
}
+ /* Allocate the entire reservation as unwritten blocks. */
xfs_trans_ijoin(tp, ip, 0);
error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
- XFS_BMAPI_COWFORK, &first_block,
+ XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
&imap, &nimaps, &dfops);
if (error)
goto out_trans_cancel;
+ /* Finish up. */
error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_trans_cancel;
@@ -389,11 +478,12 @@
if (error) {
trace_xfs_reflink_allocate_cow_range_error(ip, error,
_RET_IP_);
- break;
+ return error;
}
}
- return error;
+ /* Convert the CoW extents to regular. */
+ return xfs_reflink_convert_cow(ip, offset, count);
}
/*
@@ -481,14 +571,18 @@
}
/*
- * Cancel all pending CoW reservations for some block range of an inode.
+ * Cancel CoW reservations for some block range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
*/
int
xfs_reflink_cancel_cow_blocks(
struct xfs_inode *ip,
struct xfs_trans **tpp,
xfs_fileoff_t offset_fsb,
- xfs_fileoff_t end_fsb)
+ xfs_fileoff_t end_fsb,
+ bool cancel_real)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, prev, del;
@@ -515,7 +609,7 @@
&idx, &got, &del);
if (error)
break;
- } else {
+ } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
xfs_trans_ijoin(*tpp, ip, 0);
xfs_defer_init(&dfops, &firstfsb);
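
The new else-if branch encodes a small decision table: unwritten staging extents are always cancelled, while real (already written) CoW extents survive unless cancel_real is set. Sketched standalone:

    #include <stdio.h>

    enum ext_state { EXT_UNWRITTEN, EXT_REAL };

    /* Unwritten staging extents always go; real extents only if cancel_real. */
    static int should_cancel(enum ext_state st, int cancel_real)
    {
            return st == EXT_UNWRITTEN || cancel_real;
    }

    int main(void)
    {
            printf("unwritten, cancel_real=0 -> %d\n",
                   should_cancel(EXT_UNWRITTEN, 0));   /* 1 */
            printf("real,      cancel_real=0 -> %d\n",
                   should_cancel(EXT_REAL, 0));        /* 0 */
            printf("real,      cancel_real=1 -> %d\n",
                   should_cancel(EXT_REAL, 1));        /* 1 */
            return 0;
    }
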
@@ -558,13 +652,17 @@
}
/*
- * Cancel all pending CoW reservations for some byte range of an inode.
+ * Cancel CoW reservations for some byte range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
*/
int
xfs_reflink_cancel_cow_range(
struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t count)
+ xfs_off_t count,
+ bool cancel_real)
{
struct xfs_trans *tp;
xfs_fileoff_t offset_fsb;
@@ -590,7 +688,8 @@
xfs_trans_ijoin(tp, ip, 0);
/* Scrape out the old CoW reservations */
- error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+ error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
+ cancel_real);
if (error)
goto out_cancel;
@@ -669,6 +768,16 @@
ASSERT(!isnullstartblock(got.br_startblock));
+ /*
+ * Don't remap unwritten extents; these are
+ * speculatively preallocated CoW extents that have been
+ * allocated but have not yet been involved in a write.
+ */
+ if (got.br_state == XFS_EXT_UNWRITTEN) {
+ idx--;
+ goto next_extent;
+ }
+
/* Unmap the old blocks in the data fork. */
xfs_defer_init(&dfops, &firstfsb);
rlen = del.br_blockcount;
@@ -885,13 +994,14 @@
xfs_reflink_update_dest(
struct xfs_inode *dest,
xfs_off_t newlen,
- xfs_extlen_t cowextsize)
+ xfs_extlen_t cowextsize,
+ bool is_dedupe)
{
struct xfs_mount *mp = dest->i_mount;
struct xfs_trans *tp;
int error;
- if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
+ if (is_dedupe && newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
return 0;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
@@ -912,6 +1022,10 @@
dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
}
+ if (!is_dedupe) {
+ xfs_trans_ichgtime(tp, dest,
+ XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ }
xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
error = xfs_trans_commit(tp);
@@ -1428,7 +1542,8 @@
!(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
cowextsize = src->i_d.di_cowextsize;
- ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize);
+ ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
+ is_dedupe);
out_unlock:
xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
@@ -1580,7 +1695,7 @@
* We didn't find any shared blocks so turn off the reflink flag.
* First, get rid of any leftover CoW mappings.
*/
- error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF);
+ error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
if (error)
return error;
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 97ea9b4..a57966f 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -30,6 +30,8 @@
struct xfs_bmbt_irec *imap, bool *shared);
extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t count);
extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
struct xfs_bmbt_irec *imap, bool *need_alloc);
extern int xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
@@ -37,9 +39,9 @@
extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
- xfs_fileoff_t end_fsb);
+ xfs_fileoff_t end_fsb, bool cancel_real);
extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
- xfs_off_t count);
+ xfs_off_t count, bool cancel_real);
extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ade4691..dbbd3f1 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -948,7 +948,7 @@
XFS_STATS_INC(ip->i_mount, vn_remove);
if (xfs_is_reflink_inode(ip)) {
- error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+ error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
xfs_warn(ip->i_mount,
"Error %d while evicting CoW blocks for inode %llu.",
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 0907752..828f383 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3183,6 +3183,7 @@
__field(xfs_fileoff_t, lblk)
__field(xfs_extlen_t, len)
__field(xfs_fsblock_t, pblk)
+ __field(int, state)
),
TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev;
@@ -3190,13 +3191,15 @@
__entry->lblk = irec->br_startoff;
__entry->len = irec->br_blockcount;
__entry->pblk = irec->br_startblock;
+ __entry->state = irec->br_state;
),
- TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x pblk %llu",
+ TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x pblk %llu st %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->lblk,
__entry->len,
- __entry->pblk)
+ __entry->pblk,
+ __entry->state)
);
#define DEFINE_INODE_IREC_EVENT(name) \
DEFINE_EVENT(xfs_inode_irec_class, name, \
@@ -3345,11 +3348,12 @@
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
+DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
DEFINE_IOMAP_EVENT(xfs_reflink_find_cow_mapping);
DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
new file mode 100644
index 0000000..558d136
--- /dev/null
+++ b/include/crypto/ice.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_H_
+
+#include <linux/platform_device.h>
+
+struct request;
+
+enum ice_cryto_algo_mode {
+ ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
+ ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
+};
+
+enum ice_crpto_key_size {
+ ICE_CRYPTO_KEY_SIZE_128 = 0x0,
+ ICE_CRYPTO_KEY_SIZE_256 = 0x2,
+};
+
+enum ice_crpto_key_mode {
+ ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
+ ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
+ ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
+ ICE_CRYPTO_USE_LUT_SW_KEY = 0x3
+};
+
+struct ice_crypto_setting {
+ enum ice_crpto_key_size key_size;
+ enum ice_cryto_algo_mode algo_mode;
+ enum ice_crpto_key_mode key_mode;
+ short key_index;
+
+};
+
+struct ice_data_setting {
+ struct ice_crypto_setting crypto_data;
+ bool sw_forced_context_switch;
+ bool decr_bypass;
+ bool encr_bypass;
+};
+
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
+
+#ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
+#else
+static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+ return 0;
+}
+#endif
+
+struct qcom_ice_variant_ops {
+ const char *name;
+ int (*init)(struct platform_device *, void *, ice_error_cb);
+ int (*reset)(struct platform_device *);
+ int (*resume)(struct platform_device *);
+ int (*suspend)(struct platform_device *);
+ int (*config_start)(struct platform_device *, struct request *,
+ struct ice_data_setting *, bool);
+ int (*config_end)(struct request *);
+ int (*status)(struct platform_device *);
+ void (*debug)(struct platform_device *);
+};
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 1d4f365..f6d9af3e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -166,6 +166,16 @@
return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
}
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+ return req->base.flags;
+}
+
static inline struct crypto_ahash *crypto_spawn_ahash(
struct crypto_ahash_spawn *spawn)
{
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef190..f5678aa 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -21,12 +21,15 @@
#define MIPI_DSI_MSG_REQ_ACK BIT(0)
/* use Low Power Mode to transmit message */
#define MIPI_DSI_MSG_USE_LPM BIT(1)
+/* read mipi_dsi_msg.ctrl and unicast only to that ctrl */
+#define MIPI_DSI_MSG_UNICAST BIT(2)
/**
* struct mipi_dsi_msg - read/write DSI buffer
* @channel: virtual channel id
* @type: payload data type
* @flags: flags controlling this message transmission
+ * @ctrl: ctrl index to transmit on
* @tx_len: length of @tx_buf
* @tx_buf: data to be written
* @rx_len: length of @rx_buf
@@ -36,6 +39,7 @@
u8 channel;
u8 type;
u16 flags;
+ u32 ctrl;
size_t tx_len;
const void *tx_buf;
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 0d5f426..61766a4 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -226,23 +226,18 @@
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
-#define INTEL_BDW_RSVDM_IDS(info) \
+#define INTEL_BDW_RSVD_IDS(info) \
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
- INTEL_VGA_DEVICE(0x163E, info) /* ULX */
-
-#define INTEL_BDW_RSVDD_IDS(info) \
+ INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_IDS(info) \
INTEL_BDW_GT12_IDS(info), \
INTEL_BDW_GT3_IDS(info), \
- INTEL_BDW_RSVDM_IDS(info), \
- INTEL_BDW_GT12_IDS(info), \
- INTEL_BDW_GT3_IDS(info), \
- INTEL_BDW_RSVDD_IDS(info)
+ INTEL_BDW_RSVD_IDS(info)
#define INTEL_CHV_IDS(info) \
INTEL_VGA_DEVICE(0x22b0, info), \
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f9..1487011 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@
* @ref_type: The type of reference.
* @existed: Upon completion, indicates that an identical reference object
* already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
*
* Checks that the base object is shareable and adds a ref object to it.
*
@@ -243,7 +245,8 @@
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed);
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed);
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base);
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
new file mode 100644
index 0000000..c1350ce
--- /dev/null
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_10NM_PLL_CLK_H
+#define __MDSS_10NM_PLL_CLK_H
+
+/* DSI PLL clocks */
+#define VCO_CLK_0 0
+#define BITCLK_SRC_0_CLK 1
+#define BYTECLK_SRC_0_CLK 2
+#define POST_BIT_DIV_0_CLK 3
+#define POST_VCO_DIV_0_CLK 4
+#define BYTECLK_MUX_0_CLK 5
+#define PCLK_SRC_MUX_0_CLK 6
+#define PCLK_SRC_0_CLK 7
+#define PCLK_MUX_0_CLK 8
+#define VCO_CLK_1 9
+#define BITCLK_SRC_1_CLK 10
+#define BYTECLK_SRC_1_CLK 11
+#define POST_BIT_DIV_1_CLK 12
+#define POST_VCO_DIV_1_CLK 13
+#define BYTECLK_MUX_1_CLK 14
+#define PCLK_SRC_MUX_1_CLK 15
+#define PCLK_SRC_1_CLK 16
+#define PCLK_MUX_1_CLK 17
+#endif
diff --git a/include/dt-bindings/clock/qcom,aop-qmp.h b/include/dt-bindings/clock/qcom,aop-qmp.h
new file mode 100644
index 0000000..b88dc36
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,aop-qmp.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_AOP_QMP_H
+#define _DT_BINDINGS_CLK_QCOM_AOP_QMP_H
+
+#define QDSS_CLK_LEVEL_OFF 0
+#define QDSS_CLK_LEVEL_DYNAMIC 1
+#define QDSS_CLK_LEVEL_TURBO 2
+#define QDSS_CLK_LEVEL_NOMINAL 3
+#define QDSS_CLK_LEVEL_SVS_L1 4
+#define QDSS_CLK_LEVEL_SVS 5
+#define QDSS_CLK_LEVEL_LOW_SVS 6
+#define QDSS_CLK_LEVEL_MIN_SVS 7
+
+/* clocks id */
+#define QDSS_CLK 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index e169172..0d9d9f6 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -34,71 +34,70 @@
#define CAM_CC_CSIPHY0_CLK 17
#define CAM_CC_CSIPHY1_CLK 18
#define CAM_CC_CSIPHY2_CLK 19
-#define CAM_CC_DEBUG_CLK 20
-#define CAM_CC_FAST_AHB_CLK_SRC 21
-#define CAM_CC_FD_CORE_CLK 22
-#define CAM_CC_FD_CORE_CLK_SRC 23
-#define CAM_CC_FD_CORE_UAR_CLK 24
-#define CAM_CC_ICP_APB_CLK 25
-#define CAM_CC_ICP_ATB_CLK 26
-#define CAM_CC_ICP_CLK 27
-#define CAM_CC_ICP_CLK_SRC 28
-#define CAM_CC_ICP_CTI_CLK 29
-#define CAM_CC_ICP_TS_CLK 30
-#define CAM_CC_IFE_0_AXI_CLK 31
-#define CAM_CC_IFE_0_CLK 32
-#define CAM_CC_IFE_0_CLK_SRC 33
-#define CAM_CC_IFE_0_CPHY_RX_CLK 34
-#define CAM_CC_IFE_0_CSID_CLK 35
-#define CAM_CC_IFE_0_CSID_CLK_SRC 36
-#define CAM_CC_IFE_0_DSP_CLK 37
-#define CAM_CC_IFE_1_AXI_CLK 38
-#define CAM_CC_IFE_1_CLK 39
-#define CAM_CC_IFE_1_CLK_SRC 40
-#define CAM_CC_IFE_1_CPHY_RX_CLK 41
-#define CAM_CC_IFE_1_CSID_CLK 42
-#define CAM_CC_IFE_1_CSID_CLK_SRC 43
-#define CAM_CC_IFE_1_DSP_CLK 44
-#define CAM_CC_IFE_LITE_CLK 45
-#define CAM_CC_IFE_LITE_CLK_SRC 46
-#define CAM_CC_IFE_LITE_CPHY_RX_CLK 47
-#define CAM_CC_IFE_LITE_CSID_CLK 48
-#define CAM_CC_IFE_LITE_CSID_CLK_SRC 49
-#define CAM_CC_IPE_0_AHB_CLK 50
-#define CAM_CC_IPE_0_AREG_CLK 51
-#define CAM_CC_IPE_0_AXI_CLK 52
-#define CAM_CC_IPE_0_CLK 53
-#define CAM_CC_IPE_0_CLK_SRC 54
-#define CAM_CC_IPE_1_AHB_CLK 55
-#define CAM_CC_IPE_1_AREG_CLK 56
-#define CAM_CC_IPE_1_AXI_CLK 57
-#define CAM_CC_IPE_1_CLK 58
-#define CAM_CC_IPE_1_CLK_SRC 59
-#define CAM_CC_JPEG_CLK 60
-#define CAM_CC_JPEG_CLK_SRC 61
-#define CAM_CC_LRME_CLK 62
-#define CAM_CC_LRME_CLK_SRC 63
-#define CAM_CC_MCLK0_CLK 64
-#define CAM_CC_MCLK0_CLK_SRC 65
-#define CAM_CC_MCLK1_CLK 66
-#define CAM_CC_MCLK1_CLK_SRC 67
-#define CAM_CC_MCLK2_CLK 68
-#define CAM_CC_MCLK2_CLK_SRC 69
-#define CAM_CC_MCLK3_CLK 70
-#define CAM_CC_MCLK3_CLK_SRC 71
-#define CAM_CC_PLL0 72
-#define CAM_CC_PLL0_OUT_EVEN 73
-#define CAM_CC_PLL1 74
-#define CAM_CC_PLL1_OUT_EVEN 75
-#define CAM_CC_PLL2 76
-#define CAM_CC_PLL2_OUT_EVEN 77
-#define CAM_CC_PLL2_OUT_ODD 78
-#define CAM_CC_PLL3 79
-#define CAM_CC_PLL3_OUT_EVEN 80
-#define CAM_CC_PLL_TEST_CLK 81
-#define CAM_CC_SLOW_AHB_CLK_SRC 82
-#define CAM_CC_SOC_AHB_CLK 83
-#define CAM_CC_SYS_TMR_CLK 84
+#define CAM_CC_FAST_AHB_CLK_SRC 20
+#define CAM_CC_FD_CORE_CLK 21
+#define CAM_CC_FD_CORE_CLK_SRC 22
+#define CAM_CC_FD_CORE_UAR_CLK 23
+#define CAM_CC_ICP_APB_CLK 24
+#define CAM_CC_ICP_ATB_CLK 25
+#define CAM_CC_ICP_CLK 26
+#define CAM_CC_ICP_CLK_SRC 27
+#define CAM_CC_ICP_CTI_CLK 28
+#define CAM_CC_ICP_TS_CLK 29
+#define CAM_CC_IFE_0_AXI_CLK 30
+#define CAM_CC_IFE_0_CLK 31
+#define CAM_CC_IFE_0_CLK_SRC 32
+#define CAM_CC_IFE_0_CPHY_RX_CLK 33
+#define CAM_CC_IFE_0_CSID_CLK 34
+#define CAM_CC_IFE_0_CSID_CLK_SRC 35
+#define CAM_CC_IFE_0_DSP_CLK 36
+#define CAM_CC_IFE_1_AXI_CLK 37
+#define CAM_CC_IFE_1_CLK 38
+#define CAM_CC_IFE_1_CLK_SRC 39
+#define CAM_CC_IFE_1_CPHY_RX_CLK 40
+#define CAM_CC_IFE_1_CSID_CLK 41
+#define CAM_CC_IFE_1_CSID_CLK_SRC 42
+#define CAM_CC_IFE_1_DSP_CLK 43
+#define CAM_CC_IFE_LITE_CLK 44
+#define CAM_CC_IFE_LITE_CLK_SRC 45
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 46
+#define CAM_CC_IFE_LITE_CSID_CLK 47
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 48
+#define CAM_CC_IPE_0_AHB_CLK 49
+#define CAM_CC_IPE_0_AREG_CLK 50
+#define CAM_CC_IPE_0_AXI_CLK 51
+#define CAM_CC_IPE_0_CLK 52
+#define CAM_CC_IPE_0_CLK_SRC 53
+#define CAM_CC_IPE_1_AHB_CLK 54
+#define CAM_CC_IPE_1_AREG_CLK 55
+#define CAM_CC_IPE_1_AXI_CLK 56
+#define CAM_CC_IPE_1_CLK 57
+#define CAM_CC_IPE_1_CLK_SRC 58
+#define CAM_CC_JPEG_CLK 59
+#define CAM_CC_JPEG_CLK_SRC 60
+#define CAM_CC_LRME_CLK 61
+#define CAM_CC_LRME_CLK_SRC 62
+#define CAM_CC_MCLK0_CLK 63
+#define CAM_CC_MCLK0_CLK_SRC 64
+#define CAM_CC_MCLK1_CLK 65
+#define CAM_CC_MCLK1_CLK_SRC 66
+#define CAM_CC_MCLK2_CLK 67
+#define CAM_CC_MCLK2_CLK_SRC 68
+#define CAM_CC_MCLK3_CLK 69
+#define CAM_CC_MCLK3_CLK_SRC 70
+#define CAM_CC_PLL0 71
+#define CAM_CC_PLL0_OUT_EVEN 72
+#define CAM_CC_PLL1 73
+#define CAM_CC_PLL1_OUT_EVEN 74
+#define CAM_CC_PLL2 75
+#define CAM_CC_PLL2_OUT_EVEN 76
+#define CAM_CC_PLL2_OUT_ODD 77
+#define CAM_CC_PLL3 78
+#define CAM_CC_PLL3_OUT_EVEN 79
+#define CAM_CC_PLL_TEST_CLK 80
+#define CAM_CC_SLOW_AHB_CLK_SRC 81
+#define CAM_CC_SOC_AHB_CLK 82
+#define CAM_CC_SYS_TMR_CLK 83
#define TITAN_CAM_CC_BPS_BCR 0
#define TITAN_CAM_CC_CAMNOC_BCR 1
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index b1988e4..24dd11e 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -14,49 +14,48 @@
#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
#define _DT_BINDINGS_CLK_MSM_DISP_CC_SDM845_H
-#define DISP_CC_DEBUG_CLK 0
-#define DISP_CC_MDSS_AHB_CLK 1
-#define DISP_CC_MDSS_AXI_CLK 2
-#define DISP_CC_MDSS_BYTE0_CLK 3
-#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
-#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
-#define DISP_CC_MDSS_BYTE1_CLK 6
-#define DISP_CC_MDSS_BYTE1_CLK_SRC 7
-#define DISP_CC_MDSS_BYTE1_INTF_CLK 8
-#define DISP_CC_MDSS_DP_AUX_CLK 9
-#define DISP_CC_MDSS_DP_AUX_CLK_SRC 10
-#define DISP_CC_MDSS_DP_CRYPTO_CLK 11
-#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 12
-#define DISP_CC_MDSS_DP_LINK_CLK 13
-#define DISP_CC_MDSS_DP_LINK_CLK_SRC 14
-#define DISP_CC_MDSS_DP_LINK_INTF_CLK 15
-#define DISP_CC_MDSS_DP_PIXEL1_CLK 16
-#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 17
-#define DISP_CC_MDSS_DP_PIXEL_CLK 18
-#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 19
-#define DISP_CC_MDSS_ESC0_CLK 20
-#define DISP_CC_MDSS_ESC0_CLK_SRC 21
-#define DISP_CC_MDSS_ESC1_CLK 22
-#define DISP_CC_MDSS_ESC1_CLK_SRC 23
-#define DISP_CC_MDSS_MDP_CLK 24
-#define DISP_CC_MDSS_MDP_CLK_SRC 25
-#define DISP_CC_MDSS_MDP_LUT_CLK 26
-#define DISP_CC_MDSS_PCLK0_CLK 27
-#define DISP_CC_MDSS_PCLK0_CLK_SRC 28
-#define DISP_CC_MDSS_PCLK1_CLK 29
-#define DISP_CC_MDSS_PCLK1_CLK_SRC 30
-#define DISP_CC_MDSS_QDSS_AT_CLK 31
-#define DISP_CC_MDSS_QDSS_TSCTR_DIV8_CLK 32
-#define DISP_CC_MDSS_ROT_CLK 33
-#define DISP_CC_MDSS_ROT_CLK_SRC 34
-#define DISP_CC_MDSS_RSCC_AHB_CLK 35
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK 36
-#define DISP_CC_MDSS_VSYNC_CLK 37
-#define DISP_CC_MDSS_VSYNC_CLK_SRC 38
-#define DISP_CC_PLL0 39
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 40
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 41
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 42
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AXI_CLK 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 4
+#define DISP_CC_MDSS_BYTE1_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK 8
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 10
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_CLK 12
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK 17
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 18
+#define DISP_CC_MDSS_ESC0_CLK 19
+#define DISP_CC_MDSS_ESC0_CLK_SRC 20
+#define DISP_CC_MDSS_ESC1_CLK 21
+#define DISP_CC_MDSS_ESC1_CLK_SRC 22
+#define DISP_CC_MDSS_MDP_CLK 23
+#define DISP_CC_MDSS_MDP_CLK_SRC 24
+#define DISP_CC_MDSS_MDP_LUT_CLK 25
+#define DISP_CC_MDSS_PCLK0_CLK 26
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 27
+#define DISP_CC_MDSS_PCLK1_CLK 28
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 29
+#define DISP_CC_MDSS_QDSS_AT_CLK 30
+#define DISP_CC_MDSS_QDSS_TSCTR_DIV8_CLK 31
+#define DISP_CC_MDSS_ROT_CLK 32
+#define DISP_CC_MDSS_ROT_CLK_SRC 33
+#define DISP_CC_MDSS_RSCC_AHB_CLK 34
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 35
+#define DISP_CC_MDSS_VSYNC_CLK 36
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 37
+#define DISP_CC_PLL0 38
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 39
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 40
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 41
#define DISP_CC_MDSS_CORE_BCR 0
#define DISP_CC_MDSS_GCC_CLOCKS_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index d52e335..73a8c0b 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -35,167 +35,158 @@
#define GCC_CPUSS_GNOC_CLK 17
#define GCC_CPUSS_RBCPR_CLK 18
#define GCC_CPUSS_RBCPR_CLK_SRC 19
-#define GCC_CXO_TX1_CLKREF_CLK 20
-#define GCC_DDRSS_GPU_AXI_CLK 21
-#define GCC_DISP_AHB_CLK 22
-#define GCC_DISP_AXI_CLK 23
-#define GCC_DISP_GPLL0_CLK_SRC 24
-#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
-#define GCC_DISP_XO_CLK 26
-#define GCC_GP1_CLK 27
-#define GCC_GP1_CLK_SRC 28
-#define GCC_GP2_CLK 29
-#define GCC_GP2_CLK_SRC 30
-#define GCC_GP3_CLK 31
-#define GCC_GP3_CLK_SRC 32
-#define GCC_GPU_CFG_AHB_CLK 33
-#define GCC_GPU_GPLL0_CLK_SRC 34
-#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
-#define GCC_GPU_MEMNOC_GFX_CLK 36
-#define GCC_GPU_SNOC_DVM_GFX_CLK 37
-#define GCC_MSS_AXIS2_CLK 38
-#define GCC_MSS_CFG_AHB_CLK 39
-#define GCC_MSS_GPLL0_DIV_CLK_SRC 40
-#define GCC_MSS_MFAB_AXIS_CLK 41
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK 42
-#define GCC_MSS_SNOC_AXI_CLK 43
-#define GCC_PCIE_0_AUX_CLK 44
-#define GCC_PCIE_0_AUX_CLK_SRC 45
-#define GCC_PCIE_0_CFG_AHB_CLK 46
-#define GCC_PCIE_0_CLKREF_CLK 47
-#define GCC_PCIE_0_MSTR_AXI_CLK 48
-#define GCC_PCIE_0_PIPE_CLK 49
-#define GCC_PCIE_0_SLV_AXI_CLK 50
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 51
-#define GCC_PCIE_1_AUX_CLK 52
-#define GCC_PCIE_1_AUX_CLK_SRC 53
-#define GCC_PCIE_1_CFG_AHB_CLK 54
-#define GCC_PCIE_1_CLKREF_CLK 55
-#define GCC_PCIE_1_MSTR_AXI_CLK 56
-#define GCC_PCIE_1_PIPE_CLK 57
-#define GCC_PCIE_1_SLV_AXI_CLK 58
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 59
-#define GCC_PCIE_PHY_AUX_CLK 60
-#define GCC_PCIE_PHY_REFGEN_CLK 61
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 62
-#define GCC_PDM2_CLK 63
-#define GCC_PDM2_CLK_SRC 64
-#define GCC_PDM_AHB_CLK 65
-#define GCC_PDM_XO4_CLK 66
-#define GCC_PRNG_AHB_CLK 67
-#define GCC_QMIP_CAMERA_AHB_CLK 68
-#define GCC_QMIP_DISP_AHB_CLK 69
-#define GCC_QMIP_VIDEO_AHB_CLK 70
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK 71
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC 72
-#define GCC_QUPV3_WRAP0_CORE_CLK 73
-#define GCC_QUPV3_WRAP0_S0_CLK 74
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC 75
-#define GCC_QUPV3_WRAP0_S1_CLK 76
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC 77
-#define GCC_QUPV3_WRAP0_S2_CLK 78
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC 79
-#define GCC_QUPV3_WRAP0_S3_CLK 80
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC 81
-#define GCC_QUPV3_WRAP0_S4_CLK 82
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC 83
-#define GCC_QUPV3_WRAP0_S5_CLK 84
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC 85
-#define GCC_QUPV3_WRAP0_S6_CLK 86
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC 87
-#define GCC_QUPV3_WRAP0_S7_CLK 88
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC 89
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK 90
-#define GCC_QUPV3_WRAP1_CORE_CLK 91
-#define GCC_QUPV3_WRAP1_S0_CLK 92
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC 93
-#define GCC_QUPV3_WRAP1_S1_CLK 94
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC 95
-#define GCC_QUPV3_WRAP1_S2_CLK 96
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC 97
-#define GCC_QUPV3_WRAP1_S3_CLK 98
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC 99
-#define GCC_QUPV3_WRAP1_S4_CLK 100
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC 101
-#define GCC_QUPV3_WRAP1_S5_CLK 102
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC 103
-#define GCC_QUPV3_WRAP1_S6_CLK 104
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC 105
-#define GCC_QUPV3_WRAP1_S7_CLK 106
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC 107
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK 108
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK 109
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK 110
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK 111
-#define GCC_RX1_USB2_CLKREF_CLK 112
-#define GCC_RX2_QLINK_CLKREF_CLK 113
-#define GCC_RX3_MODEM_CLKREF_CLK 114
-#define GCC_SDCC2_AHB_CLK 115
-#define GCC_SDCC2_APPS_CLK 116
-#define GCC_SDCC2_APPS_CLK_SRC 117
-#define GCC_SDCC4_AHB_CLK 118
-#define GCC_SDCC4_APPS_CLK 119
-#define GCC_SDCC4_APPS_CLK_SRC 120
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 121
-#define GCC_TSIF_AHB_CLK 122
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK 123
-#define GCC_TSIF_REF_CLK 124
-#define GCC_TSIF_REF_CLK_SRC 125
-#define GCC_UFS_CARD_AHB_CLK 126
-#define GCC_UFS_CARD_AXI_CLK 127
-#define GCC_UFS_CARD_AXI_CLK_SRC 128
-#define GCC_UFS_CARD_CLKREF_CLK 129
-#define GCC_UFS_CARD_ICE_CORE_CLK 130
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 131
-#define GCC_UFS_CARD_PHY_AUX_CLK 132
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 133
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 134
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 135
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 136
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK 137
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 138
-#define GCC_UFS_MEM_CLKREF_CLK 139
-#define GCC_UFS_PHY_AHB_CLK 140
-#define GCC_UFS_PHY_AXI_CLK 141
-#define GCC_UFS_PHY_AXI_CLK_SRC 142
-#define GCC_UFS_PHY_ICE_CORE_CLK 143
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 144
-#define GCC_UFS_PHY_PHY_AUX_CLK 145
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 146
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 147
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 148
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 149
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK 150
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 151
-#define GCC_USB30_PRIM_MASTER_CLK 152
-#define GCC_USB30_PRIM_MASTER_CLK_SRC 153
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK 154
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 155
-#define GCC_USB30_PRIM_SLEEP_CLK 156
-#define GCC_USB30_SEC_MASTER_CLK 157
-#define GCC_USB30_SEC_MASTER_CLK_SRC 158
-#define GCC_USB30_SEC_MOCK_UTMI_CLK 159
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 160
-#define GCC_USB30_SEC_SLEEP_CLK 161
-#define GCC_USB3_PRIM_CLKREF_CLK 162
-#define GCC_USB3_PRIM_PHY_AUX_CLK 163
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 164
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 165
-#define GCC_USB3_PRIM_PHY_PIPE_CLK 166
-#define GCC_USB3_SEC_CLKREF_CLK 167
-#define GCC_USB3_SEC_PHY_AUX_CLK 168
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 169
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK 170
-#define GCC_USB3_SEC_PHY_PIPE_CLK 171
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 172
-#define GCC_VIDEO_AHB_CLK 173
-#define GCC_VIDEO_AXI_CLK 174
-#define GCC_VIDEO_XO_CLK 175
-#define GPLL0 176
-#define GPLL0_OUT_EVEN 177
-#define GPLL0_OUT_MAIN 178
-#define GPLL1 179
-#define GPLL1_OUT_MAIN 180
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DISP_AHB_CLK 21
+#define GCC_DISP_AXI_CLK 22
+#define GCC_DISP_GPLL0_CLK_SRC 23
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 24
+#define GCC_DISP_XO_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_MSS_AXIS2_CLK 37
+#define GCC_MSS_CFG_AHB_CLK 38
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 39
+#define GCC_MSS_MFAB_AXIS_CLK 40
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 41
+#define GCC_MSS_SNOC_AXI_CLK 42
+#define GCC_PCIE_0_AUX_CLK 43
+#define GCC_PCIE_0_AUX_CLK_SRC 44
+#define GCC_PCIE_0_CFG_AHB_CLK 45
+#define GCC_PCIE_0_CLKREF_CLK 46
+#define GCC_PCIE_0_MSTR_AXI_CLK 47
+#define GCC_PCIE_0_PIPE_CLK 48
+#define GCC_PCIE_0_SLV_AXI_CLK 49
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 50
+#define GCC_PCIE_1_AUX_CLK 51
+#define GCC_PCIE_1_AUX_CLK_SRC 52
+#define GCC_PCIE_1_CFG_AHB_CLK 53
+#define GCC_PCIE_1_CLKREF_CLK 54
+#define GCC_PCIE_1_MSTR_AXI_CLK 55
+#define GCC_PCIE_1_PIPE_CLK 56
+#define GCC_PCIE_1_SLV_AXI_CLK 57
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 58
+#define GCC_PCIE_PHY_AUX_CLK 59
+#define GCC_PCIE_PHY_REFGEN_CLK 60
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 61
+#define GCC_PDM2_CLK 62
+#define GCC_PDM2_CLK_SRC 63
+#define GCC_PDM_AHB_CLK 64
+#define GCC_PDM_XO4_CLK 65
+#define GCC_PRNG_AHB_CLK 66
+#define GCC_QMIP_CAMERA_AHB_CLK 67
+#define GCC_QMIP_DISP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_AHB_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S5_CLK 80
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S6_CLK 82
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S7_CLK 84
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S0_CLK 86
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S1_CLK 88
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S2_CLK 90
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S3_CLK 92
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S4_CLK 94
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S5_CLK 96
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S6_CLK 98
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S7_CLK 100
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 101
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 102
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 103
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 105
+#define GCC_SDCC2_AHB_CLK 106
+#define GCC_SDCC2_APPS_CLK 107
+#define GCC_SDCC2_APPS_CLK_SRC 108
+#define GCC_SDCC4_AHB_CLK 109
+#define GCC_SDCC4_APPS_CLK 110
+#define GCC_SDCC4_APPS_CLK_SRC 111
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 112
+#define GCC_TSIF_AHB_CLK 113
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 114
+#define GCC_TSIF_REF_CLK 115
+#define GCC_TSIF_REF_CLK_SRC 116
+#define GCC_UFS_CARD_AHB_CLK 117
+#define GCC_UFS_CARD_AXI_CLK 118
+#define GCC_UFS_CARD_AXI_CLK_SRC 119
+#define GCC_UFS_CARD_CLKREF_CLK 120
+#define GCC_UFS_CARD_ICE_CORE_CLK 121
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 122
+#define GCC_UFS_CARD_PHY_AUX_CLK 123
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 124
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 125
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 126
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 127
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 128
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 129
+#define GCC_UFS_MEM_CLKREF_CLK 130
+#define GCC_UFS_PHY_AHB_CLK 131
+#define GCC_UFS_PHY_AXI_CLK 132
+#define GCC_UFS_PHY_AXI_CLK_SRC 133
+#define GCC_UFS_PHY_ICE_CORE_CLK 134
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 135
+#define GCC_UFS_PHY_PHY_AUX_CLK 136
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 137
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 139
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 141
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 142
+#define GCC_USB30_PRIM_MASTER_CLK 143
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 144
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 146
+#define GCC_USB30_PRIM_SLEEP_CLK 147
+#define GCC_USB30_SEC_MASTER_CLK 148
+#define GCC_USB30_SEC_MASTER_CLK_SRC 149
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 150
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 151
+#define GCC_USB30_SEC_SLEEP_CLK 152
+#define GCC_USB3_PRIM_CLKREF_CLK 153
+#define GCC_USB3_PRIM_PHY_AUX_CLK 154
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 155
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 156
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 157
+#define GCC_USB3_SEC_CLKREF_CLK 158
+#define GCC_USB3_SEC_PHY_AUX_CLK 159
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 160
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 161
+#define GCC_USB3_SEC_PHY_PIPE_CLK 162
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 163
+#define GCC_VIDEO_AHB_CLK 164
+#define GCC_VIDEO_AXI_CLK 165
+#define GCC_VIDEO_XO_CLK 166
+#define GPLL0 167
+#define GPLL0_OUT_EVEN 168
+#define GPLL0_OUT_MAIN 169
+#define GPLL1 170
+#define GPLL1_OUT_MAIN 171
/* GCC reset clocks */
#define GCC_GPU_BCR 0
@@ -223,5 +214,13 @@
#define GCC_USB3PHY_PHY_SEC_BCR 22
#define GCC_USB3_DP_PHY_SEC_BCR 23
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 24
+#define GCC_PCIE_0_PHY_BCR 25
+#define GCC_PCIE_1_PHY_BCR 26
+
+/* Dummy clocks for rate measurement */
+#define MEASURE_ONLY_SNOC_CLK 0
+#define MEASURE_ONLY_CNOC_CLK 1
+#define MEASURE_ONLY_BIMC_CLK 2
+#define MEASURE_ONLY_IPA_2X_CLK 3
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
new file mode 100644
index 0000000..6243588
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDX24_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SDX24_H
+
+/* GCC clock registers */
+#define GCC_BLSP1_AHB_CLK 0
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 1
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 2
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 3
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 5
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 6
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 7
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 8
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 9
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 10
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 11
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 12
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 13
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 14
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP1_SLEEP_CLK 17
+#define GCC_BLSP1_UART1_APPS_CLK 18
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 19
+#define GCC_BLSP1_UART2_APPS_CLK 20
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 21
+#define GCC_BLSP1_UART3_APPS_CLK 22
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 23
+#define GCC_BLSP1_UART4_APPS_CLK 24
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 25
+#define GCC_BOOT_ROM_AHB_CLK 26
+#define GCC_CE1_AHB_CLK 27
+#define GCC_CE1_AXI_CLK 28
+#define GCC_CE1_CLK 29
+#define GCC_CPUSS_AHB_CLK 30
+#define GCC_CPUSS_AHB_CLK_SRC 31
+#define GCC_CPUSS_GNOC_CLK 32
+#define GCC_CPUSS_GPLL0_CLK_SRC 33
+#define GCC_CPUSS_RBCPR_CLK 34
+#define GCC_CPUSS_RBCPR_CLK_SRC 35
+#define GCC_GP1_CLK 36
+#define GCC_GP1_CLK_SRC 37
+#define GCC_GP2_CLK 38
+#define GCC_GP2_CLK_SRC 39
+#define GCC_GP3_CLK 40
+#define GCC_GP3_CLK_SRC 41
+#define GCC_MSS_CFG_AHB_CLK 42
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 43
+#define GCC_MSS_SNOC_AXI_CLK 44
+#define GCC_PCIE_AUX_CLK 45
+#define GCC_PCIE_AUX_PHY_CLK_SRC 46
+#define GCC_PCIE_CFG_AHB_CLK 47
+#define GCC_PCIE_MSTR_AXI_CLK 48
+#define GCC_PCIE_PHY_REFGEN_CLK 49
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 50
+#define GCC_PCIE_PIPE_CLK 51
+#define GCC_PCIE_SLEEP_CLK 52
+#define GCC_PCIE_SLV_AXI_CLK 53
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 54
+#define GCC_PDM2_CLK 55
+#define GCC_PDM2_CLK_SRC 56
+#define GCC_PDM_AHB_CLK 57
+#define GCC_PDM_XO4_CLK 58
+#define GCC_PRNG_AHB_CLK 59
+#define GCC_SDCC1_AHB_CLK 60
+#define GCC_SDCC1_APPS_CLK 61
+#define GCC_SDCC1_APPS_CLK_SRC 62
+#define GCC_SPMI_FETCHER_AHB_CLK 63
+#define GCC_SPMI_FETCHER_CLK 64
+#define GCC_SPMI_FETCHER_CLK_SRC 65
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 66
+#define GCC_USB30_MASTER_CLK 67
+#define GCC_USB30_MASTER_CLK_SRC 68
+#define GCC_USB30_MOCK_UTMI_CLK 69
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 70
+#define GCC_USB30_SLEEP_CLK 71
+#define GCC_USB3_PHY_AUX_CLK 72
+#define GCC_USB3_PHY_AUX_CLK_SRC 73
+#define GCC_USB3_PHY_PIPE_CLK 74
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 75
+#define GCC_XO_DIV4_CLK 76
+#define GPLL0 77
+#define GPLL0_OUT_EVEN 78
+
+/* GDSCs */
+#define PCIE_GDSC 0
+#define USB30_GDSC 1
+
+/* CPU clocks */
+#define CLOCK_A7SS 0
+
+/* GCC reset clocks */
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_UART3_BCR 5
+#define GCC_BLSP1_UART4_BCR 6
+#define GCC_CE1_BCR 7
+#define GCC_PCIE_BCR 8
+#define GCC_PCIE_PHY_BCR 9
+#define GCC_PDM_BCR 10
+#define GCC_PRNG_BCR 11
+#define GCC_SDCC1_BCR 12
+#define GCC_SPMI_FETCHER_BCR 13
+#define GCC_USB30_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 13de1e1..c43a9f8 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -17,37 +17,36 @@
/* GPUCC clock registers */
#define GPU_CC_ACD_AHB_CLK 0
#define GPU_CC_ACD_CXO_CLK 1
-#define GPU_CC_AHB_CLK 2
+#define GPU_CC_AHB_CLK 2
#define GPU_CC_CRC_AHB_CLK 3
#define GPU_CC_CX_APB_CLK 4
#define GPU_CC_CX_GMU_CLK 5
#define GPU_CC_CX_QDSS_AT_CLK 6
#define GPU_CC_CX_QDSS_TRIG_CLK 7
-#define GPU_CC_CX_QDSS_TSCTR_CLK 8
-#define GPU_CC_CX_SNOC_DVM_CLK 9
+#define GPU_CC_CX_QDSS_TSCTR_CLK 8
+#define GPU_CC_CX_SNOC_DVM_CLK 9
#define GPU_CC_CXO_AON_CLK 10
-#define GPU_CC_CXO_CLK 11
-#define GPU_CC_DEBUG_CLK 12
-#define GPU_CC_GX_CXO_CLK 13
-#define GPU_CC_GX_GMU_CLK 14
-#define GPU_CC_GX_QDSS_TSCTR_CLK 15
-#define GPU_CC_GX_VSENSE_CLK 16
-#define GPU_CC_PLL0_OUT_MAIN 17
-#define GPU_CC_PLL0_OUT_ODD 18
-#define GPU_CC_PLL0_OUT_TEST 19
-#define GPU_CC_PLL1 20
-#define GPU_CC_PLL1_OUT_EVEN 21
-#define GPU_CC_PLL1_OUT_MAIN 22
-#define GPU_CC_PLL1_OUT_ODD 23
-#define GPU_CC_PLL1_OUT_TEST 24
-#define GPU_CC_PLL_TEST_CLK 25
-#define GPU_CC_RBCPR_AHB_CLK 26
-#define GPU_CC_RBCPR_CLK 27
-#define GPU_CC_RBCPR_CLK_SRC 28
-#define GPU_CC_SLEEP_CLK 29
-#define GPU_CC_GMU_CLK_SRC 30
-#define GPU_CC_CX_GFX3D_CLK 31
-#define GPU_CC_CX_GFX3D_SLV_CLK 32
+#define GPU_CC_CXO_CLK 11
+#define GPU_CC_GX_CXO_CLK 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_GX_QDSS_TSCTR_CLK 14
+#define GPU_CC_GX_VSENSE_CLK 15
+#define GPU_CC_PLL0_OUT_MAIN 16
+#define GPU_CC_PLL0_OUT_ODD 17
+#define GPU_CC_PLL0_OUT_TEST 18
+#define GPU_CC_PLL1 19
+#define GPU_CC_PLL1_OUT_EVEN 20
+#define GPU_CC_PLL1_OUT_MAIN 21
+#define GPU_CC_PLL1_OUT_ODD 22
+#define GPU_CC_PLL1_OUT_TEST 23
+#define GPU_CC_PLL_TEST_CLK 24
+#define GPU_CC_RBCPR_AHB_CLK 25
+#define GPU_CC_RBCPR_CLK 26
+#define GPU_CC_RBCPR_CLK_SRC 27
+#define GPU_CC_SLEEP_CLK 28
+#define GPU_CC_GMU_CLK_SRC 29
+#define GPU_CC_CX_GFX3D_CLK 30
+#define GPU_CC_CX_GFX3D_SLV_CLK 31
/* GPUCC reset clock registers */
#define GPUCC_GPU_CC_ACD_BCR 0
@@ -63,5 +62,5 @@
#define GPU_CC_PLL0 0
#define GPU_CC_PLL0_OUT_EVEN 1
#define GPU_CC_GX_GFX3D_CLK_SRC 2
-#define GPU_CC_GX_GFX3D_CLK 3
+#define GPU_CC_GX_GFX3D_CLK 3
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 778d11b..a31fa20 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -17,19 +17,17 @@
/* RPMh controlled clocks */
#define RPMH_CXO_CLK 0
#define RPMH_CXO_CLK_A 1
-#define RPMH_LN_BB_CLK1 2
-#define RPMH_LN_BB_CLK1_A 3
-#define RPMH_LN_BB_CLK2 4
-#define RPMH_LN_BB_CLK2_A 5
-#define RPMH_LN_BB_CLK3 6
-#define RPMH_LN_BB_CLK3_A 7
-#define RPMH_RF_CLK1 8
-#define RPMH_RF_CLK1_A 9
-#define RPMH_RF_CLK2 10
-#define RPMH_RF_CLK2_A 11
-#define RPMH_RF_CLK3 12
-#define RPMH_RF_CLK3_A 13
-#define RPMH_QDSS_CLK 14
-#define RPMH_QDSS_A_CLK 15
+#define RPMH_LN_BB_CLK2 2
+#define RPMH_LN_BB_CLK2_A 3
+#define RPMH_LN_BB_CLK3 4
+#define RPMH_LN_BB_CLK3_A 5
+#define RPMH_RF_CLK1 6
+#define RPMH_RF_CLK1_A 7
+#define RPMH_RF_CLK2 8
+#define RPMH_RF_CLK2_A 9
+#define RPMH_RF_CLK3 10
+#define RPMH_RF_CLK3_A 11
+#define RPMH_QDSS_CLK 12
+#define RPMH_QDSS_A_CLK 13
#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
index 723d2e0..b362852d 100644
--- a/include/dt-bindings/clock/qcom,videocc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,18 +16,17 @@
#define VIDEO_CC_APB_CLK 0
#define VIDEO_CC_AT_CLK 1
-#define VIDEO_CC_DEBUG_CLK 2
-#define VIDEO_CC_QDSS_TRIG_CLK 3
-#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 4
-#define VIDEO_CC_VCODEC0_AXI_CLK 5
-#define VIDEO_CC_VCODEC0_CORE_CLK 6
-#define VIDEO_CC_VCODEC1_AXI_CLK 7
-#define VIDEO_CC_VCODEC1_CORE_CLK 8
-#define VIDEO_CC_VENUS_AHB_CLK 9
-#define VIDEO_CC_VENUS_CLK_SRC 10
-#define VIDEO_CC_VENUS_CTL_AXI_CLK 11
-#define VIDEO_CC_VENUS_CTL_CORE_CLK 12
-#define VIDEO_PLL0 13
+#define VIDEO_CC_QDSS_TRIG_CLK 2
+#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3
+#define VIDEO_CC_VCODEC0_AXI_CLK 4
+#define VIDEO_CC_VCODEC0_CORE_CLK 5
+#define VIDEO_CC_VCODEC1_AXI_CLK 6
+#define VIDEO_CC_VCODEC1_CORE_CLK 7
+#define VIDEO_CC_VENUS_AHB_CLK 8
+#define VIDEO_CC_VENUS_CLK_SRC 9
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 10
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 11
+#define VIDEO_PLL0 12
#define VIDEO_CC_INTERFACE_BCR 0
#define VIDEO_CC_VCODEC0_BCR 1
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 8135da9..be2210c 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -244,7 +244,8 @@
#define MSM_BUS_MASTER_PIMEM 141
#define MSM_BUS_MASTER_MEM_NOC_SNOC 142
#define MSM_BUS_MASTER_IPA_CORE 143
-#define MSM_BUS_MASTER_MASTER_LAST 144
+#define MSM_BUS_MASTER_ALC 144
+#define MSM_BUS_MASTER_MASTER_LAST 145
#define MSM_BUS_MASTER_LLCC_DISPLAY 20000
#define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
diff --git a/include/dt-bindings/msm/power-on.h b/include/dt-bindings/msm/power-on.h
new file mode 100644
index 0000000..f43841e
--- /dev/null
+++ b/include/dt-bindings/msm/power-on.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED 0x00
+#define PON_POWER_OFF_WARM_RESET 0x01
+#define PON_POWER_OFF_SHUTDOWN 0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN 0x05
+#define PON_POWER_OFF_HARD_RESET 0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET 0x08
+#define PON_POWER_OFF_MAX_TYPE 0x10
+
+#endif
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index b5abfda..4c5bca38 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,9 +14,6 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
-#include <linux/linkage.h>
-#include <linux/types.h>
-
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
@@ -60,6 +57,13 @@
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+#define ARM_SMCCC_QUIRK_NONE 0
+#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/types.h>
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -72,33 +76,59 @@
};
/**
- * arm_smccc_smc() - make SMC calls
+ * struct arm_smccc_quirk - Contains quirk information
+ * @id: quirk identification
+ * @state: quirk specific information
+ * @a6: Qualcomm quirk entry for returning post-smc call contents of a6
+ */
+struct arm_smccc_quirk {
+ int id;
+ union {
+ unsigned long a6;
+ } state;
+};
+
+/**
+ * __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
+ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
*
* This function is used to make SMC calls following SMC Calling Convention.
* The content of the supplied param are copied to registers 0 to 7 prior
* to the SMC instruction. The return values are updated with the content
- * from register 0 to 3 on return from the SMC instruction.
+ * from register 0 to 3 on return from the SMC instruction. An optional
+ * quirk structure provides vendor specific behavior.
*/
-asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
+asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res);
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
/**
- * arm_smccc_hvc() - make HVC calls
+ * __arm_smccc_hvc() - make HVC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
+ * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
*
* This function is used to make HVC calls following SMC Calling
* Convention. The content of the supplied param are copied to registers 0
* to 7 prior to the HVC instruction. The return values are updated with
- * the content from register 0 to 3 on return from the HVC instruction.
+ * the content from register 0 to 3 on return from the HVC instruction. An
+ * optional quirk structure provides vendor specific behavior.
*/
-asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
+asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res);
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
+
+#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
+
+#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
+
+#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+
+#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
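
For context, the variadic wrappers above make plain and quirked calls look alike: arm_smccc_smc() appends a NULL quirk pointer, while arm_smccc_smc_quirk() passes the caller's quirk through. A minimal caller sketch (the function ID and argument values are placeholders, not taken from this patch):

    #include <linux/arm-smccc.h>

    static unsigned long example_smc_call(unsigned long fn_id)
    {
            struct arm_smccc_res res;
            struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

            /* Plain call: the macro supplies the NULL quirk pointer. */
            arm_smccc_smc(fn_id, 0, 0, 0, 0, 0, 0, 0, &res);

            /* Quirked call: a6 is saved/restored around the SMC and its
             * post-call value is returned in quirk.state.a6. */
            arm_smccc_smc_quirk(fn_id, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);

            return res.a0;
    }
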
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fc08c407..744ea4f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -125,6 +125,7 @@
* BVEC_POOL_IDX()
*/
#define BIO_RESET_BITS 10
+#define BIO_INLINECRYPT 15
/*
* We support 6 different bvec pools, the last one is magic in that it
@@ -162,6 +163,7 @@
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
+ __REQ_BARRIER, /* marks flush req as barrier */
/* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
@@ -207,7 +209,7 @@
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
- REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
+ REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_BARRIER)
#define REQ_CLONE_MASK REQ_COMMON_MASK
/* This mask is used for both bio and request merge checking */
@@ -220,6 +222,7 @@
#define REQ_SORTED (1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA (1ULL << __REQ_FUA)
+#define REQ_BARRIER (1ULL << __REQ_BARRIER)
#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
#define REQ_STARTED (1ULL << __REQ_STARTED)
#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
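
Because REQ_BARRIER is now part of REQ_COMMON_MASK, the flag survives bio cloning and reaches the driver; a driver that honors it would test the request flags roughly as below (a hypothetical handler, not from this patch):

    #include <linux/blkdev.h>

    static void example_handle_flush(struct request *req)
    {
            if (req->cmd_flags & REQ_BARRIER) {
                    /* Order this flush against earlier writes (e.g. via a
                     * cache barrier) instead of a full synchronous flush. */
            }
    }
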
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index af84de6..0353461 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -579,6 +579,24 @@
*/
int subsys_cgroup_allow_attach(struct cgroup_taskset *tset);
+static inline void cgroup_init_kthreadd(void)
+{
+ /*
+ * kthreadd is inherited by all kthreads, keep it in the root so
+ * that the new kthreads are guaranteed to stay in the root until
+ * initialization is finished.
+ */
+ current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+ /*
+ * This kthread finished initialization. The creator should have
+ * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+ */
+ current->no_cgroup_migration = 0;
+}
#else /* !CONFIG_CGROUPS */
@@ -600,6 +618,8 @@
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
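
The two helpers are meant to bracket kthread creation: kthreadd pins itself (and therefore every newly forked kthread) to the root cgroup, and each kthread clears the flag once its own setup is done. A sketch of the intended pairing (the real call sites are in kernel/kthread.c, which this hunk does not touch):

    #include <linux/cgroup.h>

    static int example_kthread_fn(void *data)
    {
            cgroup_kthread_ready();         /* setup finished; allow migration */
            /* ... kthread body ... */
            return 0;
    }

    static int example_kthreadd(void *unused)
    {
            cgroup_init_kthreadd();         /* keep new kthreads in the root */
            /* ... spawn kthreads; each one calls cgroup_kthread_ready()
             * once its initialization is complete ... */
            return 0;
    }
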
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 8fd5fba..b1f2d00 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -36,7 +36,11 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
- /* unused */
+#define CLK_ENABLE_HAND_OFF BIT(13) /* enable clock when registered. */
+ /*
+ * hand-off enable_count & prepare_count
+ * to first consumer that enables clk
+ */
#define CLK_IS_MEASURE BIT(14) /* measure clock */
struct clk;
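
A provider opts in per clock through the init flags; a minimal registration sketch (the clock name and the empty clk_ops are placeholders, real providers supply their own ops):

    #include <linux/clk-provider.h>

    static const struct clk_ops example_clk_ops;    /* placeholder ops */

    static const struct clk_init_data example_init = {
            .name  = "example_handoff_clk",
            .ops   = &example_clk_ops,
            .flags = CLK_ENABLE_HAND_OFF,
    };
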
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 7d41026..7d8b528 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -19,7 +19,7 @@
#define _LINUX_CORESIGHT_PMU_H
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
+#define CORESIGHT_ETM_PMU_SEED 0x01
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
@@ -33,7 +33,7 @@
* the common convention is to have data trace IDs be I(N) + 1,
* set instruction trace IDs as a function of the CPU number.
*/
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
+ return (CORESIGHT_ETM_PMU_SEED + cpu);
}
#endif
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 373dbd5..ec9c128 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -855,7 +855,9 @@
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
- return -ENODEV;
+ unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
+
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
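
With this change the fallback path delegates to dma_mmap_attrs() with the non-consistent attribute instead of failing outright. The caller shape stays the usual one (hypothetical driver code; attrs must match what was used at allocation time):

    #include <linux/dma-mapping.h>

    static int example_mmap_buffer(struct device *dev,
                                   struct vm_area_struct *vma, void *cpu_addr,
                                   dma_addr_t handle, size_t size)
    {
            /* Map the DMA buffer into user space. */
            return dma_mmap_attrs(dev, vma, cpu_addr, handle, size,
                                  DMA_ATTR_NON_CONSISTENT);
    }
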
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 22fd849..b871c0c 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -65,12 +65,6 @@
#define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */
#define EXTCON_JACK_SPDIF_OUT 27
-/* connector orientation 0 - CC1, 1 - CC2 */
-#define EXTCON_USB_CC 28
-
-/* connector speed 0 - High Speed, 1 - super speed */
-#define EXTCON_USB_SPEED 29
-
/* Display external connector */
#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */
#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5d3a4cd..e3d181e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -435,6 +435,8 @@
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
+int iommu_fwspec_get_id(struct device *dev, u32 *id);
+int iommu_is_available(struct device *dev);
#else /* CONFIG_IOMMU_API */
@@ -705,6 +707,15 @@
return -ENODEV;
}
+static inline int iommu_fwspec_get_id(struct device *dev, u32 *id)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_is_available(struct device *dev)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
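
A consumer of the two new helpers would look roughly like this (the probe function and the defer-on-unavailable policy are assumptions; only the helper signatures come from the header above):

    #include <linux/device.h>
    #include <linux/iommu.h>

    static int example_probe(struct device *dev)
    {
            u32 sid;

            if (iommu_is_available(dev) < 0)
                    return -EPROBE_DEFER;   /* assumed policy */

            if (!iommu_fwspec_get_id(dev, &sid))
                    dev_info(dev, "stream ID %u\n", sid);

            return 0;
    }
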
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01c0b9c..8c58db2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2546988..8b35bdb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -739,6 +739,12 @@
return false;
}
+static inline void mem_cgroup_update_page_stat(struct page *page,
+ enum mem_cgroup_stat_index idx,
+ int nr)
+{
+}
+
static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d265f60..72dd7ba 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -12,7 +12,11 @@
#include <linux/device.h>
#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+
+#define MMC_CARD_CMDQ_BLK_SIZE 512
struct mmc_cid {
unsigned int manfid;
@@ -52,6 +56,7 @@
u8 sec_feature_support;
u8 rel_sectors;
u8 rel_param;
+ bool enhanced_rpmb_supported;
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
@@ -83,11 +88,13 @@
bool hpi; /* HPI support bit */
unsigned int hpi_cmd; /* cmd used as HPI */
bool bkops; /* background support bit */
- bool man_bkops_en; /* manual bkops enable bit */
+ u8 bkops_en; /* bkops enable */
unsigned int data_sector_size; /* 512 bytes or 4KB */
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
+ u8 raw_ext_csd_cmdq; /* 15 */
+ u8 raw_ext_csd_cache_ctrl; /* 33 */
bool ffu_capable; /* Firmware upgrade support */
#define MMC_FIRMWARE_LEN 8
u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
@@ -95,7 +102,10 @@
u8 raw_partition_support; /* 160 */
u8 raw_rpmb_size_mult; /* 168 */
u8 raw_erased_mem_count; /* 181 */
+ u8 raw_ext_csd_bus_width; /* 183 */
u8 strobe_support; /* 184 */
+#define MMC_STROBE_SUPPORT (1 << 0)
+ u8 raw_ext_csd_hs_timing; /* 185 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
u8 raw_driver_strength; /* 197 */
@@ -116,13 +126,20 @@
u8 raw_pwr_cl_200_360; /* 237 */
u8 raw_pwr_cl_ddr_52_195; /* 238 */
u8 raw_pwr_cl_ddr_52_360; /* 239 */
+ u8 cache_flush_policy; /* 240 */
+#define MMC_BKOPS_URGENCY_MASK 0x3
u8 raw_pwr_cl_ddr_200_360; /* 253 */
u8 raw_bkops_status; /* 246 */
u8 raw_sectors[4]; /* 212 - 4 bytes */
u8 pre_eol_info; /* 267 */
u8 device_life_time_est_typ_a; /* 268 */
u8 device_life_time_est_typ_b; /* 269 */
+ u8 cmdq_depth; /* 307 */
+ u8 cmdq_support; /* 308 */
+ u8 barrier_support; /* 486 */
+ u8 barrier_en;
+ u8 fw_version; /* 254 */
unsigned int feature_support;
#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */
};
@@ -193,7 +210,8 @@
wide_bus:1,
high_power:1,
high_speed:1,
- disable_cd:1;
+ disable_cd:1,
+ async_intr_sup:1;
};
struct sdio_cis {
@@ -222,6 +240,28 @@
MMC_BLK_NEW_REQUEST,
};
+enum mmc_packed_stop_reasons {
+ EXCEEDS_SEGMENTS = 0,
+ EXCEEDS_SECTORS,
+ WRONG_DATA_DIR,
+ FLUSH_OR_DISCARD,
+ EMPTY_QUEUE,
+ REL_WRITE,
+ THRESHOLD,
+ LARGE_SEC_ALIGN,
+ RANDOM,
+ FUA,
+ MAX_REASONS,
+};
+
+struct mmc_wr_pack_stats {
+ u32 *packing_events;
+ u32 pack_stop_reason[MAX_REASONS];
+ spinlock_t lock;
+ bool enabled;
+ bool print_in_read;
+};
+
/* The number of MMC physical partitions. These consist of:
* boot partitions (2), general purpose partitions (4) and
* RPMB partition (1) in MMC v4.4.
@@ -246,6 +286,62 @@
#define MMC_BLK_DATA_AREA_RPMB (1<<3)
};
+enum {
+ MMC_BKOPS_NO_OP,
+ MMC_BKOPS_NOT_CRITICAL,
+ MMC_BKOPS_PERF_IMPACT,
+ MMC_BKOPS_CRITICAL,
+ MMC_BKOPS_NUM_SEVERITY_LEVELS,
+};
+
+/**
+ * struct mmc_bkops_stats - BKOPS statistics
+ * @lock: spinlock used for synchronizing the debugfs and the runtime accesses
+ * to this structure. There is no need to use the spin_lock_irq() API
+ * @manual_start: number of times START_BKOPS was sent to the device
+ * @hpi: number of times HPI was sent to the device
+ * @auto_start: number of times AUTO_EN was set to 1
+ * @auto_stop: number of times AUTO_EN was set to 0
+ * @level: number of times the device reported the need for each level of
+ * bkops handling
+ * @enabled: control over whether statistics should be gathered
+ *
+ * This structure is used to collect statistics regarding the bkops
+ * configuration and usage patterns. The data is collected at runtime and can be
+ * shown to the user via a debugfs entry.
+ */
+struct mmc_bkops_stats {
+ spinlock_t lock;
+ unsigned int manual_start;
+ unsigned int hpi;
+ unsigned int auto_start;
+ unsigned int auto_stop;
+ unsigned int level[MMC_BKOPS_NUM_SEVERITY_LEVELS];
+ bool enabled;
+};
+
+/**
+ * struct mmc_bkops_info - BKOPS data
+ * @stats: statistic information regarding bkops
+ * @needs_check: indication of whether we need to check with the device
+ * whether it requires handling of BKOPS (CMD8)
+ * @needs_bkops: indication of whether START_BKOPS has to be sent
+ * to the device
+ */
+struct mmc_bkops_info {
+ struct mmc_bkops_stats stats;
+ bool needs_check;
+ bool needs_bkops;
+ u32 retry_counter;
+};
+
+enum mmc_pon_type {
+ MMC_LONG_PON = 1,
+ MMC_SHRT_PON,
+};
+
+#define MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD 6 /* microseconds */
+
/*
* MMC device
*/
@@ -253,6 +349,10 @@
struct mmc_host *host; /* the host this device belongs to */
struct device dev; /* the device */
u32 ocr; /* the current OCR setting */
+ unsigned long clk_scaling_lowest; /* lowest scaleable
+ * frequency */
+ unsigned long clk_scaling_highest; /* highest scaleable
+ * frequency */
unsigned int rca; /* relative card address of device */
unsigned int type; /* card type */
#define MMC_TYPE_MMC 0 /* MMC card */
@@ -265,8 +365,10 @@
#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
-#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */
+#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing manual BKOPS */
#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */
+#define MMC_STATE_CMDQ (1<<12) /* card is in cmd queue mode */
+#define MMC_STATE_AUTO_BKOPS (1<<13) /* card is doing auto BKOPS */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -285,6 +387,15 @@
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ /* byte mode */
+#define MMC_QUIRK_INAND_DATA_TIMEOUT (1<<14) /* For incorrect data timeout */
+#define MMC_QUIRK_CACHE_DISABLE (1 << 15) /* prevent cache enable */
+#define MMC_QUIRK_QCA6574_SETTINGS (1 << 16) /* QCA6574 card settings */
+#define MMC_QUIRK_QCA9377_SETTINGS (1 << 17) /* QCA9377 card settings */
+
+
+/* Make sure CMDQ is empty before queuing DCMD */
+#define MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD (1 << 18)
unsigned int erase_size; /* erase size in sectors */
@@ -320,6 +431,16 @@
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
+ unsigned int part_curr;
+
+ struct mmc_wr_pack_stats wr_pack_stats; /* packed commands stats */
+ struct notifier_block reboot_notify;
+ enum mmc_pon_type pon_type;
+ u8 *cached_ext_csd;
+ bool cmdq_init;
+ struct mmc_bkops_info bkops;
+ bool err_in_sdr104;
+ bool sdr104_blocked;
};
/*
@@ -382,6 +503,19 @@
#define END_FIXUP { NULL }
+/* extended CSD mapping to mmc version */
+enum mmc_version_ext_csd_rev {
+ MMC_V4_0,
+ MMC_V4_1,
+ MMC_V4_2,
+ MMC_V4_41 = 5,
+ MMC_V4_5,
+ MMC_V4_51 = MMC_V4_5,
+ MMC_V5_0,
+ MMC_V5_01 = MMC_V5_0,
+ MMC_V5_1
+};
+
#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
_cis_vendor, _cis_device, \
_fixup, _data, _ext_csd_rev) \
@@ -457,6 +591,8 @@
#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+#define mmc_card_cmdq(c) ((c)->state & MMC_STATE_CMDQ)
+#define mmc_card_doing_auto_bkops(c) ((c)->state & MMC_STATE_AUTO_BKOPS)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -467,6 +603,12 @@
#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+#define mmc_card_set_cmdq(c) ((c)->state |= MMC_STATE_CMDQ)
+#define mmc_card_clr_cmdq(c) ((c)->state &= ~MMC_STATE_CMDQ)
+#define mmc_card_set_auto_bkops(c) ((c)->state |= MMC_STATE_AUTO_BKOPS)
+#define mmc_card_clr_auto_bkops(c) ((c)->state &= ~MMC_STATE_AUTO_BKOPS)
+
+#define mmc_card_strobe(c) (((c)->ext_csd).strobe_support & MMC_STROBE_SUPPORT)
/*
* Quirk add/remove for MMC products.
@@ -542,10 +684,37 @@
return c->quirks & MMC_QUIRK_BROKEN_HPI;
}
+static inline bool mmc_card_support_auto_bkops(const struct mmc_card *c)
+{
+ return c->ext_csd.rev >= MMC_V5_1;
+}
+
+static inline bool mmc_card_configured_manual_bkops(const struct mmc_card *c)
+{
+ return c->ext_csd.bkops_en & EXT_CSD_BKOPS_MANUAL_EN;
+}
+
+static inline bool mmc_card_configured_auto_bkops(const struct mmc_card *c)
+{
+ return c->ext_csd.bkops_en & EXT_CSD_BKOPS_AUTO_EN;
+}
+
+static inline bool mmc_enable_qca6574_settings(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_QCA6574_SETTINGS;
+}
+
+static inline bool mmc_enable_qca9377_settings(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_QCA9377_SETTINGS;
+}
+
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
+#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
+#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d)
/*
* MMC device driver (e.g., Flash card, I/O card...)
@@ -562,5 +731,9 @@
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
-
+extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
+ struct mmc_card *card);
+extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
+extern int mmc_send_pon(struct mmc_card *card);
+extern void mmc_blk_cmdq_req_done(struct mmc_request *mrq);
#endif /* LINUX_MMC_CARD_H */
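
The bkops statistics are meant to be updated under their own spinlock and only while gathering is enabled; a sketch of one such update (the function itself is hypothetical, the fields come from struct mmc_bkops_stats above):

    static void example_note_manual_bkops(struct mmc_card *card)
    {
            struct mmc_bkops_stats *stats = &card->bkops.stats;

            spin_lock(&stats->lock);
            if (stats->enabled)
                    stats->manual_start++;  /* one more START_BKOPS sent */
            spin_unlock(&stats->lock);
    }
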
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2b58cbd..959414b 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -71,6 +71,8 @@
unsigned int busy_timeout; /* busy detect timeout in ms */
/* Set this flag only for blocking sanitize request */
bool sanitize_busy;
+ /* Set this flag only for blocking bkops request */
+ bool bkops_busy;
struct mmc_data *data; /* data segment associated with cmd */
struct mmc_request *mrq; /* associated request */
@@ -96,6 +98,7 @@
int sg_count; /* mapped sg entries */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
+ bool fault_injected; /* fault injected */
};
struct mmc_host;
@@ -109,6 +112,8 @@
struct completion cmd_completion;
void (*done)(struct mmc_request *);/* completion function */
struct mmc_host *host;
+ struct mmc_cmdq_req *cmdq_req;
+ struct request *req;
/* Allow other commands during this ongoing data transfer or busy wait */
bool cap_cmd_during_tfr;
@@ -118,8 +123,39 @@
#endif
};
+struct mmc_bus_ops {
+ void (*remove)(struct mmc_host *);
+ void (*detect)(struct mmc_host *);
+ int (*pre_suspend)(struct mmc_host *);
+ int (*suspend)(struct mmc_host *);
+ int (*resume)(struct mmc_host *);
+ int (*runtime_suspend)(struct mmc_host *);
+ int (*runtime_resume)(struct mmc_host *);
+ int (*runtime_idle)(struct mmc_host *);
+ int (*power_save)(struct mmc_host *);
+ int (*power_restore)(struct mmc_host *);
+ int (*alive)(struct mmc_host *);
+ int (*shutdown)(struct mmc_host *);
+ int (*reset)(struct mmc_host *);
+ int (*change_bus_speed)(struct mmc_host *, unsigned long *);
+};
+
struct mmc_card;
struct mmc_async_req;
+struct mmc_cmdq_req;
+
+extern int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks);
+extern int mmc_cmdq_halt(struct mmc_host *host, bool enable);
+extern int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host);
+extern void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err);
+extern int mmc_cmdq_start_req(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req);
+extern int mmc_cmdq_prepare_flush(struct mmc_command *cmd);
+extern int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req);
+extern int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg);
extern int mmc_stop_bkops(struct mmc_card *);
extern int mmc_read_bkops_status(struct mmc_card *);
@@ -134,10 +170,16 @@
extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
-extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
+extern void mmc_check_bkops(struct mmc_card *card);
+extern void mmc_start_manual_bkops(struct mmc_card *card);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index,
+ u8 value, unsigned int timeout_ms,
+ bool use_busy_signal, bool ignore_timeout);
extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
+extern int mmc_suspend_clk_scaling(struct mmc_host *host);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
@@ -164,6 +206,7 @@
extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
extern int mmc_hw_reset(struct mmc_host *host);
+extern int mmc_cmdq_hw_reset(struct mmc_host *host);
extern int mmc_can_reset(struct mmc_card *card);
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
@@ -174,11 +217,22 @@
extern void mmc_get_card(struct mmc_card *card);
extern void mmc_put_card(struct mmc_card *card);
+extern void __mmc_put_card(struct mmc_card *card);
+extern void mmc_set_ios(struct mmc_host *host);
extern int mmc_flush_cache(struct mmc_card *);
+extern int mmc_cache_barrier(struct mmc_card *);
extern int mmc_detect_card_removed(struct mmc_host *host);
+extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);
+
+extern void mmc_deferred_scaling(struct mmc_host *host);
+extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
+ bool lock_needed);
+extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
+ bool lock_needed, bool is_cmdq_dcmd);
+
/**
* mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
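
The cmdq entry points above suggest the usual recovery shape: halt the queue, clean up, then unhalt. A hypothetical sketch:

    static int example_cmdq_recover(struct mmc_host *host)
    {
            int err;

            err = mmc_cmdq_halt(host, true);        /* quiesce the engine */
            if (err)
                    return err;

            /* ... discard/requeue outstanding tasks, reset if needed ... */

            return mmc_cmdq_halt(host, false);      /* resume issuing */
    }
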
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index b7b92bf..ecfc173 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,6 +15,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/device.h>
+#include <linux/devfreq.h>
#include <linux/fault-inject.h>
#include <linux/blkdev.h>
@@ -22,9 +23,14 @@
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
+#include <linux/mmc/ring_buffer.h>
+
+#define MMC_AUTOSUSPEND_DELAY_MS 3000
struct mmc_ios {
unsigned int clock; /* clock rate */
+ unsigned int old_rate; /* saved clock rate */
+ unsigned long clk_ts; /* time stamp of last updated clock */
unsigned short vdd;
/* vdd stores the bit number of the selected voltage range from below. */
@@ -83,7 +89,31 @@
bool enhanced_strobe; /* hs400es selection */
};
+/* states to represent load on the host */
+enum mmc_load {
+ MMC_LOAD_HIGH,
+ MMC_LOAD_LOW,
+};
+
+struct mmc_cmdq_host_ops {
+ int (*init)(struct mmc_host *host);
+ int (*enable)(struct mmc_host *host);
+ void (*disable)(struct mmc_host *host, bool soft);
+ int (*request)(struct mmc_host *host, struct mmc_request *mrq);
+ void (*post_req)(struct mmc_host *host, int tag, int err);
+ int (*halt)(struct mmc_host *host, bool halt);
+ void (*reset)(struct mmc_host *host, bool soft);
+ void (*dumpstate)(struct mmc_host *host);
+};
+
struct mmc_host_ops {
+ int (*init)(struct mmc_host *host);
+ /*
+ * 'enable' is called when the host is claimed and 'disable' is called
+ * when the host is released. 'enable' and 'disable' are deprecated.
+ */
+ int (*enable)(struct mmc_host *host);
+ int (*disable)(struct mmc_host *host);
/*
* It is optional for the host to implement pre_req and post_req in
* order to support double buffering of requests (prepare one
@@ -147,6 +177,7 @@
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ int (*enhanced_strobe)(struct mmc_host *host);
/* Prepare enhanced strobe depending host driver */
void (*hs400_enhanced_strobe)(struct mmc_host *host,
struct mmc_ios *ios);
@@ -162,11 +193,42 @@
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+
+ unsigned long (*get_max_frequency)(struct mmc_host *host);
+ unsigned long (*get_min_frequency)(struct mmc_host *host);
+
+ int (*notify_load)(struct mmc_host *, enum mmc_load);
+ void (*notify_halt)(struct mmc_host *mmc, bool halt);
+ void (*force_err_irq)(struct mmc_host *host, u64 errmask);
};
struct mmc_card;
struct device;
+struct mmc_cmdq_req {
+ unsigned int cmd_flags;
+ u32 blk_addr;
+ /* active mmc request */
+ struct mmc_request mrq;
+ struct mmc_data data;
+ struct mmc_command cmd;
+#define DCMD (1 << 0)
+#define QBR (1 << 1)
+#define DIR (1 << 2)
+#define PRIO (1 << 3)
+#define REL_WR (1 << 4)
+#define DAT_TAG (1 << 5)
+#define FORCED_PRG (1 << 6)
+ unsigned int cmdq_req_flags;
+
+ unsigned int resp_idx;
+ unsigned int resp_arg;
+ unsigned int dev_pend_tasks;
+ bool resp_err;
+ int tag; /* used for command queuing */
+ u8 ctx_id;
+};
+
struct mmc_async_req {
/* active mmc request */
struct mmc_request *mrq;
@@ -193,6 +255,33 @@
void *handler_priv;
};
+
+/**
+ * mmc_cmdq_context_info - describes the context of cmdq
+ * @active_reqs: requests being processed
+ * @data_active_reqs: data requests being processed
+ * @curr_state: state of cmdq engine
+ * @cmdq_ctx_lock: acquire this before accessing this structure
+ * @queue_empty_wq: wait queue for waiting for all
+ * the outstanding requests to be completed
+ * @wait: waits for all conditions described in
+ * mmc_cmdq_ready_wait to be satisfied before
+ * issuing the new request to LLD.
+ */
+struct mmc_cmdq_context_info {
+ unsigned long active_reqs; /* in-flight requests */
+ unsigned long data_active_reqs; /* in-flight data requests */
+ unsigned long curr_state;
+#define CMDQ_STATE_ERR 0
+#define CMDQ_STATE_DCMD_ACTIVE 1
+#define CMDQ_STATE_HALT 2
+#define CMDQ_STATE_CQ_DISABLE 3
+#define CMDQ_STATE_REQ_TIMED_OUT 4
+ wait_queue_head_t queue_empty_wq;
+ wait_queue_head_t wait;
+ int active_small_sector_read_reqs;
+};
+
/**
* mmc_context_info - synchronization details for mmc context
* @is_done_rcv wake up reason was done request
@@ -217,11 +306,68 @@
struct regulator *vqmmc; /* Optional Vccq supply */
};
+enum dev_state {
+ DEV_SUSPENDING = 1,
+ DEV_SUSPENDED,
+ DEV_RESUMED,
+};
+
+/**
+ * struct mmc_devfeq_clk_scaling - main context for MMC clock scaling logic
+ *
+ * @lock: spinlock to protect statistics
+ * @devfreq: struct that represents mmc-host as a client for devfreq
+ * @devfreq_profile: MMC device profile, mostly polling interval and callbacks
+ * @ondemand_gov_data: struct supplied to ondemand governor (thresholds)
+ * @state: load state, can be HIGH or LOW. Used to notify mmc_host_ops callback
+ * @start_busy: timestamp taken once a data request is started
+ * @measure_interval_start: timestamp taken once a measure interval started
+ * @devfreq_abort: flag to sync between different contexts relevant to devfreq
+ * @skip_clk_scale_freq_update: flag that enables/disables frequency updates
+ * @freq_table_sz: table size of frequencies supplied to devfreq
+ * @freq_table: frequencies table supplied to devfreq
+ * @curr_freq: current frequency
+ * @polling_delay_ms: polling interval for status collection used by devfreq
+ * @upthreshold: up-threshold supplied to ondemand governor
+ * @downthreshold: down-threshold supplied to ondemand governor
+ * @need_freq_change: flag indicating if a frequency change is required
+ * @clk_scaling_in_progress: flag indicating if there's ongoing frequency change
+ * @is_busy_started: flag indicating if a request is handled by the HW
+ * @enable: flag indicating if the clock scaling logic is enabled for this host
+ */
+struct mmc_devfeq_clk_scaling {
+ spinlock_t lock;
+ struct devfreq *devfreq;
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_gov_data;
+ enum mmc_load state;
+ ktime_t start_busy;
+ ktime_t measure_interval_start;
+ atomic_t devfreq_abort;
+ bool skip_clk_scale_freq_update;
+ int freq_table_sz;
+ u32 *freq_table;
+ unsigned long total_busy_time_us;
+ unsigned long target_freq;
+ unsigned long curr_freq;
+ unsigned long polling_delay_ms;
+ unsigned int upthreshold;
+ unsigned int downthreshold;
+ unsigned int lower_bus_speed_mode;
+#define MMC_SCALING_LOWER_DDR52_MODE 1
+ bool need_freq_change;
+ bool clk_scaling_in_progress;
+ bool is_busy_started;
+ bool enable;
+};
+
struct mmc_host {
struct device *parent;
struct device class_dev;
+ struct mmc_devfeq_clk_scaling clk_scaling;
int index;
const struct mmc_host_ops *ops;
+ const struct mmc_cmdq_host_ops *cmdq_ops;
struct mmc_pwrseq *pwrseq;
unsigned int f_min;
unsigned int f_max;
@@ -313,6 +459,17 @@
#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */
#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */
#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 23) /* Allow write packing control */
+#define MMC_CAP2_CLK_SCALE (1 << 24) /* Allow dynamic clk scaling */
+#define MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE (1 << 25) /* Allows Asynchronous SDIO irq while card is in 4-bit mode */
+#define MMC_CAP2_NONHOTPLUG (1 << 26) /* Don't support hotplug */
+/* Some hosts need additional tuning */
+#define MMC_CAP2_HS400_POST_TUNING (1 << 27)
+#define MMC_CAP2_CMD_QUEUE (1 << 28) /* support eMMC command queue */
+#define MMC_CAP2_SANITIZE (1 << 29) /* Support Sanitize */
+#define MMC_CAP2_SLEEP_AWAKE (1 << 30) /* Use Sleep/Awake (CMD5) */
+/* use max discard ignoring max_busy_timeout parameter */
+#define MMC_CAP2_MAX_DISCARD_SIZE (1 << 31)
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -341,6 +498,7 @@
spinlock_t lock; /* lock for claim and bus ops */
struct mmc_ios ios; /* current io bus settings */
+ struct mmc_ios cached_ios;
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
@@ -368,6 +526,7 @@
wait_queue_head_t wq;
struct task_struct *claimer; /* task that has host claimed */
+ struct task_struct *suspend_task;
int claim_cnt; /* "claim" nesting count */
struct delayed_work detect;
@@ -377,6 +536,10 @@
const struct mmc_bus_ops *bus_ops; /* current bus driver */
unsigned int bus_refs; /* reference counter */
+ unsigned int bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
bool sdio_irq_pending;
@@ -424,10 +587,45 @@
struct io_latency_state io_lat_s;
#endif
+ bool sdr104_wa;
+
+ /*
+ * Set to 1 to just stop the SDCLK to the card without
+ * actually disabling the clock from it's source.
+ */
+ bool card_clock_off;
+
+#ifdef CONFIG_MMC_PERF_PROFILING
+ struct {
+
+ unsigned long rbytes_drv; /* Rd bytes MMC Host */
+ unsigned long wbytes_drv; /* Wr bytes MMC Host */
+ ktime_t rtime_drv; /* Rd time MMC Host */
+ ktime_t wtime_drv; /* Wr time MMC Host */
+ ktime_t start;
+ } perf;
+ bool perf_enable;
+#endif
+ struct mmc_trace_buffer trace_buf;
+ enum dev_state dev_status;
+ bool wakeup_on_idle;
+ struct mmc_cmdq_context_info cmdq_ctx;
+ int num_cq_slots;
+ int dcmd_cq_slot;
+ bool cmdq_thist_enabled;
+ /*
+ * Several cmdq-supporting host controllers are extensions
+ * of legacy controllers. This variable can be used to store
+ * a reference to the cmdq extension of the existing host
+ * controller.
+ */
+ void *cmdq_private;
+ struct mmc_request *err_mrq;
unsigned long private[0] ____cacheline_aligned;
};
struct mmc_host *mmc_alloc_host(int extra, struct device *);
+extern bool mmc_host_may_gate_card(struct mmc_card *);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
@@ -446,11 +644,30 @@
return (void *)host->private;
}
+static inline void *mmc_cmdq_private(struct mmc_host *host)
+{
+ return host->cmdq_private;
+}
+
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & \
+ MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & \
+ MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+ if (manual)
+ host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+ else
+ host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
int mmc_power_save_host(struct mmc_host *host);
int mmc_power_restore_host(struct mmc_host *host);
@@ -522,6 +739,12 @@
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
}
+static inline bool mmc_card_and_host_support_async_int(struct mmc_host *host)
+{
+ return ((host->caps2 & MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) &&
+ (host->card->cccr.async_intr_sup));
+}
+
static inline int mmc_host_uhs(struct mmc_host *host)
{
return host->caps &
@@ -530,11 +753,51 @@
MMC_CAP_UHS_DDR50);
}
+static inline void mmc_host_clear_sdr104(struct mmc_host *host)
+{
+ host->caps &= ~MMC_CAP_UHS_SDR104;
+}
+
+static inline void mmc_host_set_sdr104(struct mmc_host *host)
+{
+ host->caps |= MMC_CAP_UHS_SDR104;
+}
+
static inline int mmc_host_packed_wr(struct mmc_host *host)
{
return host->caps2 & MMC_CAP2_PACKED_WR;
}
+static inline void mmc_host_set_halt(struct mmc_host *host)
+{
+ set_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_clr_halt(struct mmc_host *host)
+{
+ clear_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static inline int mmc_host_halt(struct mmc_host *host)
+{
+ return test_bit(CMDQ_STATE_HALT, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_set_cq_disable(struct mmc_host *host)
+{
+ set_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+static inline void mmc_host_clr_cq_disable(struct mmc_host *host)
+{
+ clear_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
+static inline int mmc_host_cq_disable(struct mmc_host *host)
+{
+ return test_bit(CMDQ_STATE_CQ_DISABLE, &host->cmdq_ctx.curr_state);
+}
+
#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_hold(struct mmc_host *host);
void mmc_host_clk_release(struct mmc_host *host);
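
A host driver advertises MMC_CAP2_CMD_QUEUE and publishes the callback table declared above; a minimal sketch (all bodies are stubs, the real handlers are controller-specific):

    static int example_cmdq_enable(struct mmc_host *host) { return 0; }
    static void example_cmdq_disable(struct mmc_host *host, bool soft) { }
    static int example_cmdq_request(struct mmc_host *host,
                                    struct mmc_request *mrq) { return 0; }

    static const struct mmc_cmdq_host_ops example_cmdq_ops = {
            .enable  = example_cmdq_enable,
            .disable = example_cmdq_disable,
            .request = example_cmdq_request,
    };

    /* in probe(): */
    /*      mmc->caps2 |= MMC_CAP2_CMD_QUEUE;       */
    /*      mmc->cmdq_ops = &example_cmdq_ops;      */
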
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 68f60b8..2186a36 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -165,6 +165,7 @@
* OCR bits are mostly in host.h
*/
#define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */
+#define MMC_CARD_SECTOR_ADDR 0x40000000 /* Card supports sectors */
/*
* Card Command Classes (CCC)
@@ -214,6 +215,8 @@
* EXT_CSD fields
*/
+#define EXT_CSD_CMDQ 15 /* R/W */
+#define EXT_CSD_BARRIER_CTRL 31 /* R/W */
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
@@ -267,6 +270,7 @@
#define EXT_CSD_PWR_CL_200_360 237 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
+#define EXT_CSD_CACHE_FLUSH_POLICY 240 /* RO */
#define EXT_CSD_BKOPS_STATUS 246 /* RO */
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
@@ -276,6 +280,9 @@
#define EXT_CSD_PRE_EOL_INFO 267 /* RO */
#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A 268 /* RO */
#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B 269 /* RO */
+#define EXT_CSD_CMDQ_DEPTH 307 /* RO */
+#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */
+#define EXT_CSD_BARRIER_SUPPORT 486 /* RO */
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
@@ -288,7 +295,8 @@
* EXT_CSD field definitions
*/
-#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR (1<<4)
#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10)
@@ -361,6 +369,9 @@
#define EXT_CSD_PACKED_EVENT_EN BIT(3)
+#define EXT_CSD_BKOPS_MANUAL_EN BIT(0)
+#define EXT_CSD_BKOPS_AUTO_EN BIT(1)
+
/*
* EXCEPTION_EVENT_STATUS field
*/
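A hedged sketch of how the new CMDQ offsets might be consumed when parsing a device's extended CSD; the depth-minus-one encoding and 5-bit field width follow JEDEC convention but are assumptions here, not part of this patch:

#include <linux/types.h>

/* Hypothetical EXT_CSD parse step for command-queuing capability. */
static void example_parse_cmdq(u8 *ext_csd, bool *cmdq_support,
			       unsigned int *cmdq_depth)
{
	*cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] & 0x1;
	/* The depth field is assumed to encode (queue depth - 1). */
	if (*cmdq_support)
		*cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] & 0x1f) + 1;
	else
		*cmdq_depth = 0;
}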
diff --git a/include/linux/mmc/ring_buffer.h b/include/linux/mmc/ring_buffer.h
new file mode 100644
index 0000000..e6bf163
--- /dev/null
+++ b/include/linux/mmc/ring_buffer.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MMC_RING_BUFFER__
+#define __MMC_RING_BUFFER__
+
+#include <linux/mmc/card.h>
+#include <linux/smp.h>
+
+#include "core.h"
+
+#define MMC_TRACE_RBUF_SZ_ORDER 2 /* 2^2 pages */
+#define MMC_TRACE_RBUF_SZ (PAGE_SIZE * (1 << MMC_TRACE_RBUF_SZ_ORDER))
+#define MMC_TRACE_EVENT_SZ 256
+#define MMC_TRACE_RBUF_NUM_EVENTS (MMC_TRACE_RBUF_SZ / MMC_TRACE_EVENT_SZ)
+
+struct mmc_host;
+struct mmc_trace_buffer {
+ int wr_idx;
+ bool stop_tracing;
+ spinlock_t trace_lock;
+ char *data;
+};
+
+#ifdef CONFIG_MMC_RING_BUFFER
+void mmc_stop_tracing(struct mmc_host *mmc);
+void mmc_trace_write(struct mmc_host *mmc, const char *fmt, ...);
+void mmc_trace_init(struct mmc_host *mmc);
+void mmc_trace_free(struct mmc_host *mmc);
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s);
+#else
+static inline void mmc_stop_tracing(struct mmc_host *mmc) {}
+static inline void mmc_trace_write(struct mmc_host *mmc,
+ const char *fmt, ...) {}
+static inline void mmc_trace_init(struct mmc_host *mmc) {}
+static inline void mmc_trace_free(struct mmc_host *mmc) {}
+static inline void mmc_dump_trace_buffer(struct mmc_host *mmc,
+ struct seq_file *s) {}
+#endif
+
+#define MMC_TRACE(mmc, fmt, ...) \
+ mmc_trace_write(mmc, fmt, ##__VA_ARGS__)
+
+#endif /* __MMC_RING_BUFFER__ */
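Because the !CONFIG_MMC_RING_BUFFER stubs are empty inlines, MMC_TRACE() compiles away when the feature is off, so callers can trace unconditionally. A hypothetical call site:

/* Hypothetical call site: record a command submission in the ring. */
static void example_log_cmd(struct mmc_host *host, u32 opcode, u32 arg)
{
	MMC_TRACE(host, "cmd: CMD%u arg: 0x%08x\n", opcode, arg);
}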
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 17446d3..3fc07d7 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -102,6 +102,7 @@
#define SDIO_BUS_WIDTH_1BIT 0x00
#define SDIO_BUS_WIDTH_RESERVED 0x01
#define SDIO_BUS_WIDTH_4BIT 0x02
+#define SDIO_BUS_WIDTH_8BIT 0x03
#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */
#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */
@@ -163,6 +164,10 @@
#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXTENSION 0x16
+#define SDIO_SUPPORT_ASYNC_INTR (1<<0)
+#define SDIO_ENABLE_ASYNC_INTR (1<<1)
/*
* Function Basic Registers (FBR)
*/
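A sketch of an enable path for the asynchronous-interrupt bits above. mmc_io_rw_direct() is the core's CMD52 accessor and is internal to the MMC core, so treat the whole routine as illustrative:

/* Hypothetical: check support, then enable SDIO async interrupts. */
static int example_enable_async_irq(struct mmc_card *card)
{
	u8 reg;
	int ret;

	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTERRUPT_EXTENSION,
			       0, &reg);
	if (ret)
		return ret;
	if (!(reg & SDIO_SUPPORT_ASYNC_INTR))
		return -EOPNOTSUPP;

	reg |= SDIO_ENABLE_ASYNC_INTR;
	return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_INTERRUPT_EXTENSION,
				reg, NULL);
}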
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
index 16e3bb2..26e948f 100644
--- a/include/linux/msm-bus.h
+++ b/include/linux/msm-bus.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,8 +55,14 @@
struct msm_bus_vectors *vectors;
};
+struct msm_bus_lat_vectors {
+ uint64_t fal_ns; /* First Access Latency */
+ uint64_t idle_t_ns; /* Idle Time */
+};
+
struct msm_bus_scale_pdata {
struct msm_bus_paths *usecase;
+ struct msm_bus_lat_vectors *usecase_lat;
int num_usecases;
const char *name;
/*
@@ -66,6 +72,11 @@
* of the CPU state.
*/
unsigned int active_only;
+ /*
+ * If the ALC(Active Latency Client) flag is set to 1,
+ * use lat_usecases for latency voting.
+ */
+ unsigned int alc;
};
struct msm_bus_client_handle {
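An illustrative pdata fragment for an ALC client; the vectors and name are placeholders, and only the latency path is populated since alc selects usecase_lat over the bandwidth usecases:

#include <linux/kernel.h>

/* Hypothetical ALC client: votes latency tolerances, not bandwidth. */
static struct msm_bus_lat_vectors example_lat_usecases[] = {
	{ .fal_ns = 0,      .idle_t_ns = 0       },	/* idle */
	{ .fal_ns = 100000, .idle_t_ns = 2000000 },	/* active */
};

static struct msm_bus_scale_pdata example_alc_pdata = {
	.usecase_lat	= example_lat_usecases,
	.num_usecases	= ARRAY_SIZE(example_lat_usecases),
	.name		= "example-alc-client",
	.alc		= 1,	/* route votes through the latency path */
};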
diff --git a/include/linux/msm_adreno_devfreq.h b/include/linux/msm_adreno_devfreq.h
index 1e580d3..2b94289 100644
--- a/include/linux/msm_adreno_devfreq.h
+++ b/include/linux/msm_adreno_devfreq.h
@@ -21,6 +21,10 @@
#define ADRENO_DEVFREQ_NOTIFY_RETIRE 2
#define ADRENO_DEVFREQ_NOTIFY_IDLE 3
+#define DEVFREQ_FLAG_WAKEUP_MAXFREQ 0x2
+#define DEVFREQ_FLAG_FAST_HINT 0x4
+#define DEVFREQ_FLAG_SLOW_HINT 0x8
+
struct device;
int kgsl_devfreq_add_notifier(struct device *device,
diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h
index 8316aaa..b9527d3 100644
--- a/include/linux/msm_pcie.h
+++ b/include/linux/msm_pcie.h
@@ -157,18 +157,6 @@
int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
u32 offset, u32 mask, u32 value);
-/*
- * msm_pcie_configure_sid - calculates the SID for a PCIe endpoint.
- * @dev: device structure
- * @sid: the calculated SID
- * @domain: the domain number of the Root Complex
- *
- * This function calculates the SID for a PCIe endpoint device.
- *
- * Return: 0 on success, negative value on error
- */
-int msm_pcie_configure_sid(struct device *dev, u32 *sid,
- int *domain);
#else /* !CONFIG_PCI_MSM */
static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
void *user, void *data, u32 options)
@@ -206,12 +194,6 @@
{
return -ENODEV;
}
-
-static inline int msm_pcie_configure_sid(struct device *dev, u32 *sid,
- int *domain)
-{
- return -ENODEV;
-}
#endif /* CONFIG_PCI_MSM */
#endif /* __MSM_PCIE_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f020ab4..3e5dbbe 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2513,6 +2513,8 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
+#define PCI_VENDOR_ID_HUAWEI 0x19e5
+
#define PCI_VENDOR_ID_NETRONOME 0x19ee
#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e25f183..bd22670 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -806,6 +806,7 @@
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_ksettings_get(struct phy_device *phydev,
diff --git a/include/linux/platform_data/qcom_crypto_device.h b/include/linux/platform_data/qcom_crypto_device.h
new file mode 100644
index 0000000..eadaa42
--- /dev/null
+++ b/include/linux/platform_data/qcom_crypto_device.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+struct msm_ce_hw_support {
+ uint32_t ce_shared;
+ uint32_t shared_ce_resource;
+ uint32_t hw_key_support;
+ uint32_t sha_hmac;
+ void *bus_scale_table;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */
diff --git a/drivers/power/supply/qcom/pmic-voter.h b/include/linux/pmic-voter.h
similarity index 90%
rename from drivers/power/supply/qcom/pmic-voter.h
rename to include/linux/pmic-voter.h
index 031b9a0..f202bf7 100644
--- a/drivers/power/supply/qcom/pmic-voter.h
+++ b/include/linux/pmic-voter.h
@@ -24,6 +24,9 @@
NUM_VOTABLE_TYPES,
};
+bool is_client_vote_enabled(struct votable *votable, const char *client_str);
+bool is_client_vote_enabled_locked(struct votable *votable,
+ const char *client_str);
int get_client_vote(struct votable *votable, const char *client_str);
int get_client_vote_locked(struct votable *votable, const char *client_str);
int get_effective_result(struct votable *votable);
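A sketch of how the new enabled-state queries pair with the existing value getters; the votable and client names are hypothetical:

/* Hypothetical check: read a client's vote only if it is enabled. */
static int example_get_usb_icl(struct votable *usb_icl_votable)
{
	if (!is_client_vote_enabled(usb_icl_votable, "USER_VOTER"))
		return -EINVAL;

	return get_client_vote(usb_icl_votable, "USER_VOTER");
}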
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b46d6a8..77912a1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -217,6 +217,7 @@
POWER_SUPPLY_PROP_DP_DM,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
POWER_SUPPLY_PROP_CURRENT_QNOVO,
POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
POWER_SUPPLY_PROP_RERUN_AICL,
@@ -245,6 +246,7 @@
POWER_SUPPLY_PROP_DIE_HEALTH,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index e1ad51e..12b3d51e8 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -39,6 +39,7 @@
struct clk *m_ahb_clk;
struct clk *s_ahb_clk;
struct msm_bus_client_handle *bus_bw;
+ unsigned int bus_mas;
unsigned long ab;
unsigned long ib;
struct pinctrl *geni_pinctrl;
@@ -85,8 +86,10 @@
#define SE_GENI_TX_WATERMARK_REG (0x80C)
#define SE_GENI_RX_WATERMARK_REG (0x810)
#define SE_GENI_RX_RFR_WATERMARK_REG (0x814)
+#define SE_GENI_IOS (0x908)
#define SE_GENI_M_GP_LENGTH (0x910)
#define SE_GENI_S_GP_LENGTH (0x914)
+#define SE_GSI_EVENT_EN (0xE18)
#define SE_IRQ_EN (0xE1C)
#define SE_HW_PARAM_0 (0xE24)
#define SE_HW_PARAM_1 (0xE28)
@@ -220,6 +223,16 @@
#define RX_LAST_BYTE_VALID_SHFT (28)
#define RX_FIFO_WC_MSK (GENMASK(24, 0))
+/* SE_GSI_EVENT_EN fields */
+#define DMA_RX_EVENT_EN (BIT(0))
+#define DMA_TX_EVENT_EN (BIT(1))
+#define GENI_M_EVENT_EN (BIT(2))
+#define GENI_S_EVENT_EN (BIT(3))
+
+/* SE_GENI_IOS fields */
+#define IO2_DATA_IN (BIT(1))
+#define RX_DATA_IN (BIT(0))
+
/* SE_IRQ_EN fields */
#define DMA_RX_IRQ_EN (BIT(0))
#define DMA_TX_IRQ_EN (BIT(1))
@@ -248,6 +261,17 @@
#define RX_DMA_IRQ_DELAY_MSK (GENMASK(8, 6))
#define RX_DMA_IRQ_DELAY_SHFT (6)
+static inline unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
+{
+ return readl_relaxed_no_log(base + offset);
+}
+
+static inline void geni_write_reg_nolog(unsigned int value, void __iomem *base,
+ int offset)
+{
+ return writel_relaxed_no_log(value, (base + offset));
+}
+
static inline unsigned int geni_read_reg(void __iomem *base, int offset)
{
return readl_relaxed(base + offset);
@@ -256,7 +280,7 @@
static inline void geni_write_reg(unsigned int value, void __iomem *base,
int offset)
{
- return writel_relaxed(value, (base + offset));
+ writel_relaxed(value, (base + offset));
}
static inline int get_se_proto(void __iomem *base)
@@ -320,9 +344,11 @@
int ret = 0;
unsigned int io_mode = 0;
unsigned int geni_dma_mode = 0;
+ unsigned int gsi_event_en = 0;
io_mode = geni_read_reg(base, SE_IRQ_EN);
geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+ gsi_event_en = geni_read_reg(base, SE_GSI_EVENT_EN);
switch (mode) {
case FIFO_MODE:
@@ -330,15 +356,23 @@
io_mode |= (GENI_M_IRQ_EN | GENI_S_IRQ_EN);
io_mode |= (DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
geni_dma_mode &= ~GENI_DMA_MODE_EN;
+ gsi_event_en = 0;
break;
}
+ case GSI_DMA:
+ geni_dma_mode |= GENI_DMA_MODE_EN;
+ io_mode &= ~(DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
+ gsi_event_en |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
+ GENI_M_EVENT_EN | GENI_S_EVENT_EN);
+ break;
default:
ret = -ENXIO;
goto exit_set_mode;
}
geni_write_reg(io_mode, base, SE_IRQ_EN);
geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+ geni_write_reg(gsi_event_en, base, SE_GSI_EVENT_EN);
exit_set_mode:
return ret;
}
@@ -418,7 +452,7 @@
geni_write_reg(M_GENI_CMD_ABORT, base, SE_GENI_M_CMD_CTRL_REG);
}
-static inline void qcom_geni_abort_s_cmd(void __iomem *base)
+static inline void geni_abort_s_cmd(void __iomem *base)
{
geni_write_reg(S_GENI_CMD_ABORT, base, SE_GENI_S_CMD_CTRL_REG);
}
@@ -450,11 +484,11 @@
return rx_fifo_depth;
}
-static inline void se_config_packing(void __iomem *base, int bpw,
- int pack_words, bool msb_to_lsb)
+static inline void se_get_packing_config(int bpw, int pack_words,
+ bool msb_to_lsb, unsigned long *cfg0,
+ unsigned long *cfg1)
{
u32 cfg[4] = {0};
- unsigned long cfg0, cfg1;
int len = ((bpw < 8) ? (bpw - 1) : 7);
int idx = ((msb_to_lsb == 1) ? len : 0);
int iter = (bpw * pack_words) >> 3;
@@ -466,8 +500,16 @@
if (i == iter - 1)
cfg[i] |= 1;
}
- cfg0 = cfg[0] | (cfg[1] << 10);
- cfg1 = cfg[2] | (cfg[3] << 10);
+ *cfg0 = cfg[0] | (cfg[1] << 10);
+ *cfg1 = cfg[2] | (cfg[3] << 10);
+}
+
+static inline void se_config_packing(void __iomem *base, int bpw,
+ int pack_words, bool msb_to_lsb)
+{
+ unsigned long cfg0, cfg1;
+
+ se_get_packing_config(bpw, pack_words, msb_to_lsb, &cfg0, &cfg1);
geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
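The split above lets a GSI/DMA path compute the packing words without touching SE registers, while se_config_packing() keeps its old register-writing behavior on top of the helper. A hypothetical caller:

/* Hypothetical GSI setup: packing words for 8 bits per word, 4 words
 * per FIFO entry, MSB-first, computed without any register access. */
static void example_fill_gsi_packing(unsigned long *tx_cfg0,
				     unsigned long *tx_cfg1)
{
	se_get_packing_config(8, 4, true, tx_cfg0, tx_cfg1);
	/* The caller would place these words into its GSI descriptor. */
}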
diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h
new file mode 100644
index 0000000..252464a
--- /dev/null
+++ b/include/linux/qcrypto.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+
+#define QCRYPTO_CTX_KEY_MASK 0x000000ff
+#define QCRYPTO_CTX_USE_HW_KEY 0x00000001
+#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002
+
+#define QCRYPTO_CTX_XTS_MASK 0x0000ff00
+#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100
+#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200
+
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev);
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
+/*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+ unsigned int flags);
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);*/
+
+struct crypto_engine_entry {
+ u32 hw_instance;
+ u32 ce_device;
+ int shared;
+};
+
+int qcrypto_get_num_engines(void);
+void qcrypto_get_engine_list(size_t num_engines,
+ struct crypto_engine_entry *arr);
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+ unsigned int fde_pfe,
+ unsigned int hw_inst);
+
+
+struct qcrypto_func_set {
+ int (*cipher_set)(struct ablkcipher_request *req,
+ unsigned int fde_pfe,
+ unsigned int hw_inst);
+ int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+ int (*get_num_engines)(void);
+ void (*get_engine_list)(size_t num_engines,
+ struct crypto_engine_entry *arr);
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
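A hedged sketch of the flag API above for an FDE-style request; the engine index and request setup are assumptions:

/* Hypothetical: route a request to the HW key with 512-byte XTS units. */
static int example_mark_fde_request(struct ablkcipher_request *req)
{
	int ret;

	ret = qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_HW_KEY |
					   QCRYPTO_CTX_XTS_DU_SIZE_512B);
	if (ret)
		return ret;

	return qcrypto_cipher_set_device(req, 0);	/* engine 0 assumed */
}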
diff --git a/include/linux/qpnp/qpnp-misc.h b/include/linux/qpnp/qpnp-misc.h
new file mode 100644
index 0000000..7d95bf2
--- /dev/null
+++ b/include/linux/qpnp/qpnp-misc.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_MISC_H
+#define __QPNP_MISC_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_QPNP_MISC
+/**
+ * qpnp_misc_irqs_available - check if IRQs are available
+ *
+ * @consumer_dev: device struct
+ *
+ * This function returns true if the MISC interrupts are available
+ * based on a check in the MISC peripheral revision registers.
+ *
+ * Any consumer of this function needs to reference a MISC device phandle
+ * using the "qcom,misc-ref" property in its device tree node.
+ */
+
+int qpnp_misc_irqs_available(struct device *consumer_dev);
+
+/**
+ * qpnp_misc_read_reg - read register from misc device
+ *
+ * @node: device node pointer
+ * @address: address offset in misc peripheral to be read
+ * @val: data read from register
+ *
+ * This function returns zero if reading the MISC register succeeds.
+ *
+ */
+
+int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val);
+#else
+static inline int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+ return 0;
+}
+static inline int qpnp_misc_read_reg(struct device_node *node, u16 addr,
+ u8 *val)
+{
+ return 0;
+}
+#endif
+#endif
diff --git a/include/linux/qpnp/qpnp-pbs.h b/include/linux/qpnp/qpnp-pbs.h
new file mode 100644
index 0000000..39497ac
--- /dev/null
+++ b/include/linux/qpnp/qpnp-pbs.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_PBS_H
+#define _QPNP_PBS_H
+
+#ifdef CONFIG_QPNP_PBS
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap);
+#else
+static inline int qpnp_pbs_trigger_event(struct device_node *dev_node,
+ u8 bitmap) {
+ return -ENODEV;
+}
+#endif
+
+#endif
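Usage is a single call with the PBS device node and a trigger bitmap; a hypothetical client, with the "qcom,pbs-client" phandle property name assumed:

#include <linux/of.h>

/* Hypothetical client: fire PBS trigger bit 0 on some event. */
static int example_fire_pbs(struct device *dev)
{
	struct device_node *pbs_node;
	int ret;

	pbs_node = of_parse_phandle(dev->of_node, "qcom,pbs-client", 0);
	if (!pbs_node)
		return -ENODEV;

	ret = qpnp_pbs_trigger_event(pbs_node, BIT(0));
	of_node_put(pbs_node);
	return ret;
}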
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index a0e2283..7fca674 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -208,6 +208,12 @@
#define PM660_V1P1_REV3 0x01
#define PM660_V1P1_REV4 0x01
+/* PM660L REV_ID */
+#define PM660L_V1P1_REV1 0x00
+#define PM660L_V1P1_REV2 0x00
+#define PM660L_V1P1_REV3 0x01
+#define PM660L_V1P1_REV4 0x01
+
/* PMI8998 FAB_ID */
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
@@ -229,6 +235,9 @@
/* SMB1381 */
#define SMB1381_SUBTYPE 0x17
+/* SMB1355 */
+#define SMB1355_SUBTYPE 0x1C
+
struct pmic_revid_data {
u8 rev1;
u8 rev2;
diff --git a/include/linux/random.h b/include/linux/random.h
index 7bd2403..16ab429 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -37,7 +37,6 @@
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
-extern int random_int_secret_init(void);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 867de7d..52524a8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1167,6 +1167,8 @@
struct capacity_state *cap_states; /* ptr to capacity state array */
};
+extern bool sched_is_energy_aware(void);
+
unsigned long capacity_curr_of(int cpu);
struct sched_group;
@@ -1751,6 +1753,10 @@
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+#ifdef CONFIG_CGROUPS
+ /* disallow userland-initiated cgroup migration */
+ unsigned no_cgroup_migration:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 60cc768..f3fa9e6 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -79,6 +79,7 @@
* @current_state: current client state
* @crtc_id: crtc_id associated with this rsc client.
* @rsc_index: rsc index of a client - only index "0" valid.
+ * @id: Index of client. It is assigned during the client_create call
* @list: list to attach client master list
*/
struct sde_rsc_client {
@@ -86,6 +87,7 @@
short current_state;
int crtc_id;
u32 rsc_index;
+ u32 id;
struct list_head list;
};
diff --git a/include/linux/seemp_instrumentation.h b/include/linux/seemp_instrumentation.h
new file mode 100644
index 0000000..21bc436
--- /dev/null
+++ b/include/linux/seemp_instrumentation.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __SEEMP_LOGK_STUB__
+#define __SEEMP_LOGK_STUB__
+
+#ifdef CONFIG_SEEMP_CORE
+#include <linux/kernel.h>
+
+#define MAX_BUF_SIZE 188
+
+#define SEEMP_LOGK_API_SIZE sizeof(int)
+
+/* Write: api_id + skip encoding byte + params */
+#define SEEMP_LOGK_RECORD(api_id, format, ...) do { \
+ *((int *)(buf - SEEMP_LOGK_API_SIZE)) = api_id; \
+ snprintf(buf + 1, MAX_BUF_SIZE - 1, format, ##__VA_ARGS__); \
+} while (0)
+
+extern void *(*seemp_logk_kernel_begin)(char **buf);
+extern void (*seemp_logk_kernel_end)(void *blck);
+
+static inline void *seemp_setup_buf(char **buf)
+{
+ void *blck;
+
+ if (seemp_logk_kernel_begin && seemp_logk_kernel_end) {
+ blck = seemp_logk_kernel_begin(buf);
+ if (!*buf) {
+ seemp_logk_kernel_end(blck);
+ return NULL;
+ }
+ } else {
+ return NULL;
+ }
+ return blck;
+}
+/*
+ * NOTE: only sendto() is instrumented, since the send() syscall is
+ * implemented internally as sendto() with two extra parameters.
+ */
+static inline void seemp_logk_sendto(int fd, void __user *buff, size_t len,
+ unsigned int flags, struct sockaddr __user *addr, int addr_len)
+{
+ char *buf = NULL;
+ void *blck = NULL;
+
+ /*sets up buf and blck correctly*/
+ blck = seemp_setup_buf(&buf);
+ if (!blck)
+ return;
+
+ /*fill the buf*/
+ SEEMP_LOGK_RECORD(SEEMP_API_kernel__sendto, "len=%u,fd=%d",
+ (unsigned int)len, fd);
+
+ seemp_logk_kernel_end(blck);
+}
+#else
+static inline void seemp_logk_sendto(int fd, void __user *buff,
+ size_t len, unsigned int flags, struct sockaddr __user *addr,
+ int addr_len)
+{
+}
+#endif
+#endif
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6e22b54..c146ebc 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@
};
union {
unsigned long nr_segs;
- int idx;
+ struct {
+ int idx;
+ int start_idx;
+ };
};
};
@@ -81,6 +84,7 @@
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
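iov_iter_revert() is the inverse of iov_iter_advance(), rewinding the iterator after a partial or failed copy; a hedged sketch:

/* Hypothetical write path: undo iterator consumption on a short copy. */
static ssize_t example_copy(struct page *page, size_t len,
			    struct iov_iter *from)
{
	size_t copied = copy_page_from_iter(page, 0, len, from);

	if (copied != len) {
		/* Rewind what was consumed so the caller may retry. */
		iov_iter_revert(from, copied);
		return -EFAULT;
	}
	return copied;
}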
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 263f20a..ffb6393 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -45,6 +45,7 @@
USB_PHY_TYPE_UNDEFINED,
USB_PHY_TYPE_USB2,
USB_PHY_TYPE_USB3,
+ USB_PHY_TYPE_USB3_DP,
};
/* OTG defines lots of enumeration states before device reset */
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 262fa64..0583431 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -103,6 +103,7 @@
int msm_vidc_g_fmt(void *instance, struct v4l2_format *f);
int msm_vidc_s_ctrl(void *instance, struct v4l2_control *a);
int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
+int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
int msm_vidc_release_buffer(void *instance, int buffer_type,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 677a047..6d27dae 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4245,6 +4245,32 @@
struct ieee80211_regdomain *rd);
/**
+ * regulatory_hint_user - hint to the wireless core a regulatory domain
+ * which the driver has received from an application
+ * @alpha2: the ISO/IEC 3166 alpha2 code of the regulatory domain the
+ * driver claims the device should be in.
+ * @user_reg_hint_type: the type of user regulatory hint.
+ *
+ * Wireless drivers can use this function to hint to the wireless core
+ * the current regulatory domain as specified by trusted applications,
+ * it is the driver's responsibility to establish which applications it
+ * trusts.
+ *
+ * The wiphy should be registered to cfg80211 prior to this call.
+ * For cfg80211 drivers this means you must first use wiphy_register(),
+ * for mac80211 drivers you must first use ieee80211_register_hw().
+ *
+ * Drivers should check the return value; it is possible to get
+ * -ENOMEM or -EINVAL.
+ *
+ * Return: 0 on success; -ENOMEM or -EINVAL on failure.
+ */
+int regulatory_hint_user(const char *alpha2,
+ enum nl80211_user_reg_hint_type user_reg_hint_type);
+
+/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
* @wiphy: the wireless device we want to process the regulatory domain on
* @regd: the custom regulatory domain to use for this wiphy
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8ec7c30..931b494 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -260,27 +260,6 @@
#define SCSI_INQ_PQ_NOT_CON 0x01
#define SCSI_INQ_PQ_NOT_CAP 0x03
-
-/*
- * Here are some scsi specific ioctl commands which are sometimes useful.
- *
- * Note that include/linux/cdrom.h also defines IOCTL 0x5300 - 0x5395
- */
-
-/* Used to obtain PUN and LUN info. Conflicts with CDROMAUDIOBUFSIZ */
-#define SCSI_IOCTL_GET_IDLUN 0x5382
-
-/* 0x5383 and 0x5384 were used for SCSI_IOCTL_TAGGED_{ENABLE,DISABLE} */
-
-/* Used to obtain the host number of a device. */
-#define SCSI_IOCTL_PROBE_HOST 0x5385
-
-/* Used to obtain the bus number for a device */
-#define SCSI_IOCTL_GET_BUS_NUMBER 0x5386
-
-/* Used to obtain the PCI location of a device */
-#define SCSI_IOCTL_GET_PCI 0x5387
-
/* Pull a u32 out of a SCSI message (using BE SCSI conventions) */
static inline __u32 scsi_to_u32(__u8 *ptr)
{
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 6b567d7..7ef984a 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -17,6 +17,21 @@
#define ICNSS_MAX_IRQ_REGISTRATIONS 12
#define ICNSS_MAX_TIMESTAMP_LEN 32
+enum icnss_uevent {
+ ICNSS_UEVENT_FW_READY,
+ ICNSS_UEVENT_FW_CRASHED,
+ ICNSS_UEVENT_FW_DOWN,
+};
+
+struct icnss_uevent_fw_down_data {
+ bool crashed;
+};
+
+struct icnss_uevent_data {
+ enum icnss_uevent uevent;
+ void *data;
+};
+
struct icnss_driver_ops {
char *name;
int (*probe)(struct device *dev);
@@ -28,6 +43,7 @@
int (*pm_resume)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
+ int (*uevent)(struct device *dev, struct icnss_uevent_data *uevent);
};
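A sketch of the new uevent hook from a client driver's perspective; for ICNSS_UEVENT_FW_DOWN the crash flag rides in icnss_uevent_fw_down_data:

#include <linux/device.h>

/* Hypothetical WLAN client handler for ICNSS firmware state events. */
static int example_icnss_uevent(struct device *dev,
				struct icnss_uevent_data *uevent)
{
	struct icnss_uevent_fw_down_data *fw_down;

	switch (uevent->uevent) {
	case ICNSS_UEVENT_FW_DOWN:
		fw_down = uevent->data;
		/* Stop issuing requests; schedule recovery if crashed. */
		if (fw_down->crashed)
			dev_err(dev, "firmware crashed\n");
		break;
	case ICNSS_UEVENT_FW_READY:
	case ICNSS_UEVENT_FW_CRASHED:
	default:
		break;
	}
	return 0;
}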
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index a7b87aa..dbae8e8 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,7 +62,7 @@
#define MSM_DUMP_MINOR(val) (val & 0xFFFFF)
-#define MAX_NUM_ENTRIES 0x120
+#define MAX_NUM_ENTRIES 0x140
enum msm_dump_data_ids {
MSM_DUMP_DATA_CPU_CTX = 0x00,
@@ -82,10 +82,12 @@
MSM_DUMP_DATA_VSENSE = 0xE9,
MSM_DUMP_DATA_RPM = 0xEA,
MSM_DUMP_DATA_SCANDUMP = 0xEB,
+ MSM_DUMP_DATA_RPMH = 0xEC,
MSM_DUMP_DATA_TMC_ETF = 0xF0,
MSM_DUMP_DATA_TMC_REG = 0x100,
MSM_DUMP_DATA_LOG_BUF = 0x110,
MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
+ MSM_DUMP_DATA_SCANDUMP_PER_CPU = 0x130,
MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
};
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
new file mode 100644
index 0000000..0efea04
--- /dev/null
+++ b/include/soc/qcom/qseecomi.h
@@ -0,0 +1,729 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOMI_H_
+#define __QSEECOMI_H_
+
+#include <linux/qseecom.h>
+
+#define QSEECOM_KEY_ID_SIZE 32
+
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD -19 /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
+#define QSEOS_RESULT_FAIL_KS_OP -64
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS -65
+#define QSEOS_RESULT_FAIL_MAX_KEYS -66
+#define QSEOS_RESULT_FAIL_SAVE_KS -67
+#define QSEOS_RESULT_FAIL_LOAD_KS -68
+#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE -69
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE -70
+#define QSEOS_RESULT_FAIL_INCORRECT_PSWD -71
+#define QSEOS_RESULT_FAIL_MAX_ATTEMPT -72
+#define QSEOS_RESULT_FAIL_PENDING_OPERATION -73
+
+enum qseecom_command_scm_resp_type {
+ QSEOS_APP_ID = 0xEE01,
+ QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+ QSEOS_APP_START_COMMAND = 0x01,
+ QSEOS_APP_SHUTDOWN_COMMAND,
+ QSEOS_APP_LOOKUP_COMMAND,
+ QSEOS_REGISTER_LISTENER,
+ QSEOS_DEREGISTER_LISTENER,
+ QSEOS_CLIENT_SEND_DATA_COMMAND,
+ QSEOS_LISTENER_DATA_RSP_COMMAND,
+ QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+ QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+ QSEOS_GET_APP_STATE_COMMAND,
+ QSEOS_LOAD_SERV_IMAGE_COMMAND,
+ QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+ QSEOS_APP_REGION_NOTIFICATION,
+ QSEOS_REGISTER_LOG_BUF_COMMAND,
+ QSEOS_RPMB_PROVISION_KEY_COMMAND,
+ QSEOS_RPMB_ERASE_COMMAND,
+ QSEOS_GENERATE_KEY = 0x11,
+ QSEOS_DELETE_KEY,
+ QSEOS_MAX_KEY_COUNT,
+ QSEOS_SET_KEY,
+ QSEOS_UPDATE_KEY_USERINFO,
+ QSEOS_TEE_OPEN_SESSION,
+ QSEOS_TEE_INVOKE_COMMAND,
+ QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
+ QSEOS_TEE_CLOSE_SESSION,
+ QSEOS_TEE_REQUEST_CANCELLATION,
+ QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
+ QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+ QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
+ QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+ QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
+ QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
+ QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
+ QSEOS_FSM_IKE_REQ_CMD = 0x203,
+ QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
+ QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
+ QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
+ QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
+ QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
+
+ QSEOS_CMD_MAX = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+ QSEOS_RESULT_SUCCESS = 0,
+ QSEOS_RESULT_INCOMPLETE,
+ QSEOS_RESULT_BLOCKED_ON_LISTENER,
+ QSEOS_RESULT_FAILURE = 0xFFFFFFFF
+};
+
+enum qseecom_pipe_type {
+ QSEOS_PIPE_ENC = 0x1,
+ QSEOS_PIPE_ENC_XTS = 0x2,
+ QSEOS_PIPE_AUTH = 0x4,
+ QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
+/* QSEE Reentrancy support phase */
+enum qseecom_qsee_reentrancy_phase {
+ QSEE_REENTRANCY_PHASE_0 = 0,
+ QSEE_REENTRANCY_PHASE_1,
+ QSEE_REENTRANCY_PHASE_2,
+ QSEE_REENTRANCY_PHASE_3,
+ QSEE_REENTRANCY_PHASE_MAX = 0xFF
+};
+
+__packed struct qsee_apps_region_info_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t addr;
+ uint32_t size;
+};
+
+__packed struct qsee_apps_region_info_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint64_t addr;
+ uint32_t size;
+};
+
+__packed struct qseecom_check_app_ireq {
+ uint32_t qsee_cmd_id;
+ char app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_load_app_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t mdt_len; /* Length of the mdt file */
+ uint32_t img_len; /* Length of .bxx and .mdt files */
+ uint32_t phy_addr; /* phy addr of the start of image */
+ char app_name[MAX_APP_NAME_SIZE]; /* application name*/
+};
+
+__packed struct qseecom_load_app_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t mdt_len;
+ uint32_t img_len;
+ uint64_t phy_addr;
+ char app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_unload_app_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t app_id;
+};
+
+__packed struct qseecom_load_lib_image_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t mdt_len;
+ uint32_t img_len;
+ uint32_t phy_addr;
+};
+
+__packed struct qseecom_load_lib_image_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t mdt_len;
+ uint32_t img_len;
+ uint64_t phy_addr;
+};
+
+__packed struct qseecom_unload_lib_image_ireq {
+ uint32_t qsee_cmd_id;
+};
+
+__packed struct qseecom_register_listener_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t listener_id;
+ uint32_t sb_ptr;
+ uint32_t sb_len;
+};
+
+__packed struct qseecom_register_listener_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t listener_id;
+ uint64_t sb_ptr;
+ uint32_t sb_len;
+};
+
+__packed struct qseecom_unregister_listener_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t listener_id;
+};
+
+__packed struct qseecom_client_send_data_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t app_id;
+ uint32_t req_ptr;
+ uint32_t req_len;
+ uint32_t rsp_ptr; /* First 4 bytes should be the return status */
+ uint32_t rsp_len;
+ uint32_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_data_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t app_id;
+ uint64_t req_ptr;
+ uint32_t req_len;
+ uint64_t rsp_ptr;
+ uint32_t rsp_len;
+ uint64_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_reg_log_buf_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t phy_addr;
+ uint32_t len;
+};
+
+__packed struct qseecom_reg_log_buf_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint64_t phy_addr;
+ uint32_t len;
+};
+
+/* send_data resp */
+__packed struct qseecom_client_listener_data_irsp {
+ uint32_t qsee_cmd_id;
+ uint32_t listener_id;
+ uint32_t status;
+ uint32_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_listener_data_64bit_irsp {
+ uint32_t qsee_cmd_id;
+ uint32_t listener_id;
+ uint32_t status;
+ uint64_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result: status value from enum qseecom_qceos_cmd_status
+ * @resp_type: response type from enum qseecom_command_scm_resp_type
+ * @data: response payload (e.g. the app ID or listener ID)
+ */
+__packed struct qseecom_command_scm_resp {
+ uint32_t result;
+ enum qseecom_command_scm_resp_type resp_type;
+ unsigned int data;
+};
+
+struct qseecom_rpmb_provision_key {
+ uint32_t key_type;
+};
+
+__packed struct qseecom_client_send_service_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t key_type; /* in */
+ unsigned int req_len; /* in */
+ uint32_t rsp_ptr; /* in/out */
+ unsigned int rsp_len; /* in/out */
+};
+
+__packed struct qseecom_client_send_service_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t key_type;
+ unsigned int req_len;
+ uint64_t rsp_ptr;
+ unsigned int rsp_len;
+};
+
+__packed struct qseecom_key_generate_ireq {
+ uint32_t qsee_command_id;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+ uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_select_ireq {
+ uint32_t qsee_command_id;
+ uint32_t ce;
+ uint32_t pipe;
+ uint32_t pipe_type;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+ uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_delete_ireq {
+ uint32_t qsee_command_id;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+ uint8_t hash32[QSEECOM_HASH_SIZE];
+
+};
+
+__packed struct qseecom_key_userinfo_update_ireq {
+ uint32_t qsee_command_id;
+ uint32_t flags;
+ uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+ uint8_t current_hash32[QSEECOM_HASH_SIZE];
+ uint8_t new_hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_max_count_query_ireq {
+ uint32_t flags;
+};
+
+__packed struct qseecom_key_max_count_query_irsp {
+ uint32_t max_key_count;
+};
+
+__packed struct qseecom_qteec_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t app_id;
+ uint32_t req_ptr;
+ uint32_t req_len;
+ uint32_t resp_ptr;
+ uint32_t resp_len;
+ uint32_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_qteec_64bit_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t app_id;
+ uint64_t req_ptr;
+ uint32_t req_len;
+ uint64_t resp_ptr;
+ uint32_t resp_len;
+ uint64_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_fsm_key_req {
+ uint32_t qsee_cmd_id;
+ uint32_t req_ptr;
+ uint32_t req_len;
+ uint32_t rsp_ptr;
+ uint32_t rsp_len;
+};
+
+__packed struct qseecom_continue_blocked_request_ireq {
+ uint32_t qsee_cmd_id;
+ uint32_t app_id;
+};
+
+
+/********** ARMV8 SMC INTERFACE TZ MACRO *******************/
+
+#define TZ_SVC_APP_MGR 1 /* Application management */
+#define TZ_SVC_LISTENER 2 /* Listener service management */
+#define TZ_SVC_EXTERNAL 3 /* External image loading */
+#define TZ_SVC_RPMB 4 /* RPMB */
+#define TZ_SVC_KEYSTORE 5 /* Keystore management */
+#define TZ_SVC_ES 16 /* Enterprise Security */
+#define TZ_SVC_MDTP 18 /* Mobile Device Theft Protection */
+
+/*----------------------------------------------------------------------------
+ * Owning Entity IDs (defined by ARM SMC doc)
+ * ---------------------------------------------------------------------------
+ */
+#define TZ_OWNER_ARM 0 /** ARM Architecture call ID */
+#define TZ_OWNER_CPU 1 /** CPU service call ID */
+#define TZ_OWNER_SIP 2 /** SIP service call ID */
+#define TZ_OWNER_OEM 3 /** OEM service call ID */
+#define TZ_OWNER_STD 4 /** Standard service call ID */
+
+/** Values 5-47 are reserved for future use */
+
+/** Trusted Application call IDs */
+#define TZ_OWNER_TZ_APPS 48
+#define TZ_OWNER_TZ_APPS_RESERVED 49
+/** Trusted OS Call IDs */
+#define TZ_OWNER_QSEE_OS 50
+#define TZ_OWNER_MOBI_OS 51
+#define TZ_OWNER_OS_RESERVED_3 52
+#define TZ_OWNER_OS_RESERVED_4 53
+#define TZ_OWNER_OS_RESERVED_5 54
+#define TZ_OWNER_OS_RESERVED_6 55
+#define TZ_OWNER_OS_RESERVED_7 56
+#define TZ_OWNER_OS_RESERVED_8 57
+#define TZ_OWNER_OS_RESERVED_9 58
+#define TZ_OWNER_OS_RESERVED_10 59
+#define TZ_OWNER_OS_RESERVED_11 60
+#define TZ_OWNER_OS_RESERVED_12 61
+#define TZ_OWNER_OS_RESERVED_13 62
+#define TZ_OWNER_OS_RESERVED_14 63
+
+#define TZ_SVC_INFO 6 /* Misc. information services */
+
+/** Trusted Application call groups */
+#define TZ_SVC_APP_ID_PLACEHOLDER 0 /* SVC bits will contain App ID */
+
+/** General helper macro to create a bitmask from bits low to high. */
+#define TZ_MASK_BITS(h, l) ((0xffffffff >> (32 - ((h - l) + 1))) << l)
+
+/*
+ * Macro used to define an SMC ID based on the owner ID,
+ * service ID, and function number.
+ */
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+ ((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
+#define TZ_SYSCALL_PARAM_NARGS_MASK TZ_MASK_BITS(3, 0)
+#define TZ_SYSCALL_PARAM_TYPE_MASK TZ_MASK_BITS(1, 0)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
+ p4, p5, p6, p7, p8, p9, p10) \
+ ((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
+ ((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
+ ((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
+ ((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
+ ((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
+ ((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
+ ((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
+ ((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
+ ((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
+ ((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
+ ((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
+
+/*
+ * Macros used to create the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
+ TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
+ TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
+ TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
+ TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
+ TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
+ TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
+ TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
+ TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+ TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
+ TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
+
+/*
+ * Macro used to obtain the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID) CMD_ID ## _PARAM_ID
+
+/** Helper macro to extract the owning entity from the SMC ID. */
+#define TZ_SYSCALL_OWNER_ID(r0) ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
+
+/** Helper macro for checking whether an owning entity is of type trusted OS. */
+#define IS_OWNER_TRUSTED_OS(owner_id) \
+ (((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
+
+#define TZ_SYSCALL_PARAM_TYPE_VAL 0x0 /* type of value */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RO 0x1 /* type of buffer RO */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RW 0x2 /* type of buffer RW */
+
+#define TZ_OS_APP_START_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+
+#define TZ_OS_APP_START_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_3( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
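A worked expansion of the SMC ID macro: TZ_OWNER_QSEE_OS is 50 (0x32) and TZ_SVC_APP_MGR is 1, so ((0x32 & 0x3f) << 24) | ((1 & 0xff) << 8) | 0x01 = 0x32000101, whose owner field decodes back into the trusted-OS range (50..63). As a compile-time check:

#include <linux/bug.h>

/* Sanity-check the worked example above at compile time. */
static inline void example_smc_id_check(void)
{
	BUILD_BUG_ON(TZ_OS_APP_START_ID != 0x32000101);
	BUILD_BUG_ON(!IS_OWNER_TRUSTED_OS(
			TZ_SYSCALL_OWNER_ID(TZ_OS_APP_START_ID)));
}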
+#define TZ_OS_APP_SHUTDOWN_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
+
+#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_LOOKUP_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
+
+#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_GET_STATE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
+
+#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_3( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_REGISTER_LISTENER_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
+
+#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
+
+#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_3( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_3( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
+
+
+#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_ERASE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
+
+#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_GEN_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
+
+#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_DEL_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
+
+#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_UPDATE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
+
+#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_2( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_3( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_MDTP_CIPHER_DIP_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
+
+#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_7( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_7( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+ TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_7( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_4( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#endif /* __QSEECOMI_H_ */
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
index 34434fd..75e6ccd 100644
--- a/include/soc/qcom/rpmh.h
+++ b/include/soc/qcom/rpmh.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,8 @@
int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int *n);
+int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable);
+
int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n);
int rpmh_invalidate(struct rpmh_client *rc);
@@ -70,6 +72,9 @@
enum rpmh_state state, struct tcs_cmd *cmd, int *n)
{ return -ENODEV; }
+static inline int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
+{ return -ENODEV; }
+
static inline int rpmh_write_control(struct rpmh_client *rc,
struct tcs_cmd *cmd, int n)
{ return -ENODEV; }
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 2656d5d..b54eefc 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -96,6 +96,8 @@
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmhamster")
#define early_machine_is_msmfalcon() \
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmfalcon")
+#define early_machine_is_sdxpoorwills() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdxpoorwills")
#define early_machine_is_sdm845() \
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
#define early_machine_is_sdm830() \
@@ -137,6 +139,7 @@
#define early_machine_is_apqcobalt() 0
#define early_machine_is_msmhamster() 0
#define early_machine_is_msmfalcon() 0
+#define early_machine_is_sdxpoorwills() 0
#define early_machine_is_sdm845() 0
#define early_machine_is_sdm830() 0
#endif
@@ -198,6 +201,7 @@
MSM_CPU_COBALT,
MSM_CPU_HAMSTER,
MSM_CPU_FALCON,
+ SDX_CPU_SDXPOORWILLS,
MSM_CPU_SDM845,
MSM_CPU_SDM830,
};
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 9ea0736..5478417 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
struct subsys_device;
+extern struct bus_type subsys_bus_type;
enum {
RESET_SOC = 0,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6233e8f..0383c60 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -705,6 +705,7 @@
u64 unpacked_lun;
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
+ bool lun_shutdown;
bool lun_access_ro;
u32 lun_index;
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 7586072..ad19e73 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -192,6 +192,42 @@
TP_ARGS(core, phase)
);
+DECLARE_EVENT_CLASS(clk_state_dump,
+
+ TP_PROTO(const char *name, unsigned int prepare_count,
+ unsigned int enable_count, unsigned long rate, unsigned int vdd_level),
+
+ TP_ARGS(name, prepare_count, enable_count, rate, vdd_level),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(unsigned int, prepare_count)
+ __field(unsigned int, enable_count)
+ __field(unsigned long, rate)
+ __field(unsigned int, vdd_level)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->prepare_count = prepare_count;
+ __entry->enable_count = enable_count;
+ __entry->rate = rate;
+ __entry->vdd_level = vdd_level;
+ ),
+
+ TP_printk("%s\tprepare:enable cnt [%u:%u]\trate: vdd_level [%lu:%u]",
+ __get_str(name), __entry->prepare_count, __entry->enable_count,
+ __entry->rate, __entry->vdd_level)
+);
+
+DEFINE_EVENT(clk_state_dump, clk_state,
+
+ TP_PROTO(const char *name, unsigned int prepare_count,
+ unsigned int enable_count, unsigned long rate, unsigned int vdd_level),
+
+ TP_ARGS(name, prepare_count, enable_count, rate, vdd_level)
+);
+
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */
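
The DEFINE_EVENT above generates trace_clk_state(); a hedged sketch of how a clock-state dump path might emit it. The clk_core field names and clk_hw_get_name() usage are assumptions about the clock framework internals, not part of this patch:

    static void example_trace_one_clk(struct clk_core *core)
    {
        trace_clk_state(clk_hw_get_name(core->hw),
                core->prepare_count, core->enable_count,
                core->rate, 0 /* vdd_level, if tracked */);
    }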
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
index a72f9b9..917b3f0 100644
--- a/include/trace/events/mmc.h
+++ b/include/trace/events/mmc.h
@@ -176,7 +176,152 @@
__entry->hold_retune, __entry->retune_period)
);
-#endif /* _TRACE_MMC_H */
+TRACE_EVENT(mmc_cmd_rw_start,
+ TP_PROTO(unsigned int cmd, unsigned int arg, unsigned int flags),
+ TP_ARGS(cmd, arg, flags),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned int, arg)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ __entry->flags = flags;
+ ),
+ TP_printk("cmd=%u,arg=0x%08x,flags=0x%08x",
+ __entry->cmd, __entry->arg, __entry->flags)
+);
+
+TRACE_EVENT(mmc_cmd_rw_end,
+ TP_PROTO(unsigned int cmd, unsigned int status, unsigned int resp),
+ TP_ARGS(cmd, status, resp),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned int, status)
+ __field(unsigned int, resp)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->status = status;
+ __entry->resp = resp;
+ ),
+ TP_printk("cmd=%u,int_status=0x%08x,response=0x%08x",
+ __entry->cmd, __entry->status, __entry->resp)
+);
+
+TRACE_EVENT(mmc_data_rw_end,
+ TP_PROTO(unsigned int cmd, unsigned int status),
+ TP_ARGS(cmd, status),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned int, status)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->status = status;
+ ),
+ TP_printk("cmd=%u,int_status=0x%08x",
+ __entry->cmd, __entry->status)
+);
+
+DECLARE_EVENT_CLASS(mmc_adma_class,
+ TP_PROTO(unsigned int cmd, unsigned int len),
+ TP_ARGS(cmd, len),
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned int, len)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->len = len;
+ ),
+ TP_printk("cmd=%u,sg_len=0x%08x", __entry->cmd, __entry->len)
+);
+
+DEFINE_EVENT(mmc_adma_class, mmc_adma_table_pre,
+ TP_PROTO(unsigned int cmd, unsigned int len),
+ TP_ARGS(cmd, len));
+
+DEFINE_EVENT(mmc_adma_class, mmc_adma_table_post,
+ TP_PROTO(unsigned int cmd, unsigned int len),
+ TP_ARGS(cmd, len));
+
+TRACE_EVENT(mmc_clk,
+ TP_PROTO(char *print_info),
+
+ TP_ARGS(print_info),
+
+ TP_STRUCT__entry(
+ __string(print_info, print_info)
+ ),
+
+ TP_fast_assign(
+ __assign_str(print_info, print_info);
+ ),
+
+ TP_printk("%s",
+ __get_str(print_info)
+ )
+);
+
+DECLARE_EVENT_CLASS(mmc_pm_template,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+
+ TP_ARGS(dev_name, err, usecs),
+
+ TP_STRUCT__entry(
+ __field(s64, usecs)
+ __field(int, err)
+ __string(dev_name, dev_name)
+ ),
+
+ TP_fast_assign(
+ __entry->usecs = usecs;
+ __entry->err = err;
+ __assign_str(dev_name, dev_name);
+ ),
+
+ TP_printk(
+ "took %lld usecs, %s err %d",
+ __entry->usecs,
+ __get_str(dev_name),
+ __entry->err
+ )
+);
+
+DEFINE_EVENT(mmc_pm_template, mmc_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_runtime_suspend,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_runtime_resume,
+ TP_PROTO(const char *dev_name, int err, s64 usecs),
+ TP_ARGS(dev_name, err, usecs));
+#endif /* if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
#include <trace/define_trace.h>
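
All of the mmc_pm_template events share the (dev_name, err, usecs) shape. A minimal sketch of a timed runtime-suspend hook emitting one of them; example_do_suspend() is a hypothetical helper:

    static int example_runtime_suspend(struct device *dev)
    {
        ktime_t start = ktime_get();
        int err = example_do_suspend(dev);    /* hypothetical */

        trace_mmc_runtime_suspend(dev_name(dev), err,
                ktime_to_us(ktime_sub(ktime_get(), start)));
        return err;
    }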
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 2b4a8ff..031ae49 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -47,35 +47,40 @@
TRACE_EVENT(cdev_update,
- TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
+ TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target,
+ unsigned long min_target),
- TP_ARGS(cdev, target),
+ TP_ARGS(cdev, target, min_target),
TP_STRUCT__entry(
__string(type, cdev->type)
__field(unsigned long, target)
+ __field(unsigned long, min_target)
),
TP_fast_assign(
__assign_str(type, cdev->type);
__entry->target = target;
+ __entry->min_target = min_target;
),
- TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
+ TP_printk("type=%s target=%lu min_target=%lu", __get_str(type),
+ __entry->target, __entry->min_target)
);
TRACE_EVENT(thermal_zone_trip,
TP_PROTO(struct thermal_zone_device *tz, int trip,
- enum thermal_trip_type trip_type),
+ enum thermal_trip_type trip_type, bool is_trip),
- TP_ARGS(tz, trip, trip_type),
+ TP_ARGS(tz, trip, trip_type, is_trip),
TP_STRUCT__entry(
__string(thermal_zone, tz->type)
__field(int, id)
__field(int, trip)
__field(enum thermal_trip_type, trip_type)
+ __field(bool, is_trip)
),
TP_fast_assign(
@@ -83,10 +88,13 @@
__entry->id = tz->id;
__entry->trip = trip;
__entry->trip_type = trip_type;
+ __entry->is_trip = is_trip;
),
- TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s",
- __get_str(thermal_zone), __entry->id, __entry->trip,
+ TP_printk("thermal_zone=%s id=%d %s=%d trip_type=%s",
+ __get_str(thermal_zone), __entry->id,
+ (__entry->is_trip) ? "trip" : "hyst",
+ __entry->trip,
show_tzt_type(__entry->trip_type))
);
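
Since both tracepoints gained a parameter, every call site has to be updated in the same series. A hedged sketch of the new call shapes (the wrapper function is illustrative only):

    static void example_emit(struct thermal_cooling_device *cdev,
                 unsigned long target, unsigned long min_target,
                 struct thermal_zone_device *tz, int trip,
                 enum thermal_trip_type trip_type)
    {
        /* cooling device: the aggregated min vote now travels along */
        trace_cdev_update(cdev, target, min_target);

        /* 'true' marks a trip-point crossing, 'false' a hysteresis one */
        trace_thermal_zone_trip(tz, trip, trip_type, true);
    }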
diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h
index 143dacb..deb98c7 100644
--- a/include/uapi/asm-generic/ioctls.h
+++ b/include/uapi/asm-generic/ioctls.h
@@ -77,6 +77,9 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCPMGET 0x5441 /* PM get */
+#define TIOCPMPUT 0x5442 /* PM put */
+#define TIOCPMACT 0x5443 /* PM is active */
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
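
Hedged userspace sketch for the three new tty PM ioctls, a QTI extension: the device path is illustrative and the get/put vote semantics are inferred from the names, not documented in this hunk.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int example_tty_pm_vote(const char *path)
    {
        int fd = open(path, O_RDWR | O_NOCTTY);

        if (fd < 0)
            return -1;
        ioctl(fd, TIOCPMGET);    /* take a PM vote: keep port active */
        /* ... perform the transfer; TIOCPMACT queries active state ... */
        ioctl(fd, TIOCPMPUT);    /* release the vote */
        close(fd);
        return 0;
    }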
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index fda50e9..eb18389 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -316,6 +316,7 @@
#define DRM_SDE_WB_CONFIG 0x40
#define DRM_MSM_REGISTER_EVENT 0x41
#define DRM_MSM_DEREGISTER_EVENT 0x42
+#define DRM_MSM_RMFB2 0x43
/* sde custom events */
#define DRM_EVENT_HISTOGRAM 0x80000000
@@ -335,6 +336,8 @@
DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_RMFB2 DRM_IOW((DRM_COMMAND_BASE + \
+ DRM_MSM_RMFB2), unsigned int)
#if defined(__cplusplus)
}
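
Userspace sketch for the new ioctl: it takes the framebuffer id as an unsigned int by pointer, and drmIoctl() is the stock libdrm wrapper. That this variant exists to drop an FB without the implicit disable the core RMFB performs is an assumption from downstream SDE usage, not stated in this hunk.

    #include <xf86drm.h>
    #include "msm_drm.h"

    static int example_rmfb2(int drm_fd, unsigned int fb_id)
    {
        return drmIoctl(drm_fd, DRM_IOCTL_MSM_RMFB2, &fb_id);
    }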
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index b1bf6aa..74034c6 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -344,4 +344,16 @@
uint64_t modes;
};
+#define SDE_MAX_ROI_V1 4
+
+/**
+ * struct sde_drm_roi_v1 - list of regions of interest for a drm object
+ * @num_rects: number of valid rectangles in the roi array
+ * @roi: list of roi rectangles
+ */
+struct sde_drm_roi_v1 {
+ uint32_t num_rects;
+ struct drm_clip_rect roi[SDE_MAX_ROI_V1];
+};
+
#endif /* _SDE_DRM_H_ */
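
A hedged fill-in sketch for the new ROI payload. In practice userspace would hand this to the driver through an SDE blob property; the property name is not shown in this hunk, and the rectangle values are arbitrary.

    #include <drm/sde_drm.h>

    static void example_fill_roi(struct sde_drm_roi_v1 *cfg)
    {
        cfg->num_rects = 1;        /* must be <= SDE_MAX_ROI_V1 */
        cfg->roi[0] = (struct drm_clip_rect){
            .x1 = 0, .y1 = 0, .x2 = 540, .y2 = 960,
        };
    }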
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 33ba430..7c1899e 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -414,6 +414,8 @@
header-y += sdla.h
header-y += seccomp.h
header-y += securebits.h
+header-y += seemp_api.h
+header-y += seemp_param_id.h
header-y += selinux_netlink.h
header-y += sem.h
header-y += serial_core.h
diff --git a/include/uapi/linux/fips_status.h b/include/uapi/linux/fips_status.h
index d7cda94..7daf27b 100644
--- a/include/uapi/linux/fips_status.h
+++ b/include/uapi/linux/fips_status.h
@@ -5,24 +5,24 @@
#include <linux/ioctl.h>
/**
- * fips_status: global FIPS140-2 status
- * @FIPS140_STATUS_NA:
- * Not a FIPS140-2 compliant Build.
- * The flag status won't
- * change throughout
- * the lifetime
- * @FIPS140_STATUS_PASS_CRYPTO:
- * KAT self tests are passed.
- * @FIPS140_STATUS_QCRYPTO_ALLOWED:
- * Integrity test is passed.
- * @FIPS140_STATUS_PASS:
- * All tests are passed and build
- * is in FIPS140-2 mode
- * @FIPS140_STATUS_FAIL:
- * One of the test is failed.
- * This will block all requests
- * to crypto modules
- */
+* fips_status: global FIPS140-2 status
+* @FIPS140_STATUS_NA:
+* Not a FIPS140-2 compliant build.
+* The flag status won't
+* change throughout
+* the lifetime
+* @FIPS140_STATUS_PASS_CRYPTO:
+* KAT self tests have passed.
+* @FIPS140_STATUS_QCRYPTO_ALLOWED:
+* Integrity test has passed.
+* @FIPS140_STATUS_PASS:
+* All tests have passed and the build
+* is in FIPS140-2 mode
+* @FIPS140_STATUS_FAIL:
+* One of the tests has failed.
+* This will block all requests
+* to crypto modules
+*/
enum fips_status {
FIPS140_STATUS_NA = 0,
FIPS140_STATUS_PASS_CRYPTO = 1,
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index f6598d1..316e838 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,7 +34,7 @@
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
-#define RTF_PCPU 0x40000000
+#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */
#define RTF_LOCAL 0x80000000
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 7e385b8..8fec144 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -63,6 +63,61 @@
* commands in array in sequence to card.
*/
#define MMC_IOC_MULTI_CMD _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_multi_cmd)
+
+/**
+ * There are four request types applicable to rpmb accesses - two under the
+ * read category and two under the write category. They are:
+ *
+ * Reads
+ * -------
+ * 1. Read Write Counter
+ * 2. Authenticated data read
+ *
+ *
+ * Writes
+ * -------
+ * 1. Provision RPMB key (though it might be done in a secure environment)
+ * 2. Authenticated data write
+ *
+ * While it is given that the rpmb data frames have that information
+ * encoded in them and that the frames should be generated by a secure
+ * piece of code, the request types can be classified as above.
+ *
+ * Here is the set of commands that should be executed atomically in the
+ * ioctl for an rpmb read operation:
+ * 1. Switch partition
+ * 2. Set block count
+ * 3. Write request frame - CMD25 to write the rpmb request data frame
+ * 4. Set block count
+ * 5. Read the data - CMD18 to do the actual read
+ *
+ * Similarly, these are the commands that should be executed atomically in
+ * the ioctl for an rpmb write operation:
+ * 1. Switch partition
+ * 2. Set block count
+ * 3. Write data frame - CMD25 to write the rpmb data frame with data
+ * 4. Set block count
+ * 5. Request result - CMD25 to write the rpmb data frame indicating that
+ * the rpmb result register is about to be read
+ * 6. Set block count
+ * 7. Read rpmb result - CMD18 to read the rpmb result register
+ *
+ * Each of the above commands should be sent individually via struct
+ * mmc_ioc_cmd; fields such as is_acmd that are not needed for rpmb
+ * operations will be ignored.
+ */
+#define MMC_IOC_MAX_RPMB_CMD 3
+struct mmc_ioc_rpmb {
+ struct mmc_ioc_cmd cmds[MMC_IOC_MAX_RPMB_CMD];
+};
+/*
+ * This ioctl is meant for use with rpmb partitions. It is needed since the
+ * access procedure for this particular partition differs from that of
+ * regular or normal partitions.
+ */
+#define MMC_IOC_RPMB_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_rpmb)
+
/*
* Since this ioctl is only meant to enhance (and not replace) normal access
* to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
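
A hedged userspace sketch of an authenticated RPMB read through the new ioctl. The 512-byte frames are assumed to be produced and verified by a secure component; the partition switch and set-block-count bookkeeping are assumed to be handled by the driver, per the sequence described in the comment above. mmc_ioc_cmd_set_data() is the existing macro from this header.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/mmc/ioctl.h>

    static int example_rpmb_read(int fd, void *req_frame, void *resp_frame)
    {
        struct mmc_ioc_rpmb rpmb;

        memset(&rpmb, 0, sizeof(rpmb));

        rpmb.cmds[0].opcode = 25;    /* CMD25: send the request frame */
        rpmb.cmds[0].write_flag = 1;
        rpmb.cmds[0].blksz = 512;
        rpmb.cmds[0].blocks = 1;
        mmc_ioc_cmd_set_data(rpmb.cmds[0], req_frame);

        rpmb.cmds[1].opcode = 18;    /* CMD18: read the response frame */
        rpmb.cmds[1].blksz = 512;
        rpmb.cmds[1].blocks = 1;
        mmc_ioc_cmd_set_data(rpmb.cmds[1], resp_frame);

        return ioctl(fd, MMC_IOC_RPMB_CMD, &rpmb);
    }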
diff --git a/include/uapi/linux/mmc/mmc.h b/include/uapi/linux/mmc/mmc.h
index f75ae94..8de329b 100644
--- a/include/uapi/linux/mmc/mmc.h
+++ b/include/uapi/linux/mmc/mmc.h
@@ -29,6 +29,7 @@
#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */
#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
+#define MMC_SEND_TUNING_BLOCK_HS400 MMC_SEND_TUNING_BLOCK_HS200
#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
@@ -64,4 +65,9 @@
#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */
#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
+/* class 11 */
+#define MMC_CMDQ_TASK_MGMT 48 /* ac [31:0] task ID R1b */
+#define DISCARD_QUEUE 0x1
+#define DISCARD_TASK 0x2
+
#endif /* UAPI_MMC_MMC_H */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 1e6ccf4..817feba 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -161,6 +161,7 @@
IPA_CLIENT_Q6_DECOMP_PROD,
IPA_CLIENT_Q6_DECOMP2_PROD,
IPA_CLIENT_UC_USB_PROD,
+ IPA_CLIENT_ETHERNET_PROD,
/* Below PROD client type is only for test purpose */
IPA_CLIENT_TEST_PROD,
@@ -200,6 +201,8 @@
IPA_CLIENT_Q6_DECOMP_CONS,
IPA_CLIENT_Q6_DECOMP2_CONS,
IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS,
+ IPA_CLIENT_ETHERNET_CONS,
+
/* Below CONS client type is only for test purpose */
IPA_CLIENT_TEST_CONS,
IPA_CLIENT_TEST1_CONS,
@@ -417,6 +420,7 @@
IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_ODU_ADAPT_PROD,
IPA_RM_RESOURCE_MHI_PROD,
+ IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_PROD_MAX,
IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
@@ -427,6 +431,7 @@
IPA_RM_RESOURCE_APPS_CONS,
IPA_RM_RESOURCE_ODU_ADAPT_CONS,
IPA_RM_RESOURCE_MHI_CONS,
+ IPA_RM_RESOURCE_ETHERNET_CONS,
IPA_RM_RESOURCE_MAX
};
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index c190446..f05155b 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -67,6 +67,8 @@
#define KGSL_CONTEXT_TYPE_RS 4
#define KGSL_CONTEXT_TYPE_UNKNOWN 0x1E
+#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
+
#define KGSL_CONTEXT_INVALID 0xffffffff
/*
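
Hedged userspace sketch for the new context flag, passed at draw-context creation. IOCTL_KGSL_DRAWCTXT_CREATE and struct kgsl_drawctxt_create are pre-existing kgsl UAPI outside this hunk; error handling is trimmed.

    #include <sys/ioctl.h>
    #include <linux/msm_kgsl.h>

    static int example_create_context(int kgsl_fd)
    {
        struct kgsl_drawctxt_create ctx = {
            /* invalidate only this context on a GPU fault */
            .flags = KGSL_CONTEXT_INVALIDATE_ON_FAULT,
        };

        if (ioctl(kgsl_fd, IOCTL_KGSL_DRAWCTXT_CREATE, &ctx) < 0)
            return -1;
        return ctx.drawctxt_id;
    }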
diff --git a/include/uapi/linux/qcedev.h b/include/uapi/linux/qcedev.h
index 6fee15d..fb51c23 100644
--- a/include/uapi/linux/qcedev.h
+++ b/include/uapi/linux/qcedev.h
@@ -20,14 +20,14 @@
#define QCEDEV_AES_KEY_192 24
#define QCEDEV_AES_KEY_256 32
/**
- *qcedev_oper_enum: Operation types
- * @QCEDEV_OPER_ENC: Encrypt
- * @QCEDEV_OPER_DEC: Decrypt
- * @QCEDEV_OPER_ENC_NO_KEY: Encrypt. Do not need key to be specified by
- * user. Key already set by an external processor.
- * @QCEDEV_OPER_DEC_NO_KEY: Decrypt. Do not need the key to be specified by
- * user. Key already set by an external processor.
- */
+*qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC: Encrypt
+* @QCEDEV_OPER_DEC: Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY: Encrypt. Do not need key to be specified by
+* user. Key already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY: Decrypt. Do not need the key to be specified by
+* user. Key already set by an external processor.
+*/
enum qcedev_oper_enum {
QCEDEV_OPER_DEC = 0,
QCEDEV_OPER_ENC = 1,
@@ -37,11 +37,11 @@
};
/**
- *qcedev_oper_enum: Cipher algorithm types
- * @QCEDEV_ALG_DES: DES
- * @QCEDEV_ALG_3DES: 3DES
- * @QCEDEV_ALG_AES: AES
- */
+*qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES: DES
+* @QCEDEV_ALG_3DES: 3DES
+* @QCEDEV_ALG_AES: AES
+*/
enum qcedev_cipher_alg_enum {
QCEDEV_ALG_DES = 0,
QCEDEV_ALG_3DES = 1,
@@ -50,15 +50,15 @@
};
/**
- *qcedev_cipher_mode_enum : AES mode
- * @QCEDEV_AES_MODE_CBC: CBC
- * @QCEDEV_AES_MODE_ECB: ECB
- * @QCEDEV_AES_MODE_CTR: CTR
- * @QCEDEV_AES_MODE_XTS: XTS
- * @QCEDEV_AES_MODE_CCM: CCM
- * @QCEDEV_DES_MODE_CBC: CBC
- * @QCEDEV_DES_MODE_ECB: ECB
- */
+*qcedev_cipher_mode_enum : AES mode
+* @QCEDEV_AES_MODE_CBC: CBC
+* @QCEDEV_AES_MODE_ECB: ECB
+* @QCEDEV_AES_MODE_CTR: CTR
+* @QCEDEV_AES_MODE_XTS: XTS
+* @QCEDEV_AES_MODE_CCM: CCM
+* @QCEDEV_DES_MODE_CBC: CBC
+* @QCEDEV_DES_MODE_ECB: ECB
+*/
enum qcedev_cipher_mode_enum {
QCEDEV_AES_MODE_CBC = 0,
QCEDEV_AES_MODE_ECB = 1,
@@ -71,13 +71,13 @@
};
/**
- *enum qcedev_sha_alg_enum : Secure Hashing Algorithm
- * @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits)
- * @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit)
- * @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits)
- * @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit)
- * @QCEDEV_ALG_AES_CMAC: Configurable MAC size
- */
+*enum qcedev_sha_alg_enum : Secure Hashing Algorithm
+* @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit)
+* @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit)
+* @QCEDEV_ALG_AES_CMAC: Configurable MAC size
+*/
enum qcedev_sha_alg_enum {
QCEDEV_ALG_SHA1 = 0,
QCEDEV_ALG_SHA256 = 1,
@@ -88,12 +88,12 @@
};
/**
- * struct buf_info - Buffer information
- * @offset: Offset from the base address of the buffer
- * (Used when buffer is allocated using PMEM)
- * @vaddr: Virtual buffer address pointer
- * @len: Size of the buffer
- */
+* struct buf_info - Buffer information
+* @offset: Offset from the base address of the buffer
+* (Used when buffer is allocated using PMEM)
+* @vaddr: Virtual buffer address pointer
+* @len: Size of the buffer
+*/
struct buf_info {
union {
uint32_t offset;
@@ -103,26 +103,26 @@
};
/**
- * struct qcedev_vbuf_info - Source and destination Buffer information
- * @src: Array of buf_info for input/source
- * @dst: Array of buf_info for output/destination
- */
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src: Array of buf_info for input/source
+* @dst: Array of buf_info for output/destination
+*/
struct qcedev_vbuf_info {
struct buf_info src[QCEDEV_MAX_BUFFERS];
struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
- * struct qcedev_pmem_info - Stores PMEM buffer information
- * @fd_src: Handle to /dev/adsp_pmem used to allocate
- * memory for input/src buffer
- * @src: Array of buf_info for input/source
- * @fd_dst: Handle to /dev/adsp_pmem used to allocate
- * memory for output/dst buffer
- * @dst: Array of buf_info for output/destination
- * @pmem_src_offset: The offset from input/src buffer
- * (allocated by PMEM)
- */
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src: Handle to /dev/adsp_pmem used to allocate
+* memory for input/src buffer
+* @src: Array of buf_info for input/source
+* @fd_dst: Handle to /dev/adsp_pmem used to allocate
+* memory for output/dst buffer
+* @dst: Array of buf_info for output/destination
+* @pmem_src_offset: The offset from input/src buffer
+* (allocated by PMEM)
+*/
struct qcedev_pmem_info {
int fd_src;
struct buf_info src[QCEDEV_MAX_BUFFERS];
@@ -131,52 +131,52 @@
};
/**
- * struct qcedev_cipher_op_req - Holds the ciphering request information
- * @use_pmem (IN): Flag to indicate if buffer source is PMEM
- * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
- * @pmem (IN): Stores PMEM buffer information.
- * Refer struct qcedev_pmem_info
- * @vbuf (IN/OUT): Stores Source and destination Buffer information
- * Refer to struct qcedev_vbuf_info
- * @data_len (IN): Total Length of input/src and output/dst in bytes
- * @in_place_op (IN): Indicates whether the operation is inplace where
- * source == destination
- * When using PMEM allocated memory, must set this to 1
- * @enckey (IN): 128 bits of confidentiality key
- * enckey[0] bit 127-120, enckey[1] bit 119-112,..
- * enckey[15] bit 7-0
- * @encklen (IN): Length of the encryption key(set to 128 bits/16
- * bytes in the driver)
- * @iv (IN/OUT): Initialisation vector data
- * This is updated by the driver, incremented by
- * number of blocks encrypted/decrypted.
- * @ivlen (IN): Length of the IV
- * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
- * for AES-128 CTR mode only)
- * @alg (IN): Type of ciphering algorithm: AES/DES/3DES
- * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR
- * Apllicabel when using AES algorithm only
- * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
- * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
- *
- *If use_pmem is set to 0, the driver assumes that memory was not allocated
- * via PMEM, and kernel will need to allocate memory and copy data from user
- * space buffer (data_src/dta_dst) and process accordingly and copy data back
- * to the user space buffer
- *
- * If use_pmem is set to 1, the driver assumes that memory was allocated via
- * PMEM.
- * The kernel driver will use the fd_src to determine the kernel virtual address
- * base that maps to the user space virtual address base for the buffer
- * allocated in user space.
- * The final input/src and output/dst buffer pointer will be determined
- * by adding the offsets to the kernel virtual addr.
- *
- * If use of hardware key is supported in the target, user can configure the
- * key parameters (encklen, enckey) to use the hardware key.
- * In order to use the hardware key, set encklen to 0 and set the enckey
- * data array to 0.
- */
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN): Flag to indicate if buffer source is PMEM
+* QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN): Stores PMEM buffer information.
+* Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT): Stores Source and destination Buffer information
+* Refer to struct qcedev_vbuf_info
+* @data_len (IN): Total Length of input/src and output/dst in bytes
+* @in_place_op (IN): Indicates whether the operation is inplace where
+* source == destination
+* When using PMEM allocated memory, must set this to 1
+* @enckey (IN): 128 bits of confidentiality key
+* enckey[0] bit 127-120, enckey[1] bit 119-112,..
+* enckey[15] bit 7-0
+* @encklen (IN): Length of the encryption key (set to 128 bits/16
+* bytes in the driver)
+* @iv (IN/OUT): Initialisation vector data
+* This is updated by the driver, incremented by
+* number of blocks encrypted/decrypted.
+* @ivlen (IN): Length of the IV
+* @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
+* for AES-128 CTR mode only)
+* @alg (IN): Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN): Mode used when using AES algorithm: ECB/CBC/CTR
+* Applicable when using AES algorithm only
+* @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+* QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM, and the kernel will need to allocate memory, copy data from the
+* user space buffer (data_src/data_dst), process it accordingly, and copy
+* the data back to the user space buffer
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of hardware key is supported in the target, user can configure the
+* key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
struct qcedev_cipher_op_req {
uint8_t use_pmem;
union {
@@ -197,16 +197,16 @@
};
/**
- * struct qcedev_sha_op_req - Holds the hashing request information
- * @data (IN): Array of pointers to the data to be hashed
- * @entries (IN): Number of buf_info entries in the data array
- * @data_len (IN): Length of data to be hashed
- * @digest (IN/OUT): Returns the hashed data information
- * @diglen (OUT): Size of the hashed/digest data
- * @authkey (IN): Pointer to authentication key for HMAC
- * @authklen (IN): Size of the authentication key
- * @alg (IN): Secure Hash algorithm
- */
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN): Array of pointers to the data to be hashed
+* @entries (IN): Number of buf_info entries in the data array
+* @data_len (IN): Length of data to be hashed
+* @digest (IN/OUT): Returns the hashed data information
+* @diglen (OUT): Size of the hashed/digest data
+* @authkey (IN): Pointer to authentication key for HMAC
+* @authklen (IN): Size of the authentication key
+* @alg (IN): Secure Hash algorithm
+*/
struct qcedev_sha_op_req {
struct buf_info data[QCEDEV_MAX_BUFFERS];
uint32_t entries;
@@ -219,20 +219,16 @@
};
/**
- * struct qfips_verify_t - Holds data for FIPS Integrity test
- * @kernel_size (IN): Size of kernel Image
- * @kernel (IN): pointer to buffer containing the kernel Image
- */
+* struct qfips_verify_t - Holds data for FIPS Integrity test
+* @kernel_size (IN): Size of kernel Image
+* @kernel (IN): pointer to buffer containing the kernel Image
+*/
struct qfips_verify_t {
unsigned int kernel_size;
void *kernel;
};
struct file;
-/* temporiraly comment out for msm-4.9 headfile upgrade */
-/* extern long qcedev_ioctl(struct file *file,
- * unsigned int cmd, unsigned long arg);
- */
#define QCEDEV_IOC_MAGIC 0x87
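
A hedged fill-in for the cipher request described by the kernel-doc above (AES-128 CBC over a single user-space buffer). Only documented fields plus 'entries' are touched; 'entries' and the ioctl request code itself fall outside this hunk and are assumptions.

    #include <string.h>

    static void example_fill_cipher_req(struct qcedev_cipher_op_req *req,
                        void *src, void *dst, uint32_t len,
                        const uint8_t key[16], const uint8_t iv[16])
    {
        memset(req, 0, sizeof(*req));
        req->use_pmem = 0;        /* kernel copies user buffers */
        req->vbuf.src[0].vaddr = src;
        req->vbuf.src[0].len = len;
        req->vbuf.dst[0].vaddr = dst;
        req->vbuf.dst[0].len = len;
        req->entries = 1;        /* one src/dst pair (assumed field) */
        req->data_len = len;
        req->in_place_op = 0;
        memcpy(req->enckey, key, 16);
        req->encklen = 16;
        memcpy(req->iv, iv, 16);
        req->ivlen = 16;
        req->alg = QCEDEV_ALG_AES;
        req->mode = QCEDEV_AES_MODE_CBC;
        req->op = QCEDEV_OPER_ENC;
    }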
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index b29072e..94e9b00 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -277,10 +277,6 @@
struct file;
-/* temporiraly comment out for msm-4.9 headfile upgrade */
-/* extern long qseecom_ioctl(struct file *file,
- * unsigned int cmd, unsigned long arg);
- */
#define QSEECOM_IOC_MAGIC 0x97
diff --git a/include/uapi/linux/seemp_api.h b/include/uapi/linux/seemp_api.h
new file mode 100644
index 0000000..4dfc257
--- /dev/null
+++ b/include/uapi/linux/seemp_api.h
@@ -0,0 +1,395 @@
+#ifndef _SEEMP_API_H_
+#define _SEEMP_API_H_
+
+#define SEEMP_API_kernel__oom_adjust_write 0
+#define SEEMP_API_kernel__sendto 1
+#define SEEMP_API_kernel__recvfrom 2
+#define SEEMP_API_View__onTouchEvent 3
+#define SEEMP_API_View__onKeyDown 4
+#define SEEMP_API_View__onKeyUp 5
+#define SEEMP_API_View__onTrackBallEvent 6
+#define SEEMP_API_android_provider_Settings__get_ANDROID_ID_ 7
+#define SEEMP_API_TelephonyManager__getDeviceId 8
+#define SEEMP_API_TelephonyManager__getLine1Number 9
+#define SEEMP_API_Telephony__query 10
+#define SEEMP_API_CallerInfo__getCallerId 11
+#define SEEMP_API_CallerInfo__getCallerInfo 12
+#define SEEMP_API_ContentResolver__query 13
+#define SEEMP_API_AccountManagerService__getPassword 14
+#define SEEMP_API_AccountManagerService__getUserData 15
+#define SEEMP_API_AccountManagerService__addAccount 16
+#define SEEMP_API_AccountManagerService__removeAccount 17
+#define SEEMP_API_AccountManagerService__setPassword 18
+#define SEEMP_API_AccountManagerService__clearPassword 19
+#define SEEMP_API_AccountManagerService__setUserData 20
+#define SEEMP_API_AccountManagerService__editProperties 21
+#define SEEMP_API_AccountManager__getPassword 22
+#define SEEMP_API_AccountManager__getUserData 23
+#define SEEMP_API_AccountManager__addAccountExplicitly 24
+#define SEEMP_API_AccountManager__removeAccount 25
+#define SEEMP_API_AccountManager__setPassword 26
+#define SEEMP_API_AccountManager__clearPassword 27
+#define SEEMP_API_AccountManager__setUserData 28
+#define SEEMP_API_AccountManager__addAccount 29
+#define SEEMP_API_AccountManager__editProperties 30
+#define SEEMP_API_AccountManager__doWork 31
+#define SEEMP_API_Browser__getAllBookmarks 32
+#define SEEMP_API_Browser__getAllVisitedUrls 33
+#define SEEMP_API_Browser__getVisitedLike 34
+#define SEEMP_API_Browser__getVisitedHistory 35
+#define SEEMP_API_Browser__requestAllIcons 36
+#define SEEMP_API_ContentResolver__insert 37
+#define SEEMP_API_CalendarContract__insert 38
+#define SEEMP_API_CalendarContract__alarmExists 39
+#define SEEMP_API_CalendarContract__findNextAlarmTime 40
+#define SEEMP_API_CalendarContract__query 41
+#define SEEMP_API_LocationManager___requestLocationUpdates 42
+#define SEEMP_API_LocationManager__addGpsStatusListener 43
+#define SEEMP_API_LocationManager__addNmeaListener 44
+#define SEEMP_API_LocationManager__addProximityAlert 45
+#define SEEMP_API_LocationManager__getLastKnownLocation 46
+#define SEEMP_API_LocationManager__requestLocationUpdates 47
+#define SEEMP_API_LocationManager__sendExtraCommand 48
+#define SEEMP_API_TelephonyManager__getCellLocation 49
+#define SEEMP_API_TelephonyManager__getNeighboringCellInfo 50
+#define SEEMP_API_GeolocationService__registerForLocationUpdates 51
+#define SEEMP_API_GeolocationService__setEnableGps 52
+#define SEEMP_API_GeolocationService__start 53
+#define SEEMP_API_WebChromeClient__onGeolocationPermissionsShowPrompt 54
+#define SEEMP_API_WifiManager__getScanResults 55
+#define SEEMP_API_adB__enable 56
+#define SEEMP_API_adB__disable 57
+#define SEEMP_API_adB__startDiscovery 58
+#define SEEMP_API_adB__listenUsingInsecureRfcommWithServiceRecord 59
+#define SEEMP_API_adB__listenUsingSecureRfcommWithServiceRecord 60
+#define SEEMP_API_adB__getBondedDevices 61
+#define SEEMP_API_adB__getRemoteDevice 62
+#define SEEMP_API_adB__getState 63
+#define SEEMP_API_adB__getProfileConnectionState 64
+#define SEEMP_API_Camera__takePicture 65
+#define SEEMP_API_Camera__setPreviewCallback 66
+#define SEEMP_API_Camera__setPreviewCallbackWithBuffer 67
+#define SEEMP_API_Camera__setOneShotPreviewCallback 68
+#define SEEMP_API_android_media_MediaRecorder__start 69
+#define SEEMP_API_AudioRecord__startRecording 70
+#define SEEMP_API_AudioRecord__start 71
+#define SEEMP_API_SpeechRecognizer__startListening 72
+#define SEEMP_API_at_SmsManager__sendDataMessage 73
+#define SEEMP_API_at_SmsManager__sendMultipartTextMessage 74
+#define SEEMP_API_at_SmsManager__sendTextMessage 75
+#define SEEMP_API_at_gsm_SmsManager__sendDataMessage 76
+#define SEEMP_API_at_gsm_SmsManager__sendMultipartTextMessage 77
+#define SEEMP_API_at_gsm_SmsManager__sendTextMessage 78
+#define SEEMP_API_at_SmsManager__copyMessageToIcc 79
+#define SEEMP_API_at_SmsManager__deleteMessageFromIcc 80
+#define SEEMP_API_at_SmsManager__updateMessageOnIcc 81
+#define SEEMP_API_at_gsm_SmsManager__copyMessageToSim 82
+#define SEEMP_API_at_gsm_SmsManager__deleteMessageFromSim 83
+#define SEEMP_API_at_gsm_SmsManager__updateMessageOnSim 84
+#define SEEMP_API_at_gsm_SmsManager__getAllMessagesFromSim 85
+#define SEEMP_API_ContactsContract__getLookupUri 86
+#define SEEMP_API_ContactsContract__lookupContact 87
+#define SEEMP_API_ContactsContract__openContactPhotoInputStream 88
+#define SEEMP_API_ContactsContract__getContactLookupUri 89
+#define SEEMP_API_PackageManagerService__installPackage 90
+#define SEEMP_API_URL__openConnection 91
+#define SEEMP_API_URI__URI 92
+#define SEEMP_API_HttpGet__HttpGet 93
+#define SEEMP_API_HttpPut__HttpPut 94
+#define SEEMP_API_HttpPost__HttpPost 95
+#define SEEMP_API_apS__get_ACCELEROMETER_ROTATION_ 96
+#define SEEMP_API_apS__get_USER_ROTATION_ 97
+#define SEEMP_API_apS__get_ADB_ENABLED_ 98
+#define SEEMP_API_apS__get_DEBUG_APP_ 99
+#define SEEMP_API_apS__get_WAIT_FOR_DEBUGGER_ 100
+#define SEEMP_API_apS__get_AIRPLANE_MODE_ON_ 101
+#define SEEMP_API_apS__get_AIRPLANE_MODE_RADIOS_ 102
+#define SEEMP_API_apS__get_ALARM_ALERT_ 103
+#define SEEMP_API_apS__get_NEXT_ALARM_FORMATTED_ 104
+#define SEEMP_API_apS__get_ALWAYS_FINISH_ACTIVITIES_ 105
+#define SEEMP_API_apS__get_LOGGING_ID_ 106
+#define SEEMP_API_apS__get_ANIMATOR_DURATION_SCALE_ 107
+#define SEEMP_API_apS__get_WINDOW_ANIMATION_SCALE_ 108
+#define SEEMP_API_apS__get_FONT_SCALE_ 109
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_ 110
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_MODE_ 111
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_MODE_AUTOMATIC_ 112
+#define SEEMP_API_apS__get_SCREEN_BRIGHTNESS_MODE_MANUAL_ 113
+#define SEEMP_API_apS__get_SCREEN_OFF_TIMEOUT_ 114
+#define SEEMP_API_apS__get_DIM_SCREEN_ 115
+#define SEEMP_API_apS__get_TRANSITION_ANIMATION_SCALE_ 116
+#define SEEMP_API_apS__get_STAY_ON_WHILE_PLUGGED_IN_ 117
+#define SEEMP_API_apS__get_WALLPAPER_ACTIVITY_ 118
+#define SEEMP_API_apS__get_SHOW_PROCESSES_ 119
+#define SEEMP_API_apS__get_SHOW_WEB_SUGGESTIONS_ 120
+#define SEEMP_API_apS__get_SHOW_GTALK_SERVICE_STATUS_ 121
+#define SEEMP_API_apS__get_USE_GOOGLE_MAIL_ 122
+#define SEEMP_API_apS__get_AUTO_TIME_ 123
+#define SEEMP_API_apS__get_AUTO_TIME_ZONE_ 124
+#define SEEMP_API_apS__get_DATE_FORMAT_ 125
+#define SEEMP_API_apS__get_TIME_12_24_ 126
+#define SEEMP_API_apS__get_BLUETOOTH_DISCOVERABILITY_ 127
+#define SEEMP_API_apS__get_BLUETOOTH_DISCOVERABILITY_TIMEOUT_ 128
+#define SEEMP_API_apS__get_BLUETOOTH_ON_ 129
+#define SEEMP_API_apS__get_DEVICE_PROVISIONED_ 130
+#define SEEMP_API_apS__get_SETUP_WIZARD_HAS_RUN_ 131
+#define SEEMP_API_apS__get_DTMF_TONE_WHEN_DIALING_ 132
+#define SEEMP_API_apS__get_END_BUTTON_BEHAVIOR_ 133
+#define SEEMP_API_apS__get_RINGTONE_ 134
+#define SEEMP_API_apS__get_MODE_RINGER_ 135
+#define SEEMP_API_apS__get_INSTALL_NON_MARKET_APPS_ 136
+#define SEEMP_API_apS__get_LOCATION_PROVIDERS_ALLOWED_ 137
+#define SEEMP_API_apS__get_LOCK_PATTERN_ENABLED_ 138
+#define SEEMP_API_apS__get_LOCK_PATTERN_TACTILE_FEEDBACK_ENABLED_ 139
+#define SEEMP_API_apS__get_LOCK_PATTERN_VISIBLE_ 140
+#define SEEMP_API_apS__get_NETWORK_PREFERENCE_ 141
+#define SEEMP_API_apS__get_DATA_ROAMING_ 142
+#define SEEMP_API_apS__get_HTTP_PROXY_ 143
+#define SEEMP_API_apS__get_PARENTAL_CONTROL_ENABLED_ 144
+#define SEEMP_API_apS__get_PARENTAL_CONTROL_LAST_UPDATE_ 145
+#define SEEMP_API_apS__get_PARENTAL_CONTROL_REDIRECT_URL_ 146
+#define SEEMP_API_apS__get_RADIO_BLUETOOTH_ 147
+#define SEEMP_API_apS__get_RADIO_CELL_ 148
+#define SEEMP_API_apS__get_RADIO_NFC_ 149
+#define SEEMP_API_apS__get_RADIO_WIFI_ 150
+#define SEEMP_API_apS__get_SYS_PROP_SETTING_VERSION_ 151
+#define SEEMP_API_apS__get_SETTINGS_CLASSNAME_ 152
+#define SEEMP_API_apS__get_TEXT_AUTO_CAPS_ 153
+#define SEEMP_API_apS__get_TEXT_AUTO_PUNCTUATE_ 154
+#define SEEMP_API_apS__get_TEXT_AUTO_REPLACE_ 155
+#define SEEMP_API_apS__get_TEXT_SHOW_PASSWORD_ 156
+#define SEEMP_API_apS__get_USB_MASS_STORAGE_ENABLED_ 157
+#define SEEMP_API_apS__get_VIBRATE_ON_ 158
+#define SEEMP_API_apS__get_HAPTIC_FEEDBACK_ENABLED_ 159
+#define SEEMP_API_apS__get_VOLUME_ALARM_ 160
+#define SEEMP_API_apS__get_VOLUME_BLUETOOTH_SCO_ 161
+#define SEEMP_API_apS__get_VOLUME_MUSIC_ 162
+#define SEEMP_API_apS__get_VOLUME_NOTIFICATION_ 163
+#define SEEMP_API_apS__get_VOLUME_RING_ 164
+#define SEEMP_API_apS__get_VOLUME_SYSTEM_ 165
+#define SEEMP_API_apS__get_VOLUME_VOICE_ 166
+#define SEEMP_API_apS__get_SOUND_EFFECTS_ENABLED_ 167
+#define SEEMP_API_apS__get_MODE_RINGER_STREAMS_AFFECTED_ 168
+#define SEEMP_API_apS__get_MUTE_STREAMS_AFFECTED_ 169
+#define SEEMP_API_apS__get_NOTIFICATION_SOUND_ 170
+#define SEEMP_API_apS__get_APPEND_FOR_LAST_AUDIBLE_ 171
+#define SEEMP_API_apS__get_WIFI_MAX_DHCP_RETRY_COUNT_ 172
+#define SEEMP_API_apS__get_WIFI_MOBILE_DATA_TRANSITION_WAKELOCK_TIMEOUT_MS_ 173
+#define SEEMP_API_apS__get_WIFI_NETWORKS_AVAILABLE_NOTIFICATION_ON_ 174
+#define SEEMP_API_apS__get_WIFI_NETWORKS_AVAILABLE_REPEAT_DELAY_ 175
+#define SEEMP_API_apS__get_WIFI_NUM_OPEN_NETWORKS_KEPT_ 176
+#define SEEMP_API_apS__get_WIFI_ON_ 177
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_ 178
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_DEFAULT_ 179
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_NEVER_ 180
+#define SEEMP_API_apS__get_WIFI_SLEEP_POLICY_NEVER_WHILE_PLUGGED_ 181
+#define SEEMP_API_apS__get_WIFI_STATIC_DNS1_ 182
+#define SEEMP_API_apS__get_WIFI_STATIC_DNS2_ 183
+#define SEEMP_API_apS__get_WIFI_STATIC_GATEWAY_ 184
+#define SEEMP_API_apS__get_WIFI_STATIC_IP_ 185
+#define SEEMP_API_apS__get_WIFI_STATIC_NETMASK_ 186
+#define SEEMP_API_apS__get_WIFI_USE_STATIC_IP_ 187
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_ACCEPTABLE_PACKET_LOSS_PERCENTAGE_ 188
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_AP_COUNT_ 189
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_BACKGROUND_CHECK_DELAY_MS_ 190
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_BACKGROUND_CHECK_ENABLED_ 191
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_BACKGROUND_CHECK_TIMEOUT_MS_ 192
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_INITIAL_IGNORED_PING_COUNT_ 193
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_MAX_AP_CHECKS_ 194
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_ON_ 195
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_PING_COUNT_ 196
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_PING_DELAY_MS_ 197
+#define SEEMP_API_apS__get_WIFI_WATCHDOG_PING_TIMEOUT_MS_ 198
+#define SEEMP_API_apS__put_ACCELEROMETER_ROTATION_ 199
+#define SEEMP_API_apS__put_USER_ROTATION_ 200
+#define SEEMP_API_apS__put_ADB_ENABLED_ 201
+#define SEEMP_API_apS__put_DEBUG_APP_ 202
+#define SEEMP_API_apS__put_WAIT_FOR_DEBUGGER_ 203
+#define SEEMP_API_apS__put_AIRPLANE_MODE_ON_ 204
+#define SEEMP_API_apS__put_AIRPLANE_MODE_RADIOS_ 205
+#define SEEMP_API_apS__put_ALARM_ALERT_ 206
+#define SEEMP_API_apS__put_NEXT_ALARM_FORMATTED_ 207
+#define SEEMP_API_apS__put_ALWAYS_FINISH_ACTIVITIES_ 208
+#define SEEMP_API_apS__put_ANDROID_ID_ 209
+#define SEEMP_API_apS__put_LOGGING_ID_ 210
+#define SEEMP_API_apS__put_ANIMATOR_DURATION_SCALE_ 211
+#define SEEMP_API_apS__put_WINDOW_ANIMATION_SCALE_ 212
+#define SEEMP_API_apS__put_FONT_SCALE_ 213
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_ 214
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_MODE_ 215
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_MODE_AUTOMATIC_ 216
+#define SEEMP_API_apS__put_SCREEN_BRIGHTNESS_MODE_MANUAL_ 217
+#define SEEMP_API_apS__put_SCREEN_OFF_TIMEOUT_ 218
+#define SEEMP_API_apS__put_DIM_SCREEN_ 219
+#define SEEMP_API_apS__put_TRANSITION_ANIMATION_SCALE_ 220
+#define SEEMP_API_apS__put_STAY_ON_WHILE_PLUGGED_IN_ 221
+#define SEEMP_API_apS__put_WALLPAPER_ACTIVITY_ 222
+#define SEEMP_API_apS__put_SHOW_PROCESSES_ 223
+#define SEEMP_API_apS__put_SHOW_WEB_SUGGESTIONS_ 224
+#define SEEMP_API_apS__put_SHOW_GTALK_SERVICE_STATUS_ 225
+#define SEEMP_API_apS__put_USE_GOOGLE_MAIL_ 226
+#define SEEMP_API_apS__put_AUTO_TIME_ 227
+#define SEEMP_API_apS__put_AUTO_TIME_ZONE_ 228
+#define SEEMP_API_apS__put_DATE_FORMAT_ 229
+#define SEEMP_API_apS__put_TIME_12_24_ 230
+#define SEEMP_API_apS__put_BLUETOOTH_DISCOVERABILITY_ 231
+#define SEEMP_API_apS__put_BLUETOOTH_DISCOVERABILITY_TIMEOUT_ 232
+#define SEEMP_API_apS__put_BLUETOOTH_ON_ 233
+#define SEEMP_API_apS__put_DEVICE_PROVISIONED_ 234
+#define SEEMP_API_apS__put_SETUP_WIZARD_HAS_RUN_ 235
+#define SEEMP_API_apS__put_DTMF_TONE_WHEN_DIALING_ 236
+#define SEEMP_API_apS__put_END_BUTTON_BEHAVIOR_ 237
+#define SEEMP_API_apS__put_RINGTONE_ 238
+#define SEEMP_API_apS__put_MODE_RINGER_ 239
+#define SEEMP_API_apS__put_INSTALL_NON_MARKET_APPS_ 240
+#define SEEMP_API_apS__put_LOCATION_PROVIDERS_ALLOWED_ 241
+#define SEEMP_API_apS__put_LOCK_PATTERN_ENABLED_ 242
+#define SEEMP_API_apS__put_LOCK_PATTERN_TACTILE_FEEDBACK_ENABLED_ 243
+#define SEEMP_API_apS__put_LOCK_PATTERN_VISIBLE_ 244
+#define SEEMP_API_apS__put_NETWORK_PREFERENCE_ 245
+#define SEEMP_API_apS__put_DATA_ROAMING_ 246
+#define SEEMP_API_apS__put_HTTP_PROXY_ 247
+#define SEEMP_API_apS__put_PARENTAL_CONTROL_ENABLED_ 248
+#define SEEMP_API_apS__put_PARENTAL_CONTROL_LAST_UPDATE_ 249
+#define SEEMP_API_apS__put_PARENTAL_CONTROL_REDIRECT_URL_ 250
+#define SEEMP_API_apS__put_RADIO_BLUETOOTH_ 251
+#define SEEMP_API_apS__put_RADIO_CELL_ 252
+#define SEEMP_API_apS__put_RADIO_NFC_ 253
+#define SEEMP_API_apS__put_RADIO_WIFI_ 254
+#define SEEMP_API_apS__put_SYS_PROP_SETTING_VERSION_ 255
+#define SEEMP_API_apS__put_SETTINGS_CLASSNAME_ 256
+#define SEEMP_API_apS__put_TEXT_AUTO_CAPS_ 257
+#define SEEMP_API_apS__put_TEXT_AUTO_PUNCTUATE_ 258
+#define SEEMP_API_apS__put_TEXT_AUTO_REPLACE_ 259
+#define SEEMP_API_apS__put_TEXT_SHOW_PASSWORD_ 260
+#define SEEMP_API_apS__put_USB_MASS_STORAGE_ENABLED_ 261
+#define SEEMP_API_apS__put_VIBRATE_ON_ 262
+#define SEEMP_API_apS__put_HAPTIC_FEEDBACK_ENABLED_ 263
+#define SEEMP_API_apS__put_VOLUME_ALARM_ 264
+#define SEEMP_API_apS__put_VOLUME_BLUETOOTH_SCO_ 265
+#define SEEMP_API_apS__put_VOLUME_MUSIC_ 266
+#define SEEMP_API_apS__put_VOLUME_NOTIFICATION_ 267
+#define SEEMP_API_apS__put_VOLUME_RING_ 268
+#define SEEMP_API_apS__put_VOLUME_SYSTEM_ 269
+#define SEEMP_API_apS__put_VOLUME_VOICE_ 270
+#define SEEMP_API_apS__put_SOUND_EFFECTS_ENABLED_ 271
+#define SEEMP_API_apS__put_MODE_RINGER_STREAMS_AFFECTED_ 272
+#define SEEMP_API_apS__put_MUTE_STREAMS_AFFECTED_ 273
+#define SEEMP_API_apS__put_NOTIFICATION_SOUND_ 274
+#define SEEMP_API_apS__put_APPEND_FOR_LAST_AUDIBLE_ 275
+#define SEEMP_API_apS__put_WIFI_MAX_DHCP_RETRY_COUNT_ 276
+#define SEEMP_API_apS__put_WIFI_MOBILE_DATA_TRANSITION_WAKELOCK_TIMEOUT_MS_ 277
+#define SEEMP_API_apS__put_WIFI_NETWORKS_AVAILABLE_NOTIFICATION_ON_ 278
+#define SEEMP_API_apS__put_WIFI_NETWORKS_AVAILABLE_REPEAT_DELAY_ 279
+#define SEEMP_API_apS__put_WIFI_NUM_OPEN_NETWORKS_KEPT_ 280
+#define SEEMP_API_apS__put_WIFI_ON_ 281
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_ 282
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_DEFAULT_ 283
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_NEVER_ 284
+#define SEEMP_API_apS__put_WIFI_SLEEP_POLICY_NEVER_WHILE_PLUGGED_ 285
+#define SEEMP_API_apS__put_WIFI_STATIC_DNS1_ 286
+#define SEEMP_API_apS__put_WIFI_STATIC_DNS2_ 287
+#define SEEMP_API_apS__put_WIFI_STATIC_GATEWAY_ 288
+#define SEEMP_API_apS__put_WIFI_STATIC_IP_ 289
+#define SEEMP_API_apS__put_WIFI_STATIC_NETMASK_ 290
+#define SEEMP_API_apS__put_WIFI_USE_STATIC_IP_ 291
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_ACCEPTABLE_PACKET_LOSS_PERCENTAGE_ 292
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_AP_COUNT_ 293
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_BACKGROUND_CHECK_DELAY_MS_ 294
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_BACKGROUND_CHECK_ENABLED_ 295
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_BACKGROUND_CHECK_TIMEOUT_MS_ 296
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_INITIAL_IGNORED_PING_COUNT_ 297
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_MAX_AP_CHECKS_ 298
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_ON_ 299
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_PING_COUNT_ 300
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_PING_DELAY_MS_ 301
+#define SEEMP_API_apS__put_WIFI_WATCHDOG_PING_TIMEOUT_MS_ 302
+#define SEEMP_API_Poll__setCumulativeWifiRxMBytes 303
+#define SEEMP_API_Poll__setInstantaneousWifiRxMBytes 304
+#define SEEMP_API_Poll__setCumulativeWifiRxPackets 305
+#define SEEMP_API_Poll__setInstantaneousWifiRxPackets 306
+#define SEEMP_API_Poll__setCumulativeWifiTxMBytes 307
+#define SEEMP_API_Poll__setInstantaneousWifiTxMBytes 308
+#define SEEMP_API_Poll__setCumulativeWifiTxPackets 309
+#define SEEMP_API_Poll__setInstantaneousWifiTxPackets 310
+#define SEEMP_API_Poll__setCumulativeWifiRxTcpMBytes 311
+#define SEEMP_API_Poll__setInstantaneousWifiRxTcpMBytes 312
+#define SEEMP_API_Poll__setCumulativeWifiRxTcpPackets 313
+#define SEEMP_API_Poll__setInstantaneousWifiRxTcpPackets 314
+#define SEEMP_API_Poll__setCumulativeWifiRxUdpMBytes 315
+#define SEEMP_API_Poll__setInstantaneousWifiRxUdpMBytes 316
+#define SEEMP_API_Poll__setCumulativeWifiRxUdpPackets 317
+#define SEEMP_API_Poll__setInstantaneousWifiRxUdpPackets 318
+#define SEEMP_API_Poll__setCumulativeWifiRxOtherMBytes 319
+#define SEEMP_API_Poll__setInstantaneousWifiRxOtherMBytes 320
+#define SEEMP_API_Poll__setCumulativeWifiRxOtherPackets 321
+#define SEEMP_API_Poll__setInstantaneousWifiRxOtherPackets 322
+#define SEEMP_API_Poll__setCumulativeWifiTxTcpMBytes 323
+#define SEEMP_API_Poll__setInstantaneousWifiTxTcpMBytes 324
+#define SEEMP_API_Poll__setCumulativeWifiTxTcpPackets 325
+#define SEEMP_API_Poll__setInstantaneousWifiTxTcpPackets 326
+#define SEEMP_API_Poll__setCumulativeWifiTxUdpMBytes 327
+#define SEEMP_API_Poll__setInstantaneousWifiTxUdpMBytes 328
+#define SEEMP_API_Poll__setCumulativeWifiTxUdpPackets 329
+#define SEEMP_API_Poll__setInstantaneousWifiTxUdpPackets 330
+#define SEEMP_API_Poll__setCumulativeWifiTxOtherMBytes 331
+#define SEEMP_API_Poll__setInstantaneousWifiTxOtherMBytes 332
+#define SEEMP_API_Poll__setCumulativeWifiTxOtherPackets 333
+#define SEEMP_API_Poll__setInstantaneousWifiTxOtherPackets 334
+#define SEEMP_API_Poll__setCumulativeMobileRxMBytes 335
+#define SEEMP_API_Poll__setInstantaneousMobileRxMBytes 336
+#define SEEMP_API_Poll__setCumulativeMobileRxPackets 337
+#define SEEMP_API_Poll__setInstantaneousMobileRxPackets 338
+#define SEEMP_API_Poll__setCumulativeMobileTxMBytes 339
+#define SEEMP_API_Poll__setInstantaneousMobileTxMBytes 340
+#define SEEMP_API_Poll__setCumulativeMobileTxPackets 341
+#define SEEMP_API_Poll__setInstantaneousMobileTxPackets 342
+#define SEEMP_API_Poll__setCumulativeMobileRxTcpMBytes 343
+#define SEEMP_API_Poll__setInstantaneousMobileRxTcpMBytes 344
+#define SEEMP_API_Poll__setCumulativeMobileRxTcpPackets 345
+#define SEEMP_API_Poll__setInstantaneousMobileRxTcpPackets 346
+#define SEEMP_API_Poll__setCumulativeMobileRxUdpMBytes 347
+#define SEEMP_API_Poll__setInstantaneousMobileRxUdpMBytes 348
+#define SEEMP_API_Poll__setCumulativeMobileRxUdpPackets 349
+#define SEEMP_API_Poll__setInstantaneousMobileRxUdpPackets 350
+#define SEEMP_API_Poll__setCumulativeMobileRxOtherMBytes 351
+#define SEEMP_API_Poll__setInstantaneousMobileRxOtherMBytes 352
+#define SEEMP_API_Poll__setCumulativeMobileRxOtherPackets 353
+#define SEEMP_API_Poll__setInstantaneousMobileRxOtherPackets 354
+#define SEEMP_API_Poll__setCumulativeMobileTxTcpMBytes 355
+#define SEEMP_API_Poll__setInstantaneousMobileTxTcpMBytes 356
+#define SEEMP_API_Poll__setCumulativeMobileTxTcpPackets 357
+#define SEEMP_API_Poll__setInstantaneousMobileTxTcpPackets 358
+#define SEEMP_API_Poll__setCumulativeMobileTxUdpMBytes 359
+#define SEEMP_API_Poll__setInstantaneousMobileTxUdpMBytes 360
+#define SEEMP_API_Poll__setCumulativeMobileTxUdpPackets 361
+#define SEEMP_API_Poll__setInstantaneousMobileTxUdpPackets 362
+#define SEEMP_API_Poll__setCumulativeMobileTxOtherMBytes 363
+#define SEEMP_API_Poll__setInstantaneousMobileTxOtherMBytes 364
+#define SEEMP_API_Poll__setCumulativeMobileTxOtherPackets 365
+#define SEEMP_API_Poll__setInstantaneousMobileTxOtherPackets 366
+#define SEEMP_API_Poll__setNumSockets 367
+#define SEEMP_API_Poll__setNumTcpStateListen 368
+#define SEEMP_API_Poll__setNumTcpStateEstablished 369
+#define SEEMP_API_Poll__setNumLocalIp 370
+#define SEEMP_API_Poll__setNumLocalPort 371
+#define SEEMP_API_Poll__setNumRemoteIp 372
+#define SEEMP_API_Poll__setNumRemotePort 373
+#define SEEMP_API_Poll__setNumRemoteTuple 374
+#define SEEMP_API_Poll__setNumInode 375
+#define SEEMP_API_Instrumentation__startActivitySync 376
+#define SEEMP_API_Instrumentation__execStartActivity 377
+#define SEEMP_API_Instrumentation__execStartActivitiesAsUser 378
+#define SEEMP_API_Instrumentation__execStartActivityAsCaller 379
+#define SEEMP_API_Instrumentation__execStartActivityFromAppTask 380
+#define SEEMP_API_ah_SystemSensorManager__registerListenerImpl 381
+#define SEEMP_API_ah_SystemSensorManager__unregisterListenerImpl 382
+#define SEEMP_API_WindowManagerImpl__addView 383
+#define SEEMP_API_WindowManagerImpl__updateViewLayout 384
+#define SEEMP_API_ActivityManagerService__applyOomAdjLocked 385
+#define SEEMP_API_ProcessRecord__makeActive 386
+#define SEEMP_API_ProcessRecord__makeInactive 387
+#define SEEMP_API_TelephonyManager__getSimSerialNumber 388
+#define SEEMP_API_TelephonyManager__getSubscriberId 389
+
+#endif /* _SEEMP_API_H_*/
diff --git a/include/uapi/linux/seemp_param_id.h b/include/uapi/linux/seemp_param_id.h
new file mode 100644
index 0000000..c72c579
--- /dev/null
+++ b/include/uapi/linux/seemp_param_id.h
@@ -0,0 +1,93 @@
+#ifndef _PARAM_ID_H_
+#define _PARAM_ID_H_
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define PARAM_ID_LEN 0
+#define PARAM_ID_OOM_ADJ 1
+#define PARAM_ID_APP_UID 2
+#define PARAM_ID_APP_PID 3
+#define PARAM_ID_VALUE 4
+#define PARAM_ID_SIZE 5
+#define PARAM_ID_FD 6
+#define PARAM_ID_RATE 7
+#define PARAM_ID_SENSOR 8
+#define PARAM_ID_WINDOW_TYPE 9
+#define PARAM_ID_WINDOW_FLAG 10
+#define NUM_PARAM_IDS 11
+
+static inline int param_id_index(const char *param, const char *end)
+{
+ int id = -1;
+ int len = ((end != NULL) ? (end - param) : (int)strlen(param));
+
+ if ((len == 3) && !memcmp(param, "len", 3))
+ id = PARAM_ID_LEN;
+ else if ((len == 7) && !memcmp(param, "oom_adj", 7))
+ id = PARAM_ID_OOM_ADJ;
+ else if ((len == 7) && !memcmp(param, "app_uid", 7))
+ id = PARAM_ID_APP_UID;
+ else if ((len == 7) && !memcmp(param, "app_pid", 7))
+ id = PARAM_ID_APP_PID;
+ else if ((len == 5) && !memcmp(param, "value", 5))
+ id = PARAM_ID_VALUE;
+ else if ((len == 4) && !memcmp(param, "size", 4))
+ id = PARAM_ID_SIZE;
+ else if ((len == 2) && !memcmp(param, "fd", 2))
+ id = PARAM_ID_FD;
+ else if ((len == 4) && !memcmp(param, "rate", 4))
+ id = PARAM_ID_RATE;
+ else if ((len == 6) && !memcmp(param, "sensor", 6))
+ id = PARAM_ID_SENSOR;
+ else if ((len == 11) && !memcmp(param, "window_type", 11))
+ id = PARAM_ID_WINDOW_TYPE;
+ else if ((len == 11) && !memcmp(param, "window_flag", 11))
+ id = PARAM_ID_WINDOW_FLAG;
+
+ return id;
+}
+
+static inline const char *get_param_id_name(int id)
+{
+ const char *name = "?";
+
+ switch (id) {
+ case PARAM_ID_LEN:
+ name = "len";
+ break;
+ case PARAM_ID_OOM_ADJ:
+ name = "oom_adj";
+ break;
+ case PARAM_ID_APP_UID:
+ name = "app_uid";
+ break;
+ case PARAM_ID_APP_PID:
+ name = "app_pid";
+ break;
+ case PARAM_ID_VALUE:
+ name = "value";
+ break;
+ case PARAM_ID_SIZE:
+ name = "size";
+ break;
+ case PARAM_ID_FD:
+ name = "fd";
+ break;
+ case PARAM_ID_RATE:
+ name = "rate";
+ break;
+ case PARAM_ID_SENSOR:
+ name = "sensor";
+ break;
+ case PARAM_ID_WINDOW_TYPE:
+ name = "window_type";
+ break;
+ case PARAM_ID_WINDOW_FLAG:
+ name = "window_flag";
+ break;
+ }
+ return name;
+}
+
+#endif /* _PARAM_ID_H_ */
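The two helpers above are inverses over the valid id range; a small usage sketch, kernel context assumed:

    static void example_param_roundtrip(void)
    {
        int id = param_id_index("window_type", NULL);

        /* id == PARAM_ID_WINDOW_TYPE; prints "9 -> window_type" */
        pr_info("%d -> %s\n", id, get_param_id_name(id));
    }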
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a62870e..cf96ac1 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1061,6 +1061,8 @@
(V4L2_CID_MPEG_MSM_VIDC_BASE + 106)
#define V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX \
(V4L2_CID_MPEG_MSM_VIDC_BASE + 107)
+#define V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 108)
enum v4l2_mpeg_vidc_video_venc_iframesize_type {
V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 86cb858..4f1f9e9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1890,7 +1890,7 @@
#define V4L2_DEC_CMD_RESUME (3)
#define V4L2_QCOM_CMD_FLUSH (4)
#define V4L2_DEC_QCOM_CMD_RECONFIG_HINT (5)
-
+#define V4L2_QCOM_CMD_SESSION_CONTINUE (6)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
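
Hedged userspace sketch: like the other QCOM decoder commands, the new code would be issued through the standard VIDIOC_DECODER_CMD plumbing; whether a given driver honors it is target-specific and not shown in this hunk.

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int example_session_continue(int v4l2_fd)
    {
        struct v4l2_decoder_cmd dec = {
            .cmd = V4L2_QCOM_CMD_SESSION_CONTINUE,
        };

        return ioctl(v4l2_fd, VIDIOC_DECODER_CMD, &dec);
    }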
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 5f375c4..478f7fe 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -1,3 +1,4 @@
+header-y += cam_cpas.h
header-y += cam_defs.h
header-y += cam_isp.h
header-y += cam_isp_vfe.h
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
new file mode 100644
index 0000000..300bd87
--- /dev/null
+++ b/include/uapi/media/cam_cpas.h
@@ -0,0 +1,23 @@
+#ifndef __UAPI_CAM_CPAS_H__
+#define __UAPI_CAM_CPAS_H__
+
+#include "cam_defs.h"
+
+#define CAM_FAMILY_CAMERA_SS 1
+#define CAM_FAMILY_CPAS_SS 2
+
+/**
+ * struct cam_cpas_query_cap - CPAS query device capability payload
+ *
+ * @camera_family : Camera family type
+ * @reserved : Reserved field for alignment
+ * @camera_version : Camera version
+ *
+ */
+struct cam_cpas_query_cap {
+ uint32_t camera_family;
+ uint32_t reserved;
+ struct cam_hw_version camera_version;
+};
+
+#endif /* __UAPI_CAM_CPAS_H__ */
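
A hedged sketch of a CPAS capability query wrapped in the generic cam_control envelope from cam_defs.h. The CAM_QUERY_CAP opcode and the ioctl that submits cam_control are assumptions about the surrounding camera UAPI, not shown in this patch.

    #include <stdint.h>
    #include <media/cam_cpas.h>

    static void example_prepare_query(struct cam_control *ctrl,
                      struct cam_cpas_query_cap *cap)
    {
        ctrl->op_code = CAM_QUERY_CAP;        /* assumed opcode */
        ctrl->size = sizeof(*cap);
        ctrl->handle_type = CAM_HANDLE_USER_POINTER;
        ctrl->reserved = 0;
        ctrl->handle = (uint64_t)(uintptr_t)cap;
    }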
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index 12afef9..cf56211 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -21,12 +21,13 @@
#define CAM_HANDLE_MEM_HANDLE 2
/**
- * struct cam_control - struct used by ioctl control for camera
+ * struct cam_control - Structure used by ioctl control for camera
+ *
* @op_code: This is the op code for camera control
- * @size: control command size
- * @handle_type: user pointer or shared memory handle
- * @reserved: reserved field for 64 bit alignment
- * @handle: control command payload
+ * @size: Control command size
+ * @handle_type: User pointer or shared memory handle
+ * @reserved: Reserved field for 64 bit alignment
+ * @handle: Control command payload
*/
struct cam_control {
uint32_t op_code;
@@ -46,7 +47,7 @@
* @major : Hardware version major
* @minor : Hardware version minor
* @incr : Hardware version increment
- * @reserved : reserved for 64 bit aligngment
+ * @reserved : Reserved for 64 bit alignment
*/
struct cam_hw_version {
uint32_t major;
@@ -141,6 +142,7 @@
/* camera buffer direction */
#define CAM_BUF_INPUT 1
#define CAM_BUF_OUTPUT 2
+#define CAM_BUF_IN_OUT 3
/* camera packet device Type */
#define CAM_PACKET_DEV_BASE 0
@@ -168,12 +170,20 @@
#define CAM_PACKET_MAX_PLANES 3
/**
- * struct cam_plane_cfg - plane configuration info
+ * struct cam_plane_cfg - Plane configuration info
*
- * @width: plane width in pixels
- * @height: plane height in lines
- * @plane_stride: plane stride in pixel
- * @slice_height: slice height in line (not used by ISP)
+ * @width: Plane width in pixels
+ * @height: Plane height in lines
+ * @plane_stride: Plane stride in pixel
+ * @slice_height: Slice height in line (not used by ISP)
+ * @meta_stride: UBWC metadata stride
+ * @meta_size: UBWC metadata plane size
+ * @meta_offset: UBWC metadata offset
+ * @packer_config: UBWC packer config
+ * @mode_config: UBWC mode config
+ * @tile_config: UBWC tile config
+ * @h_init: UBWC horizontal initial coordinate in pixels
+ * @v_init: UBWC vertical initial coordinate in lines
*
*/
struct cam_plane_cfg {
@@ -181,60 +191,26 @@
uint32_t height;
uint32_t plane_stride;
uint32_t slice_height;
-};
-
-/**
- * struct cam_buf_io_cfg - Buffer io configuration for buffers
- *
- * @mem_handle: mem_handle array for the buffers.
- * @offsets: offsets for each planes in the buffer
- * @planes: per plane information
- * @width: main plane width in pixel
- * @height: main plane height in lines
- * @format: format of the buffer
- * @color_space: color space for the buffer
- * @color_pattern: color pattern in the buffer
- * @bpp: bit per pixel
- * @rotation: rotation information for the buffer
- * @resource_type: resource type associated with the buffer
- * @fence: fence handle
- * @cmd_buf_index: command buffer index to patch the buffer info
- * @cmd_buf_offset: offset within the command buffer to patch
- * @flag: flags for extra information
- * @direction: buffer direction: input or output
- * @padding: padding for the structure
- *
- */
-struct cam_buf_io_cfg {
- int32_t mem_handle[CAM_PACKET_MAX_PLANES];
- uint32_t offsets[CAM_PACKET_MAX_PLANES];
- struct cam_plane_cfg planes[CAM_PACKET_MAX_PLANES];
- uint32_t width;
- uint32_t height;
- uint32_t format;
- uint32_t color_space;
- uint32_t color_pattern;
- uint32_t bpp;
- uint32_t rotation;
- uint32_t resource_type;
- int32_t fence;
- uint32_t cmd_buf_index;
- uint32_t cmd_buf_offset;
- uint32_t flag;
- uint32_t direction;
- uint32_t padding;
+ uint32_t meta_stride;
+ uint32_t meta_size;
+ uint32_t meta_offset;
+ uint32_t packer_config;
+ uint32_t mode_config;
+ uint32_t tile_config;
+ uint32_t h_init;
+ uint32_t v_init;
};
/**
* struct cam_cmd_buf_desc - Command buffer descriptor
*
- * @mem_handle: command buffer handle
- * @offset: command start offset
- * @size: size of the command buffer in bytes
- * @length: used memory in command buffer in bytes
- * @type: type of the command buffer
- * @meta_data: data type for private command buffer
- * between UMD and KMD
+ * @mem_handle: Command buffer handle
+ * @offset: Command start offset
+ * @size: Size of the command buffer in bytes
+ * @length: Used memory in command buffer in bytes
+ * @type: Type of the command buffer
+ * @meta_data: Data type for private command buffer
+ * between UMD and KMD
*
*/
struct cam_cmd_buf_desc {
@@ -247,13 +223,69 @@
};
/**
- * struct cam_packet_header - camera packet header
+ * struct cam_buf_io_cfg - Buffer io configuration for buffers
*
- * @op_code: camera packet opcode
- * @size: size of the camera packet in bytes
- * @request_id: request id for this camera packet
- * @flags: flags for the camera packet
- * @dev_type: camera packet device type
+ * @mem_handle: Memory handle array for the buffers.
+ * @offsets: Offsets for each plane in the buffer
+ * @planes: Per plane information
+ * @format: Format of the buffer
+ * @color_space: Color space for the buffer
+ * @color_pattern: Color pattern in the buffer
+ * @bpp: Bits per pixel
+ * @rotation: Rotation information for the buffer
+ * @resource_type: Resource type associated with the buffer
+ * @fence: Fence handle
+ * @early_fence: Fence handle for early signal
+ * @aux_cmd_buf: An auxiliary command buffer that may be
+ * used for programming the IO
+ * @direction: Buffer direction: input or output
+ * @batch_size: Batch size in HFR mode
+ * @subsample_pattern: Subsample pattern. Used in HFR mode. It
+ * should be consistent with batch_size and
+ * CAMIF programming.
+ * @subsample_period: Subsample period. Used in HFR mode. It
+ * should be consistent with batch_size and
+ * CAMIF programming.
+ * @framedrop_pattern: Framedrop pattern
+ * @framedrop_period: Framedrop period
+ * @flag: Flags for extra information
+ * @padding: Padding for the structure
+ *
+ */
+struct cam_buf_io_cfg {
+ int32_t mem_handle[CAM_PACKET_MAX_PLANES];
+ uint32_t offsets[CAM_PACKET_MAX_PLANES];
+ struct cam_plane_cfg planes[CAM_PACKET_MAX_PLANES];
+ uint32_t format;
+ uint32_t color_space;
+ uint32_t color_pattern;
+ uint32_t bpp;
+ uint32_t rotation;
+ uint32_t resource_type;
+ int32_t fence;
+ int32_t early_fence;
+ struct cam_cmd_buf_desc aux_cmd_buf;
+ uint32_t direction;
+ uint32_t batch_size;
+ uint32_t subsample_pattern;
+ uint32_t subsample_period;
+ uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
+ uint32_t flag;
+ uint32_t padding;
+};
+
+/**
+ * struct cam_packet_header - Camera packet header
+ *
+ * @op_code: Camera packet opcode
+ * @size: Size of the camera packet in bytes
+ * @request_id: Request id for this camera packet
+ * @flags: Flags for the camera packet
+ * @padding: Padding
*
*/
struct cam_packet_header {
@@ -261,16 +293,16 @@
uint32_t size;
uint64_t request_id;
uint32_t flags;
- uint32_t dev_type;
+ uint32_t padding;
};
/**
* struct cam_patch_desc - Patch structure
*
- * @dst_buf_hdl: memory handle for the dest buffer
- * @dst_offset: offset byte in the dest buffer
- * @src_buf_hdl: memory handle for the source buffer
- * @src_offset: offset byte in the source buffer
+ * @dst_buf_hdl: Memory handle for the dest buffer
+ * @dst_offset: Offset byte in the dest buffer
+ * @src_buf_hdl: Memory handle for the source buffer
+ * @src_offset: Offset byte in the source buffer
*
*/
struct cam_patch_desc {
@@ -281,20 +313,20 @@
};
/**
- * struct cam_packet - cam packet structure
+ * struct cam_packet - Camera packet structure
*
- * @header: camera packet header
- * @cmd_buf_offset: command buffer start offset
- * @num_cmd_buf: number of the command buffer in the packet
- * @io_config_offset: buffer io configuration start offset
- * @num_io_configs: number of the buffer io configurations
- * @patch_offset: patch offset for the patch structure
- * @num_patches: number of the patch structure
- * @kmd_cmd_buf_index: command buffer index which contains extra
- * space for the KMD buffer
- * @kmd_cmd_buf_offset: offset from the beginning of the command
- * buffer for KMD usage.
- * @payload: camera packet payload
+ * @header: Camera packet header
+ * @cmd_buf_offset: Command buffer start offset
+ * @num_cmd_buf: Number of the command buffer in the packet
+ * @io_config_offset: Buffer io configuration start offset
+ * @num_io_configs: Number of the buffer io configurations
+ * @patch_offset: Patch offset for the patch structure
+ * @num_patches: Number of the patch structure
+ * @kmd_cmd_buf_index: Command buffer index which contains extra
+ * space for the KMD buffer
+ * @kmd_cmd_buf_offset: Offset from the beginning of the command
+ * buffer for KMD usage.
+ * @payload: Camera packet payload
*
*/
struct cam_packet {
@@ -313,10 +345,10 @@
/* Release Device */
/**
- * struct cam_release_dev_cmd - control payload for release devices
+ * struct cam_release_dev_cmd - Control payload for release devices
*
- * @session_handle: session handle for the release
- * @dev_handle: device handle for the release
+ * @session_handle: Session handle for the release
+ * @dev_handle: Device handle for the release
*/
struct cam_release_dev_cmd {
int32_t session_handle;
@@ -325,10 +357,10 @@
/* Start/Stop device */
/**
- * struct cam_start_stop_dev_cmd - control payload for start/stop device
+ * struct cam_start_stop_dev_cmd - Control payload for start/stop device
*
- * @session_handle: session handle for the start/stop command
- * @dev_handle: device handle for the start/stop command
+ * @session_handle: Session handle for the start/stop command
+ * @dev_handle: Device handle for the start/stop command
*
*/
struct cam_start_stop_dev_cmd {
@@ -338,13 +370,13 @@
/* Configure Device */
/**
- * struct cam_config_dev_cmd - command payload for configure device
+ * struct cam_config_dev_cmd - Command payload for configure device
*
- * @session_handle: session handle for the command
- * @dev_handle: device handle for the command
- * @offset: offset byte in the packet handle.
- * @packet_handle: packet memory handle for the actual packet:
- * struct cam_packet.
+ * @session_handle: Session handle for the command
+ * @dev_handle: Device handle for the command
+ * @offset: Offset byte in the packet handle.
+ * @packet_handle: Packet memory handle for the actual packet:
+ * struct cam_packet.
*
*/
struct cam_config_dev_cmd {
@@ -356,11 +388,11 @@
/* Query Device Caps */
/**
- * struct cam_query_cap_cmd - payload for query device capability
+ * struct cam_query_cap_cmd - Payload for query device capability
*
- * @size: handle size
- * @handle_type: user pointer or shared memory handle
- * @caps_handle: device specific query command payload
+ * @size: Handle size
+ * @handle_type: User pointer or shared memory handle
+ * @caps_handle: Device specific query command payload
*
*/
struct cam_query_cap_cmd {
@@ -371,16 +403,16 @@
/* Acquire Device */
/**
- * struct cam_acquire_dev_cmd - control payload for acquire devices
+ * struct cam_acquire_dev_cmd - Control payload for acquire devices
*
- * @session_handle: session handle for the acquire command
- * @dev_handle: device handle to be returned
- * @handle_type: resource handle type:
- * 1 = user poniter, 2 = mem handle
- * @num_resources: number of the resources to be acquired
- * @resources_hdl: resource handle that refers to the actual
- * resource array. Each item in this
- * array is device specific resource structure
+ * @session_handle: Session handle for the acquire command
+ * @dev_handle: Device handle to be returned
+ * @handle_type: Resource handle type:
+ * 1 = user pointer, 2 = mem handle
+ * @num_resources: Number of the resources to be acquired
+ * @resources_hdl: Resource handle that refers to the actual
+ * resource array. Each item in this
+ * array is device specific resource structure
*
*/
struct cam_acquire_dev_cmd {
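
A hedged userspace sketch of how these UAPI structures compose (illustrative
only: CAM_QUERY_CAP_OPCODE, the ioctl request VIDIOC_CAM_CONTROL and the caps
struct are assumed names, not defined in this patch):

	/* Query device capabilities: wrap a cam_query_cap_cmd in cam_control. */
	struct my_dev_caps caps;                     /* assumed device-specific type */
	struct cam_query_cap_cmd qcap = {
		.size        = sizeof(caps),
		.handle_type = 1,                    /* 1 = user pointer */
		.caps_handle = (uint64_t)(uintptr_t)&caps,
	};
	struct cam_control ctrl = {
		.op_code     = CAM_QUERY_CAP_OPCODE, /* assumed opcode name */
		.size        = sizeof(qcap),
		.handle_type = 1,                    /* user pointer payload */
		.handle      = (uint64_t)(uintptr_t)&qcap,
	};
	ioctl(fd, VIDIOC_CAM_CONTROL, &ctrl);        /* assumed request number */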
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 3e2b24c..b736755 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -20,6 +20,8 @@
#define CAM_CPAS_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 7)
#define CAM_CSIPHY_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 8)
#define CAM_ACTUATOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 9)
+#define CAM_CCI_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 10)
+#define CAM_FLASH_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 11)
/* cam_req_mgr hdl info */
#define CAM_REQ_MGR_HDL_IDX_POS 8
@@ -188,9 +190,6 @@
#define CAM_REQ_MGR_MAP_BUF (CAM_COMMON_OPCODE_MAX + 10)
#define CAM_REQ_MGR_RELEASE_BUF (CAM_COMMON_OPCODE_MAX + 11)
#define CAM_REQ_MGR_CACHE_OPS (CAM_COMMON_OPCODE_MAX + 12)
-#define CAM_REQ_MGR_GET_MMU_HDLS_DEBUG (CAM_COMMON_OPCODE_MAX + 13)
-#define CAM_REQ_MGR_GET_IO_BUF_DEBUG (CAM_COMMON_OPCODE_MAX + 14)
-#define CAM_REQ_MGR_GET_KMD_BUF_DEBUG (CAM_COMMON_OPCODE_MAX + 15)
/* end of cam_req_mgr opcodes */
#define CAM_MEM_FLAG_HW_READ_WRITE (1<<0)
@@ -204,6 +203,7 @@
#define CAM_MEM_FLAG_STATS_BUF_TYPE (1<<8)
#define CAM_MEM_FLAG_PACKET_BUF_TYPE (1<<9)
#define CAM_MEM_FLAG_CACHE (1<<10)
+#define CAM_MEM_FLAG_HW_SHARED_ACCESS (1<<11)
#define CAM_MEM_MMU_MAX_HANDLE 16
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
index 883c9be..be87b1e 100644
--- a/include/uapi/media/msm_media_info.h
+++ b/include/uapi/media/msm_media_info.h
@@ -1075,7 +1075,7 @@
alignment = 128;
break;
case COLOR_FMT_RGB565_UBWC:
- alignment = 128;
+ alignment = 256;
bpp = 2;
break;
case COLOR_FMT_RGBA8888_UBWC:
@@ -1279,6 +1279,8 @@
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
4096);
rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index fad00e0..9a0da84 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -2,6 +2,8 @@
header-y += fc/
header-y += ufs/
header-y += scsi_bsg_fc.h
+header-y += sg.h
+header-y += scsi_ioctl.h
header-y += scsi_netlink.h
header-y += scsi_netlink_fc.h
header-y += cxlflash_ioctl.h
diff --git a/include/scsi/scsi_ioctl.h b/include/uapi/scsi/scsi_ioctl.h
similarity index 66%
rename from include/scsi/scsi_ioctl.h
rename to include/uapi/scsi/scsi_ioctl.h
index 8d19d1d..d9ce5cc 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/uapi/scsi/scsi_ioctl.h
@@ -1,6 +1,8 @@
#ifndef _SCSI_IOCTL_H
#define _SCSI_IOCTL_H
+#include <linux/types.h>
+
#define SCSI_IOCTL_SEND_COMMAND 1
#define SCSI_IOCTL_TEST_UNIT_READY 2
#define SCSI_IOCTL_BENCHMARK_COMMAND 3
@@ -15,9 +17,25 @@
#define SCSI_REMOVAL_PREVENT 1
#define SCSI_REMOVAL_ALLOW 0
-#ifdef __KERNEL__
+/*
+ * Here are some scsi specific ioctl commands which are sometimes useful.
+ *
+ * Note that include/linux/cdrom.h also defines IOCTL 0x5300 - 0x5395
+ */
-struct scsi_device;
+/* Used to obtain PUN and LUN info. Conflicts with CDROMAUDIOBUFSIZ */
+#define SCSI_IOCTL_GET_IDLUN 0x5382
+
+/* 0x5383 and 0x5384 were used for SCSI_IOCTL_TAGGED_{ENABLE,DISABLE} */
+
+/* Used to obtain the host number of a device. */
+#define SCSI_IOCTL_PROBE_HOST 0x5385
+
+/* Used to obtain the bus number for a device */
+#define SCSI_IOCTL_GET_BUS_NUMBER 0x5386
+
+/* Used to obtain the PCI location of a device */
+#define SCSI_IOCTL_GET_PCI 0x5387
/*
* Structures used for scsi_ioctl et al.
@@ -40,9 +58,11 @@
unsigned char host_wwn[8]; // include NULL term.
} Scsi_FCTargAddress;
+#ifdef __KERNEL__
+struct scsi_device;
+
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/sg.h b/include/uapi/scsi/sg.h
similarity index 99%
rename from include/scsi/sg.h
rename to include/uapi/scsi/sg.h
index 3afec70..08d3beb 100644
--- a/include/scsi/sg.h
+++ b/include/uapi/scsi/sg.h
@@ -2,6 +2,7 @@
#define _SCSI_GENERIC_H
#include <linux/compiler.h>
+#include <linux/param.h>
/*
* History:
@@ -209,7 +210,6 @@
/* Alternate style type names, "..._t" variants preferred */
typedef struct sg_io_hdr Sg_io_hdr;
-typedef struct sg_io_vec Sg_io_vec;
typedef struct sg_scsi_id Sg_scsi_id;
typedef struct sg_req_info Sg_req_info;
diff --git a/init/main.c b/init/main.c
index c91ca2c..aca8f3e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -867,7 +867,6 @@
do_ctors();
usermodehelper_enable();
do_initcalls();
- random_int_secret_init();
}
static void __init do_pre_smp_initcalls(void)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 85d1c94..7c9f94c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1829,14 +1829,15 @@
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
- regs[i].range = dst_reg->off;
+ /* keep the maximum range already checked */
+ regs[i].range = max(regs[i].range, dst_reg->off);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
reg = &state->spilled_regs[i / BPF_REG_SIZE];
if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
- reg->range = dst_reg->off;
+ reg->range = max(reg->range, dst_reg->off);
}
}
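
A minimal sketch of the access pattern this max() preserves (assumed program
shape, written as C-like pseudo-BPF; the fix keeps the widest range already
proven instead of letting a narrower re-check shrink it):

	if (data + 8 <= data_end) {             /* range proven up to 8 bytes */
		if (data + 4 <= data_end)       /* narrower re-check must not */
			;                       /* shrink the proven range... */
		val = *(u8 *)(data + 6);        /* ...so this load stays safe */
	}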
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index eadd942..6670008 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2877,7 +2877,7 @@
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid) &&
- !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
+ !ns_capable(tcred->user_ns, CAP_SYS_NICE))
ret = -EACCES;
if (!ret && cgroup_on_dfl(dst_cgrp)) {
@@ -2941,11 +2941,12 @@
tsk = tsk->group_leader;
/*
- * Workqueue threads may acquire PF_NO_SETAFFINITY and become
- * trapped in a cpuset, or RT worker may be born in a cgroup
- * with no rt_runtime allocated. Just say no.
+ * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+ * If userland migrates such a kthread to a non-root cgroup, it can
+ * become trapped in a cpuset, or RT kthread may be born in a
+ * cgroup with no rt_runtime allocated. Just say no.
*/
- if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+ if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out_unlock_rcu;
}
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 4732628..30e0107 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -1,11 +1,14 @@
# KEEP ALPHABETICALLY SORTED
# CONFIG_DEVKMEM is not set
# CONFIG_DEVMEM is not set
+# CONFIG_FHANDLE is not set
# CONFIG_INET_LRO is not set
# CONFIG_MODULES is not set
# CONFIG_OABI_COMPAT is not set
# CONFIG_SYSVIPC is not set
+# CONFIG_USELIB is not set
CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ARMV8_DEPRECATED=y
@@ -23,7 +26,10 @@
CONFIG_FB=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
CONFIG_INET6_AH=y
+CONFIG_INET6_DIAG_DESTROY=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_INET=y
@@ -60,6 +66,9 @@
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
CONFIG_NET=y
CONFIG_NETDEVICES=y
CONFIG_NETFILTER=y
@@ -137,9 +146,9 @@
CONFIG_PROFILING=y
CONFIG_QFMT_V2=y
CONFIG_QUOTA=y
+CONFIG_QUOTACTL=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QUOTA_TREE=y
-CONFIG_QUOTACTL=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_RTC_CLASS=y
CONFIG_RT_GROUP_SCHED=y
@@ -153,16 +162,16 @@
CONFIG_SWP_EMULATION=y
CONFIG_SYNC=y
CONFIG_TUN=y
-CONFIG_UID_CPUTIME=y
+CONFIG_UID_SYS_STATS=y
CONFIG_UNIX=y
-CONFIG_USB_GADGET=y
CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_GADGET=y
CONFIG_USB_OTG_WAKELOCK=y
CONFIG_XFRM_USER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index abec2ca..36ec6c1 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -14,6 +14,7 @@
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_COMPACTION=y
+CONFIG_CPU_SW_DOMAIN_PAN=y
CONFIG_DEBUG_RODATA=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7c23144..1d203e1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1060,9 +1060,37 @@
return ret;
}
+static int switch_to_rt_policy(void)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ unsigned int policy = current->policy;
+ int err;
+
+ /* Nobody should be attempting hotplug from these policy contexts. */
+ if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+ policy == SCHED_DEADLINE)
+ return -EPERM;
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return 1;
+
+ /* Only SCHED_NORMAL left. */
+ err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ return err;
+}
+
+static int switch_to_fair_policy(void)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
int err = 0;
+ int switch_err = 0;
if (!cpu_possible(cpu)) {
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -1073,6 +1101,10 @@
return -EINVAL;
}
+ switch_err = switch_to_rt_policy();
+ if (switch_err < 0)
+ return switch_err;
+
err = try_online_node(cpu_to_node(cpu));
if (err)
return err;
@@ -1087,6 +1119,14 @@
err = _cpu_up(cpu, 0, target);
out:
cpu_maps_update_done();
+
+ if (!switch_err) {
+ switch_err = switch_to_fair_policy();
+ if (switch_err)
+ pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+ switch_err, current->comm, current->pid);
+ }
+
return err;
}
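
The RT boost around the hotplug path is a deliberate design choice: a
SCHED_NORMAL caller could be preempted mid-hotplug and stall the operation.
Caller-side effect, sketched (cpu number arbitrary):

	/* A normal-priority task calling cpu_up() now runs the hotplug
	 * sequence as SCHED_FIFO and is switched back to fair afterwards. */
	ret = cpu_up(2);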
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 87b9cd9..41f376d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -372,6 +372,7 @@
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
+static DEFINE_PER_CPU(bool, is_idle);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -3605,23 +3606,31 @@
static int perf_event_read(struct perf_event *event, bool group)
{
int event_cpu, ret = 0;
+ bool active_event_skip_read = false;
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
+ event_cpu = READ_ONCE(event->oncpu);
+
+ if (event->state == PERF_EVENT_STATE_ACTIVE) {
+ if ((unsigned int)event_cpu >= nr_cpu_ids)
+ return 0;
+ if (cpu_isolated(event_cpu) ||
+ (event->attr.exclude_idle &&
+ per_cpu(is_idle, event_cpu)))
+ active_event_skip_read = true;
+ }
+
if (event->state == PERF_EVENT_STATE_ACTIVE &&
- !cpu_isolated(event->oncpu)) {
+ !active_event_skip_read) {
struct perf_read_data data = {
.event = event,
.group = group,
.ret = 0,
};
- event_cpu = READ_ONCE(event->oncpu);
- if ((unsigned)event_cpu >= nr_cpu_ids)
- return 0;
-
preempt_disable();
event_cpu = __perf_event_read_cpu(event, event_cpu);
@@ -3635,10 +3644,12 @@
* Therefore, either way, we'll have an up-to-date event count
* after this.
*/
- (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+ (void)smp_call_function_single(event_cpu,
+ __perf_event_read, &data, 1);
preempt_enable();
ret = data.ret;
- } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ } else if (event->state == PERF_EVENT_STATE_INACTIVE ||
+ active_event_skip_read) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
@@ -3731,7 +3742,8 @@
if (!task) {
/* Must be root to operate on a CPU event: */
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ if (!is_kernel_event(event) && perf_paranoid_cpu() &&
+ !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/*
@@ -10849,6 +10861,26 @@
.priority = INT_MIN,
};
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action) {
+ case IDLE_START:
+ __this_cpu_write(is_idle, true);
+ break;
+ case IDLE_END:
+ __this_cpu_write(is_idle, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+ .notifier_call = event_idle_notif,
+};
+
void __init perf_event_init(void)
{
int ret;
@@ -10862,6 +10894,7 @@
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_event_init_cpu(smp_processor_id());
+ idle_notifier_register(&perf_event_idle_nb);
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();
@@ -10916,6 +10949,7 @@
}
device_initcall(perf_event_sysfs_init);
+#ifdef CONFIG_HOTPLUG_CPU
static int perf_cpu_hp_init(void)
{
int ret;
@@ -10930,6 +10964,7 @@
return ret;
}
subsys_initcall(perf_cpu_hp_init);
+#endif
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
diff --git a/kernel/kthread.c b/kernel/kthread.c
index be2cc1f..c2c911a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -18,6 +18,7 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/cgroup.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
@@ -205,6 +206,7 @@
ret = -EINTR;
if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
+ cgroup_kthread_ready();
__kthread_parkme(&self);
ret = threadfn(data);
}
@@ -530,6 +532,7 @@
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
+ cgroup_init_kthreadd();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/padata.c b/kernel/padata.c
index 7848f05..b4a3c0a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -190,19 +190,20 @@
reorder = &next_queue->reorder;
+ spin_lock(&reorder->lock);
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
- spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- spin_unlock(&reorder->lock);
pd->processed++;
+ spin_unlock(&reorder->lock);
goto out;
}
+ spin_unlock(&reorder->lock);
if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 49ba7c1..a5caece 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -181,11 +181,17 @@
WARN_ON(!task->ptrace || task->parent != current);
+ /*
+ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+ * Recheck state under the lock to close this race.
+ */
spin_lock_irq(&task->sighand->siglock);
- if (__fatal_signal_pending(task))
- wake_up_state(task, __TASK_TRACED);
- else
- task->state = TASK_TRACED;
+ if (task->state == __TASK_TRACED) {
+ if (__fatal_signal_pending(task))
+ wake_up_state(task, __TASK_TRACED);
+ else
+ task->state = TASK_TRACED;
+ }
spin_unlock_irq(&task->sighand->siglock);
}
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 1dde338..1040a43 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -23,7 +23,7 @@
#include <trace/events/sched.h>
-#define MAX_CPUS_PER_CLUSTER 4
+#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 2
struct cluster_data {
@@ -35,6 +35,7 @@
unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
unsigned int active_cpus;
unsigned int num_cpus;
+ unsigned int nr_isolated_cpus;
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
@@ -166,7 +167,9 @@
unsigned int val[MAX_CPUS_PER_CLUSTER];
int ret, i;
- ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ ret = sscanf(buf, "%u %u %u %u %u %u\n",
+ &val[0], &val[1], &val[2], &val[3],
+ &val[4], &val[5]);
if (ret != 1 && ret != state->num_cpus)
return -EINVAL;
@@ -199,7 +202,9 @@
unsigned int val[MAX_CPUS_PER_CLUSTER];
int ret, i;
- ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ ret = sscanf(buf, "%u %u %u %u %u %u\n",
+ &val[0], &val[1], &val[2], &val[3],
+ &val[4], &val[5]);
if (ret != 1 && ret != state->num_cpus)
return -EINVAL;
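
Accepted write formats after widening to six CPUs per cluster, as a hedged
example (the sysfs path is assumed, not defined in this hunk):

	/* Either one value applied to all CPUs, or one value per CPU: */
	/*   echo 60 > /sys/devices/system/cpu/cpu0/core_ctl/busy_up_thres  */
	/*   echo "60 60 60 60 70 70" > .../cpu0/core_ctl/busy_up_thres     */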
@@ -260,6 +265,7 @@
ssize_t count = 0;
unsigned int cpu;
+ spin_lock_irq(&state_lock);
for_each_possible_cpu(cpu) {
c = &per_cpu(cpu_state, cpu);
cluster = c->cluster;
@@ -293,8 +299,12 @@
count += snprintf(buf + count, PAGE_SIZE - count,
"\tNeed CPUs: %u\n", cluster->need_cpus);
count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNr isolated CPUs: %u\n",
+ cluster->nr_isolated_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
"\tBoost: %u\n", (unsigned int) cluster->boost);
}
+ spin_unlock_irq(&state_lock);
return count;
}
@@ -308,7 +318,9 @@
unsigned long flags;
int ret;
- ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
+ ret = sscanf(buf, "%u %u %u %u %u %u\n",
+ &val[0], &val[1], &val[2], &val[3],
+ &val[4], &val[5]);
if (ret != state->num_cpus)
return -EINVAL;
@@ -525,7 +537,7 @@
unsigned int need)
{
return (need < cluster->active_cpus || (need > cluster->active_cpus &&
- sched_isolate_count(&cluster->cpu_mask, false)));
+ cluster->nr_isolated_cpus));
}
static bool eval_need(struct cluster_data *cluster)
@@ -535,9 +547,8 @@
unsigned int need_cpus = 0, last_need, thres_idx;
int ret = 0;
bool need_flag = false;
- unsigned int active_cpus;
unsigned int new_need;
- s64 now;
+ s64 now, elapsed;
if (unlikely(!cluster->inited))
return 0;
@@ -547,8 +558,8 @@
if (cluster->boost) {
need_cpus = cluster->max_cpus;
} else {
- active_cpus = get_active_cpu_count(cluster);
- thres_idx = active_cpus ? active_cpus - 1 : 0;
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
list_for_each_entry(c, &cluster->lru, sib) {
if (c->busy >= cluster->busy_up_thres[thres_idx])
c->is_busy = true;
@@ -564,17 +575,16 @@
last_need = cluster->need_cpus;
now = ktime_to_ms(ktime_get());
- if (new_need == last_need) {
- cluster->need_ts = now;
- spin_unlock_irqrestore(&state_lock, flags);
- return 0;
- }
-
- if (need_cpus > cluster->active_cpus) {
+ if (new_need > cluster->active_cpus) {
ret = 1;
- } else if (need_cpus < cluster->active_cpus) {
- s64 elapsed = now - cluster->need_ts;
+ } else {
+ if (new_need == last_need) {
+ cluster->need_ts = now;
+ spin_unlock_irqrestore(&state_lock, flags);
+ return 0;
+ }
+ elapsed = now - cluster->need_ts;
ret = elapsed >= cluster->offline_delay_ms;
}
@@ -582,7 +592,7 @@
cluster->need_ts = now;
cluster->need_cpus = new_need;
}
- trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
+ trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
ret && need_flag);
spin_unlock_irqrestore(&state_lock, flags);
@@ -718,6 +728,7 @@
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_isolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -742,12 +753,14 @@
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
/*
@@ -757,6 +770,7 @@
if (cluster->active_cpus <= cluster->max_cpus)
return;
+ nr_isolated = 0;
num_cpus = cluster->num_cpus;
spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
@@ -774,12 +788,14 @@
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -790,6 +806,7 @@
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_unisolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -814,12 +831,14 @@
if (!sched_unisolate_cpu(c->cpu)) {
c->isolated_by_us = false;
move_cpu_lru(c);
+ nr_unisolated++;
} else {
pr_debug("Unable to unisolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus -= nr_unisolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -885,10 +904,11 @@
struct cpu_data *state = &per_cpu(cpu_state, cpu);
struct cluster_data *cluster = state->cluster;
unsigned int need;
- int ret = NOTIFY_OK;
+ bool do_wakeup, unisolated = false;
+ unsigned long flags;
if (unlikely(!cluster || !cluster->inited))
- return NOTIFY_OK;
+ return NOTIFY_DONE;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
@@ -911,6 +931,7 @@
if (state->isolated_by_us) {
sched_unisolate_cpu_unlocked(cpu);
state->isolated_by_us = false;
+ unisolated = true;
}
/* Move a CPU to the end of the LRU when it goes offline. */
@@ -919,13 +940,20 @@
state->busy = 0;
cluster->active_cpus = get_active_cpu_count(cluster);
break;
+ default:
+ return NOTIFY_DONE;
}
need = apply_limits(cluster, cluster->need_cpus);
- if (adjustment_possible(cluster, need))
+ spin_lock_irqsave(&state_lock, flags);
+ if (unisolated)
+ cluster->nr_isolated_cpus--;
+ do_wakeup = adjustment_possible(cluster, need);
+ spin_unlock_irqrestore(&state_lock, flags);
+ if (do_wakeup)
wake_up_core_ctl_thread(cluster);
- return ret;
+ return NOTIFY_OK;
}
static struct notifier_block __refdata cpu_notifier = {
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 69e0689..27d96e2 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,11 +12,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
+#include <linux/kthread.h>
#include <linux/slab.h>
#include <trace/events/power.h>
#include "sched.h"
+#define SUGOV_KTHREAD_PRIORITY 50
+
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
@@ -32,11 +35,14 @@
u64 last_freq_update_time;
s64 freq_update_delay_ns;
unsigned int next_freq;
+ unsigned int cached_raw_freq;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
- struct work_struct work;
+ struct kthread_work work;
struct mutex work_lock;
+ struct kthread_worker worker;
+ struct task_struct *thread;
bool work_in_progress;
bool need_freq_update;
@@ -46,7 +52,6 @@
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned int cached_raw_freq;
unsigned long iowait_boost;
unsigned long iowait_boost_max;
u64 last_update;
@@ -110,7 +115,7 @@
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
* @max: CPU capacity.
*
@@ -130,19 +135,18 @@
* next_freq (as calculated above) is returned, subject to policy min/max and
* cpufreq driver limitations.
*/
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
- unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ unsigned long util, unsigned long max)
{
- struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
freq = (freq + (freq >> 2)) * util / max;
- if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
return sg_policy->next_freq;
- sg_cpu->cached_raw_freq = freq;
+ sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq);
}
@@ -207,7 +211,7 @@
} else {
sugov_get_util(&util, &max);
sugov_iowait_boost(sg_cpu, &util, &max);
- next_f = get_next_freq(sg_cpu, util, max);
+ next_f = get_next_freq(sg_policy, util, max);
}
sugov_update_commit(sg_policy, time, next_f);
}
@@ -261,7 +265,7 @@
sugov_iowait_boost(j_sg_cpu, &util, &max);
}
- return get_next_freq(sg_cpu, util, max);
+ return get_next_freq(sg_policy, util, max);
}
static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -291,7 +295,7 @@
raw_spin_unlock(&sg_policy->update_lock);
}
-static void sugov_work(struct work_struct *work)
+static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
@@ -308,7 +312,21 @@
struct sugov_policy *sg_policy;
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
- schedule_work_on(smp_processor_id(), &sg_policy->work);
+
+ /*
+ * For RT and deadline tasks, the schedutil governor shoots the
+ * frequency to maximum. Special care must be taken to ensure that this
+ * kthread doesn't result in the same behavior.
+ *
+ * This is (mostly) guaranteed by the work_in_progress flag. The flag is
+ * updated only at the end of the sugov_work() function and before that
+ * the schedutil governor rejects all other frequency scaling requests.
+ *
+ * There is a very rare case though, where the RT thread yields right
+ * after the work_in_progress flag is cleared. The effects of that are
+ * neglected for now.
+ */
+ kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/
@@ -371,19 +389,64 @@
return NULL;
sg_policy->policy = policy;
- init_irq_work(&sg_policy->irq_work, sugov_irq_work);
- INIT_WORK(&sg_policy->work, sugov_work);
- mutex_init(&sg_policy->work_lock);
raw_spin_lock_init(&sg_policy->update_lock);
return sg_policy;
}
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
- mutex_destroy(&sg_policy->work_lock);
kfree(sg_policy);
}
+static int sugov_kthread_create(struct sugov_policy *sg_policy)
+{
+ struct task_struct *thread;
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+ struct cpufreq_policy *policy = sg_policy->policy;
+ int ret;
+
+ /* kthread only required for slow path */
+ if (policy->fast_switch_enabled)
+ return 0;
+
+ kthread_init_work(&sg_policy->work, sugov_work);
+ kthread_init_worker(&sg_policy->worker);
+ thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
+ "sugov:%d",
+ cpumask_first(policy->related_cpus));
+ if (IS_ERR(thread)) {
+ pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
+ return PTR_ERR(thread);
+ }
+
+ ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+ if (ret) {
+ kthread_stop(thread);
+ pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ return ret;
+ }
+
+ sg_policy->thread = thread;
+ kthread_bind_mask(thread, policy->related_cpus);
+ init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+ mutex_init(&sg_policy->work_lock);
+
+ wake_up_process(thread);
+
+ return 0;
+}
+
+static void sugov_kthread_stop(struct sugov_policy *sg_policy)
+{
+ /* kthread only required for slow path */
+ if (sg_policy->policy->fast_switch_enabled)
+ return;
+
+ kthread_flush_worker(&sg_policy->worker);
+ kthread_stop(sg_policy->thread);
+ mutex_destroy(&sg_policy->work_lock);
+}
+
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
struct sugov_tunables *tunables;
@@ -416,16 +479,24 @@
if (policy->governor_data)
return -EBUSY;
+ cpufreq_enable_fast_switch(policy);
+
sg_policy = sugov_policy_alloc(policy);
- if (!sg_policy)
- return -ENOMEM;
+ if (!sg_policy) {
+ ret = -ENOMEM;
+ goto disable_fast_switch;
+ }
+
+ ret = sugov_kthread_create(sg_policy);
+ if (ret)
+ goto free_sg_policy;
mutex_lock(&global_tunables_lock);
if (global_tunables) {
if (WARN_ON(have_governor_per_policy())) {
ret = -EINVAL;
- goto free_sg_policy;
+ goto stop_kthread;
}
policy->governor_data = sg_policy;
sg_policy->tunables = global_tunables;
@@ -437,7 +508,7 @@
tunables = sugov_tunables_alloc(sg_policy);
if (!tunables) {
ret = -ENOMEM;
- goto free_sg_policy;
+ goto stop_kthread;
}
tunables->rate_limit_us = LATENCY_MULTIPLIER;
@@ -454,20 +525,25 @@
if (ret)
goto fail;
- out:
+out:
mutex_unlock(&global_tunables_lock);
-
- cpufreq_enable_fast_switch(policy);
return 0;
- fail:
+fail:
policy->governor_data = NULL;
sugov_tunables_free(tunables);
- free_sg_policy:
+stop_kthread:
+ sugov_kthread_stop(sg_policy);
+
+free_sg_policy:
mutex_unlock(&global_tunables_lock);
sugov_policy_free(sg_policy);
+
+disable_fast_switch:
+ cpufreq_disable_fast_switch(policy);
+
pr_err("initialization failed (error %d)\n", ret);
return ret;
}
@@ -478,8 +554,6 @@
struct sugov_tunables *tunables = sg_policy->tunables;
unsigned int count;
- cpufreq_disable_fast_switch(policy);
-
mutex_lock(&global_tunables_lock);
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
@@ -489,7 +563,9 @@
mutex_unlock(&global_tunables_lock);
+ sugov_kthread_stop(sg_policy);
sugov_policy_free(sg_policy);
+ cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
@@ -502,25 +578,19 @@
sg_policy->next_freq = UINT_MAX;
sg_policy->work_in_progress = false;
sg_policy->need_freq_update = false;
+ sg_policy->cached_raw_freq = 0;
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+ memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->sg_policy = sg_policy;
- if (policy_is_shared(policy)) {
- sg_cpu->util = 0;
- sg_cpu->max = 0;
- sg_cpu->flags = SCHED_CPUFREQ_RT;
- sg_cpu->last_update = 0;
- sg_cpu->cached_raw_freq = 0;
- sg_cpu->iowait_boost = 0;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_shared);
- } else {
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_single);
- }
+ sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ policy_is_shared(policy) ?
+ sugov_update_shared :
+ sugov_update_single);
}
return 0;
}
@@ -535,8 +605,10 @@
synchronize_sched();
- irq_work_sync(&sg_policy->irq_work);
- cancel_work_sync(&sg_policy->work);
+ if (!policy->fast_switch_enabled) {
+ irq_work_sync(&sg_policy->irq_work);
+ kthread_cancel_work_sync(&sg_policy->work);
+ }
}
static void sugov_limits(struct cpufreq_policy *policy)
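
The slow path above follows the stock kthread_worker pattern; a minimal
self-contained sketch (my_work_fn and the thread name are placeholders):

	static struct kthread_worker worker;
	static struct kthread_work work;

	static void my_work_fn(struct kthread_work *w)
	{
		/* runs in the dedicated kthread and may sleep */
	}

	kthread_init_worker(&worker);
	task = kthread_run(kthread_worker_fn, &worker, "my_worker");
	kthread_init_work(&work, my_work_fn);
	kthread_queue_work(&worker, &work);   /* safe from irq_work context */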
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index b0656b7..05dd2cb 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -56,6 +56,9 @@
int sd_level, i, nstates, cpu;
const __be32 *val;
+ if (!sched_is_energy_aware())
+ return;
+
for_each_possible_cpu(cpu) {
cn = of_get_cpu_node(cpu, NULL);
if (!cn) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2a8643c..6fb615e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5343,6 +5343,15 @@
return sched_feat(ENERGY_AWARE);
}
+/*
+ * Externally visible function. Let's keep the one above
+ * so that the check is inlined/optimized in the sched paths.
+ */
+bool sched_is_energy_aware(void)
+{
+ return energy_aware();
+}
+
struct energy_env {
struct sched_group *sg_top;
struct sched_group *sg_cap;
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 7f686ff..1b4bb23 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -81,7 +81,7 @@
static unsigned int sync_cpu;
static ktime_t ktime_last;
-static bool walt_ktime_suspended;
+static __read_mostly bool walt_ktime_suspended;
static unsigned int task_load(struct task_struct *p)
{
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 4223c4a..b1c7852 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,6 +49,7 @@
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
@@ -1616,20 +1617,41 @@
}
#ifdef CONFIG_HOTPLUG_CPU
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base,
+static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
+ struct hrtimer_cpu_base *new_base,
+ unsigned int i, bool wait,
bool remove_pinned)
{
struct hrtimer *timer;
struct timerqueue_node *node;
struct timerqueue_head pinned;
int is_pinned;
+ struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
+ struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
timerqueue_init_head(&pinned);
- while ((node = timerqueue_getnext(&old_base->active))) {
+ while ((node = timerqueue_getnext(&old_c_base->active))) {
timer = container_of(node, struct hrtimer, node);
- BUG_ON(hrtimer_callback_running(timer));
+ if (wait) {
+ /* Ensure timers are done running before continuing */
+ while (hrtimer_callback_running(timer)) {
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
+ cpu_relax();
+ /*
+ * cpu_relax may just be a barrier. Grant the
+ * run_hrtimer_list code some time to obtain
+ * the spinlock.
+ */
+ udelay(1);
+ raw_spin_lock(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock,
+ SINGLE_DEPTH_NESTING);
+ }
+ } else {
+ BUG_ON(hrtimer_callback_running(timer));
+ }
debug_deactivate(timer);
/*
@@ -1637,7 +1659,7 @@
* timer could be seen as !active and just vanish away
* under us on another CPU
*/
- __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
+ __remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
is_pinned = timer->state & HRTIMER_STATE_PINNED;
if (!remove_pinned && is_pinned) {
@@ -1645,7 +1667,7 @@
continue;
}
- timer->base = new_base;
+ timer->base = new_c_base;
/*
* Enqueue the timers on the new cpu. This does not
* reprogram the event device in case the timer
@@ -1654,7 +1676,7 @@
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_base);
+ enqueue_hrtimer(timer, new_c_base);
}
/* Re-queue pinned timers for non-hotplug usecase */
@@ -1662,11 +1684,12 @@
timer = container_of(node, struct hrtimer, node);
timerqueue_del(&pinned, &timer->node);
- enqueue_hrtimer(timer, old_base);
+ enqueue_hrtimer(timer, old_c_base);
}
}
-static void __migrate_hrtimers(unsigned int scpu, bool remove_pinned)
+static void
+__migrate_hrtimers(unsigned int scpu, bool wait, bool remove_pinned)
{
struct hrtimer_cpu_base *old_base, *new_base;
unsigned long flags;
@@ -1683,8 +1706,8 @@
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i], remove_pinned);
+ migrate_hrtimer_list(old_base, new_base, i, wait,
+ remove_pinned);
}
raw_spin_unlock(&old_base->lock);
@@ -1700,13 +1723,13 @@
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
- __migrate_hrtimers(scpu, true);
+ __migrate_hrtimers(scpu, false, true);
return 0;
}
void hrtimer_quiesce_cpu(void *cpup)
{
- __migrate_hrtimers(*(int *)cpup, false);
+ __migrate_hrtimers(*(int *)cpup, true, false);
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5f98592..9055429 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1165,7 +1165,7 @@
static void update_rq_stats(void)
{
unsigned long jiffy_gap = 0;
- unsigned int rq_avg = 0;
+ unsigned long long rq_avg = 0;
unsigned long flags = 0;
jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 470d966..5463c3b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1856,7 +1856,8 @@
spin_lock_irqsave(&new_base->lock, flags);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
- BUG_ON(old_base->running_timer);
+ if (!cpu_online(cpu))
+ BUG_ON(old_base->running_timer);
for (i = 0; i < WHEEL_SIZE; i++)
migrate_timer_list(new_base, old_base->vectors + i,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index da87b3c..221eb59 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3736,23 +3736,24 @@
ftrace_probe_registered = 1;
}
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
{
int i;
if (!ftrace_probe_registered)
- return;
+ return false;
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
if (hhd->first)
- return;
+ return false;
}
/* no more funcs left */
ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0;
+ return true;
}
@@ -3882,6 +3883,7 @@
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
@@ -3893,6 +3895,7 @@
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int i, ret;
+ bool disabled;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
func_g.search = NULL;
@@ -3911,6 +3914,10 @@
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
if (!hash)
/* Hmm, should report this somehow */
@@ -3948,12 +3955,17 @@
}
}
mutex_lock(&ftrace_lock);
- __disable_ftrace_function_probe();
+ disabled = __disable_ftrace_function_probe();
/*
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*.
*/
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+ /* still need to update the function call sites */
+ if (ftrace_enabled && !disabled)
+ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+ &old_hash_ops);
synchronize_sched();
if (!ret)
free_ftrace_hash_rcu(old_hash);
@@ -5389,6 +5401,15 @@
trace_free_pid_list(pid_list);
}
+void ftrace_clear_pids(struct trace_array *tr)
+{
+ mutex_lock(&ftrace_lock);
+
+ clear_ftrace_pids(tr);
+
+ mutex_unlock(&ftrace_lock);
+}
+
static void ftrace_pid_reset(struct trace_array *tr)
{
mutex_lock(&ftrace_lock);
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index fa7fd14..6d310ab 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -515,8 +515,8 @@
tsv_qtimer_write(&ectxt);
avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
va_start(arg_list, fmt);
- data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size),
- avail_size, fmt, arg_list);
+ data_size = vscnprintf((ectxt.buff + ectxt.offset + hdr_size),
+ avail_size, fmt, arg_list);
va_end(arg_list);
tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
ectxt.offset += data_size;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9c14373..f5c016e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3435,11 +3435,23 @@
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *reader;
+ struct buffer_page *head_page;
+ struct buffer_page *commit_page;
+ unsigned commit;
cpu_buffer = iter->cpu_buffer;
- return iter->head_page == cpu_buffer->commit_page &&
- iter->head == rb_commit_index(cpu_buffer);
+ /* Remember, trace recording is off when iterator is in use */
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+ commit_page = cpu_buffer->commit_page;
+ commit = rb_page_commit(commit_page);
+
+ return ((iter->head_page == commit_page && iter->head == commit) ||
+ (iter->head_page == reader && commit_page == head_page &&
+ head_page->read == commit &&
+ iter->head == rb_page_commit(cpu_buffer->reader_page)));
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
@@ -4870,9 +4882,9 @@
rb_data[cpu].cnt = cpu;
rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
"rbtester/%d", cpu);
- if (WARN_ON(!rb_threads[cpu])) {
+ if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_threads[cpu]);
goto out_free;
}
@@ -4882,9 +4894,9 @@
/* Now create the rb hammer! */
rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
- if (WARN_ON(!rb_hammer)) {
+ if (WARN_ON(IS_ERR(rb_hammer))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_hammer);
goto out_free;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b0c47c2..ebf9498 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6576,11 +6576,13 @@
return ret;
out_reg:
+ ret = alloc_snapshot(&global_trace);
+ if (ret < 0)
+ goto out;
+
ret = register_ftrace_function_probe(glob, ops, count);
- if (ret >= 0)
- alloc_snapshot(&global_trace);
-
+ out:
return ret < 0 ? ret : 0;
}
@@ -7245,6 +7247,7 @@
tracing_set_nop(tr);
event_trace_del_tracer(tr);
+ ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove_recursive(tr->dir);
free_trace_buffers(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 38dbb36..e5d06c9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -871,6 +871,7 @@
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
struct dentry *d_tracer);
+void ftrace_clear_pids(struct trace_array *tr);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
@@ -889,6 +890,7 @@
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
+static inline void ftrace_clear_pids(struct trace_array *tr) { }
/* ftace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index efb0b4d..a75ea63 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -734,6 +734,68 @@
}
EXPORT_SYMBOL(iov_iter_advance);
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ i->count += unroll;
+ if (unlikely(i->type & ITER_PIPE)) {
+ struct pipe_inode_info *pipe = i->pipe;
+ int idx = i->idx;
+ size_t off = i->iov_offset;
+ while (1) {
+ size_t n = off - pipe->bufs[idx].offset;
+ if (unroll < n) {
+ off -= (n - unroll);
+ break;
+ }
+ unroll -= n;
+ if (!unroll && idx == i->start_idx) {
+ off = 0;
+ break;
+ }
+ if (!idx--)
+ idx = pipe->buffers - 1;
+ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ }
+ i->iov_offset = off;
+ i->idx = idx;
+ pipe_truncate(i);
+ return;
+ }
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (i->type & ITER_BVEC) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ } else { /* same logics for iovec and kvec */
+ const struct iovec *iov = i->iov;
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
/*
* Return the count of just the current iov_iter segment.
*/
@@ -787,6 +849,7 @@
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
i->count = count;
+ i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
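
A hedged usage sketch for the new iov_iter_revert() (do_send is a made-up
helper): it walks the iterator back over bytes already consumed, so a caller
can undo a partial copy when a later step fails:

	size_t copied = copy_from_iter(kbuf, len, iter); /* consumes the iter */
	if (copied && do_send(kbuf, copied) < 0)
		iov_iter_revert(iter, copied);  /* restore pre-copy position */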
diff --git a/lib/syscall.c b/lib/syscall.c
index 63239e0..a72cd09 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -11,6 +11,7 @@
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
+ *sp = *pc = 0;
*callno = -1;
return 0;
}
diff --git a/mm/Kconfig b/mm/Kconfig
index 0183305..eb10c90 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -720,3 +720,13 @@
bool
config ARCH_HAS_PKEYS
bool
+
+config FORCE_ALLOC_FROM_DMA_ZONE
+ bool "Force certain memory allocators to always return ZONE_DMA memory"
+ depends on ZONE_DMA
+ help
+ Ensure certain memory allocators always return memory from ZONE_DMA.
+ This helps guarantee that clients which require ZONE_DMA memory
+ always receive it.
+
+ If unsure, say "n".
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 917555c..d5b2b75 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1380,8 +1380,7 @@
deactivate_page(page);
if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
- orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
- tlb->fullmm);
+ pmdp_invalidate(vma, addr, pmd);
orig_pmd = pmd_mkold(orig_pmd);
orig_pmd = pmd_mkclean(orig_pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b6adedb..65c36ac 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4471,6 +4471,7 @@
{
struct page *page = NULL;
spinlock_t *ptl;
+ pte_t pte;
retry:
ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
@@ -4480,12 +4481,13 @@
*/
if (!pmd_huge(*pmd))
goto out;
- if (pmd_present(*pmd)) {
+ pte = huge_ptep_get((pte_t *)pmd);
+ if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
- if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+ if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
__migration_entry_wait(mm, (pte_t *)pmd, ptl);
goto retry;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e866ddcc..fdc790a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2152,6 +2152,8 @@
struct work_struct work;
};
+static struct workqueue_struct *memcg_kmem_cache_create_wq;
+
static void memcg_kmem_cache_create_func(struct work_struct *w)
{
struct memcg_kmem_cache_create_work *cw =
@@ -2183,7 +2185,7 @@
cw->cachep = cachep;
INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
- schedule_work(&cw->work);
+ queue_work(memcg_kmem_cache_create_wq, &cw->work);
}
static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
@@ -5796,6 +5798,17 @@
{
int cpu, node;
+#ifndef CONFIG_SLOB
+ /*
+ * Kmem cache creation is mostly done with the slab_mutex held,
+ * so use a special workqueue to avoid stalling all worker
+ * threads in case lots of cgroups are created simultaneously.
+ */
+ memcg_kmem_cache_create_wq =
+ alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
+ BUG_ON(!memcg_kmem_cache_create_wq);
+#endif
+
hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
for_each_possible_cpu(cpu)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0adc6f9..9ff5657 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1525,7 +1525,6 @@
COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1534,14 +1533,13 @@
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(bm, nmask, nr_bits);
+ if (compat_get_bitmap(bm, nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, bm, alloc_size);
+ if (copy_to_user(nm, bm, alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
@@ -1549,7 +1547,6 @@
compat_ulong_t, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode, compat_ulong_t, flags)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
@@ -1558,14 +1555,13 @@
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
diff --git a/mm/migrate.c b/mm/migrate.c
index f49de3cf..435f674 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -183,9 +183,9 @@
unlock_page(page);
put_page(page);
} else {
- putback_lru_page(page);
dec_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
+ putback_lru_page(page);
}
}
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e82002..8e57301 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2058,8 +2058,12 @@
* potentially hurts the reliability of high-order allocations when under
* intense memory pressure but failed atomic allocations should be easier
* to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve a pageblock even though highatomic
+ * pageblock is exhausted.
*/
-static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+ bool force)
{
struct zonelist *zonelist = ac->zonelist;
unsigned long flags;
@@ -2067,11 +2071,16 @@
struct zone *zone;
struct page *page;
int order;
+ bool ret;
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
ac->nodemask) {
- /* Preserve at least one pageblock */
- if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+ /*
+ * Preserve at least one pageblock unless memory pressure
+ * is really high.
+ */
+ if (!force && zone->nr_reserved_highatomic <=
+ pageblock_nr_pages)
continue;
spin_lock_irqsave(&zone->lock, flags);
@@ -2085,13 +2094,25 @@
continue;
/*
- * It should never happen but changes to locking could
- * inadvertently allow a per-cpu drain to add pages
- * to MIGRATE_HIGHATOMIC while unreserving so be safe
- * and watch for underflows.
+ * In the page freeing path, the migratetype change is racy, so
+ * we can encounter several free pages in a pageblock
+ * in this loop although we changed the pageblock type
+ * from highatomic to ac->migratetype. So we should
+ * adjust the count only once.
*/
- zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
- zone->nr_reserved_highatomic);
+ if (get_pageblock_migratetype(page) ==
+ MIGRATE_HIGHATOMIC) {
+ /*
+ * It should never happen but changes to
+ * locking could inadvertently allow a per-cpu
+ * drain to add pages to MIGRATE_HIGHATOMIC
+ * while unreserving so be safe and watch for
+ * underflows.
+ */
+ zone->nr_reserved_highatomic -= min(
+ pageblock_nr_pages,
+ zone->nr_reserved_highatomic);
+ }
/*
* Convert to ac->migratetype and avoid the normal
@@ -2103,12 +2124,16 @@
* may increase.
*/
set_pageblock_migratetype(page, ac->migratetype);
- move_freepages_block(zone, page, ac->migratetype);
- spin_unlock_irqrestore(&zone->lock, flags);
- return;
+ ret = move_freepages_block(zone, page, ac->migratetype);
+ if (ret) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return ret;
+ }
}
spin_unlock_irqrestore(&zone->lock, flags);
}
+
+ return false;
}
/* Remove an element from the buddy allocator from the fallback list */
@@ -2133,7 +2158,8 @@
page = list_first_entry(&area->free_list[fallback_mt],
struct page, lru);
- if (can_steal)
+ if (can_steal &&
+ get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
steal_suitable_fallback(zone, page, start_migratetype);
/* Remove the page from the freelists */
@@ -2542,7 +2568,8 @@
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
int mt = get_pageblock_migratetype(page);
- if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
+ if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
+ && mt != MIGRATE_HIGHATOMIC)
set_pageblock_migratetype(page,
MIGRATE_MOVABLE);
}
@@ -3313,7 +3340,7 @@
* Shrink them and try again
*/
if (!page && !drained) {
- unreserve_highatomic_pageblock(ac);
+ unreserve_highatomic_pageblock(ac, false);
drain_all_pages(NULL);
drained = true;
goto retry;
@@ -3430,8 +3457,10 @@
* Make sure we converge to OOM if we cannot make any progress
* several times in the row.
*/
- if (*no_progress_loops > MAX_RECLAIM_RETRIES)
- return false;
+ if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+ /* Before OOM, exhaust highatomic_reserve */
+ return unreserve_highatomic_pageblock(ac, true);
+ }
/*
* Keep reclaiming pages while there is a chance this will lead
@@ -4345,13 +4374,13 @@
K(node_page_state(pgdat, NR_FILE_MAPPED)),
K(node_page_state(pgdat, NR_FILE_DIRTY)),
K(node_page_state(pgdat, NR_WRITEBACK)),
+ K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
* HPAGE_PMD_NR),
K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
#endif
- K(node_page_state(pgdat, NR_SHMEM)),
K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
node_page_state(pgdat, NR_PAGES_SCANNED),
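A sketch of the overall shape the retry path takes after the page_alloc changes above, with illustrative names (release_reserves() stands in for unreserve_highatomic_pageblock()): the allocator only gives up once even a forced drain of the highatomic reserve frees nothing.

#define MAX_RETRIES 16

/* stand-in: returns true if it actually moved pages to the free lists */
static bool release_reserves(bool force) { return false; }

static bool should_retry(int *no_progress_loops)
{
	if (++(*no_progress_loops) > MAX_RETRIES) {
		/* last resort before declaring OOM */
		return release_reserves(true);
	}
	return true;
}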
diff --git a/mm/rmap.c b/mm/rmap.c
index 1ef3640..cd37c1c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1295,7 +1295,7 @@
goto out;
}
__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
- mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+ mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
out:
unlock_page_memcg(page);
}
@@ -1335,7 +1335,7 @@
* pte lock(a spinlock) is held, which implies preemption disabled.
*/
__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+ mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9d3f6d3..b4d398b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2595,16 +2595,23 @@
sc->nr_scanned - nr_scanned,
node_lru_pages);
+ /*
+ * Record the subtree's reclaim efficiency. The pages reclaimed
+ * from slab are excluded here because the corresponding
+ * scanned pages are not accounted. Moreover, freeing a page
+ * by slab shrinking depends on each slab's object population,
+ * making the cost model (i.e. scan:free) different from that
+ * of LRU.
+ */
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
+
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
}
- /* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
-
if (sc->nr_reclaimed - nr_reclaimed)
reclaimable = true;
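The reordering above matters because vmpressure is a ratio. A sketch of the arithmetic, with an illustrative helper name:

/*
 * Efficiency must be computed from LRU counts only: shrinker-freed
 * slab pages have no matching "scanned" count, so folding them in
 * would overstate efficiency and suppress pressure events.
 */
static unsigned long reclaim_efficiency_pct(unsigned long scanned,
					    unsigned long reclaimed_lru)
{
	if (!scanned)
		return 0;
	return reclaimed_lru * 100 / scanned;
}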
diff --git a/mm/workingset.c b/mm/workingset.c
index 33f6f4d..4c4f056 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -492,7 +492,7 @@
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
+ ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b0bc023..1689bb5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -280,7 +280,7 @@
struct zspage {
struct {
unsigned int fullness:FULLNESS_BITS;
- unsigned int class:CLASS_BITS;
+ unsigned int class:CLASS_BITS + 1;
unsigned int isolated:ISOLATED_BITS;
unsigned int magic:MAGIC_VAL_BITS;
};
diff --git a/net/9p/client.c b/net/9p/client.c
index 3fc94a4..cf129fe 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2101,6 +2101,10 @@
trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
+ if (rsize < count) {
+ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+ count = rsize;
+ }
p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
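The check above is an instance of a general rule: never trust a length field from the wire. A sketch (clamp_wire_count() is an illustrative name):

static u32 clamp_wire_count(u32 count, u32 rsize)
{
	if (count > rsize) {
		pr_err("bogus count from server (%u > %u)\n", count, rsize);
		count = rsize;	/* never read past the request size */
	}
	return count;
}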
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 2efb335..25a30be 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
@@ -469,11 +470,16 @@
{
struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
struct socket *sock;
+ unsigned int noio_flag;
int ret;
BUG_ON(con->sock);
+
+ /* sock_create_kern() allocates with GFP_KERNEL */
+ noio_flag = memalloc_noio_save();
ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
+ memalloc_noio_restore(noio_flag);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
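A sketch of the memalloc_noio bracket used above, assuming a caller that must not recurse into the block layer while its callee allocates with GFP_KERNEL internally:

#include <linux/net.h>
#include <linux/sched.h>	/* memalloc_noio_save/restore on 4.9 */

static int open_socket_noio(struct net *net, struct socket **sock)
{
	unsigned int noio_flag;
	int ret;

	/* everything the callee allocates is implicitly GFP_NOIO */
	noio_flag = memalloc_noio_save();
	ret = sock_create_kern(net, AF_INET, SOCK_STREAM,
			       IPPROTO_TCP, sock);
	memalloc_noio_restore(noio_flag);
	return ret;
}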
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b7de71f..963732e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -378,7 +378,7 @@
struct iov_iter *to, int len)
{
int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int i, copy = start - offset, start_off = offset, n;
struct sk_buff *frag_iter;
trace_skb_copy_datagram_iovec(skb, len);
@@ -387,11 +387,12 @@
if (copy > 0) {
if (copy > len)
copy = len;
- if (copy_to_iter(skb->data + offset, copy, to) != copy)
+ n = copy_to_iter(skb->data + offset, copy, to);
+ offset += n;
+ if (n != copy)
goto short_copy;
if ((len -= copy) == 0)
return 0;
- offset += copy;
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -405,13 +406,14 @@
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (copy_page_to_iter(skb_frag_page(frag),
+ n = copy_page_to_iter(skb_frag_page(frag),
frag->page_offset + offset -
- start, copy, to) != copy)
+ start, copy, to);
+ offset += n;
+ if (n != copy)
goto short_copy;
if (!(len -= copy))
return 0;
- offset += copy;
}
start = end;
}
@@ -443,6 +445,7 @@
*/
fault:
+ iov_iter_revert(to, offset - start_off);
return -EFAULT;
short_copy:
@@ -593,7 +596,7 @@
__wsum *csump)
{
int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int i, copy = start - offset, start_off = offset;
struct sk_buff *frag_iter;
int pos = 0;
int n;
@@ -603,11 +606,11 @@
if (copy > len)
copy = len;
n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+ offset += n;
if (n != copy)
goto fault;
if ((len -= copy) == 0)
return 0;
- offset += copy;
pos = copy;
}
@@ -629,12 +632,12 @@
offset - start, copy,
&csum2, to);
kunmap(page);
+ offset += n;
if (n != copy)
goto fault;
*csump = csum_block_add(*csump, csum2, pos);
if (!(len -= copy))
return 0;
- offset += copy;
pos += copy;
}
start = end;
@@ -667,6 +670,7 @@
return 0;
fault:
+ iov_iter_revert(to, offset - start_off);
return -EFAULT;
}
@@ -751,6 +755,7 @@
}
return 0;
csum_error:
+ iov_iter_revert(&msg->msg_iter, chunk);
return -EINVAL;
fault:
return -EFAULT;
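The fault paths above all follow one discipline, sketched here: advance the offset by what was actually copied (which may be short), then rewind the iterator by exactly that amount on failure so the caller sees it untouched.

static int copy_out(const struct sk_buff *skb, int offset,
		    struct iov_iter *to, size_t len)
{
	int start_off = offset;
	size_t n = copy_to_iter(skb->data + offset, len, to);

	offset += n;			/* n, not len: copy may be short */
	if (n != len) {
		iov_iter_revert(to, offset - start_off);
		return -EFAULT;
	}
	return 0;
}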
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9901e5b..f45f619 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -859,7 +859,8 @@
if (skb)
skb = skb_clone(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
+ if (neigh->ops->solicit)
+ neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
kfree_skb(skb);
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 53599bd..457f882 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -105,15 +105,21 @@
while ((skb = skb_dequeue(&npinfo->txq))) {
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
+ unsigned int q_index;
if (!netif_device_present(dev) || !netif_running(dev)) {
kfree_skb(skb);
continue;
}
- txq = skb_get_tx_queue(dev, skb);
-
local_irq_save(flags);
+ /* check if skb->queue_mapping is still valid */
+ q_index = skb_get_queue_mapping(skb);
+ if (unlikely(q_index >= dev->real_num_tx_queues)) {
+ q_index = q_index % dev->real_num_tx_queues;
+ skb_set_queue_mapping(skb, q_index);
+ }
+ txq = netdev_get_tx_queue(dev, q_index);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (netif_xmit_frozen_or_stopped(txq) ||
netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
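A sketch of the re-validation above: a queued skb can outlive a change to the device's queue count, so the stored mapping is clamped before the txq lookup.

static struct netdev_queue *netpoll_safe_txq(struct net_device *dev,
					     struct sk_buff *skb)
{
	unsigned int q = skb_get_queue_mapping(skb);

	if (unlikely(q >= dev->real_num_tx_queues)) {
		q %= dev->real_num_tx_queues;	/* clamp stale mapping */
		skb_set_queue_mapping(skb, q);
	}
	return netdev_get_tx_queue(dev, q);
}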
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f0f462c..ba1146c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -209,6 +209,9 @@
u8 *data;
bool pfmemalloc;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
@@ -367,6 +370,9 @@
unsigned long flags;
void *data;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
data = __alloc_page_frag(nc, fragsz, gfp_mask);
@@ -3076,22 +3082,32 @@
if (sg && csum && (mss != GSO_BY_FRAGS)) {
if (!(features & NETIF_F_GSO_PARTIAL)) {
struct sk_buff *iter;
+ unsigned int frag_len;
if (!list_skb ||
!net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
goto normal;
- /* Split the buffer at the frag_list pointer.
- * This is based on the assumption that all
- * buffers in the chain excluding the last
- * containing the same amount of data.
+ /* If we get here then all the required
+ * GSO features except frag_list are supported.
+ * Try to split the SKB to multiple GSO SKBs
+ * with no frag_list.
+ * Currently we can do that only when the buffers don't
+ * have a linear part and all the buffers except
+ * the last are of the same length.
*/
+ frag_len = list_skb->len;
skb_walk_frags(head_skb, iter) {
+ if (frag_len != iter->len && iter->next)
+ goto normal;
if (skb_headlen(iter))
goto normal;
len -= iter->len;
}
+
+ if (len != frag_len)
+ goto normal;
}
/* GSO partial only requires that we trim off any excess that
@@ -3779,6 +3795,7 @@
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = tstype;
+ serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk->sk_protocol == IPPROTO_TCP &&
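The frag_list walk added in the skb_segment() hunk above amounts to a predicate like the following sketch (the total-length check against the tail fragment is omitted for brevity):

static bool frag_list_splittable(struct sk_buff *head)
{
	unsigned int frag_len = skb_shinfo(head)->frag_list->len;
	struct sk_buff *iter;

	skb_walk_frags(head, iter) {
		if (skb_headlen(iter))
			return false;	/* must be purely paged */
		if (iter->len != frag_len && iter->next)
			return false;	/* only the last may differ */
	}
	return true;
}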
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9826695..4d37bdc 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -474,16 +474,15 @@
return false;
/* Support IP_PKTINFO on tstamp packets if requested, to correlate
- * timestamp with egress dev. Not possible for packets without dev
+ * timestamp with egress dev. Not possible for packets without iif
* or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
*/
- if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
- (!skb->dev))
+ info = PKTINFO_SKB_CB(skb);
+ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+ !info->ipi_ifindex)
return false;
- info = PKTINFO_SKB_CB(skb);
info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
- info->ipi_ifindex = skb->dev->ifindex;
return true;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 06879e6..93bfadf 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@
void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
+
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+ write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&ping_table.lock);
}
+ write_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
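The fix above widens the lock so the hashed test and the unhash are one atomic step. The resulting shape, as a sketch:

static void unhash_under_lock(struct sock *sk, rwlock_t *table_lock)
{
	write_lock_bh(table_lock);	/* taken before the test */
	if (sk_hashed(sk)) {
		hlist_nulls_del(&sk->sk_nulls_node);
		sk_nulls_node_init(&sk->sk_nulls_node);
		sock_put(sk);
	}
	write_unlock_bh(table_lock);
}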
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 873df83..70c40ba2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -798,6 +798,7 @@
struct rtable *rt;
struct flowi4 fl4;
const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct net *net = dev_net(skb->dev);
int oif = skb->dev->ifindex;
u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
@@ -805,7 +806,7 @@
rt = (struct rtable *) dst;
- __build_flow_key(sock_net(sk), &fl4, sk, iph, oif, tos, prot, mark, 0);
+ __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
@@ -2580,7 +2581,7 @@
skb_reset_network_header(skb);
/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
- ip_hdr(skb)->protocol = IPPROTO_ICMP;
+ ip_hdr(skb)->protocol = IPPROTO_UDP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6a90a0e..eb142ca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2297,6 +2297,7 @@
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
+ tcp_saved_syn_free(tp);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index f9038d6b..baea5df 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -167,12 +167,8 @@
}
out:
rcu_read_unlock();
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
- /* Clear out private data before diag gets it and
- * the ca has not been initialized.
- */
- if (ca->get_info)
- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
if (ca->flags & TCP_CONG_NEEDS_ECN)
INET_ECN_xmit(sk);
else
@@ -199,11 +195,10 @@
tcp_cleanup_congestion_control(sk);
icsk->icsk_ca_ops = ca;
icsk->icsk_ca_setsockopt = 1;
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
- if (sk->sk_state != TCP_CLOSE) {
- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ if (sk->sk_state != TCP_CLOSE)
tcp_init_congestion_control(sk);
- }
}
/* Manage refcounts on socket close. */
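Both tcp_cong.c hunks above converge on the same rule, sketched here: scrub icsk_ca_priv unconditionally whenever the congestion-control ops change, because tcp_diag can read that scratch space before init runs.

static void switch_congestion_ops(struct sock *sk,
				  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ca_ops = ca;
	/* always: diag may look before tcp_init_congestion_control() */
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (sk->sk_state != TCP_CLOSE)
		tcp_init_congestion_control(sk);
}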
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0cfb91f..553138d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3293,14 +3293,24 @@
static int fixup_permanent_addr(struct inet6_dev *idev,
struct inet6_ifaddr *ifp)
{
- if (!ifp->rt) {
- struct rt6_info *rt;
+ /* rt6i_ref == 0 means the host route was removed from the
+ * FIB, for example, if the 'lo' device is taken down. In that
+ * case, regenerate the host route.
+ */
+ if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
+ struct rt6_info *rt, *prev;
rt = addrconf_dst_alloc(idev, &ifp->addr, false);
if (unlikely(IS_ERR(rt)))
return PTR_ERR(rt);
+ /* ifp->rt can be accessed outside of rtnl */
+ spin_lock(&ifp->lock);
+ prev = ifp->rt;
ifp->rt = rt;
+ spin_unlock(&ifp->lock);
+
+ ip6_rt_put(prev);
}
if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
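The pointer swap above follows the usual publish-then-release order; a sketch:

static void publish_host_route(struct inet6_ifaddr *ifp,
			       struct rt6_info *rt)
{
	struct rt6_info *prev;

	spin_lock(&ifp->lock);		/* readers do not hold rtnl */
	prev = ifp->rt;
	ifp->rt = rt;
	spin_unlock(&ifp->lock);

	ip6_rt_put(prev);		/* drop the old ref off-lock */
}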
@@ -3642,14 +3652,19 @@
INIT_LIST_HEAD(&del_list);
list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
struct rt6_info *rt = NULL;
+ bool keep;
addrconf_del_dad_work(ifa);
+ keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+ !addr_is_local(&ifa->addr);
+ if (!keep)
+ list_move(&ifa->if_list, &del_list);
+
write_unlock_bh(&idev->lock);
spin_lock_bh(&ifa->lock);
- if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
- !addr_is_local(&ifa->addr)) {
+ if (keep) {
/* set state to skip the notifier below */
state = INET6_IFADDR_STATE_DEAD;
ifa->state = 0;
@@ -3661,8 +3676,6 @@
} else {
state = ifa->state;
ifa->state = INET6_IFADDR_STATE_DEAD;
-
- list_move(&ifa->if_list, &del_list);
}
spin_unlock_bh(&ifa->lock);
@@ -5978,13 +5991,6 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "accept_ra_rt_table",
- .data = &ipv6_devconf.accept_ra_rt_table,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
{
.procname = "optimistic_dad",
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1529833..a381772 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -401,9 +401,6 @@
* At one point, excluding local errors was a quick test to identify icmp/icmp6
* errors. This is no longer true, but the test remained, so the v6 stack,
* unlike v4, also honors cmsg requests on all wifi and timestamp errors.
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
- * the PKTINFO fields in skb->cb[]. Fill those in here.
*/
static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
struct sock_exterr_skb *serr)
@@ -415,14 +412,9 @@
if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
return false;
- if (!skb->dev)
+ if (!IP6CB(skb)->iif)
return false;
- if (skb->protocol == htons(ETH_P_IPV6))
- IP6CB(skb)->iif = skb->dev->ifindex;
- else
- PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
-
return true;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 885b411..97e89a2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1037,7 +1037,7 @@
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = t->net;
struct net_device_stats *stats = &t->dev->stats;
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
struct ipv6_tel_txoption opt;
struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
@@ -1057,26 +1057,28 @@
/* NBMA tunnel */
if (ipv6_addr_any(&t->parms.raddr)) {
- struct in6_addr *addr6;
- struct neighbour *neigh;
- int addr_type;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct in6_addr *addr6;
+ struct neighbour *neigh;
+ int addr_type;
- if (!skb_dst(skb))
- goto tx_err_link_failure;
+ if (!skb_dst(skb))
+ goto tx_err_link_failure;
- neigh = dst_neigh_lookup(skb_dst(skb),
- &ipv6_hdr(skb)->daddr);
- if (!neigh)
- goto tx_err_link_failure;
+ neigh = dst_neigh_lookup(skb_dst(skb),
+ &ipv6_hdr(skb)->daddr);
+ if (!neigh)
+ goto tx_err_link_failure;
- addr6 = (struct in6_addr *)&neigh->primary_key;
- addr_type = ipv6_addr_type(addr6);
+ addr6 = (struct in6_addr *)&neigh->primary_key;
+ addr_type = ipv6_addr_type(addr6);
- if (addr_type == IPV6_ADDR_ANY)
- addr6 = &ipv6_hdr(skb)->daddr;
+ if (addr_type == IPV6_ADDR_ANY)
+ addr6 = &ipv6_hdr(skb)->daddr;
- memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
- neigh_release(neigh);
+ memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+ neigh_release(neigh);
+ }
} else if (!(t->parms.flags &
(IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
/* enable the cache only if the routing decision does
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7f4265b..117405d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -774,7 +774,8 @@
* Delete a VIF entry
*/
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+ struct list_head *head)
{
struct mif_device *v;
struct net_device *dev;
@@ -820,7 +821,7 @@
dev->ifindex, &in6_dev->cnf);
}
- if (v->flags & MIFF_REGISTER)
+ if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
@@ -1331,7 +1332,6 @@
struct mr6_table *mrt;
struct mif_device *v;
int ct;
- LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@
v = &mrt->vif6_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (v->dev == dev)
- mif6_delete(mrt, ct, &list);
+ mif6_delete(mrt, ct, 1, NULL);
}
}
- unregister_netdevice_many(&list);
return NOTIFY_DONE;
}
@@ -1552,7 +1551,7 @@
for (i = 0; i < mrt->maxvif; i++) {
if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
continue;
- mif6_delete(mrt, i, &list);
+ mif6_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
@@ -1706,7 +1705,7 @@
if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
return -EFAULT;
rtnl_lock();
- ret = mif6_delete(mrt, mifi, NULL);
+ ret = mif6_delete(mrt, mifi, 0, NULL);
rtnl_unlock();
return ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5665375..1a34da0 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1172,8 +1172,7 @@
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
- amount = skb_tail_pointer(skb) -
- skb_transport_header(skb);
+ amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 477600f..7d17670 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1825,6 +1825,10 @@
int addr_type;
int err = -EINVAL;
+ /* RTF_PCPU is an internal flag; can not be set by userspace */
+ if (cfg->fc_flags & RTF_PCPU)
+ goto out;
+
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
goto out;
#ifndef CONFIG_IPV6_SUBTREES
@@ -2165,6 +2169,8 @@
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
+ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+ continue;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index a646f34..fecad10 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1685,7 +1685,7 @@
struct kcm_attach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_attach_ioctl(sock, &info);
@@ -1695,7 +1695,7 @@
struct kcm_unattach info;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_unattach_ioctl(sock, &info);
@@ -1706,7 +1706,7 @@
struct socket *newsock = NULL;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
err = kcm_clone(sock, &info, &newsock);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index a2ed3bd..e702cb95 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,8 @@
}
EXPORT_SYMBOL_GPL(l2tp_session_find);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref)
{
int hash;
struct l2tp_session *session;
@@ -288,6 +289,9 @@
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
read_unlock_bh(&tunnel->hlist_lock);
return session;
}
@@ -298,7 +302,7 @@
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
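The get/put contract introduced above, as a usage sketch: every l2tp_session_get_nth() must be paired with ->deref() (when do_ref was true) and l2tp_session_dec_refcount(), on every path.

static void visit_nth_session(struct l2tp_tunnel *tunnel, int nth)
{
	struct l2tp_session *session;

	session = l2tp_session_get_nth(tunnel, nth, true);
	if (!session)
		return;

	/* ... use session; it cannot be freed underneath us ... */

	if (session->deref)
		session->deref(session);
	l2tp_session_dec_refcount(session);
}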
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 181e755c..e7233ba 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -243,7 +243,8 @@
struct l2tp_session *l2tp_session_find(struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref);
struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a..d100aed 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
@@ -238,10 +238,14 @@
}
/* Show the tunnel or session context */
- if (pd->session == NULL)
+ if (!pd->session) {
l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
l2tp_dfs_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ff750bb..2066953 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -178,9 +178,10 @@
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 7095786..26cf4dc 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -191,9 +191,10 @@
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index bf31177..9f66272 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -844,7 +844,7 @@
goto out;
}
- session = l2tp_session_find_nth(tunnel, si);
+ session = l2tp_session_get_nth(tunnel, si, false);
if (session == NULL) {
ti++;
tunnel = NULL;
@@ -854,8 +854,11 @@
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session, L2TP_CMD_SESSION_GET) < 0)
+ session, L2TP_CMD_SESSION_GET) < 0) {
+ l2tp_session_dec_refcount(session);
break;
+ }
+ l2tp_session_dec_refcount(session);
si++;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 41d47bf..1387f54 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@
static void pppol2tp_session_destruct(struct sock *sk)
{
struct l2tp_session *session = sk->sk_user_data;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
if (session) {
sk->sk_user_data = NULL;
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@
l2tp_session_queue_purge(session);
sock_put(sk);
}
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
/* This will delete the session context via
@@ -1554,7 +1555,7 @@
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
@@ -1681,10 +1682,14 @@
/* Show the tunnel or session context.
*/
- if (pd->session == NULL)
+ if (!pd->session) {
pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
pppol2tp_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
@@ -1843,4 +1848,4 @@
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 638ec07..8d7747e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -726,7 +726,8 @@
ieee80211_recalc_ps(local);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ local->ops->wake_tx_queue) {
/* XXX: for AP_VLAN, actually track AP queues */
netif_tx_start_all_queues(dev);
} else if (dev) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a697ddf..acaaf61 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -208,6 +208,51 @@
return len;
}
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ int rtap_vendor_space)
+{
+ struct {
+ struct ieee80211_hdr_3addr hdr;
+ u8 category;
+ u8 action_code;
+ } __packed action;
+
+ if (!sdata)
+ return;
+
+ BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+ if (skb->len < rtap_vendor_space + sizeof(action) +
+ VHT_MUMIMO_GROUPS_DATA_LEN)
+ return;
+
+ if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+ return;
+
+ skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+ if (!ieee80211_is_action(action.hdr.frame_control))
+ return;
+
+ if (action.category != WLAN_CATEGORY_VHT)
+ return;
+
+ if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+ return;
+
+ if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+ return;
+
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
/*
* ieee80211_add_rx_radiotap_header - add radiotap header
*
@@ -515,7 +560,6 @@
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
unsigned int rtap_vendor_space = 0;
- struct ieee80211_mgmt *mgmt;
struct ieee80211_sub_if_data *monitor_sdata =
rcu_dereference(local->monitor_sdata);
@@ -553,6 +597,8 @@
return remove_monitor_info(local, origskb, rtap_vendor_space);
}
+ ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
/* room for the radiotap header based on driver features */
rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -618,23 +664,6 @@
ieee80211_rx_stats(sdata->dev, skb->len);
}
- mgmt = (void *)skb->data;
- if (monitor_sdata &&
- skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
- ieee80211_is_action(mgmt->frame_control) &&
- mgmt->u.action.category == WLAN_CATEGORY_VHT &&
- mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
- is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
- ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
- struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
- if (mu_skb) {
- mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
- skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
- ieee80211_queue_work(&local->hw, &monitor_sdata->work);
- }
- }
-
if (prev_dev) {
skb->dev = prev_dev;
netif_receive_skb(skb);
@@ -3617,6 +3646,27 @@
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
+
+ /*
+ * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+ * the BSSID - we've checked that already but may have accepted
+ * the wildcard (ff:ff:ff:ff:ff:ff).
+ *
+ * It also says:
+ * The BSSID of the Data frame is determined as follows:
+ * a) If the STA is contained within an AP or is associated
+ * with an AP, the BSSID is the address currently in use
+ * by the STA contained in the AP.
+ *
+ * So we should not accept data frames with an address that's
+ * multicast.
+ *
+ * Accepting it also opens a security problem because stations
+ * could encrypt it with the GTK and inject traffic that way.
+ */
+ if (ieee80211_is_data(hdr->frame_control) && multicast)
+ return false;
+
return true;
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
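The multicast-A1 rejection above reduces to a one-line predicate; a sketch:

#include <linux/etherdevice.h>

static bool sta_data_frame_allowed(const struct ieee80211_hdr *hdr)
{
	/* data frames must carry the AP's unicast BSSID in A1 */
	return !(ieee80211_is_data(hdr->frame_control) &&
		 is_multicast_ether_addr(hdr->addr1));
}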
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 0f5628a..3c7ae04 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -969,9 +969,8 @@
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
IF_DEBUG("qtaguid: iface_stat: create(%s): "
"ifa=%p ifa_label=%s\n",
- ifname, ifa,
- ifa->ifa_label ? ifa->ifa_label : "(null)");
- if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+ ifname, ifa, ifa->ifa_label);
+ if (!strcmp(ifname, ifa->ifa_label))
break;
}
}
@@ -1209,10 +1208,6 @@
pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
par->hooknum, __func__);
BUG();
- } else if (unlikely(!el_dev->name)) {
- pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
- par->hooknum, __func__);
- BUG();
} else {
proto = ipx_proto(skb, par);
MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
@@ -1637,8 +1632,6 @@
if (unlikely(!el_dev)) {
pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
- } else if (unlikely(!el_dev->name)) {
- pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
} else {
int proto = ipx_proto(skb, par);
MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f2b04a7..cb76ff3 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3702,6 +3702,8 @@
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
po->tp_reserve = val;
return 0;
}
@@ -4235,8 +4237,8 @@
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
- (int)(req->tp_block_size -
- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+ req->tp_block_size <=
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
@@ -4247,6 +4249,8 @@
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
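The new ring checks above guard two distinct overflows; a sketch with widened types (names illustrative):

static int ring_geometry_ok(unsigned int block_size,
			    unsigned int block_nr, u64 priv_overhead)
{
	/* compare in u64: the old (int) subtraction could wrap */
	if ((u64)block_size <= priv_overhead)
		return -EINVAL;
	/* bound the product before computing total ring size */
	if (block_nr && block_size > UINT_MAX / block_nr)
		return -EINVAL;
	return 0;
}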
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6cbe5bd..14346dc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4735,6 +4735,12 @@
if (!asoc)
return -EINVAL;
+ /* If there is a thread waiting on more sndbuf space for
+ * sending on this asoc, it cannot be peeled off.
+ */
+ if (waitqueue_active(&asoc->wait))
+ return -EBUSY;
+
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
@@ -6855,6 +6861,9 @@
if (sock->state != SS_UNCONNECTED)
goto out;
+ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+ goto out;
+
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
@@ -7427,8 +7436,6 @@
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
- if (sk != asoc->base.sk)
- goto do_error;
lock_sock(sk);
*timeo_p = current_timeo;
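The EBUSY check above exists because a sender blocked waiting for sndbuf space sleeps on asoc->wait and is tied to the current socket; a sketch of the guard (helper name illustrative):

static int peeloff_allowed(struct sctp_association *asoc)
{
	/* a blocked sender would be stranded on the old socket */
	if (waitqueue_active(&asoc->wait))
		return -EBUSY;
	return 0;
}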
diff --git a/net/socket.c b/net/socket.c
index e825856..a4fb472 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,8 @@
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/seemp_api.h>
+#include <linux/seemp_instrumentation.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -533,7 +535,7 @@
return used;
}
-int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
int err = simple_setattr(dentry, iattr);
@@ -1646,6 +1648,8 @@
struct iovec iov;
int fput_needed;
+ seemp_logk_sendto(fd, buff, len, flags, addr, addr_len);
+
err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 6fdffde..1530825 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1548,7 +1548,7 @@
ret = SVC_COMPLETE;
goto out;
drop:
- ret = SVC_DROP;
+ ret = SVC_CLOSE;
out:
if (rsci)
cache_put(&rsci->h, sn->rsc_cache);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7c8070e..75f290b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1155,8 +1155,7 @@
case SVC_DENIED:
goto err_bad_auth;
case SVC_CLOSE:
- if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
- svc_close_xprt(rqstp->rq_xprt);
+ goto close;
case SVC_DROP:
goto dropit;
case SVC_COMPLETE:
@@ -1246,7 +1245,7 @@
sendit:
if (svc_authorise(rqstp))
- goto dropit;
+ goto close;
return 1; /* Caller can now send it */
dropit:
@@ -1254,11 +1253,16 @@
dprintk("svc: svc_process dropit\n");
return 0;
+ close:
+ if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+ svc_close_xprt(rqstp->rq_xprt);
+ dprintk("svc: svc_process close\n");
+ return 0;
+
err_short_len:
svc_printk(rqstp, "short len %Zd, dropping request\n",
argv->iov_len);
-
- goto dropit; /* drop request */
+ goto close;
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
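The new close label gives every fatal path one exit; its effect, as a sketch (helper name illustrative):

static int svc_drop_and_close(struct svc_rqst *rqstp)
{
	/* tear down temporary transports instead of silently dropping */
	if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_close_xprt(rqstp->rq_xprt);
	return 0;	/* nothing is sent back to the client */
}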
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index a2fc3a0..449e4a3 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -1,17 +1,1491 @@
-#
-# This file is a placeholder to prevent accidental build breakage if someone
-# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
-# enable that build option.
-#
-# You should be using CRDA instead. It is even better if you use the CRDA
-# package provided by your distribution, since they will probably keep it
-# up-to-date on your behalf.
-#
-# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
-# need to replace this file with one containing appropriately formatted
-# regulatory rules that cover the regulatory domains you will be using. Your
-# best option is to extract the db.txt file from the wireless-regdb git
-# repository:
-#
-# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
-#
+# This is the world regulatory domain
+country 00:
+ (2402 - 2472 @ 40), (20)
+ # Channel 12 - 13.
+ (2457 - 2482 @ 40), (20), PASSIVE-SCAN, NO-IBSS
+ # Channel 14. Only JP enables this and for 802.11b only
+ (2474 - 2494 @ 20), (20), PASSIVE-SCAN, NO-IBSS, NO-OFDM
+ # Channel 36 - 48
+ (5170 - 5250 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ (5250 - 5330 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ (5490 - 5710 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ # NB: 5260 MHz - 5700 MHz requires DFS
+ # Channel 149 - 165
+ (5735 - 5835 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ # IEEE 802.11ad (60GHz), channels 1..3
+ (57240 - 63720 @ 2160), (0)
+
+
+country AE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country AF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5150 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5350 @ 80), (23), DFS, AUTO-BW
+ (5470 - 5710 @ 160), (30), DFS
+
+country AM: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (18)
+ (5250 - 5330 @ 20), (18), DFS
+
+country AN: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AR:
+ (2402 - 2482 @ 40), (36)
+ (5170 - 5330 @ 160), (23)
+ (5490 - 5590 @ 80), (36)
+ (5650 - 5730 @ 80), (36)
+ (5735 - 5835 @ 80), (36)
+
+country AS: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5850 @ 80), (30)
+
+country AT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country AU: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5590 @ 80), (24), DFS
+ (5650 - 5730 @ 80), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country AW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (18), AUTO-BW
+ (5250 - 5330 @ 80), (18), DFS, AUTO-BW
+
+country BA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country BB: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country BD:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+country BE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country BF: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country BH:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 20), (23)
+ (5735 - 5835 @ 20), (33)
+
+country BL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country BM: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BN: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (20)
+
+country BO: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5250 - 5330 @ 80), (30), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BR: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BS: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country BY: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country BZ:
+ (2402 - 2482 @ 40), (36)
+ (5170 - 5330 @ 160), (27)
+ (5490 - 5730 @ 160), (36)
+ (5735 - 5835 @ 80), (36)
+
+country CA: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5590 @ 80), (24), DFS
+ (5650 - 5730 @ 80), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CF: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (24)
+ (5250 - 5330 @ 40), (24), DFS
+ (5490 - 5730 @ 40), (24), DFS
+ (5735 - 5835 @ 40), (30)
+
+country CH: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+
+country CI: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CL:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (20)
+ (5735 - 5835 @ 80), (20)
+
+country CN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (33)
+ # 60 gHz band channels 1,4: 28dBm, channels 2,3: 44dBm
+ # ref: http://www.miit.gov.cn/n11293472/n11505629/n11506593/n11960250/n11960606/n11960700/n12330791.files/n12330790.pdf
+ (57240 - 59400 @ 2160), (28)
+ (59400 - 63720 @ 2160), (44)
+ (63720 - 65880 @ 2160), (28)
+
+country CO: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CR: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (24)
+ (5250 - 5330 @ 20), (24), DFS
+ (5490 - 5730 @ 20), (24), DFS
+ (5735 - 5835 @ 20), (30)
+
+country CX: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CY: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+# Data from http://www.ctu.eu/164/download/VOR/VOR-12-08-2005-34.pdf
+# and http://www.ctu.eu/164/download/VOR/VOR-12-05-2007-6-AN.pdf
+country CZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+# Data from "Frequenznutzungsplan" (as published in April 2008), downloaded from
+# http://www.bundesnetzagentur.de/cae/servlet/contentblob/38448/publicationFile/2659/Frequenznutzungsplan2008_Id17448pdf.pdf
+# For the 5GHz range also see
+# http://www.bundesnetzagentur.de/cae/servlet/contentblob/38216/publicationFile/6579/WLAN5GHzVfg7_2010_28042010pdf.pdf
+
+country DE: DFS-ETSI
+ # entries 279004 and 280006
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country DK: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country DM: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country DO: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country DZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5670 @ 160), (23), DFS
+
+country EC: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (24)
+ (5250 - 5330 @ 20), (24), DFS
+ (5490 - 5730 @ 20), (24), DFS
+ (5735 - 5835 @ 20), (30)
+
+country EE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country EG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (23)
+ (5250 - 5330 @ 40), (23), DFS
+
+country ES: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country ET: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country FI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country FM: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country FR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GB: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GD: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country GE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (18), AUTO-BW
+ (5250 - 5330 @ 80), (18), DFS, AUTO-BW
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GH: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country GL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GP: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GU: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country GY:
+ (2402 - 2482 @ 40), (30)
+ (5735 - 5835 @ 80), (30)
+
+country HK: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country HN:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (24)
+ (5490 - 5730 @ 160), (24)
+ (5735 - 5835 @ 80), (30)
+
+country HR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country HT: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country HU: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country ID:
+ # ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
+ (2402 - 2482 @ 40), (30)
+ (5735 - 5815 @ 20), (30)
+
+country IE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country IL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country IN:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (23)
+ (5735 - 5835 @ 80), (30)
+
+country IR:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+country IS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country IT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country JM: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country JO:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23)
+ (5735 - 5835 @ 80), (23)
+
+country JP: DFS-JP
+ (2402 - 2482 @ 40), (20)
+ (2474 - 2494 @ 20), (20), NO-OFDM
+ (5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
+ (5490 - 5710 @ 160), (20), DFS
+	# 60 GHz band channels 2-4 at 10 mW,
+ # ref: http://www.arib.or.jp/english/html/overview/doc/1-STD-T74v1_1.pdf
+ (59000 - 66000 @ 2160), (10 mW)
+
+country KE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23)
+ (5490 - 5570 @ 80), (30), DFS
+ (5735 - 5775 @ 40), (23)
+
+country KH: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country KN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5815 @ 80), (30)
+
+country KR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5835 @ 80), (30)
+ # 60 GHz band channels 1-4,
+ # ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
+ (57000 - 66000 @ 2160), (43)
+
+country KP: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20)
+ (5250 - 5330 @ 80), (20), DFS
+ (5490 - 5630 @ 80), (30), DFS
+ (5735 - 5815 @ 80), (30)
+
+country KW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country KY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country KZ:
+ (2402 - 2482 @ 40), (20)
+
+country LB: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country LC: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (30), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5815 @ 80), (30)
+
+country LI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+
+country LK: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (24)
+ (5250 - 5330 @ 20), (24), DFS
+ (5490 - 5730 @ 20), (24), DFS
+ (5735 - 5835 @ 20), (30)
+
+country LS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country LT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country LU: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country LV: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country MA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country MC: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MD: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country ME: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MH: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MK: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country MN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MO: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MP: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MQ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country MU: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MV: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (20)
+
+country MW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MX: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5650 @ 160), (24), DFS
+ (5735 - 5815 @ 80), (24)
+
+country NA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5835 @ 80), (33)
+
+country NG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5250 - 5330 @ 80), (30), DFS
+ (5735 - 5835 @ 80), (30)
+
+country NI: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country NL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country NO: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country NP:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (20)
+ (5735 - 5835 @ 80), (20)
+
+country NZ: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country OM: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country PA:
+ (2402 - 2472 @ 40), (30)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (30), AUTO-BW
+ (5735 - 5835 @ 80), (36)
+
+country PE: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country PG: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PH: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PK:
+ (2402 - 2482 @ 40), (30)
+ (5735 - 5835 @ 80), (30)
+
+country PL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country PM: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country PR: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+# Public Safety FCCA, FCC4
+# 27dBm [4.9GHz 1/4 rate], 30dBm [1/2 rate], 33dBm [full rate], and 5GHz same as FCC1
+# db.txt cannot express the per-rate limitation on 5G, so all 5G channels are disabled for FCC4
+country PS: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (4940 - 4990 @ 40), (33)
+
+country PT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country PW: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country QA:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+country RE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country RO: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+# Source:
+# http://www.ratel.rs/upload/documents/Plan_namene/Plan_namene-sl_glasnik.pdf
+country RS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country RU:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (23)
+ (5490 - 5730 @ 160), (30)
+ (5735 - 5835 @ 80), (30)
+
+country RW: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country SA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country SE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country SG: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country SI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country SK: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country SN:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (24)
+ (5490 - 5730 @ 160), (24)
+ (5735 - 5835 @ 80), (30)
+
+country SR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country SV: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (23)
+ (5250 - 5330 @ 20), (23), DFS
+ (5735 - 5835 @ 20), (30)
+
+country SY:
+ (2402 - 2482 @ 40), (20)
+
+country TC: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country TD: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country TG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (23)
+ (5250 - 5330 @ 40), (23), DFS
+ (5490 - 5710 @ 40), (30), DFS
+
+country TH: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country TN: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country TR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country TT:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (27)
+ (5490 - 5730 @ 160), (36)
+ (5735 - 5835 @ 80), (36)
+
+country TW: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country TZ:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+# Source:
+# #914 / 06 Sep 2007: http://www.ucrf.gov.ua/uk/doc/nkrz/1196068874
+# #1174 / 23 Oct 2008: http://www.nkrz.gov.ua/uk/activities/ruling/1225269361
+# (appendix 8)
+# The listed 5GHz range is the lowest common denominator of all related
+# rules in the referenced laws; it is used because the definitions in
+# those laws are disputable.
+country UA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5490 - 5670 @ 160), (20), DFS
+ (5735 - 5835 @ 80), (20)
+	# 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country UG: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country US: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+	# 5.9 GHz band
+ # reference: https://apps.fcc.gov/edocs_public/attachmatch/FCC-03-324A1.pdf
+ (5842 - 5863 @ 5), (30)
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5865 - 5885 @ 20), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5895 - 5915 @ 20), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+	# 60 GHz band
+ # reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
+ # channels 1,2,3, EIRP=40dBm(43dBm peak)
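+	# (3 channels x 2160 MHz = 6480 MHz wide: 57240 + 6480 = 63720)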
+ (57240 - 63720 @ 2160), (40)
+
+country UY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country UZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+
+country VC: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country VE: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country VI: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country VN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country VU: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country WF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country WS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (23)
+ (5250 - 5330 @ 40), (23), DFS
+ (5490 - 5710 @ 40), (30), DFS
+
+country XA: DFS-JP
+ (2402 - 2482 @ 40), (20)
+ (2474 - 2494 @ 20), (20), NO-OFDM
+ (5170 - 5250 @ 80), (20), NO-IR, AUTO-BW, NO-OUTDOOR
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
+ (5490 - 5710 @ 160), (20), DFS
+ (59000 - 66000 @ 2160), (10 mW)
+
+country YE:
+ (2402 - 2482 @ 40), (20)
+
+country YT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country ZA: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country ZW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index eeb23d2..bc0ebd4 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2340,6 +2340,7 @@
return 0;
}
+EXPORT_SYMBOL(regulatory_hint_user);
int regulatory_hint_indoor(bool is_indoor, u32 portid)
{
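The EXPORT_SYMBOL() above makes regulatory_hint_user() callable from modules
(its private declaration is dropped from reg.h in the next hunk). A minimal
sketch of a module-side caller, assuming the declaration is now reachable
through the cfg80211 headers; the wrapper name here is hypothetical:

#include <net/cfg80211.h>

/* Forward a user-chosen ISO 3166-1 alpha2 country code to the
 * regulatory core as a user hint; the core decides whether it
 * wins over driver or country-IE hints. */
static int example_set_user_regdomain(const char *alpha2)
{
	return regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
}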
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index f6ced31..822ac90 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -28,9 +28,6 @@
bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region);
enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
-int regulatory_hint_user(const char *alpha2,
- enum nl80211_user_reg_hint_type user_reg_hint_type);
-
/**
* regulatory_hint_indoor - hint operation in indoor env. or not
* @is_indoor: if true indicates that user space thinks that the
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index e318878..35ad69f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -70,7 +70,7 @@
MODULE_PARM_DESC(bss_entries_limit,
"limit to number of scan BSS entries (per wiphy, default 1000)");
-#define IEEE80211_SCAN_RESULT_EXPIRE (7 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
static void bss_free(struct cfg80211_internal_bss *bss)
{
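Raising IEEE80211_SCAN_RESULT_EXPIRE from 7*HZ to 30*HZ keeps cached BSS
entries valid for 30 seconds instead of 7. A rough sketch of the kind of
aging test this constant feeds; the helper and field names are illustrative,
not the exact cfg80211 internals:

#include <linux/jiffies.h>

#define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)

/* A cached scan result whose last-seen timestamp (in jiffies) is
 * older than the expiry window is treated as stale and freed. */
static bool example_bss_expired(unsigned long last_seen)
{
	return time_after(jiffies, last_seen + IEEE80211_SCAN_RESULT_EXPIRE);
}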
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 14b3f00..2927d06 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -130,12 +130,10 @@
/* Age scan results with time spent in suspend */
cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
- if (rdev->ops->resume) {
- rtnl_lock();
- if (rdev->wiphy.registered)
- ret = rdev_resume(rdev);
- rtnl_unlock();
- }
+ rtnl_lock();
+ if (rdev->wiphy.registered && rdev->ops->resume)
+ ret = rdev_resume(rdev);
+ rtnl_unlock();
return ret;
}
diff --git a/scripts/build-all.py b/scripts/build-all.py
index d36e96f..bd468cd 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -307,10 +307,12 @@
r'qsd*_defconfig',
r'mpq*_defconfig',
r'sdm[0-9]*_defconfig',
+ r'sdx*_defconfig',
)
arch64_pats = (
r'msm*_defconfig',
r'sdm[0-9]*_defconfig',
+ r'sdx*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c5b281e..a0d45ef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2438,15 +2438,6 @@
$herecurr);
}
$shorttext = AFTER_SHORTTEXT;
- } elsif (length($line) > (SHORTTEXT_LIMIT +
- $shorttext_exspc)
- && $line !~ /^:([0-7]{6}\s){2}
- ([[:xdigit:]]+\.*
- \s){2}\w+\s\w+/xms) {
- WARN("LONG_COMMIT_TEXT",
- "commit text line over " .
- SHORTTEXT_LIMIT .
- " characters\n" . $herecurr);
} elsif ($line=~/^\s*change-id:/i ||
$line=~/^\s*signed-off-by:/i ||
$line=~/^\s*crs-fixed:/i ||
@@ -2687,6 +2678,7 @@
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && !$commit_log_possible_stack_dump &&
$line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i &&
+ $line !~ /^This reverts commit [0-9a-f]{7,40}/ &&
($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
$line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
diff --git a/security/keys/gc.c b/security/keys/gc.c
index addf060..9cb4fe4 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@
* immediately unlinked.
*/
struct key_type key_type_dead = {
- .name = "dead",
+ .name = ".dead",
};
/*
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index d580ad0..dbbfd77 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -271,7 +271,8 @@
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
- * be skipped over.
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
@@ -288,12 +289,16 @@
ret = PTR_ERR(name);
goto error;
}
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
+error_name:
kfree(name);
-
error:
return ret;
}
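With the check above, user space now gets EPERM when it tries to create or
join a dot-prefixed session keyring, keeping names such as the ".dead" key
type (reserved in gc.c above) out of reach. A small userspace sketch,
assuming the keyutils library wrapper (link with -lkeyutils):

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	/* Dot-prefixed names are now reserved for the kernel, so
	 * this call is expected to fail with EPERM. */
	key_serial_t id = keyctl_join_session_keyring(".dead");

	if (id < 0)
		perror("keyctl_join_session_keyring");
	return 0;
}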
@@ -1251,8 +1256,8 @@
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist. The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@@ -1277,11 +1282,8 @@
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
- if (ret < 0) {
- if (ret != -EEXIST)
- goto error;
- ret = 0;
- }
+ if (ret < 0)
+ goto error;
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 40a8852..45536c6 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -127,13 +127,18 @@
}
/*
- * Install a fresh thread keyring directly to new credentials. This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
+ if (new->thread_keyring)
+ return 0;
+
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN,
@@ -146,7 +151,9 @@
}
/*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
@@ -157,8 +164,6 @@
if (!new)
return -ENOMEM;
- BUG_ON(new->thread_keyring);
-
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
@@ -169,17 +174,17 @@
}
/*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
*
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
- return -EEXIST;
+ return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
@@ -193,11 +198,9 @@
}
/*
- * Make sure a process keyring is installed for the current process. The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
*
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
@@ -211,14 +214,18 @@
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ? ret : 0;
+ return ret;
}
return commit_creds(new);
}
/*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
@@ -253,8 +260,11 @@
}
/*
- * Install a session keyring, discarding the old one. If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 3f4efcb..3490d21 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -265,6 +265,10 @@
/* NOTE: overflow flag is not cleared */
spin_unlock_irqrestore(&f->lock, flags);
+ /* close the old pool and wait until all users are gone */
+ snd_seq_pool_mark_closing(oldpool);
+ snd_use_lock_sync(&f->use_lock);
+
/* release cells in old pool */
for (cell = oldhead; cell; cell = next) {
next = cell->next;
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 3b693e9..12ba833 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -28,19 +28,16 @@
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
- int max_count = 5 * HZ;
+ int warn_count = 5 * HZ;
if (atomic_read(lockp) < 0) {
pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
return;
}
while (atomic_read(lockp) > 0) {
- if (max_count == 0) {
- pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
- break;
- }
+ if (warn_count-- == 0)
+ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
schedule_timeout_uninterruptible(1);
- max_count--;
}
}
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index f676931..c3768cd 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -45,7 +45,7 @@
struct snd_rawmidi_substream *substream;
snd_fw_async_midi_port_fill fill;
- unsigned int consume_bytes;
+ int consume_bytes;
};
int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index e629b88..474b06d 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -226,11 +226,11 @@
if (err < 0)
goto error;
- err = detect_quirks(oxfw);
+ err = snd_oxfw_stream_discover(oxfw);
if (err < 0)
goto error;
- err = snd_oxfw_stream_discover(oxfw);
+ err = detect_quirks(oxfw);
if (err < 0)
goto error;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 112caa2..bb1aad3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4846,6 +4846,7 @@
ALC292_FIXUP_DISABLE_AAMIX,
ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5446,6 +5447,15 @@
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
+ },
[ALC275_FIXUP_DELL_XPS] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -5518,7 +5528,7 @@
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_speaker_volume,
.chained = true,
- .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
},
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 89ac5f5..7ae46c2 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -349,7 +349,7 @@
}
#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
-#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8)
+#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
static struct {
int rate;
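As a quick arithmetic check on the one-character fix above: the corrected
macro expands to 12288 * 1000 * 8 = 98,304,000 Hz, exactly 8 times the
12.288 MHz audio master clock the macro is named after, whereas the old
12228 value was a digit transposition.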
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index fe135b4..9c6f471 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -888,6 +888,8 @@
config SND_SOC_WCD9335
tristate
depends on WCD9335_CODEC
+ select SND_SOC_WCD_MBHC
+ select SND_SOC_WCD_MBHC_LEGACY
config SND_SOC_WCD934X
tristate
@@ -903,6 +905,7 @@
tristate
depends on SND_SOC_WCD934X
select SND_SOC_WCD_MBHC
+ select SND_SOC_WCD_MBHC_ADC
config SND_SOC_WSA881X
tristate
@@ -931,7 +934,12 @@
config SND_SOC_WCD_MBHC
tristate
- default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_SDM660_CDC=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
+
+config SND_SOC_WCD_MBHC_LEGACY
+ tristate
+
+config SND_SOC_WCD_MBHC_ADC
+ tristate
config SND_SOC_WCD_DSP_MGR
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 20ae32e..8c84460 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -173,7 +173,11 @@
endif
snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
-snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
+ifeq ($(CONFIG_SND_SOC_WCD_MBHC_LEGACY), y)
+ snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-legacy.o
+else ifeq ($(CONFIG_SND_SOC_WCD_MBHC_ADC), y)
+ snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-adc.o
+endif
snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o
snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o
snd-soc-wcd-dsp-utils-objs := wcd-dsp-utils.o
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 49caf13..fdc14e5 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2813,6 +2813,8 @@
#ifdef CONFIG_ACPI
static const struct acpi_device_id rt5670_acpi_match[] = {
{ "10EC5670", 0},
+ { "10EC5672", 0},
+ { "10EC5640", 0}, /* quirk */
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
diff --git a/sound/soc/codecs/sdm660_cdc/Kconfig b/sound/soc/codecs/sdm660_cdc/Kconfig
index d370da3..2f36c39 100644
--- a/sound/soc/codecs/sdm660_cdc/Kconfig
+++ b/sound/soc/codecs/sdm660_cdc/Kconfig
@@ -1,3 +1,5 @@
config SND_SOC_SDM660_CDC
tristate "MSM Internal PMIC based codec"
+ select SND_SOC_WCD_MBHC
+ select SND_SOC_WCD_MBHC_LEGACY
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 52e6815..5f8e3fd 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -32,7 +32,7 @@
#include "sdm660-cdc-registers.h"
#include "msm-cdc-common.h"
#include "../../msm/sdm660-common.h"
-#include "../wcd-mbhc-v2.h"
+#include "../wcd-mbhc-v2-api.h"
#define DRV_NAME "pmic_analog_codec"
#define SDM660_CDC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
diff --git a/sound/soc/codecs/wcd-mbhc-adc.c b/sound/soc/codecs/wcd-mbhc-adc.c
new file mode 100644
index 0000000..7278431
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-adc.c
@@ -0,0 +1,1019 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "wcd-mbhc-adc.h"
+#include "wcd-mbhc-v2.h"
+
+#define WCD_MBHC_ADC_HS_THRESHOLD_MV 1700
+#define WCD_MBHC_ADC_HPH_THRESHOLD_MV 75
+#define WCD_MBHC_ADC_MICBIAS_MV 1800
+
+static int wcd_mbhc_get_micbias(struct wcd_mbhc *mbhc)
+{
+ int micbias = 0;
+ u8 vout_ctl = 0;
+
+ /* Read MBHC Micbias (Mic Bias2) voltage */
+ WCD_MBHC_REG_READ(WCD_MBHC_MICB2_VOUT, vout_ctl);
+
+ /* Formula for getting micbias from vout
+ * micbias = 1.0V + VOUT_CTL * 50mV
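+	 * Worked example (register value assumed for illustration):
+	 * vout_ctl = 16 gives 1000 + 16 * 50 = 1800 mV, matching the
+	 * WCD_MBHC_ADC_MICBIAS_MV default.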
+ */
+ micbias = 1000 + (vout_ctl * 50);
+ pr_debug("%s: vout_ctl: %d, micbias: %d\n",
+ __func__, vout_ctl, micbias);
+
+ return micbias;
+}
+
+static int wcd_get_voltage_from_adc(u8 val, int micbias)
+{
+ /* Formula for calculating voltage from ADC
+ * Voltage = ADC_RESULT*12.5mV*V_MICBIAS/1.8
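+	 * Worked example (values assumed for illustration): val = 0x80
+	 * (128) at micbias = 1800 mV gives 128 * 12.5 mV = 1600 mV.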
+ */
+ return ((val * 125 * micbias)/(WCD_MBHC_ADC_MICBIAS_MV * 10));
+}
+
+static int wcd_measure_adc_continuous(struct wcd_mbhc *mbhc)
+{
+ u8 adc_result = 0;
+ int output_mv = 0;
+ int retry = 3;
+ u8 adc_en = 0;
+
+ pr_debug("%s: enter\n", __func__);
+
+	/* Prerequisites for ADC continuous measurement */
+	/* Read legacy electrical detection and disable it */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+ /* Set ADC to continuous measurement */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 1);
+ /* Read ADC Enable bit to restore after adc measurement */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_EN, adc_en);
+ /* Disable ADC_ENABLE bit */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+ /* Disable MBHC FSM */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ /* Set the MUX selection to IN2P */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, MUX_CTL_IN2P);
+ /* Enable MBHC FSM */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ /* Enable ADC_ENABLE bit */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 1);
+
+ while (retry--) {
+ /* wait for 3 msec before reading ADC result */
+ usleep_range(3000, 3100);
+
+ /* Read ADC result */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_RESULT, adc_result);
+ }
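+	/* Only the last of the three reads is kept; the earlier
+	 * iterations give the continuous-mode ADC time to settle.
+	 */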
+
+ /* Restore ADC Enable */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, adc_en);
+ /* Get voltage from ADC result */
+ output_mv = wcd_get_voltage_from_adc(adc_result,
+ wcd_mbhc_get_micbias(mbhc));
+ pr_debug("%s: adc_result: 0x%x, output_mv: %d\n",
+ __func__, adc_result, output_mv);
+
+ return output_mv;
+}
+
+static int wcd_measure_adc_once(struct wcd_mbhc *mbhc, int mux_ctl)
+{
+ u8 adc_timeout = 0;
+ u8 adc_complete = 0;
+ u8 adc_result = 0;
+ int retry = 6;
+ int ret = 0;
+ int output_mv = 0;
+ u8 adc_en = 0;
+
+ pr_debug("%s: enter\n", __func__);
+
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+ /* Read ADC Enable bit to restore after adc measurement */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_EN, adc_en);
+ /* Trigger ADC one time measurement */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ /* Set the appropriate MUX selection */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, mux_ctl);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 1);
+
+ while (retry--) {
+ /* wait for 600usec to get adc results */
+ usleep_range(600, 610);
+
+ /* check for ADC Timeout */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_TIMEOUT, adc_timeout);
+ if (adc_timeout)
+ continue;
+
+ /* Read ADC complete bit */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_COMPLETE, adc_complete);
+ if (!adc_complete)
+ continue;
+
+ /* Read ADC result */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_RESULT, adc_result);
+
+ pr_debug("%s: ADC result: 0x%x\n", __func__, adc_result);
+ /* Get voltage from ADC result */
+ output_mv = wcd_get_voltage_from_adc(adc_result,
+ wcd_mbhc_get_micbias(mbhc));
+ break;
+ }
+
+ /* Restore ADC Enable */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, adc_en);
+
+	/* retry == -1 means every attempt timed out or never completed */
+	if (retry < 0) {
+ pr_err("%s: adc complete: %d, adc timeout: %d\n",
+ __func__, adc_complete, adc_timeout);
+ ret = -EINVAL;
+ } else {
+ pr_debug("%s: adc complete: %d, adc timeout: %d output_mV: %d\n",
+ __func__, adc_complete, adc_timeout, output_mv);
+ ret = output_mv;
+ }
+
+ pr_debug("%s: leave\n", __func__);
+
+ return ret;
+}
+
+static bool wcd_mbhc_adc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+{
+ bool anc_mic_found = false;
+ u16 fsm_en = 0;
+ u8 det = 0;
+ unsigned long retry = 0;
+ int valid_plug_cnt = 0, invalid_plug_cnt = 0;
+ int ret = 0;
+ u8 elect_ctl = 0;
+ u8 adc_mode = 0;
+ u8 vref = 0;
+ int vref_mv[] = {1650, 1500, 1600, 1700};
+
+ if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
+ mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
+ return false;
+
+ if (!mbhc->mbhc_cb->mbhc_micbias_control)
+ return false;
+
+ /* Disable Detection done for ADC operation */
+ WCD_MBHC_REG_READ(WCD_MBHC_DETECTION_DONE, det);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+
+ /* Mask ADC COMPLETE interrupt */
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+
+ WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+ mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+ mbhc->mbhc_cfg->anc_micbias,
+ MICB_ENABLE);
+
+	/* Read legacy electrical detection and disable it */
+ WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_MODE, adc_mode);
+
+ /*
+	 * Wait for the 20 ms button debounce time. If a 4-pole plug is
+	 * inserted into a 5-pole jack, a button-press interrupt fires
+	 * during ANC plug detection; in that case, even though
+	 * Hs_comp_res is 0, the plug must not be declared an ANC type.
+ */
+ usleep_range(20000, 20100);
+
+ /*
+	 * After enabling the FSM, check that the IN3 voltage is below
+	 * Vref, to handle slow-insertion scenarios.
+ */
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_VREF, vref);
+
+ do {
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low\n", __func__);
+ goto done;
+ }
+ pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
+ ret = wcd_measure_adc_once(mbhc, MUX_CTL_IN3P);
+ /* TODO - check the logic */
+		if (ret > 0 && (ret < vref_mv[vref]))
+ valid_plug_cnt++;
+ else
+ invalid_plug_cnt++;
+ retry++;
+ } while (retry < ANC_DETECT_RETRY_CNT);
+
+ pr_debug("%s: valid: %d, invalid: %d\n", __func__, valid_plug_cnt,
+ invalid_plug_cnt);
+
+ /* decision logic */
+ if (valid_plug_cnt > invalid_plug_cnt)
+ anc_mic_found = true;
+done:
+ /* Restore ADC mode */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, adc_mode);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ /* Set the MUX selection to AUTO */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, MUX_CTL_AUTO);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, fsm_en);
+ /* Restore detection done */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, det);
+
+ /* Restore electrical detection */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+
+ mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+ mbhc->mbhc_cfg->anc_micbias,
+ MICB_DISABLE);
+ pr_debug("%s: anc mic %sfound\n", __func__,
+ anc_mic_found ? "" : "not ");
+
+ return anc_mic_found;
+}
+
+/* To determine if cross connection occurred */
+static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
+{
+ enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
+ int hphl_adc_res = 0, hphr_adc_res = 0;
+ u8 fsm_en = 0;
+ int ret = 0;
+ u8 adc_mode = 0;
+ u8 elect_ctl = 0;
+ u8 adc_en = 0;
+
+ pr_debug("%s: enter\n", __func__);
+ /* Check for button press and plug detection */
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low\n", __func__);
+ return -EINVAL;
+ }
+
+	/* If the PA is enabled, don't check for cross-connection */
+ if (mbhc->mbhc_cb->hph_pa_on_status)
+ if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
+ return -EINVAL;
+
+	/* Read legacy electrical detection and disable it */
+ WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0x00);
+
+ /* Read and set ADC to single measurement */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_MODE, adc_mode);
+ /* Read ADC Enable bit to restore after adc measurement */
+ WCD_MBHC_REG_READ(WCD_MBHC_ADC_EN, adc_en);
+ /* Read FSM status */
+ WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+
+ /* Get adc result for HPH L */
+ hphl_adc_res = wcd_measure_adc_once(mbhc, MUX_CTL_HPH_L);
+ if (hphl_adc_res < 0) {
+ pr_err("%s: hphl_adc_res adc measurement failed\n", __func__);
+ ret = hphl_adc_res;
+ goto done;
+ }
+
+ /* Get adc result for HPH R in mV */
+ hphr_adc_res = wcd_measure_adc_once(mbhc, MUX_CTL_HPH_R);
+ if (hphr_adc_res < 0) {
+ pr_err("%s: hphr_adc_res adc measurement failed\n", __func__);
+ ret = hphr_adc_res;
+ goto done;
+ }
+
+ if (hphl_adc_res > 100 && hphr_adc_res > 100) {
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ pr_debug("%s: Cross connection identified\n", __func__);
+ } else {
+ pr_debug("%s: No Cross connection found\n", __func__);
+ }
+
+done:
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ /* Set the MUX selection to Auto */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, MUX_CTL_AUTO);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+
+ /* Restore ADC Enable */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, adc_en);
+
+ /* Restore ADC mode */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, adc_mode);
+
+ /* Restore FSM state */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, fsm_en);
+
+ /* Restore electrical detection */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, elect_ctl);
+
+ pr_debug("%s: leave, plug type: %d\n", __func__, plug_type);
+
+	return ret ? ret : (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP);
+}
+
+static bool wcd_mbhc_adc_check_for_spl_headset(struct wcd_mbhc *mbhc,
+ int *spl_hs_cnt)
+{
+ bool spl_hs = false;
+ int output_mv = 0;
+ int adc_threshold = 0, adc_hph_threshold = 0;
+
+ pr_debug("%s: enter\n", __func__);
+ if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+ goto exit;
+
+ /* Bump up MB2 to 2.7V */
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+ mbhc->mbhc_cfg->mbhc_micbias, true);
+ usleep_range(10000, 10100);
+
+ /*
+	 * Use ADC single mode to minimize the chance of missing a button
+	 * press/release for HEADSET type during the correct-plug work.
+ */
+ output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+ adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+ wcd_mbhc_get_micbias(mbhc))/WCD_MBHC_ADC_MICBIAS_MV);
+ adc_hph_threshold = ((WCD_MBHC_ADC_HPH_THRESHOLD_MV *
+ wcd_mbhc_get_micbias(mbhc))/
+ WCD_MBHC_ADC_MICBIAS_MV);
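+	/* Worked example (assuming micbias reads back 2700 mV after the
+	 * bump): adc_threshold = 1700 * 2700 / 1800 = 2550 mV.
+	 */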
+
+ if (output_mv > adc_threshold || output_mv < adc_hph_threshold) {
+ spl_hs = false;
+ } else {
+ spl_hs = true;
+ if (spl_hs_cnt)
+ *spl_hs_cnt += 1;
+ }
+
+	/* MB2 back to 1.8 V if the type is not a special headset */
+ if (spl_hs_cnt && (*spl_hs_cnt != WCD_MBHC_SPL_HS_CNT)) {
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+ mbhc->mbhc_cfg->mbhc_micbias, false);
+ /* Add 10ms delay for micbias to settle */
+ usleep_range(10000, 10100);
+ }
+
+ if (spl_hs)
+ pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
+
+exit:
+ pr_debug("%s: leave\n", __func__);
+ return spl_hs;
+}
+
+static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
+{
+ int delay = 0;
+ bool ret = false;
+ bool is_spl_hs = false;
+ int output_mv = 0;
+ int adc_threshold = 0;
+
+ /*
+	 * Increase micbias to 2.7 V to detect headsets with a
+	 * threshold on the microphone
+ */
+ if (mbhc->mbhc_cb->mbhc_micbias_control &&
+ !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+ pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
+ __func__);
+ return false;
+ } else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+ ret = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+ MIC_BIAS_2, true);
+ if (ret) {
+ pr_err("%s: mbhc_micb_ctrl_thr_mic failed, ret: %d\n",
+ __func__, ret);
+ return false;
+ }
+ }
+
+ adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+ wcd_mbhc_get_micbias(mbhc)) /
+ WCD_MBHC_ADC_MICBIAS_MV);
+
+ while (!is_spl_hs) {
+ if (mbhc->hs_detect_work_stop) {
+ pr_debug("%s: stop requested: %d\n", __func__,
+ mbhc->hs_detect_work_stop);
+ break;
+ }
+ delay += 50;
+ /* Wait for 50ms for FSM to update result */
+ msleep(50);
+ output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+ if (output_mv <= adc_threshold) {
+ pr_debug("%s: Special headset detected in %d msecs\n",
+ __func__, delay);
+ is_spl_hs = true;
+ }
+
+ if (delay == SPECIAL_HS_DETECT_TIME_MS) {
+ pr_debug("%s: Spl headset not found in 2 sec\n",
+ __func__);
+ break;
+ }
+ }
+ if (is_spl_hs) {
+ pr_debug("%s: Headset with threshold found\n", __func__);
+ mbhc->micbias_enable = true;
+ ret = true;
+ }
+ if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+ !mbhc->micbias_enable)
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec, MIC_BIAS_2,
+ false);
+ pr_debug("%s: leave, micb_enable: %d\n", __func__,
+ mbhc->micbias_enable);
+
+ return ret;
+}
+
+static void wcd_mbhc_adc_update_fsm_source(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type)
+{
+ bool micbias2;
+
+ micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+ MIC_BIAS_2);
+ switch (plug_type) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
+ case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+ if (!mbhc->is_hs_recording && !micbias2)
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+ break;
+ default:
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+ break;
+
+ };
+}
+
+/* should be called from interrupt context that holds suspend */
+static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
+ struct work_struct *work)
+{
+ pr_debug("%s: scheduling correct_swch_plug\n", __func__);
+ WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+ mbhc->hs_detect_work_stop = false;
+ mbhc->mbhc_cb->lock_sleep(mbhc, true);
+ schedule_work(work);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
+ struct work_struct *work)
+{
+ pr_debug("%s: Canceling correct_plug_swch\n", __func__);
+ mbhc->hs_detect_work_stop = true;
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ if (cancel_work_sync(work)) {
+ pr_debug("%s: correct_plug_swch is canceled\n",
+ __func__);
+ mbhc->mbhc_cb->lock_sleep(mbhc, false);
+ }
+ WCD_MBHC_RSC_LOCK(mbhc);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_mbhc_adc_detect_plug_type(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_codec *codec = mbhc->codec;
+
+ pr_debug("%s: enter\n", __func__);
+ WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+ if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+ mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
+
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+
+ if (mbhc->mbhc_cb->mbhc_micbias_control) {
+ mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+ MICB_ENABLE);
+ } else {
+		pr_err("%s: no mic bias control callback\n", __func__);
+ return;
+ }
+
+ /* Re-initialize button press completion object */
+ reinit_completion(&mbhc->btn_press_compl);
+ wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+ pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd_micbias_disable(struct wcd_mbhc *mbhc)
+{
+ if (mbhc->micbias_enable) {
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+ mbhc->codec, MIC_BIAS_2, false);
+ if (mbhc->mbhc_cb->set_micbias_value)
+ mbhc->mbhc_cb->set_micbias_value(
+ mbhc->codec);
+ mbhc->micbias_enable = false;
+ }
+}
+
+static int wcd_mbhc_get_plug_from_adc(int adc_result)
+{
+ enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+
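+	/* One interpretation of the thresholds above: below 75 mV the
+	 * mic line is pulled to ground (3-pole headphone), above
+	 * 1700 mV it floats (high impedance / open), and the band in
+	 * between reads a microphone (4-pole headset).
+	 */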
+ if (adc_result < WCD_MBHC_ADC_HPH_THRESHOLD_MV)
+ plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+ else if (adc_result > WCD_MBHC_ADC_HS_THRESHOLD_MV)
+ plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+ else
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+	pr_debug("%s: plug type found: %d\n", __func__, plug_type);
+
+ return plug_type;
+}
+
+static void wcd_correct_swch_plug(struct work_struct *work)
+{
+ struct wcd_mbhc *mbhc;
+ struct snd_soc_codec *codec;
+ enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+ unsigned long timeout;
+ bool wrk_complete = false;
+ int pt_gnd_mic_swap_cnt = 0;
+ int no_gnd_mic_swap_cnt = 0;
+ bool is_pa_on = false, spl_hs = false, spl_hs_reported = false;
+ int ret = 0;
+ int spl_hs_count = 0;
+ int output_mv = 0;
+ int cross_conn;
+ int try = 0;
+
+ pr_debug("%s: enter\n", __func__);
+
+ mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
+ codec = mbhc->codec;
+
+ WCD_MBHC_RSC_LOCK(mbhc);
+ /* Mask ADC COMPLETE interrupt */
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+
+ /* Check for cross connection */
+ do {
+ cross_conn = wcd_check_cross_conn(mbhc);
+ try++;
+ } while (try < GND_MIC_SWAP_THRESHOLD);
+
+ if (cross_conn > 0) {
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ pr_debug("%s: cross connection found, Plug type %d\n",
+ __func__, plug_type);
+ goto correct_plug_type;
+ }
+ /* Find plug type */
+ output_mv = wcd_measure_adc_continuous(mbhc);
+ plug_type = wcd_mbhc_get_plug_from_adc(output_mv);
+
+ /*
+ * Report the plug type if it is either a headset or a headphone;
+ * otherwise start the 3 sec correction loop.
+ */
+ if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
+ plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
+ (!wcd_swch_level_remove(mbhc))) {
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
+
+ /*
+ * Set DETECTION_DONE bit for HEADSET and ANC_HEADPHONE,
+ * so that btn press/release interrupt can be generated.
+ */
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET ||
+ mbhc->current_plug == MBHC_PLUG_TYPE_ANC_HEADPHONE) {
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 1);
+ }
+
+correct_plug_type:
+ timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+ while (!time_after(jiffies, timeout)) {
+ if (mbhc->hs_detect_work_stop) {
+ pr_debug("%s: stop requested: %d\n", __func__,
+ mbhc->hs_detect_work_stop);
+ wcd_micbias_disable(mbhc);
+ goto exit;
+ }
+
+ /* allow some time and then re-check the stop request again */
+ msleep(20);
+ if (mbhc->hs_detect_work_stop) {
+ pr_debug("%s: stop requested: %d\n", __func__,
+ mbhc->hs_detect_work_stop);
+ wcd_micbias_disable(mbhc);
+ goto exit;
+ }
+
+ msleep(180);
+ /*
+ * Use ADC single mode to minimize the chance of missing a button
+ * press/release for the HEADSET type during the correct work.
+ */
+ output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+
+ /*
+ * Instead of hogging the system with continuous polling, wait a
+ * while and then re-check the stop request.
+ */
+ plug_type = wcd_mbhc_get_plug_from_adc(output_mv);
+
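+ /*
+ * Readings above the HS threshold may still be a special
+ * (high-threshold) headset; probe a bounded number of times and,
+ * once confirmed, clamp the reading so the rest of the loop treats
+ * it as a headset.
+ */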
+ if ((output_mv > WCD_MBHC_ADC_HS_THRESHOLD_MV) &&
+ (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
+ spl_hs = wcd_mbhc_adc_check_for_spl_headset(mbhc,
+ &spl_hs_count);
+
+ if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+ output_mv = WCD_MBHC_ADC_HS_THRESHOLD_MV;
+ spl_hs = true;
+ mbhc->micbias_enable = true;
+ }
+ }
+
+ if (mbhc->mbhc_cb->hph_pa_on_status)
+ is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec);
+
+ if ((output_mv <= WCD_MBHC_ADC_HS_THRESHOLD_MV) &&
+ (!is_pa_on)) {
+ /* Check for cross connection*/
+ ret = wcd_check_cross_conn(mbhc);
+ if (ret < 0)
+ continue;
+ else if (ret > 0) {
+ pt_gnd_mic_swap_cnt++;
+ no_gnd_mic_swap_cnt = 0;
+ if (pt_gnd_mic_swap_cnt <
+ GND_MIC_SWAP_THRESHOLD) {
+ continue;
+ } else if (pt_gnd_mic_swap_cnt >
+ GND_MIC_SWAP_THRESHOLD) {
+ /*
+ * The GND/MIC switch did not work;
+ * report an unsupported plug.
+ */
+ pr_debug("%s: switch did not work\n",
+ __func__);
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ goto report;
+ } else {
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ }
+ } else {
+ no_gnd_mic_swap_cnt++;
+ pt_gnd_mic_swap_cnt = 0;
+ plug_type = wcd_mbhc_get_plug_from_adc(
+ output_mv);
+ if ((no_gnd_mic_swap_cnt <
+ GND_MIC_SWAP_THRESHOLD) &&
+ (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
+ continue;
+ } else {
+ no_gnd_mic_swap_cnt = 0;
+ }
+ }
+ if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
+ (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+ /*
+ * if switch is toggled, check again,
+ * otherwise report unsupported plug
+ */
+ if (mbhc->mbhc_cfg->swap_gnd_mic &&
+ mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+ pr_debug("%s: US_EU gpio present,flip switch\n"
+ , __func__);
+ continue;
+ }
+ }
+ }
+
+ if (output_mv > WCD_MBHC_ADC_HS_THRESHOLD_MV) {
+ pr_debug("%s: cable is extension cable\n", __func__);
+ plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+ wrk_complete = true;
+ } else {
+ pr_debug("%s: cable might be headset: %d\n", __func__,
+ plug_type);
+ if (plug_type != MBHC_PLUG_TYPE_GND_MIC_SWAP) {
+ plug_type = wcd_mbhc_get_plug_from_adc(
+ output_mv);
+ if (!spl_hs_reported &&
+ spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+ spl_hs_reported = true;
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_find_plug_and_report(mbhc,
+ plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ continue;
+ } else if (spl_hs_reported)
+ continue;
+ /*
+ * Report headset only if not already reported
+ * and if there is no button press pending
+ * release.
+ */
+ if ((mbhc->current_plug !=
+ MBHC_PLUG_TYPE_HEADSET) &&
+ (mbhc->current_plug !=
+ MBHC_PLUG_TYPE_ANC_HEADPHONE) &&
+ !wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: cable is %s headset\n",
+ __func__,
+ ((spl_hs_count ==
+ WCD_MBHC_SPL_HS_CNT) ?
+ "special ":""));
+ goto report;
+ }
+ }
+ wrk_complete = false;
+ }
+ }
+ if (!wrk_complete) {
+ /*
+ * If plug_type is headset, we might have already reported it
+ * either in detect_plug_type or in the above while loop; no
+ * need to report again.
+ */
+ if ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
+ (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE)) {
+ pr_debug("%s: plug_type:0x%x already reported\n",
+ __func__, mbhc->current_plug);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+ goto enable_supply;
+ }
+ }
+ if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH) {
+ if (wcd_is_special_headset(mbhc)) {
+ pr_debug("%s: Special headset found %d\n",
+ __func__, plug_type);
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+ } else {
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_ISRC_EN, 1);
+ }
+ }
+
+report:
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low\n", __func__);
+ goto exit;
+ }
+
+ pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
+ __func__, plug_type, wrk_complete,
+ mbhc->btn_press_intr);
+
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+enable_supply:
+ /*
+ * Set DETECTION_DONE bit for HEADSET and ANC_HEADPHONE,
+ * so that btn press/release interrupt can be generated.
+ * For other plug type, clear the bit.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADSET ||
+ plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE)
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 1);
+ else
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ wcd_mbhc_adc_update_fsm_source(mbhc, plug_type);
+exit:
+ if (mbhc->mbhc_cb->mbhc_micbias_control &&
+ !mbhc->micbias_enable)
+ mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+ MICB_DISABLE);
+
+ /*
+ * If plug type is corrected from special headset to headphone,
+ * clear the micbias enable flag, set micbias back to 1.8V and
+ * disable micbias.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADPHONE &&
+ mbhc->micbias_enable) {
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ mbhc->mbhc_cb->mbhc_micbias_control(
+ codec, MIC_BIAS_2,
+ MICB_DISABLE);
+ if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+ codec,
+ MIC_BIAS_2, false);
+ if (mbhc->mbhc_cb->set_micbias_value) {
+ mbhc->mbhc_cb->set_micbias_value(codec);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+ }
+ mbhc->micbias_enable = false;
+ }
+
+ if (mbhc->mbhc_cfg->detect_extn_cable &&
+ ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
+ (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
+ !mbhc->hs_detect_work_stop) {
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
+
+ /*
+ * Enable the ADC COMPLETE interrupt for HEADPHONE.
+ * A button release may arrive after the correct work completes;
+ * the ADC COMPLETE interrupt must be captured then to correct the
+ * plug type.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+ true);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
+
+ if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+ mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
+
+ mbhc->mbhc_cb->lock_sleep(mbhc, false);
+ pr_debug("%s: leave\n", __func__);
+}
+
+static irqreturn_t wcd_mbhc_adc_hs_rem_irq(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ unsigned long timeout;
+ int adc_threshold, output_mv, retry = 0;
+
+ pr_debug("%s: enter\n", __func__);
+ WCD_MBHC_RSC_LOCK(mbhc);
+
+ timeout = jiffies +
+ msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
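+ /*
+ * Scale the HS threshold by the actual micbias voltage so the
+ * fake-removal check holds for any configured micbias level.
+ */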
+ adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+ wcd_mbhc_get_micbias(mbhc)) /
+ WCD_MBHC_ADC_MICBIAS_MV);
+ do {
+ retry++;
+ /*
+ * read output_mv every 10ms to look for
+ * any change in IN2_P
+ */
+ usleep_range(10000, 10100);
+ output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+
+ pr_debug("%s: Check for fake removal: output_mv %d\n",
+ __func__, output_mv);
+ if ((output_mv <= adc_threshold) &&
+ retry > FAKE_REM_RETRY_ATTEMPTS) {
+ pr_debug("%s: headset is NOT actually removed\n",
+ __func__);
+ goto exit;
+ }
+ } while (!time_after(jiffies, timeout));
+
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low ", __func__);
+ goto exit;
+ }
+
+ /*
+ * ADC COMPLETE and ELEC_REM interrupts are both enabled for HEADPHONE,
+ * need to reject the ADC COMPLETE interrupt which follows ELEC_REM one
+ * when HEADPHONE is removed.
+ */
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE)
+ mbhc->extn_cable_hph_rem = true;
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
+ wcd_mbhc_elec_hs_report_unplug(mbhc);
+exit:
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_adc_hs_ins_irq(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+
+ pr_debug("%s: enter\n", __func__);
+
+ /*
+ * ADC COMPLETE and ELEC_REM interrupts are both enabled for HEADPHONE,
+ * need to reject the ADC COMPLETE interrupt which follows ELEC_REM one
+ * when HEADPHONE is removed.
+ */
+ if (mbhc->extn_cable_hph_rem) {
+ mbhc->extn_cable_hph_rem = false;
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ WCD_MBHC_RSC_LOCK(mbhc);
+ /*
+ * If current plug is headphone then there is no chance to
+ * get ADC complete interrupt, so connected cable should be
+ * headset not headphone.
+ */
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_DETECTION_DONE, 1);
+ wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ return IRQ_HANDLED;
+ }
+
+ if (!mbhc->mbhc_cfg->detect_extn_cable) {
+ pr_debug("%s: Returning as Extension cable feature not enabled\n",
+ __func__);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ return IRQ_HANDLED;
+ }
+
+ pr_debug("%s: Disable electrical headset insertion interrupt\n",
+ __func__);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_ISRC_EN, 0);
+ mbhc->is_extn_cable = true;
+ mbhc->btn_press_intr = false;
+ wcd_mbhc_adc_detect_plug_type(mbhc);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static struct wcd_mbhc_fn mbhc_fn = {
+ .wcd_mbhc_hs_ins_irq = wcd_mbhc_adc_hs_ins_irq,
+ .wcd_mbhc_hs_rem_irq = wcd_mbhc_adc_hs_rem_irq,
+ .wcd_mbhc_detect_plug_type = wcd_mbhc_adc_detect_plug_type,
+ .wcd_mbhc_detect_anc_plug_type = wcd_mbhc_adc_detect_anc_plug_type,
+ .wcd_cancel_hs_detect_plug = wcd_cancel_hs_detect_plug,
+};
+
+/* Function: wcd_mbhc_adc_init
+ * @mbhc: pointer to the MBHC structure
+ * Description: Initialize the ADC-based MBHC function pointers in the MBHC
+ * structure
+ */
+void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc)
+{
+ if (!mbhc) {
+ pr_err("%s: mbhc is NULL\n", __func__);
+ return;
+ }
+ mbhc->mbhc_fn = &mbhc_fn;
+ INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+}
+EXPORT_SYMBOL(wcd_mbhc_adc_init);
diff --git a/sound/soc/codecs/wcd-mbhc-adc.h b/sound/soc/codecs/wcd-mbhc-adc.h
new file mode 100644
index 0000000..112d508
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-adc.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_ADC_H__
+#define __WCD_MBHC_ADC_H__
+
+#include "wcd-mbhc-v2.h"
+
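+/* ADC input mux selections used by the MBHC ADC measurement routines */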
+enum wcd_mbhc_adc_mux_ctl {
+ MUX_CTL_AUTO = 0,
+ MUX_CTL_IN2P,
+ MUX_CTL_IN3P,
+ MUX_CTL_IN4P,
+ MUX_CTL_HPH_L,
+ MUX_CTL_HPH_R,
+ MUX_CTL_NONE,
+};
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
+void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc);
+#else
+static inline void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+#endif /* __WCD_MBHC_ADC_H__ */
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.c b/sound/soc/codecs/wcd-mbhc-legacy.c
new file mode 100644
index 0000000..83023bc
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-legacy.c
@@ -0,0 +1,974 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include "wcd-mbhc-legacy.h"
+#include "wcd-mbhc-v2.h"
+
+static int det_extn_cable_en;
+module_param(det_extn_cable_en, int, 0664);
+MODULE_PARM_DESC(det_extn_cable_en, "enable/disable extn cable detect");
+
+static bool wcd_mbhc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+{
+ bool anc_mic_found = false;
+ u16 val, hs_comp_res, btn_status = 0;
+ unsigned long retry = 0;
+ int valid_plug_cnt = 0, invalid_plug_cnt = 0;
+ int btn_status_cnt = 0;
+ bool is_check_btn_press = false;
+
+
+ if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
+ mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
+ return false;
+
+ if (!mbhc->mbhc_cb->mbhc_micbias_control)
+ return false;
+
+ WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, val);
+
+ if (val)
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+
+ mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+ mbhc->mbhc_cfg->anc_micbias,
+ MICB_ENABLE);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x2);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ /*
+ * Wait for the 20ms button debounce time. If a 4-pole plug is
+ * inserted into a 5-pole jack, a button press interrupt fires
+ * during ANC plug detection; in that case, even though hs_comp_res
+ * is 0, the plug should not be declared an ANC plug type.
+ */
+ usleep_range(20000, 20100);
+
+ /*
+ * After enabling the FSM, to handle slow insertion scenarios,
+ * check hs_comp_result a few times to see if the IN3 voltage
+ * is below the Vref.
+ */
+ do {
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low\n", __func__);
+ goto exit;
+ }
+ pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+ if (!hs_comp_res) {
+ valid_plug_cnt++;
+ is_check_btn_press = true;
+ } else
+ invalid_plug_cnt++;
+ /* Wait 1ms before taking another reading */
+ usleep_range(1000, 1100);
+
+ WCD_MBHC_REG_READ(WCD_MBHC_FSM_STATUS, btn_status);
+ if (btn_status)
+ btn_status_cnt++;
+
+ retry++;
+ } while (retry < ANC_DETECT_RETRY_CNT);
+
+ pr_debug("%s: valid: %d, invalid: %d, btn_status_cnt: %d\n",
+ __func__, valid_plug_cnt, invalid_plug_cnt, btn_status_cnt);
+
+ /* decision logic */
+ if ((valid_plug_cnt > invalid_plug_cnt) && is_check_btn_press &&
+ (btn_status_cnt == 0))
+ anc_mic_found = true;
+exit:
+ if (!val)
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
+
+ mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
+ mbhc->mbhc_cfg->anc_micbias,
+ MICB_DISABLE);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x0);
+ pr_debug("%s: anc mic %sfound\n", __func__,
+ anc_mic_found ? "" : "not ");
+ return anc_mic_found;
+}
+
+/* To determine if cross connection occurred */
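+/*
+ * Returns (per the code below): a negative errno when the mechanical
+ * switch already reports removal, non-zero when a GND/MIC cross
+ * connection is detected, and zero otherwise.
+ */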
+static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
+{
+ u16 swap_res = 0;
+ enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
+ s16 reg1 = 0;
+ bool hphl_sch_res = 0, hphr_sch_res = 0;
+
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If the PA is enabled, don't check for cross-connection */
+ if (mbhc->mbhc_cb->hph_pa_on_status)
+ if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
+ return false;
+
+ WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
+ /*
+ * Check if there is any cross connection,
+ * Micbias and schmitt trigger (HPHL-HPHR)
+ * needs to be enabled. For some codecs like wcd9335,
+ * pull-up will already be enabled when this function
+ * is called for cross-connection identification. No
+ * need to enable micbias in that case.
+ */
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 2);
+
+ WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, swap_res);
+ pr_debug("%s: swap_res%x\n", __func__, swap_res);
+
+ /*
+ * Read the HPHL and HPHR schmitt trigger results along with the
+ * cross-connection bit. Both bits are "0" when a cross connection
+ * is present; otherwise they stay at 1.
+ */
+ WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch_res);
+ WCD_MBHC_REG_READ(WCD_MBHC_HPHR_SCHMT_RESULT, hphr_sch_res);
+ if (!(hphl_sch_res || hphr_sch_res)) {
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ pr_debug("%s: Cross connection identified\n", __func__);
+ } else {
+ pr_debug("%s: No Cross connection found\n", __func__);
+ }
+
+ /* Disable schmitt trigger and restore micbias */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
+ pr_debug("%s: leave, plug type: %d\n", __func__, plug_type);
+
+ return (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) ? true : false;
+}
+
+static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_codec *codec = mbhc->codec;
+ int delay = 0, rc;
+ bool ret = false;
+ u16 hs_comp_res;
+ bool is_spl_hs = false;
+
+ /*
+ * Increase micbias to 2.7V to detect headsets with
+ * threshold on microphone
+ */
+ if (mbhc->mbhc_cb->mbhc_micbias_control &&
+ !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+ pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
+ __func__);
+ return false;
+ } else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+ rc = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec,
+ MIC_BIAS_2, true);
+ if (rc) {
+ pr_err("%s: Micbias control for thr mic failed, rc: %d\n",
+ __func__, rc);
+ return false;
+ }
+ }
+
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+ pr_debug("%s: special headset, start register writes\n", __func__);
+
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+ while (!is_spl_hs) {
+ if (mbhc->hs_detect_work_stop) {
+ pr_debug("%s: stop requested: %d\n", __func__,
+ mbhc->hs_detect_work_stop);
+ break;
+ }
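+ /*
+ * Each pass sleeps roughly 100ms in total while 'delay' advances
+ * by 50, hence the elapsed time reported below is (delay * 2).
+ */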
+ delay = delay + 50;
+ if (mbhc->mbhc_cb->mbhc_common_micb_ctrl) {
+ mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+ MBHC_COMMON_MICB_PRECHARGE,
+ true);
+ mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+ MBHC_COMMON_MICB_SET_VAL,
+ true);
+ }
+ /* Wait for 50msec for MICBIAS to settle down */
+ msleep(50);
+ if (mbhc->mbhc_cb->set_auto_zeroing)
+ mbhc->mbhc_cb->set_auto_zeroing(codec, true);
+ /* Wait for 50msec for FSM to update result values */
+ msleep(50);
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+ if (!(hs_comp_res)) {
+ pr_debug("%s: Special headset detected in %d msecs\n",
+ __func__, (delay * 2));
+ is_spl_hs = true;
+ }
+ if (delay == SPECIAL_HS_DETECT_TIME_MS) {
+ pr_debug("%s: Spl headset didn't get detect in 4 sec\n",
+ __func__);
+ break;
+ }
+ }
+ if (is_spl_hs) {
+ pr_debug("%s: Headset with threshold found\n", __func__);
+ mbhc->micbias_enable = true;
+ ret = true;
+ }
+ if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
+ mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
+ MBHC_COMMON_MICB_PRECHARGE,
+ false);
+ if (mbhc->mbhc_cb->set_micbias_value && !mbhc->micbias_enable)
+ mbhc->mbhc_cb->set_micbias_value(codec);
+ if (mbhc->mbhc_cb->set_auto_zeroing)
+ mbhc->mbhc_cb->set_auto_zeroing(codec, false);
+
+ if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+ !mbhc->micbias_enable)
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec, MIC_BIAS_2,
+ false);
+
+ pr_debug("%s: leave, micb_enable: %d\n", __func__,
+ mbhc->micbias_enable);
+ return ret;
+}
+
+static void wcd_mbhc_update_fsm_source(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type)
+{
+ bool micbias2;
+
+ micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+ MIC_BIAS_2);
+ switch (plug_type) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
+ case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+ if (!mbhc->is_hs_recording && !micbias2)
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
+ break;
+ default:
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+ break;
+ }
+}
+
+static void wcd_enable_mbhc_supply(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type)
+{
+
+ struct snd_soc_codec *codec = mbhc->codec;
+
+ /*
+ * Do not disable micbias if recording is in progress or if a
+ * headset is inserted on the other side of the extension cable.
+ * If a headset has been detected, the current source needs to be
+ * kept enabled for button detection to work. If the accessory
+ * type is invalid or unsupported, neither needs to be enabled.
+ */
+ if (det_extn_cable_en && mbhc->is_extn_cable &&
+ mbhc->mbhc_cb && mbhc->mbhc_cb->extn_use_mb &&
+ mbhc->mbhc_cb->extn_use_mb(codec)) {
+ if (plug_type == MBHC_PLUG_TYPE_HEADPHONE ||
+ plug_type == MBHC_PLUG_TYPE_HEADSET)
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+ } else {
+ if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
+ if (mbhc->is_hs_recording || mbhc->micbias_enable) {
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+ } else if ((test_bit(WCD_MBHC_EVENT_PA_HPHL,
+ &mbhc->event_state)) ||
+ (test_bit(WCD_MBHC_EVENT_PA_HPHR,
+ &mbhc->event_state))) {
+ wcd_enable_curr_micbias(mbhc,
+ WCD_MBHC_EN_PULLUP);
+ } else {
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+ }
+ } else if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
+ } else {
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
+ }
+ }
+}
+
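+/*
+ * Special (high-threshold) headset check, as implemented below: read
+ * the comparator at the default micbias, bump MB2 to 2.7V and re-read;
+ * a comparator that trips at the lower level but clears at 2.7V counts
+ * toward the special-headset detection count.
+ */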
+static bool wcd_mbhc_check_for_spl_headset(struct wcd_mbhc *mbhc,
+ int *spl_hs_cnt)
+{
+ u16 hs_comp_res_1_8v = 0, hs_comp_res_2_7v = 0;
+ bool spl_hs = false;
+
+ if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+ goto done;
+
+ if (!spl_hs_cnt) {
+ pr_err("%s: spl_hs_cnt is NULL\n", __func__);
+ goto done;
+ }
+ /* Read back hs_comp_res @ 1.8v Micbias */
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_1_8v);
+ if (!hs_comp_res_1_8v) {
+ spl_hs = false;
+ goto done;
+ }
+
+ /* Bump up MB2 to 2.7v */
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+ mbhc->mbhc_cfg->mbhc_micbias, true);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ usleep_range(10000, 10100);
+
+ /* Read back HS_COMP_RESULT */
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_2_7v);
+ if (!hs_comp_res_2_7v && hs_comp_res_1_8v)
+ spl_hs = true;
+
+ if (spl_hs)
+ *spl_hs_cnt += 1;
+
+ /* MB2 back to 1.8v */
+ if (*spl_hs_cnt != WCD_MBHC_SPL_HS_CNT) {
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+ mbhc->mbhc_cfg->mbhc_micbias, false);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ usleep_range(10000, 10100);
+ }
+
+ if (spl_hs)
+ pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
+
+done:
+ return spl_hs;
+}
+
+/* should be called from interrupt context that holds the suspend (wakeup) lock */
+static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
+ struct work_struct *work)
+{
+ pr_debug("%s: scheduling correct_swch_plug\n", __func__);
+ WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+ mbhc->hs_detect_work_stop = false;
+ mbhc->mbhc_cb->lock_sleep(mbhc, true);
+ schedule_work(work);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
+ struct work_struct *work)
+{
+ pr_debug("%s: Canceling correct_plug_swch\n", __func__);
+ mbhc->hs_detect_work_stop = true;
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ if (cancel_work_sync(work)) {
+ pr_debug("%s: correct_plug_swch is canceled\n",
+ __func__);
+ mbhc->mbhc_cb->lock_sleep(mbhc, false);
+ }
+ WCD_MBHC_RSC_LOCK(mbhc);
+}
+
+/* called under codec_resource_lock acquisition */
+static void wcd_mbhc_detect_plug_type(struct wcd_mbhc *mbhc)
+{
+ struct snd_soc_codec *codec = mbhc->codec;
+ bool micbias1 = false;
+
+ pr_debug("%s: enter\n", __func__);
+ WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
+
+ if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+ mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
+
+ if (mbhc->mbhc_cb->micbias_enable_status)
+ micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+ MIC_BIAS_1);
+
+ if (mbhc->mbhc_cb->set_cap_mode)
+ mbhc->mbhc_cb->set_cap_mode(codec, micbias1, true);
+
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+ MICB_ENABLE);
+ else
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+ /* Re-initialize button press completion object */
+ reinit_completion(&mbhc->btn_press_compl);
+ wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+ pr_debug("%s: leave\n", __func__);
+}
+
+static void wcd_correct_swch_plug(struct work_struct *work)
+{
+ struct wcd_mbhc *mbhc;
+ struct snd_soc_codec *codec;
+ enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
+ unsigned long timeout;
+ u16 hs_comp_res = 0, hphl_sch = 0, mic_sch = 0, btn_result = 0;
+ bool wrk_complete = false;
+ int pt_gnd_mic_swap_cnt = 0;
+ int no_gnd_mic_swap_cnt = 0;
+ bool is_pa_on = false, spl_hs = false, spl_hs_reported = false;
+ bool micbias2 = false;
+ bool micbias1 = false;
+ int ret = 0;
+ int rc, spl_hs_count = 0;
+ int cross_conn;
+ int try = 0;
+
+ pr_debug("%s: enter\n", __func__);
+
+ mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
+ codec = mbhc->codec;
+
+ /*
+ * Enable micbias/pullup for detection in the correct work.
+ * This work is scheduled from detect_plug_type, which will have
+ * already requested the pullup/micbias. If the pullup/micbias is
+ * ref-counted by the individual codec drivers, there is no need
+ * to enable micbias/pullup here.
+ */
+
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
+
+ /* Enable HW FSM */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ /*
+ * Check for any button press interrupts before starting 3-sec
+ * loop.
+ */
+ rc = wait_for_completion_timeout(&mbhc->btn_press_compl,
+ msecs_to_jiffies(WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS));
+
+ WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
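+ /*
+ * Initial guess from the two result bits (see the branches below):
+ * with no button press interrupt, clear btn/comparator results
+ * suggest a headset while a set comparator result suggests a
+ * high-impedance plug; with a button press interrupt, clear results
+ * suggest a headphone, presumably because a 3-pole plug can look
+ * like a held button.
+ */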
+ if (!rc) {
+ pr_debug("%s No btn press interrupt\n", __func__);
+ if (!btn_result && !hs_comp_res)
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+ else if (!btn_result && hs_comp_res)
+ plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+ else
+ plug_type = MBHC_PLUG_TYPE_INVALID;
+ } else {
+ if (!btn_result && !hs_comp_res)
+ plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+ else
+ plug_type = MBHC_PLUG_TYPE_INVALID;
+ }
+
+ /*
+ * Check for cross connection up to 4 times;
+ * the result of the final iteration is used.
+ */
+ do {
+ cross_conn = wcd_check_cross_conn(mbhc);
+ try++;
+ } while (try < GND_MIC_SWAP_THRESHOLD);
+
+ if (cross_conn > 0) {
+ pr_debug("%s: cross con found, start polling\n",
+ __func__);
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ pr_debug("%s: Plug found, plug type is %d\n",
+ __func__, plug_type);
+ goto correct_plug_type;
+ }
+
+ if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
+ plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
+ (!wcd_swch_level_remove(mbhc))) {
+ WCD_MBHC_RSC_LOCK(mbhc);
+ if (mbhc->current_plug == MBHC_PLUG_TYPE_HIGH_HPH)
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
+ 0);
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
+
+correct_plug_type:
+
+ timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
+ while (!time_after(jiffies, timeout)) {
+ if (mbhc->hs_detect_work_stop) {
+ pr_debug("%s: stop requested: %d\n", __func__,
+ mbhc->hs_detect_work_stop);
+ wcd_enable_curr_micbias(mbhc,
+ WCD_MBHC_EN_NONE);
+ if (mbhc->micbias_enable) {
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+ mbhc->codec, MIC_BIAS_2, false);
+ if (mbhc->mbhc_cb->set_micbias_value)
+ mbhc->mbhc_cb->set_micbias_value(
+ mbhc->codec);
+ mbhc->micbias_enable = false;
+ }
+ goto exit;
+ }
+ if (mbhc->btn_press_intr) {
+ wcd_cancel_btn_work(mbhc);
+ mbhc->btn_press_intr = false;
+ }
+ /* Toggle FSM */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+
+ /* allow some time and then re-check the stop request again */
+ msleep(20);
+ if (mbhc->hs_detect_work_stop) {
+ pr_debug("%s: stop requested: %d\n", __func__,
+ mbhc->hs_detect_work_stop);
+ wcd_enable_curr_micbias(mbhc,
+ WCD_MBHC_EN_NONE);
+ if (mbhc->micbias_enable) {
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+ mbhc->codec, MIC_BIAS_2, false);
+ if (mbhc->mbhc_cb->set_micbias_value)
+ mbhc->mbhc_cb->set_micbias_value(
+ mbhc->codec);
+ mbhc->micbias_enable = false;
+ }
+ goto exit;
+ }
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+
+ pr_debug("%s: hs_comp_res: %x\n", __func__, hs_comp_res);
+ if (mbhc->mbhc_cb->hph_pa_on_status)
+ is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
+
+ /*
+ * Instead of hogging the system with continuous polling, wait a
+ * while and then re-check the stop request.
+ */
+ msleep(180);
+ if (hs_comp_res && (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
+ spl_hs = wcd_mbhc_check_for_spl_headset(mbhc,
+ &spl_hs_count);
+
+ if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+ hs_comp_res = 0;
+ spl_hs = true;
+ mbhc->micbias_enable = true;
+ }
+ }
+
+ if ((!hs_comp_res) && (!is_pa_on)) {
+ /* Check for cross connection*/
+ ret = wcd_check_cross_conn(mbhc);
+ if (ret < 0) {
+ continue;
+ } else if (ret > 0) {
+ pt_gnd_mic_swap_cnt++;
+ no_gnd_mic_swap_cnt = 0;
+ if (pt_gnd_mic_swap_cnt <
+ GND_MIC_SWAP_THRESHOLD) {
+ continue;
+ } else if (pt_gnd_mic_swap_cnt >
+ GND_MIC_SWAP_THRESHOLD) {
+ /*
+ * The GND/MIC switch did not work;
+ * report an unsupported plug.
+ */
+ pr_debug("%s: switch didn't work\n",
+ __func__);
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ goto report;
+ } else {
+ plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
+ }
+ } else {
+ no_gnd_mic_swap_cnt++;
+ pt_gnd_mic_swap_cnt = 0;
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+ if ((no_gnd_mic_swap_cnt <
+ GND_MIC_SWAP_THRESHOLD) &&
+ (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
+ continue;
+ } else {
+ no_gnd_mic_swap_cnt = 0;
+ }
+ }
+ if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
+ (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+ /*
+ * if switch is toggled, check again,
+ * otherwise report unsupported plug
+ */
+ if (mbhc->mbhc_cfg->swap_gnd_mic &&
+ mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+ pr_debug("%s: US_EU gpio present,flip switch\n"
+ , __func__);
+ continue;
+ }
+ }
+ }
+
+ WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+ WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+ if (hs_comp_res && !(hphl_sch || mic_sch)) {
+ pr_debug("%s: cable is extension cable\n", __func__);
+ plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
+ wrk_complete = true;
+ } else {
+ pr_debug("%s: cable might be headset: %d\n", __func__,
+ plug_type);
+ if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+ if (!spl_hs_reported &&
+ spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+ spl_hs_reported = true;
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_find_plug_and_report(mbhc,
+ plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ continue;
+ } else if (spl_hs_reported)
+ continue;
+ /*
+ * Report headset only if not already reported
+ * and if there is no button press pending
+ * release.
+ */
+ if (((mbhc->current_plug !=
+ MBHC_PLUG_TYPE_HEADSET) &&
+ (mbhc->current_plug !=
+ MBHC_PLUG_TYPE_ANC_HEADPHONE)) &&
+ !wcd_swch_level_remove(mbhc) &&
+ !mbhc->btn_press_intr) {
+ pr_debug("%s: cable is %sheadset\n",
+ __func__,
+ ((spl_hs_count ==
+ WCD_MBHC_SPL_HS_CNT) ?
+ "special ":""));
+ goto report;
+ }
+ }
+ wrk_complete = false;
+ }
+ }
+ if (!wrk_complete && mbhc->btn_press_intr) {
+ pr_debug("%s: Can be slow insertion of headphone\n", __func__);
+ wcd_cancel_btn_work(mbhc);
+ plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+ }
+ /*
+ * If plug_type is headset, we might have already reported it either
+ * in detect_plug_type or in the above while loop; no need to report
+ * again.
+ */
+ if (!wrk_complete && ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
+ (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE))) {
+ pr_debug("%s: plug_type:0x%x already reported\n",
+ __func__, mbhc->current_plug);
+ goto enable_supply;
+ }
+
+ if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH &&
+ (!det_extn_cable_en)) {
+ if (wcd_is_special_headset(mbhc)) {
+ pr_debug("%s: Special headset found %d\n",
+ __func__, plug_type);
+ plug_type = MBHC_PLUG_TYPE_HEADSET;
+ goto report;
+ }
+ }
+
+report:
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low\n", __func__);
+ goto exit;
+ }
+ if (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP && mbhc->btn_press_intr) {
+ pr_debug("%s: insertion of headphone with swap\n", __func__);
+ wcd_cancel_btn_work(mbhc);
+ plug_type = MBHC_PLUG_TYPE_HEADPHONE;
+ }
+ pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
+ __func__, plug_type, wrk_complete,
+ mbhc->btn_press_intr);
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_find_plug_and_report(mbhc, plug_type);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+enable_supply:
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ wcd_mbhc_update_fsm_source(mbhc, plug_type);
+ else
+ wcd_enable_mbhc_supply(mbhc, plug_type);
+exit:
+ if (mbhc->mbhc_cb->mbhc_micbias_control &&
+ !mbhc->micbias_enable)
+ mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
+ MICB_DISABLE);
+
+ /*
+ * If plug type is corrected from special headset to headphone,
+ * clear the micbias enable flag, set micbias back to 1.8V and
+ * disable micbias.
+ */
+ if (plug_type == MBHC_PLUG_TYPE_HEADPHONE &&
+ mbhc->micbias_enable) {
+ if (mbhc->mbhc_cb->mbhc_micbias_control)
+ mbhc->mbhc_cb->mbhc_micbias_control(
+ codec, MIC_BIAS_2,
+ MICB_DISABLE);
+ if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+ mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+ codec,
+ MIC_BIAS_2, false);
+ if (mbhc->mbhc_cb->set_micbias_value) {
+ mbhc->mbhc_cb->set_micbias_value(codec);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+ }
+ mbhc->micbias_enable = false;
+ }
+
+ if (mbhc->mbhc_cb->micbias_enable_status) {
+ micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+ MIC_BIAS_1);
+ micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
+ MIC_BIAS_2);
+ }
+
+ if (mbhc->mbhc_cfg->detect_extn_cable &&
+ ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
+ (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
+ !mbhc->hs_detect_work_stop) {
+ WCD_MBHC_RSC_LOCK(mbhc);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
+ if (mbhc->mbhc_cb->set_cap_mode)
+ mbhc->mbhc_cb->set_cap_mode(codec, micbias1, micbias2);
+
+ if (mbhc->mbhc_cb->hph_pull_down_ctrl)
+ mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
+
+ mbhc->mbhc_cb->lock_sleep(mbhc, false);
+ pr_debug("%s: leave\n", __func__);
+}
+
+static irqreturn_t wcd_mbhc_hs_rem_irq(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ u8 hs_comp_result = 0, hphl_sch = 0, mic_sch = 0;
+ static u16 hphl_trigerred;
+ static u16 mic_trigerred;
+ unsigned long timeout;
+ bool removed = true;
+ int retry = 0;
+
+ pr_debug("%s: enter\n", __func__);
+
+ WCD_MBHC_RSC_LOCK(mbhc);
+
+ timeout = jiffies +
+ msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
+ do {
+ retry++;
+ /*
+ * read the result register every 10ms to look for
+ * any change in HS_COMP_RESULT bit
+ */
+ usleep_range(10000, 10100);
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
+ pr_debug("%s: Check result reg for fake removal: hs_comp_res %x\n",
+ __func__, hs_comp_result);
+ if ((!hs_comp_result) &&
+ retry > FAKE_REM_RETRY_ATTEMPTS) {
+ removed = false;
+ break;
+ }
+ } while (!time_after(jiffies, timeout));
+
+ if (wcd_swch_level_remove(mbhc)) {
+ pr_debug("%s: Switch level is low ", __func__);
+ goto exit;
+ }
+ pr_debug("%s: headset %s actually removed\n", __func__,
+ removed ? "" : "not ");
+
+ WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+ WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+ WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
+
+ if (removed) {
+ if (!(hphl_sch && mic_sch && hs_comp_result)) {
+ /*
+ * extension cable is still plugged in
+ * report it as LINEOUT device
+ */
+ goto report_unplug;
+ } else {
+ if (!mic_sch) {
+ mic_trigerred++;
+ pr_debug("%s: Removal MIC trigerred %d\n",
+ __func__, mic_trigerred);
+ }
+ if (!hphl_sch) {
+ hphl_trigerred++;
+ pr_debug("%s: Removal HPHL trigerred %d\n",
+ __func__, hphl_trigerred);
+ }
+ if (mic_trigerred && hphl_trigerred) {
+ /*
+ * extension cable is still plugged in
+ * report it as LINEOUT device
+ */
+ goto report_unplug;
+ }
+ }
+ }
+exit:
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+
+report_unplug:
+ wcd_mbhc_elec_hs_report_unplug(mbhc);
+ hphl_trigerred = 0;
+ mic_trigerred = 0;
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcd_mbhc_hs_ins_irq(int irq, void *data)
+{
+ struct wcd_mbhc *mbhc = data;
+ bool detection_type = 0, hphl_sch = 0, mic_sch = 0;
+ u16 elect_result = 0;
+ static u16 hphl_trigerred;
+ static u16 mic_trigerred;
+
+ pr_debug("%s: enter\n", __func__);
+ if (!mbhc->mbhc_cfg->detect_extn_cable) {
+ pr_debug("%s: Returning as Extension cable feature not enabled\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+ WCD_MBHC_RSC_LOCK(mbhc);
+
+ WCD_MBHC_REG_READ(WCD_MBHC_ELECT_DETECTION_TYPE, detection_type);
+ WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, elect_result);
+
+ pr_debug("%s: detection_type %d, elect_result %x\n", __func__,
+ detection_type, elect_result);
+ if (detection_type) {
+ /* check if both Left and MIC Schmitt triggers are triggered */
+ WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
+ WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
+ if (hphl_sch && mic_sch) {
+ /* Go for plug type determination */
+ pr_debug("%s: Go for plug type determination\n",
+ __func__);
+ goto determine_plug;
+
+ } else {
+ if (mic_sch) {
+ mic_trigerred++;
+ pr_debug("%s: Insertion MIC trigerred %d\n",
+ __func__, mic_trigerred);
+ WCD_MBHC_REG_UPDATE_BITS(
+ WCD_MBHC_ELECT_SCHMT_ISRC,
+ 0);
+ msleep(20);
+ WCD_MBHC_REG_UPDATE_BITS(
+ WCD_MBHC_ELECT_SCHMT_ISRC,
+ 1);
+ }
+ if (hphl_sch) {
+ hphl_trigerred++;
+ pr_debug("%s: Insertion HPHL trigerred %d\n",
+ __func__, hphl_trigerred);
+ }
+ if (mic_trigerred && hphl_trigerred) {
+ /* Go for plug type determination */
+ pr_debug("%s: Go for plug type determination\n",
+ __func__);
+ goto determine_plug;
+ }
+ }
+ }
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+
+determine_plug:
+ /*
+ * Disable HPHL trigger and MIC Schmitt triggers.
+ * Setup for insertion detection.
+ */
+ pr_debug("%s: Disable insertion interrupt\n", __func__);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+ false);
+
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+ hphl_trigerred = 0;
+ mic_trigerred = 0;
+ mbhc->is_extn_cable = true;
+ mbhc->btn_press_intr = false;
+ wcd_mbhc_detect_plug_type(mbhc);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ pr_debug("%s: leave\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static struct wcd_mbhc_fn mbhc_fn = {
+ .wcd_mbhc_hs_ins_irq = wcd_mbhc_hs_ins_irq,
+ .wcd_mbhc_hs_rem_irq = wcd_mbhc_hs_rem_irq,
+ .wcd_mbhc_detect_plug_type = wcd_mbhc_detect_plug_type,
+ .wcd_mbhc_detect_anc_plug_type = wcd_mbhc_detect_anc_plug_type,
+ .wcd_cancel_hs_detect_plug = wcd_cancel_hs_detect_plug,
+};
+
+/* Function: wcd_mbhc_legacy_init
+ * @mbhc: pointer to the MBHC structure
+ * Description: Initialize the legacy MBHC function pointers in the MBHC
+ * structure
+ */
+void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc)
+{
+ if (!mbhc) {
+ pr_err("%s: mbhc is NULL\n", __func__);
+ return;
+ }
+ mbhc->mbhc_fn = &mbhc_fn;
+ INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
+}
+EXPORT_SYMBOL(wcd_mbhc_legacy_init);
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.h b/sound/soc/codecs/wcd-mbhc-legacy.h
new file mode 100644
index 0000000..594393d
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-legacy.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_LEGACY_H__
+#define __WCD_MBHC_LEGACY_H__
+
+#include "wcdcal-hwdep.h"
+#include "wcd-mbhc-v2.h"
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC_LEGACY
+void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc);
+#else
+static inline void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+
+#endif /* __WCD_MBHC_LEGACY_H__ */
diff --git a/sound/soc/codecs/wcd-mbhc-v2-api.h b/sound/soc/codecs/wcd-mbhc-v2-api.h
new file mode 100644
index 0000000..fab2b49
--- /dev/null
+++ b/sound/soc/codecs/wcd-mbhc-v2-api.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __WCD_MBHC_V2_API_H__
+#define __WCD_MBHC_V2_API_H__
+
+#include "wcd-mbhc-v2.h"
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC
+int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+ struct wcd_mbhc_config *mbhc_cfg);
+void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
+int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
+ const struct wcd_mbhc_cb *mbhc_cb,
+ const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+ struct wcd_mbhc_register *wcd_mbhc_regs,
+ bool impedance_det_en);
+int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
+ uint32_t *zr);
+void wcd_mbhc_deinit(struct wcd_mbhc *mbhc);
+
+#else
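+/* No-op stubs for builds without CONFIG_SND_SOC_WCD_MBHC */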
+static inline void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
+{
+}
+static inline int wcd_mbhc_init(struct wcd_mbhc *mbhc,
+ struct snd_soc_codec *codec,
+ const struct wcd_mbhc_cb *mbhc_cb,
+ const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
+ struct wcd_mbhc_register *wcd_mbhc_regs,
+ bool impedance_det_en)
+{
+ return 0;
+}
+static inline int wcd_mbhc_start(struct wcd_mbhc *mbhc,
+ struct wcd_mbhc_config *mbhc_cfg)
+{
+ return 0;
+}
+static inline int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc,
+ uint32_t *zl,
+ uint32_t *zr)
+{
+ *zl = 0;
+ *zr = 0;
+ return -EINVAL;
+}
+static inline void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
+{
+}
+#endif
+
+#endif /* __WCD_MBHC_V2_API_H__ */
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 75e2709..510a8dc 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -28,49 +28,17 @@
#include <linux/mfd/msm-cdc-pinctrl.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "wcd-mbhc-v2.h"
#include "wcdcal-hwdep.h"
+#include "wcd-mbhc-legacy.h"
+#include "wcd-mbhc-adc.h"
+#include "wcd-mbhc-v2-api.h"
-#define WCD_MBHC_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
- SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
- SND_JACK_MECHANICAL | SND_JACK_MICROPHONE2 | \
- SND_JACK_UNSUPPORTED)
-
-#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
- SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
- SND_JACK_BTN_4 | SND_JACK_BTN_5)
-#define OCP_ATTEMPT 20
-#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
-#define SPECIAL_HS_DETECT_TIME_MS (2 * 1000)
-#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
-#define GND_MIC_SWAP_THRESHOLD 4
-#define WCD_FAKE_REMOVAL_MIN_PERIOD_MS 100
-#define HS_VREF_MIN_VAL 1400
-#define FW_READ_ATTEMPTS 15
-#define FW_READ_TIMEOUT 4000000
-#define FAKE_REM_RETRY_ATTEMPTS 3
-#define MAX_IMPED 60000
-
-#define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS 50
-#define ANC_DETECT_RETRY_CNT 7
-#define WCD_MBHC_SPL_HS_CNT 2
-
-static int det_extn_cable_en;
-module_param(det_extn_cable_en, int, 0664);
-MODULE_PARM_DESC(det_extn_cable_en, "enable/disable extn cable detect");
-
-enum wcd_mbhc_cs_mb_en_flag {
- WCD_MBHC_EN_CS = 0,
- WCD_MBHC_EN_MB,
- WCD_MBHC_EN_PULLUP,
- WCD_MBHC_EN_NONE,
-};
-
-static void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
- struct snd_soc_jack *jack, int status, int mask)
+void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
+ struct snd_soc_jack *jack, int status, int mask)
{
snd_soc_jack_report(jack, status, mask);
}
+EXPORT_SYMBOL(wcd_mbhc_jack_report);
static void __hphocp_off_report(struct wcd_mbhc *mbhc, u32 jack_status,
int irq)
@@ -144,7 +112,7 @@
micbias);
}
-static void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
+void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
const enum wcd_mbhc_cs_mb_en_flag cs_mb_en)
{
@@ -194,6 +162,7 @@
pr_debug("%s: exit\n", __func__);
}
+EXPORT_SYMBOL(wcd_enable_curr_micbias);
static const char *wcd_mbhc_get_event_string(int event)
{
@@ -414,7 +383,7 @@
return 0;
}
-static int wcd_cancel_btn_work(struct wcd_mbhc *mbhc)
+int wcd_cancel_btn_work(struct wcd_mbhc *mbhc)
{
int r;
@@ -427,40 +396,16 @@
mbhc->mbhc_cb->lock_sleep(mbhc, false);
return r;
}
+EXPORT_SYMBOL(wcd_cancel_btn_work);
-static bool wcd_swch_level_remove(struct wcd_mbhc *mbhc)
+bool wcd_swch_level_remove(struct wcd_mbhc *mbhc)
{
u16 result2 = 0;
WCD_MBHC_REG_READ(WCD_MBHC_SWCH_LEVEL_REMOVE, result2);
return (result2) ? true : false;
}
-
-/* should be called under interrupt context that hold suspend */
-static void wcd_schedule_hs_detect_plug(struct wcd_mbhc *mbhc,
- struct work_struct *work)
-{
- pr_debug("%s: scheduling correct_swch_plug\n", __func__);
- WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
- mbhc->hs_detect_work_stop = false;
- mbhc->mbhc_cb->lock_sleep(mbhc, true);
- schedule_work(work);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd_cancel_hs_detect_plug(struct wcd_mbhc *mbhc,
- struct work_struct *work)
-{
- pr_debug("%s: Canceling correct_plug_swch\n", __func__);
- mbhc->hs_detect_work_stop = true;
- WCD_MBHC_RSC_UNLOCK(mbhc);
- if (cancel_work_sync(work)) {
- pr_debug("%s: correct_plug_swch is canceled\n",
- __func__);
- mbhc->mbhc_cb->lock_sleep(mbhc, false);
- }
- WCD_MBHC_RSC_LOCK(mbhc);
-}
+EXPORT_SYMBOL(wcd_swch_level_remove);
static void wcd_mbhc_clr_and_turnon_hph_padac(struct wcd_mbhc *mbhc)
{
@@ -539,8 +484,9 @@
else
return -EINVAL;
}
+EXPORT_SYMBOL(wcd_mbhc_get_impedance);
-static void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type,
+void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type,
bool enable)
{
int irq;
@@ -567,12 +513,14 @@
clear_bit(irq_type, &mbhc->intr_status);
}
}
+EXPORT_SYMBOL(wcd_mbhc_hs_elec_irq);
static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion,
enum snd_jack_types jack_type)
{
struct snd_soc_codec *codec = mbhc->codec;
bool is_pa_on = false;
+ u8 fsm_en = 0;
WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
@@ -664,9 +612,6 @@
wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
pr_debug("%s: set up elec removal detection\n",
__func__);
- WCD_MBHC_REG_UPDATE_BITS(
- WCD_MBHC_ELECT_DETECTION_TYPE,
- 0);
usleep_range(200, 210);
wcd_mbhc_hs_elec_irq(mbhc,
WCD_MBHC_ELEC_HS_REM,
@@ -702,8 +647,16 @@
mbhc->mbhc_cb->compute_impedance &&
(mbhc->mbhc_cfg->linein_th != 0) &&
(!is_pa_on)) {
+ /* Set MUX_CTL to AUTO for Z-det */
+ WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, fsm_en);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL,
+ MUX_CTL_AUTO);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
mbhc->mbhc_cb->compute_impedance(mbhc,
&mbhc->zl, &mbhc->zr);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN,
+ fsm_en);
if ((mbhc->zl > mbhc->mbhc_cfg->linein_th &&
mbhc->zl < MAX_IMPED) &&
(mbhc->zr > mbhc->mbhc_cfg->linein_th &&
@@ -737,94 +690,47 @@
pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status);
}
-static bool wcd_mbhc_detect_anc_plug_type(struct wcd_mbhc *mbhc)
+void wcd_mbhc_elec_hs_report_unplug(struct wcd_mbhc *mbhc)
{
- bool anc_mic_found = false;
- u16 val, hs_comp_res, btn_status = 0;
- unsigned long retry = 0;
- int valid_plug_cnt = 0, invalid_plug_cnt = 0;
- int btn_status_cnt = 0;
- bool is_check_btn_press = false;
+ /* cancel pending button press */
+ if (wcd_cancel_btn_work(mbhc))
+ pr_debug("%s: button press is canceled\n", __func__);
+ /* cancel correct work function */
+ if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+ mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
+ &mbhc->correct_plug_swch);
+ else
+ pr_info("%s: hs_detect_plug work not cancelled\n", __func__);
-
- if (mbhc->mbhc_cfg->anc_micbias < MIC_BIAS_1 ||
- mbhc->mbhc_cfg->anc_micbias > MIC_BIAS_4)
- return false;
-
- if (!mbhc->mbhc_cb->mbhc_micbias_control)
- return false;
-
- WCD_MBHC_REG_READ(WCD_MBHC_FSM_EN, val);
-
- if (val)
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-
- mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
- mbhc->mbhc_cfg->anc_micbias,
- MICB_ENABLE);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x2);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 1);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
+ pr_debug("%s: Report extension cable\n", __func__);
+ wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
/*
- * wait for button debounce time 20ms. If 4-pole plug is inserted
- * into 5-pole jack, then there will be a button press interrupt
- * during anc plug detection. In that case though Hs_comp_res is 0,
- * it should not be declared as ANC plug type
+ * If PA is enabled HPHL schmitt trigger can
+ * be unreliable, make sure to disable it
*/
- usleep_range(20000, 20100);
-
+ if (test_bit(WCD_MBHC_EVENT_PA_HPHL,
+ &mbhc->event_state))
+ wcd_mbhc_set_and_turnoff_hph_padac(mbhc);
/*
- * After enabling FSM, to handle slow insertion scenarios,
- * check hs_comp_result for few times to see if the IN3 voltage
- * is below the Vref
+ * Disable HPHL trigger and MIC Schmitt triggers.
+ * Setup for insertion detection.
*/
- do {
- if (wcd_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch level is low\n", __func__);
- goto exit;
- }
- pr_debug("%s: Retry attempt %lu\n", __func__, retry + 1);
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
+ false);
+ wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
+ /* Disable HW FSM */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 3);
- if (!hs_comp_res) {
- valid_plug_cnt++;
- is_check_btn_press = true;
- } else
- invalid_plug_cnt++;
- /* Wait 1ms before taking another reading */
- usleep_range(1000, 1100);
-
- WCD_MBHC_REG_READ(WCD_MBHC_FSM_STATUS, btn_status);
- if (btn_status)
- btn_status_cnt++;
-
- retry++;
- } while (retry < ANC_DETECT_RETRY_CNT);
-
- pr_debug("%s: valid: %d, invalid: %d, btn_status_cnt: %d\n",
- __func__, valid_plug_cnt, invalid_plug_cnt, btn_status_cnt);
-
- /* decision logic */
- if ((valid_plug_cnt > invalid_plug_cnt) && is_check_btn_press &&
- (btn_status_cnt == 0))
- anc_mic_found = true;
-exit:
- if (!val)
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
-
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ANC_DET_EN, 0);
-
- mbhc->mbhc_cb->mbhc_micbias_control(mbhc->codec,
- mbhc->mbhc_cfg->anc_micbias,
- MICB_DISABLE);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MUX_CTL, 0x0);
- pr_debug("%s: anc mic %sfound\n", __func__,
- anc_mic_found ? "" : "not ");
- return anc_mic_found;
+ /* Set the detection type appropriately */
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
+ true);
}
+EXPORT_SYMBOL(wcd_mbhc_elec_hs_report_unplug);
-static void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
- enum wcd_mbhc_plug_type plug_type)
+void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type)
{
bool anc_mic_found = false;
enum snd_jack_types jack_type;
@@ -852,9 +758,10 @@
wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
wcd_mbhc_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED);
} else if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
- if (mbhc->mbhc_cfg->enable_anc_mic_detect)
- anc_mic_found = wcd_mbhc_detect_anc_plug_type(mbhc);
-
+ if (mbhc->mbhc_cfg->enable_anc_mic_detect &&
+ mbhc->mbhc_fn->wcd_mbhc_detect_anc_plug_type)
+ anc_mic_found =
+ mbhc->mbhc_fn->wcd_mbhc_detect_anc_plug_type(mbhc);
jack_type = SND_JACK_HEADSET;
if (anc_mic_found)
jack_type = SND_JACK_ANC_HEADPHONE;
@@ -895,618 +802,17 @@
exit:
pr_debug("%s: leave\n", __func__);
}
-
-/* To determine if cross connection occurred */
-static int wcd_check_cross_conn(struct wcd_mbhc *mbhc)
-{
- u16 swap_res = 0;
- enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_NONE;
- s16 reg1 = 0;
- bool hphl_sch_res = 0, hphr_sch_res = 0;
-
- if (wcd_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch level is low\n", __func__);
- return -EINVAL;
- }
-
- /* If PA is enabled, dont check for cross-connection */
- if (mbhc->mbhc_cb->hph_pa_on_status)
- if (mbhc->mbhc_cb->hph_pa_on_status(mbhc->codec))
- return false;
-
- WCD_MBHC_REG_READ(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
- /*
- * Check if there is any cross connection,
- * Micbias and schmitt trigger (HPHL-HPHR)
- * needs to be enabled. For some codecs like wcd9335,
- * pull-up will already be enabled when this function
- * is called for cross-connection identification. No
- * need to enable micbias in that case.
- */
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 2);
-
- WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, swap_res);
- pr_debug("%s: swap_res%x\n", __func__, swap_res);
-
- /*
- * Read reg hphl and hphr schmitt result with cross connection
- * bit. These bits will both be "0" in case of cross connection
- * otherwise, they stay at 1
- */
- WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch_res);
- WCD_MBHC_REG_READ(WCD_MBHC_HPHR_SCHMT_RESULT, hphr_sch_res);
- if (!(hphl_sch_res || hphr_sch_res)) {
- plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
- pr_debug("%s: Cross connection identified\n", __func__);
- } else {
- pr_debug("%s: No Cross connection found\n", __func__);
- }
-
- /* Disable schmitt trigger and restore micbias */
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, reg1);
- pr_debug("%s: leave, plug type: %d\n", __func__, plug_type);
-
- return (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP) ? true : false;
-}
-
-static bool wcd_is_special_headset(struct wcd_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- int delay = 0, rc;
- bool ret = false;
- u16 hs_comp_res;
- bool is_spl_hs = false;
-
- /*
- * Increase micbias to 2.7V to detect headsets with
- * threshold on microphone
- */
- if (mbhc->mbhc_cb->mbhc_micbias_control &&
- !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
- pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
- __func__);
- return false;
- } else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
- rc = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec,
- MIC_BIAS_2, true);
- if (rc) {
- pr_err("%s: Micbias control for thr mic failed, rc: %d\n",
- __func__, rc);
- return false;
- }
- }
-
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-
- pr_debug("%s: special headset, start register writes\n", __func__);
-
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
- while (!is_spl_hs) {
- if (mbhc->hs_detect_work_stop) {
- pr_debug("%s: stop requested: %d\n", __func__,
- mbhc->hs_detect_work_stop);
- break;
- }
- delay = delay + 50;
- if (mbhc->mbhc_cb->mbhc_common_micb_ctrl) {
- mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
- MBHC_COMMON_MICB_PRECHARGE,
- true);
- mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
- MBHC_COMMON_MICB_SET_VAL,
- true);
- }
- /* Wait for 50msec for MICBIAS to settle down */
- msleep(50);
- if (mbhc->mbhc_cb->set_auto_zeroing)
- mbhc->mbhc_cb->set_auto_zeroing(codec, true);
- /* Wait for 50msec for FSM to update result values */
- msleep(50);
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
- if (!(hs_comp_res)) {
- pr_debug("%s: Special headset detected in %d msecs\n",
- __func__, (delay * 2));
- is_spl_hs = true;
- }
- if (delay == SPECIAL_HS_DETECT_TIME_MS) {
- pr_debug("%s: Spl headset did not get detect in 4 sec\n",
- __func__);
- break;
- }
- }
- if (is_spl_hs) {
- pr_debug("%s: Headset with threshold found\n", __func__);
- mbhc->micbias_enable = true;
- ret = true;
- }
- if (mbhc->mbhc_cb->mbhc_common_micb_ctrl)
- mbhc->mbhc_cb->mbhc_common_micb_ctrl(codec,
- MBHC_COMMON_MICB_PRECHARGE,
- false);
- if (mbhc->mbhc_cb->set_micbias_value && !mbhc->micbias_enable)
- mbhc->mbhc_cb->set_micbias_value(codec);
- if (mbhc->mbhc_cb->set_auto_zeroing)
- mbhc->mbhc_cb->set_auto_zeroing(codec, false);
-
- if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
- !mbhc->micbias_enable)
- mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(codec, MIC_BIAS_2,
- false);
-
- pr_debug("%s: leave, micb_enable: %d\n", __func__,
- mbhc->micbias_enable);
- return ret;
-}
-
-static void wcd_mbhc_update_fsm_source(struct wcd_mbhc *mbhc,
- enum wcd_mbhc_plug_type plug_type)
-{
- bool micbias2;
-
- micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
- MIC_BIAS_2);
- switch (plug_type) {
- case MBHC_PLUG_TYPE_HEADPHONE:
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
- break;
- case MBHC_PLUG_TYPE_HEADSET:
- case MBHC_PLUG_TYPE_ANC_HEADPHONE:
- if (!mbhc->is_hs_recording && !micbias2)
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 3);
- break;
- default:
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
- break;
-
- };
-}
-
-static void wcd_enable_mbhc_supply(struct wcd_mbhc *mbhc,
- enum wcd_mbhc_plug_type plug_type)
-{
-
- struct snd_soc_codec *codec = mbhc->codec;
-
- /*
- * Do not disable micbias if recording is going on or
- * headset is inserted on the other side of the extn
- * cable. If headset has been detected current source
- * needs to be kept enabled for button detection to work.
- * If the accessory type is invalid or unsupported, we
- * don't need to enable either of them.
- */
- if (det_extn_cable_en && mbhc->is_extn_cable &&
- mbhc->mbhc_cb && mbhc->mbhc_cb->extn_use_mb &&
- mbhc->mbhc_cb->extn_use_mb(codec)) {
- if (plug_type == MBHC_PLUG_TYPE_HEADPHONE ||
- plug_type == MBHC_PLUG_TYPE_HEADSET)
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
- } else {
- if (plug_type == MBHC_PLUG_TYPE_HEADSET) {
- if (mbhc->is_hs_recording || mbhc->micbias_enable)
- wcd_enable_curr_micbias(mbhc,
- WCD_MBHC_EN_MB);
- else if ((test_bit(WCD_MBHC_EVENT_PA_HPHL,
- &mbhc->event_state)) ||
- (test_bit(WCD_MBHC_EVENT_PA_HPHR,
- &mbhc->event_state)))
- wcd_enable_curr_micbias(mbhc,
- WCD_MBHC_EN_PULLUP);
- else
- wcd_enable_curr_micbias(mbhc,
- WCD_MBHC_EN_CS);
- } else if (plug_type == MBHC_PLUG_TYPE_HEADPHONE) {
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
- } else {
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
- }
- }
-}
-
-static bool wcd_mbhc_check_for_spl_headset(struct wcd_mbhc *mbhc,
- int *spl_hs_cnt)
-{
- u16 hs_comp_res_1_8v = 0, hs_comp_res_2_7v = 0;
- bool spl_hs = false;
-
- if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
- goto exit;
-
- /* Read back hs_comp_res @ 1.8v Micbias */
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_1_8v);
- if (!hs_comp_res_1_8v) {
- spl_hs = false;
- goto exit;
- }
-
- /* Bump up MB2 to 2.7v */
- mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
- mbhc->mbhc_cfg->mbhc_micbias, true);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
- usleep_range(10000, 10100);
-
- /* Read back HS_COMP_RESULT */
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res_2_7v);
- if (!hs_comp_res_2_7v && hs_comp_res_1_8v)
- spl_hs = true;
-
- if (spl_hs && spl_hs_cnt)
- *spl_hs_cnt += 1;
-
- /* MB2 back to 1.8v */
- if (*spl_hs_cnt != WCD_MBHC_SPL_HS_CNT) {
- mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
- mbhc->mbhc_cfg->mbhc_micbias, false);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
- usleep_range(10000, 10100);
- }
-
- if (spl_hs)
- pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
-
-exit:
- return spl_hs;
-}
-
-static void wcd_correct_swch_plug(struct work_struct *work)
-{
- struct wcd_mbhc *mbhc;
- struct snd_soc_codec *codec;
- enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
- unsigned long timeout;
- u16 hs_comp_res = 0, hphl_sch = 0, mic_sch = 0, btn_result = 0;
- bool wrk_complete = false;
- int pt_gnd_mic_swap_cnt = 0;
- int no_gnd_mic_swap_cnt = 0;
- bool is_pa_on = false, spl_hs = false;
- bool micbias2 = false;
- bool micbias1 = false;
- int ret = 0;
- int rc, spl_hs_count = 0;
- int cross_conn;
- int try = 0;
-
- pr_debug("%s: enter\n", __func__);
-
- mbhc = container_of(work, struct wcd_mbhc, correct_plug_swch);
- codec = mbhc->codec;
-
- /*
- * Enable micbias/pullup for detection in correct work.
- * This work will get scheduled from detect_plug_type which
- * will already request for pullup/micbias. If the pullup/micbias
- * is handled with ref-counts by individual codec drivers, there is
- * no need to enable micbias/pullup here
- */
-
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-
-
- /* Enable HW FSM */
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
- /*
- * Check for any button press interrupts before starting 3-sec
- * loop.
- */
- rc = wait_for_completion_timeout(&mbhc->btn_press_compl,
- msecs_to_jiffies(WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS));
-
- WCD_MBHC_REG_READ(WCD_MBHC_BTN_RESULT, btn_result);
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
-
- if (!rc) {
- pr_debug("%s No btn press interrupt\n", __func__);
- if (!btn_result && !hs_comp_res)
- plug_type = MBHC_PLUG_TYPE_HEADSET;
- else if (!btn_result && hs_comp_res)
- plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
- else
- plug_type = MBHC_PLUG_TYPE_INVALID;
- } else {
- if (!btn_result && !hs_comp_res)
- plug_type = MBHC_PLUG_TYPE_HEADPHONE;
- else
- plug_type = MBHC_PLUG_TYPE_INVALID;
- }
-
- do {
- cross_conn = wcd_check_cross_conn(mbhc);
- try++;
- } while (try < GND_MIC_SWAP_THRESHOLD);
- /*
- * Check for cross connection 4 times and
- * consider the result of the fourth iteration.
- */
- if (cross_conn > 0) {
- pr_debug("%s: cross con found, start polling\n",
- __func__);
- plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
- pr_debug("%s: Plug found, plug type is %d\n",
- __func__, plug_type);
- goto correct_plug_type;
- }
-
- if ((plug_type == MBHC_PLUG_TYPE_HEADSET ||
- plug_type == MBHC_PLUG_TYPE_HEADPHONE) &&
- (!wcd_swch_level_remove(mbhc))) {
- WCD_MBHC_RSC_LOCK(mbhc);
- wcd_mbhc_find_plug_and_report(mbhc, plug_type);
- WCD_MBHC_RSC_UNLOCK(mbhc);
- }
-
-correct_plug_type:
-
- timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
- while (!time_after(jiffies, timeout)) {
- if (mbhc->hs_detect_work_stop) {
- pr_debug("%s: stop requested: %d\n", __func__,
- mbhc->hs_detect_work_stop);
- wcd_enable_curr_micbias(mbhc,
- WCD_MBHC_EN_NONE);
- if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
- mbhc->micbias_enable) {
- mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
- mbhc->codec, MIC_BIAS_2, false);
- if (mbhc->mbhc_cb->set_micbias_value)
- mbhc->mbhc_cb->set_micbias_value(
- mbhc->codec);
- mbhc->micbias_enable = false;
- }
- goto exit;
- }
- if (mbhc->btn_press_intr) {
- wcd_cancel_btn_work(mbhc);
- mbhc->btn_press_intr = false;
- }
- /* Toggle FSM */
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
-
- /* allow some time and re-check the stop request again */
- msleep(20);
- if (mbhc->hs_detect_work_stop) {
- pr_debug("%s: stop requested: %d\n", __func__,
- mbhc->hs_detect_work_stop);
- wcd_enable_curr_micbias(mbhc,
- WCD_MBHC_EN_NONE);
- if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
- mbhc->micbias_enable) {
- mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
- mbhc->codec, MIC_BIAS_2, false);
- if (mbhc->mbhc_cb->set_micbias_value)
- mbhc->mbhc_cb->set_micbias_value(
- mbhc->codec);
- mbhc->micbias_enable = false;
- }
- goto exit;
- }
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_res);
-
- pr_debug("%s: hs_comp_res: %x\n", __func__, hs_comp_res);
- if (mbhc->mbhc_cb->hph_pa_on_status)
- is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
-
- /*
- * Instead of hogging the system with continuous polling, wait for
- * some time and re-check the stop request again.
- */
- msleep(180);
- if (hs_comp_res && (spl_hs_count < WCD_MBHC_SPL_HS_CNT)) {
- spl_hs = wcd_mbhc_check_for_spl_headset(mbhc,
- &spl_hs_count);
-
- if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
- hs_comp_res = 0;
- spl_hs = true;
- mbhc->micbias_enable = true;
- }
- }
-
- if ((!hs_comp_res) && (!is_pa_on)) {
- /* Check for cross connection*/
- ret = wcd_check_cross_conn(mbhc);
- if (ret < 0) {
- continue;
- } else if (ret > 0) {
- pt_gnd_mic_swap_cnt++;
- no_gnd_mic_swap_cnt = 0;
- if (pt_gnd_mic_swap_cnt <
- GND_MIC_SWAP_THRESHOLD) {
- continue;
- } else if (pt_gnd_mic_swap_cnt >
- GND_MIC_SWAP_THRESHOLD) {
- /*
- * This is because the GND/MIC switch didn't
- * work; report an unsupported plug.
- */
- pr_debug("%s: switch did not work\n",
- __func__);
- plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
- goto report;
- } else {
- plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
- }
- } else {
- no_gnd_mic_swap_cnt++;
- pt_gnd_mic_swap_cnt = 0;
- plug_type = MBHC_PLUG_TYPE_HEADSET;
- if ((no_gnd_mic_swap_cnt <
- GND_MIC_SWAP_THRESHOLD) &&
- (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
- continue;
- } else {
- no_gnd_mic_swap_cnt = 0;
- }
- }
- if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
- (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
- /*
- * if switch is toggled, check again,
- * otherwise report unsupported plug
- */
- if (mbhc->mbhc_cfg->swap_gnd_mic &&
- mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
- pr_debug("%s: US_EU gpio present,flip switch\n"
- , __func__);
- continue;
- }
- }
- }
-
- WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
- WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
- if (hs_comp_res && !(hphl_sch || mic_sch)) {
- pr_debug("%s: cable is extension cable\n", __func__);
- plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
- wrk_complete = true;
- } else {
- pr_debug("%s: cable might be headset: %d\n", __func__,
- plug_type);
- if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
- plug_type = MBHC_PLUG_TYPE_HEADSET;
- /*
- * Report headset only if not already reported
- * and if there is no button press without
- * release
- */
- if (((mbhc->current_plug !=
- MBHC_PLUG_TYPE_HEADSET) &&
- (mbhc->current_plug !=
- MBHC_PLUG_TYPE_ANC_HEADPHONE)) &&
- !wcd_swch_level_remove(mbhc) &&
- !mbhc->btn_press_intr) {
- pr_debug("%s: cable is %sheadset\n",
- __func__,
- ((spl_hs_count ==
- WCD_MBHC_SPL_HS_CNT) ?
- "special ":""));
- goto report;
- }
- }
- wrk_complete = false;
- }
- }
- if (!wrk_complete && mbhc->btn_press_intr) {
- pr_debug("%s: Can be slow insertion of headphone\n", __func__);
- wcd_cancel_btn_work(mbhc);
- plug_type = MBHC_PLUG_TYPE_HEADPHONE;
- }
- /*
- * If plug_type is headset, we might have already reported it either in
- * detect_plug_type or in the above while loop; no need to report again
- */
- if (!wrk_complete && ((plug_type == MBHC_PLUG_TYPE_HEADSET) ||
- (plug_type == MBHC_PLUG_TYPE_ANC_HEADPHONE))) {
- pr_debug("%s: plug_type:0x%x already reported\n",
- __func__, mbhc->current_plug);
- goto enable_supply;
- }
-
- if (plug_type == MBHC_PLUG_TYPE_HIGH_HPH &&
- (!det_extn_cable_en)) {
- if (wcd_is_special_headset(mbhc)) {
- pr_debug("%s: Special headset found %d\n",
- __func__, plug_type);
- plug_type = MBHC_PLUG_TYPE_HEADSET;
- goto report;
- }
- }
-
-report:
- if (wcd_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch level is low\n", __func__);
- goto exit;
- }
- if (plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP && mbhc->btn_press_intr) {
- pr_debug("%s: insertion of headphone with swap\n", __func__);
- wcd_cancel_btn_work(mbhc);
- plug_type = MBHC_PLUG_TYPE_HEADPHONE;
- }
- pr_debug("%s: Valid plug found, plug type %d wrk_cmpt %d btn_intr %d\n",
- __func__, plug_type, wrk_complete,
- mbhc->btn_press_intr);
- WCD_MBHC_RSC_LOCK(mbhc);
- wcd_mbhc_find_plug_and_report(mbhc, plug_type);
- WCD_MBHC_RSC_UNLOCK(mbhc);
-enable_supply:
- if (mbhc->mbhc_cb->mbhc_micbias_control)
- wcd_mbhc_update_fsm_source(mbhc, plug_type);
- else
- wcd_enable_mbhc_supply(mbhc, plug_type);
-exit:
- if (mbhc->mbhc_cb->mbhc_micbias_control &&
- !mbhc->micbias_enable)
- mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
- MICB_DISABLE);
- if (mbhc->mbhc_cb->micbias_enable_status) {
- micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
- MIC_BIAS_1);
- micbias2 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
- MIC_BIAS_2);
- }
-
- if (mbhc->mbhc_cfg->detect_extn_cable &&
- ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
- (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
- !mbhc->hs_detect_work_stop) {
- WCD_MBHC_RSC_LOCK(mbhc);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, true);
- WCD_MBHC_RSC_UNLOCK(mbhc);
- }
- if (mbhc->mbhc_cb->set_cap_mode)
- mbhc->mbhc_cb->set_cap_mode(codec, micbias1, micbias2);
-
- if (mbhc->mbhc_cb->hph_pull_down_ctrl)
- mbhc->mbhc_cb->hph_pull_down_ctrl(codec, true);
-
- mbhc->mbhc_cb->lock_sleep(mbhc, false);
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd_mbhc_detect_plug_type(struct wcd_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- bool micbias1 = false;
-
- pr_debug("%s: enter\n", __func__);
- WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
-
- if (mbhc->mbhc_cb->hph_pull_down_ctrl)
- mbhc->mbhc_cb->hph_pull_down_ctrl(codec, false);
-
- if (mbhc->mbhc_cb->micbias_enable_status)
- micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
- MIC_BIAS_1);
-
- if (mbhc->mbhc_cb->set_cap_mode)
- mbhc->mbhc_cb->set_cap_mode(codec, micbias1, true);
-
- if (mbhc->mbhc_cb->mbhc_micbias_control)
- mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
- MICB_ENABLE);
- else
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_MB);
-
- /* Re-initialize button press completion object */
- reinit_completion(&mbhc->btn_press_compl);
- wcd_schedule_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
- pr_debug("%s: leave\n", __func__);
-}
+EXPORT_SYMBOL(wcd_mbhc_find_plug_and_report);
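The block above removes the legacy plug-type state machine (cross-connection polling, special-headset probing, the correct_plug_swch worker) from the core file and exports wcd_mbhc_find_plug_and_report() so the split-out legacy and ADC detection modules can hand their verdict back. A minimal sketch of that call-back direction, condensed from the removed wcd_correct_swch_plug() (the worker name below is a stand-in; the real legacy worker also polls and debounces before deciding on a plug type):

/* Sketch: a detection worker in a split-out module reports its result
 * through the core's newly exported helper. Locking mirrors the
 * original worker.
 */
static void example_correct_plug_work(struct work_struct *work)
{
	struct wcd_mbhc *mbhc = container_of(work, struct wcd_mbhc,
					     correct_plug_swch);
	enum wcd_mbhc_plug_type type = MBHC_PLUG_TYPE_HEADSET; /* example */

	WCD_MBHC_RSC_LOCK(mbhc);
	wcd_mbhc_find_plug_and_report(mbhc, type);
	WCD_MBHC_RSC_UNLOCK(mbhc);
}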
static void wcd_mbhc_swch_irq_handler(struct wcd_mbhc *mbhc)
{
bool detection_type = 0;
bool micbias1 = false;
struct snd_soc_codec *codec = mbhc->codec;
+ enum snd_jack_types jack_type;
dev_dbg(codec->dev, "%s: enter\n", __func__);
-
WCD_MBHC_RSC_LOCK(mbhc);
-
mbhc->in_swch_irq_handler = true;
/* cancel pending button press */
@@ -1521,7 +827,11 @@
pr_debug("%s: mbhc->current_plug: %d detection_type: %d\n", __func__,
mbhc->current_plug, detection_type);
- wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
+ if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+ mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
+ &mbhc->correct_plug_swch);
+ else
+ pr_info("%s: hs_detect_plug work not cancelled\n", __func__);
if (mbhc->mbhc_cb->micbias_enable_status)
micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
@@ -1554,7 +864,8 @@
mbhc->mbhc_cb->enable_mb_source(mbhc, true);
mbhc->btn_press_intr = false;
mbhc->is_btn_press = false;
- wcd_mbhc_detect_plug_type(mbhc);
+ if (mbhc->mbhc_fn)
+ mbhc->mbhc_fn->wcd_mbhc_detect_plug_type(mbhc);
} else if ((mbhc->current_plug != MBHC_PLUG_TYPE_NONE)
&& !detection_type) {
/* Disable external voltage source to micbias if present */
@@ -1572,50 +883,41 @@
mbhc->btn_press_intr = false;
mbhc->is_btn_press = false;
- if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
- false);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
- false);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
- 1);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
- wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
- } else if (mbhc->current_plug == MBHC_PLUG_TYPE_GND_MIC_SWAP) {
- wcd_mbhc_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED);
- } else if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADSET) {
+ switch (mbhc->current_plug) {
+ case MBHC_PLUG_TYPE_HEADPHONE:
+ jack_type = SND_JACK_HEADPHONE;
+ break;
+ case MBHC_PLUG_TYPE_GND_MIC_SWAP:
+ jack_type = SND_JACK_UNSUPPORTED;
+ break;
+ case MBHC_PLUG_TYPE_HEADSET:
/* make sure to turn off Rbias */
if (mbhc->mbhc_cb->micb_internal)
mbhc->mbhc_cb->micb_internal(codec, 1, false);
-
/* Pulldown micbias */
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_PULLDOWN_CTRL, 1);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
- false);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
- false);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
- 1);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
- wcd_mbhc_report_plug(mbhc, 0, SND_JACK_HEADSET);
- } else if (mbhc->current_plug == MBHC_PLUG_TYPE_HIGH_HPH) {
+ jack_type = SND_JACK_HEADSET;
+ break;
+ case MBHC_PLUG_TYPE_HIGH_HPH:
mbhc->is_extn_cable = false;
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
- false);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
- false);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
- 1);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
- wcd_mbhc_report_plug(mbhc, 0, SND_JACK_LINEOUT);
- } else if (mbhc->current_plug == MBHC_PLUG_TYPE_ANC_HEADPHONE) {
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, false);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
- 0);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
- wcd_mbhc_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE);
+ jack_type = SND_JACK_LINEOUT;
+ break;
+ case MBHC_PLUG_TYPE_ANC_HEADPHONE:
+ jack_type = SND_JACK_ANC_HEADPHONE;
+ break;
+ default:
+ pr_info("%s: Invalid current plug: %d\n",
+ __func__, mbhc->current_plug);
+ jack_type = SND_JACK_UNSUPPORTED;
+ break;
}
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, false);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
+ WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
+ mbhc->extn_cable_hph_rem = false;
+ wcd_mbhc_report_plug(mbhc, 0, jack_type);
+
} else if (!detection_type) {
/* Disable external voltage source to micbias if present */
if (mbhc->mbhc_cb->enable_mb_source)
@@ -1623,6 +925,7 @@
/* Disable HW FSM */
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_BTN_ISRC_CTL, 0);
+ mbhc->extn_cable_hph_rem = false;
}
mbhc->in_swch_irq_handler = false;
@@ -1648,7 +951,7 @@
return r;
}
-static int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc)
+int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc)
{
int mask = 0;
int btn;
@@ -1680,203 +983,7 @@
return mask;
}
-
-static irqreturn_t wcd_mbhc_hs_ins_irq(int irq, void *data)
-{
- struct wcd_mbhc *mbhc = data;
- bool detection_type = 0, hphl_sch = 0, mic_sch = 0;
- u16 elect_result = 0;
- static u16 hphl_trigerred;
- static u16 mic_trigerred;
-
- pr_debug("%s: enter\n", __func__);
- if (!mbhc->mbhc_cfg->detect_extn_cable) {
- pr_debug("%s: Returning as Extension cable feature not enabled\n",
- __func__);
- return IRQ_HANDLED;
- }
- WCD_MBHC_RSC_LOCK(mbhc);
-
- WCD_MBHC_REG_READ(WCD_MBHC_ELECT_DETECTION_TYPE, detection_type);
- WCD_MBHC_REG_READ(WCD_MBHC_ELECT_RESULT, elect_result);
-
- pr_debug("%s: detection_type %d, elect_result %x\n", __func__,
- detection_type, elect_result);
- if (detection_type) {
- /* check if both Left and MIC Schmitt triggers are triggered */
- WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
- WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
- if (hphl_sch && mic_sch) {
- /* Go for plug type determination */
- pr_debug("%s: Go for plug type determination\n",
- __func__);
- goto determine_plug;
-
- } else {
- if (mic_sch) {
- mic_trigerred++;
- pr_debug("%s: Insertion MIC trigerred %d\n",
- __func__, mic_trigerred);
- WCD_MBHC_REG_UPDATE_BITS(
- WCD_MBHC_ELECT_SCHMT_ISRC,
- 0);
- msleep(20);
- WCD_MBHC_REG_UPDATE_BITS(
- WCD_MBHC_ELECT_SCHMT_ISRC,
- 1);
- }
- if (hphl_sch) {
- hphl_trigerred++;
- pr_debug("%s: Insertion HPHL trigerred %d\n",
- __func__, hphl_trigerred);
- }
- if (mic_trigerred && hphl_trigerred) {
- /* Go for plug type determination */
- pr_debug("%s: Go for plug type determination\n",
- __func__);
- goto determine_plug;
- }
- }
- }
- WCD_MBHC_RSC_UNLOCK(mbhc);
- pr_debug("%s: leave\n", __func__);
- return IRQ_HANDLED;
-
-determine_plug:
- /*
- * Disable HPHL trigger and MIC Schmitt triggers.
- * Setup for insertion detection.
- */
- pr_debug("%s: Disable insertion interrupt\n", __func__);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
- false);
-
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
- hphl_trigerred = 0;
- mic_trigerred = 0;
- mbhc->is_extn_cable = true;
- mbhc->btn_press_intr = false;
- mbhc->is_btn_press = false;
- wcd_mbhc_detect_plug_type(mbhc);
- WCD_MBHC_RSC_UNLOCK(mbhc);
- pr_debug("%s: leave\n", __func__);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd_mbhc_hs_rem_irq(int irq, void *data)
-{
- struct wcd_mbhc *mbhc = data;
- u8 hs_comp_result = 0, hphl_sch = 0, mic_sch = 0;
- static u16 hphl_trigerred;
- static u16 mic_trigerred;
- unsigned long timeout;
- bool removed = true;
- int retry = 0;
-
- pr_debug("%s: enter\n", __func__);
-
- WCD_MBHC_RSC_LOCK(mbhc);
-
- timeout = jiffies +
- msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
- do {
- retry++;
- /*
- * read the result register every 10ms to look for
- * any change in HS_COMP_RESULT bit
- */
- usleep_range(10000, 10100);
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
- pr_debug("%s: Check result reg for fake removal: hs_comp_res %x\n",
- __func__, hs_comp_result);
- if ((!hs_comp_result) &&
- retry > FAKE_REM_RETRY_ATTEMPTS) {
- removed = false;
- break;
- }
- } while (!time_after(jiffies, timeout));
-
- if (wcd_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch level is low ", __func__);
- goto exit;
- }
- pr_debug("%s: headset %s actually removed\n", __func__,
- removed ? "" : "not ");
-
- WCD_MBHC_REG_READ(WCD_MBHC_HPHL_SCHMT_RESULT, hphl_sch);
- WCD_MBHC_REG_READ(WCD_MBHC_MIC_SCHMT_RESULT, mic_sch);
- WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
-
- if (removed) {
- if (!(hphl_sch && mic_sch && hs_comp_result)) {
- /*
- * extension cable is still plugged in
- * report it as LINEOUT device
- */
- goto report_unplug;
- } else {
- if (!mic_sch) {
- mic_trigerred++;
- pr_debug("%s: Removal MIC trigerred %d\n",
- __func__, mic_trigerred);
- }
- if (!hphl_sch) {
- hphl_trigerred++;
- pr_debug("%s: Removal HPHL trigerred %d\n",
- __func__, hphl_trigerred);
- }
- if (mic_trigerred && hphl_trigerred) {
- /*
- * extension cable is still plugged in
- * report it as LINEOUT device
- */
- goto report_unplug;
- }
- }
- }
-exit:
- WCD_MBHC_RSC_UNLOCK(mbhc);
- pr_debug("%s: leave\n", __func__);
- return IRQ_HANDLED;
-
-report_unplug:
-
- /* cancel pending button press */
- if (wcd_cancel_btn_work(mbhc))
- pr_debug("%s: button press is canceled\n", __func__);
- /* cancel correct work function */
- wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
-
- pr_debug("%s: Report extension cable\n", __func__);
- wcd_mbhc_report_plug(mbhc, 1, SND_JACK_LINEOUT);
- /*
- * If PA is enabled HPHL schmitt trigger can
- * be unreliable, make sure to disable it
- */
- if (test_bit(WCD_MBHC_EVENT_PA_HPHL,
- &mbhc->event_state))
- wcd_mbhc_set_and_turnoff_hph_padac(mbhc);
- /*
- * Disable HPHL trigger and MIC Schmitt triggers.
- * Setup for insertion detection.
- */
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM,
- false);
- wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_NONE);
- /* Disable HW FSM */
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 0);
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 3);
-
- /* Set the detection type appropriately */
- WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE, 1);
- wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS,
- true);
- hphl_trigerred = 0;
- mic_trigerred = 0;
- WCD_MBHC_RSC_UNLOCK(mbhc);
- pr_debug("%s: leave\n", __func__);
- return IRQ_HANDLED;
-}
+EXPORT_SYMBOL(wcd_mbhc_get_button_mask);
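The removed wcd_mbhc_hs_rem_irq() above debounces fake removals by re-reading HS_COMP_RESULT roughly every 10 ms inside a jiffies deadline before trusting the interrupt. A hedged distillation of that loop (the constants and register-read macro are the ones defined by this patch):

/* Fake-removal debounce, condensed from the removed handler: only
 * treat the removal as real if HS_COMP_RESULT never drops during the
 * WCD_FAKE_REMOVAL_MIN_PERIOD_MS window.
 */
static bool example_removal_is_real(struct wcd_mbhc *mbhc)
{
	unsigned long timeout = jiffies +
			msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
	u8 hs_comp_result = 0;
	int retry = 0;

	do {
		retry++;
		usleep_range(10000, 10100);	/* sample every ~10 ms */
		WCD_MBHC_REG_READ(WCD_MBHC_HS_COMP_RESULT, hs_comp_result);
		if (!hs_comp_result && retry > FAKE_REM_RETRY_ATTEMPTS)
			return false;	/* result bit dropped: fake removal */
	} while (!time_after(jiffies, timeout));

	return true;
}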
static void wcd_btn_lpress_fn(struct work_struct *work)
{
@@ -1998,8 +1105,11 @@
* If current plug is headphone then there is no chance to
* get btn release interrupt, so connected cable should be
* headset not headphone.
+ * For ADC MBHC, ADC_COMPLETE interrupt will be generated
+ * in this case. So skip the check here.
*/
- if (mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
+ if (!WCD_MBHC_DETECTION &&
+ mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
goto exit;
@@ -2181,7 +1291,6 @@
wcd_program_btn_threshold(mbhc, false);
- INIT_WORK(&mbhc->correct_plug_swch, wcd_correct_swch_plug);
init_completion(&mbhc->btn_press_compl);
@@ -2271,7 +1380,7 @@
(void) wcd_mbhc_initialise(mbhc);
}
-int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc)
+static int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc)
{
enum snd_jack_types type;
int i, ret, result = 0;
@@ -2761,6 +1870,7 @@
mbhc->btn_press_intr = false;
mbhc->is_hs_recording = false;
mbhc->is_extn_cable = false;
+ mbhc->extn_cable_hph_rem = false;
mbhc->hph_type = WCD_MBHC_HPH_NONE;
mbhc->wcd_mbhc_regs = wcd_mbhc_regs;
@@ -2784,6 +1894,7 @@
return -EINVAL;
}
+ /* No need to create a new sound card jack if it is already created */
if (mbhc->headset_jack.jack == NULL) {
ret = snd_soc_card_jack_new(codec->component.card,
"Headset Jack", WCD_MBHC_JACK_MASK,
@@ -2833,6 +1944,27 @@
init_waitqueue_head(&mbhc->wait_btn_press);
mutex_init(&mbhc->codec_resource_lock);
+ switch (WCD_MBHC_DETECTION) {
+ case WCD_DETECTION_LEGACY:
+ wcd_mbhc_legacy_init(mbhc);
+ break;
+ case WCD_DETECTION_ADC:
+ wcd_mbhc_adc_init(mbhc);
+ break;
+ default:
+ pr_err("%s: Unknown detection logic type %d\n",
+ __func__, WCD_MBHC_DETECTION);
+ break;
+ }
+
+ if (!mbhc->mbhc_fn ||
+ !mbhc->mbhc_fn->wcd_mbhc_hs_ins_irq ||
+ !mbhc->mbhc_fn->wcd_mbhc_hs_rem_irq ||
+ !mbhc->mbhc_fn->wcd_mbhc_detect_plug_type ||
+ !mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) {
+ pr_err("%s: mbhc function pointer is NULL\n", __func__);
+ goto err_mbhc_sw_irq;
+ }
ret = mbhc->mbhc_cb->request_irq(codec, mbhc->intr_ids->mbhc_sw_intr,
wcd_mbhc_mech_plug_detect_irq,
"mbhc sw intr", mbhc);
@@ -2845,8 +1977,7 @@
ret = mbhc->mbhc_cb->request_irq(codec,
mbhc->intr_ids->mbhc_btn_press_intr,
wcd_mbhc_btn_press_handler,
- "Button Press detect",
- mbhc);
+ "Button Press detect", mbhc);
if (ret) {
pr_err("%s: Failed to request irq %d\n", __func__,
mbhc->intr_ids->mbhc_btn_press_intr);
@@ -2865,7 +1996,7 @@
ret = mbhc->mbhc_cb->request_irq(codec,
mbhc->intr_ids->mbhc_hs_ins_intr,
- wcd_mbhc_hs_ins_irq,
+ mbhc->mbhc_fn->wcd_mbhc_hs_ins_irq,
"Elect Insert", mbhc);
if (ret) {
pr_err("%s: Failed to request irq %d\n", __func__,
@@ -2878,7 +2009,7 @@
ret = mbhc->mbhc_cb->request_irq(codec,
mbhc->intr_ids->mbhc_hs_rem_intr,
- wcd_mbhc_hs_rem_irq,
+ mbhc->mbhc_fn->wcd_mbhc_hs_rem_irq,
"Elect Remove", mbhc);
if (ret) {
pr_err("%s: Failed to request irq %d\n", __func__,
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index e6cd1971..4ea4401 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -27,6 +27,149 @@
#define WCD_MONO_HS_MIN_THR 2
#define WCD_MBHC_STRINGIFY(s) __stringify(s)
+#define WCD_MBHC_REGISTER(rid, rreg, rmask, rshift, rinvert) \
+{ .id = rid, .reg = rreg, .mask = rmask, .offset = rshift, .invert = rinvert }
+
+#define WCD_MBHC_RSC_LOCK(mbhc) \
+{ \
+ pr_debug("%s: Acquiring BCL\n", __func__); \
+ mutex_lock(&mbhc->codec_resource_lock); \
+ pr_debug("%s: Acquiring BCL done\n", __func__); \
+}
+
+#define WCD_MBHC_RSC_UNLOCK(mbhc) \
+{ \
+ pr_debug("%s: Release BCL\n", __func__); \
+ mutex_unlock(&mbhc->codec_resource_lock); \
+}
+
+#define WCD_MBHC_RSC_ASSERT_LOCKED(mbhc) \
+{ \
+ WARN_ONCE(!mutex_is_locked(&mbhc->codec_resource_lock), \
+ "%s: BCL should have acquired\n", __func__); \
+}
+
+/*
+ * Macros to update and read MBHC register bits. The register
+ * address is checked against 0 before updating or reading,
+ * because one codec may implement a given field while another
+ * does not.
+ */
+#define WCD_MBHC_REG_UPDATE_BITS(function, val) \
+do { \
+ if (mbhc->wcd_mbhc_regs[function].reg) { \
+ snd_soc_update_bits(mbhc->codec, \
+ mbhc->wcd_mbhc_regs[function].reg, \
+ mbhc->wcd_mbhc_regs[function].mask, \
+ val << (mbhc->wcd_mbhc_regs[function].offset)); \
+ } \
+} while (0)
+
+#define WCD_MBHC_REG_READ(function, val) \
+do { \
+ if (mbhc->wcd_mbhc_regs[function].reg) { \
+ val = (((snd_soc_read(mbhc->codec, \
+ mbhc->wcd_mbhc_regs[function].reg)) & \
+ (mbhc->wcd_mbhc_regs[function].mask)) >> \
+ (mbhc->wcd_mbhc_regs[function].offset)); \
+ } else { \
+ val = -EINVAL; \
+ } \
+} while (0)
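These two macros give every codec a uniform window onto its MBHC fields: the per-codec table supplies reg/mask/offset, and a zero reg means the codec does not implement the field, so updates are silently skipped and reads come back as -EINVAL. A short usage sketch (assumes a local named mbhc of type struct wcd_mbhc *, since the macros expand against that identifier):

/* Usage sketch: kick the HW FSM, then read a status field. */
u16 fsm_status = 0;

WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_FSM_EN, 1);
WCD_MBHC_REG_READ(WCD_MBHC_FSM_STATUS, fsm_status);
/* fsm_status now holds the shifted field value, or -EINVAL (truncated
 * to u16) if this codec left WCD_MBHC_FSM_STATUS unmapped.
 */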
+
+#define WCD_MBHC_CAL_SIZE(buttons, rload) ( \
+ sizeof(struct wcd_mbhc_general_cfg) + \
+ sizeof(struct wcd_mbhc_plug_detect_cfg) + \
+ ((sizeof(s16) + sizeof(s16)) * buttons) + \
+ sizeof(struct wcd_mbhc_plug_type_cfg) + \
+ sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+ sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+ ((sizeof(u16) + sizeof(u16)) * rload) \
+ )
+
+#define WCD_MBHC_CAL_GENERAL_PTR(cali) ( \
+ (struct wcd_mbhc_general_cfg *) cali)
+#define WCD_MBHC_CAL_PLUG_DET_PTR(cali) ( \
+ (struct wcd_mbhc_plug_detect_cfg *) \
+ &(WCD_MBHC_CAL_GENERAL_PTR(cali)[1]))
+#define WCD_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
+ (struct wcd_mbhc_plug_type_cfg *) \
+ &(WCD_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
+#define WCD_MBHC_CAL_BTN_DET_PTR(cali) ( \
+ (struct wcd_mbhc_btn_detect_cfg *) \
+ &(WCD_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
+#define WCD_MBHC_CAL_IMPED_DET_PTR(cali) ( \
+ (struct wcd_mbhc_imped_detect_cfg *) \
+ (((void *)&WCD_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
+ (WCD_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
+ (sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
+ sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
+ )
+
+#define WCD_MBHC_CAL_MIN_SIZE ( \
+ sizeof(struct wcd_mbhc_general_cfg) + \
+ sizeof(struct wcd_mbhc_plug_detect_cfg) + \
+ sizeof(struct wcd_mbhc_plug_type_cfg) + \
+ sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+ sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+ (sizeof(u16)*2) \
+ )
+
+#define WCD_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
+ sizeof(struct wcd_mbhc_btn_detect_cfg) + \
+ (cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
+ sizeof(cfg_ptr->_v_btn_high[0]))))
+
+#define WCD_MBHC_CAL_IMPED_MIN_SZ ( \
+ sizeof(struct wcd_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
+
+#define WCD_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
+ sizeof(struct wcd_mbhc_imped_detect_cfg) + \
+ (cfg_ptr->_n_rload * \
+ (sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
+
+#define WCD_MBHC_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
+ SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
+ SND_JACK_MECHANICAL | SND_JACK_MICROPHONE2 | \
+ SND_JACK_UNSUPPORTED)
+
+#define WCD_MBHC_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
+ SND_JACK_BTN_4 | SND_JACK_BTN_5)
+#define OCP_ATTEMPT 20
+#define HS_DETECT_PLUG_TIME_MS (3 * 1000)
+#define SPECIAL_HS_DETECT_TIME_MS (2 * 1000)
+#define MBHC_BUTTON_PRESS_THRESHOLD_MIN 250
+#define GND_MIC_SWAP_THRESHOLD 4
+#define WCD_FAKE_REMOVAL_MIN_PERIOD_MS 100
+#define HS_VREF_MIN_VAL 1400
+#define FW_READ_ATTEMPTS 15
+#define FW_READ_TIMEOUT 4000000
+#define FAKE_REM_RETRY_ATTEMPTS 3
+#define MAX_IMPED 60000
+
+#define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS 50
+#define ANC_DETECT_RETRY_CNT 7
+#define WCD_MBHC_SPL_HS_CNT 1
+
+enum wcd_mbhc_detect_logic {
+ WCD_DETECTION_LEGACY,
+ WCD_DETECTION_ADC,
+};
+
+#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
+#define WCD_MBHC_DETECTION WCD_DETECTION_ADC
+#else
+#define WCD_MBHC_DETECTION WCD_DETECTION_LEGACY
+#endif
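WCD_MBHC_DETECTION therefore resolves at build time, and because WCD_DETECTION_LEGACY is 0, the idiom `if (!WCD_MBHC_DETECTION)` used in the wcd934x hunks below means "legacy build only". A small helper sketch that makes the compile-time selection explicit at call sites instead of leaning on the enum's zero value:

/* Helper sketch: spell out the compile-time detection choice rather
 * than relying on WCD_DETECTION_LEGACY being 0.
 */
static inline bool wcd_mbhc_is_adc_detection(void)
{
	return WCD_MBHC_DETECTION == WCD_DETECTION_ADC;
}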
+
+enum wcd_mbhc_cs_mb_en_flag {
+ WCD_MBHC_EN_CS = 0,
+ WCD_MBHC_EN_MB,
+ WCD_MBHC_EN_PULLUP,
+ WCD_MBHC_EN_NONE,
+};
+
enum {
WCD_MBHC_ELEC_HS_INS,
WCD_MBHC_ELEC_HS_REM,
@@ -71,6 +214,14 @@
WCD_MBHC_HPHR_OCP_DET_EN,
WCD_MBHC_HPHL_OCP_STATUS,
WCD_MBHC_HPHR_OCP_STATUS,
+ WCD_MBHC_ADC_EN,
+ WCD_MBHC_ADC_COMPLETE,
+ WCD_MBHC_ADC_TIMEOUT,
+ WCD_MBHC_ADC_RESULT,
+ WCD_MBHC_MICB2_VOUT,
+ WCD_MBHC_ADC_MODE,
+ WCD_MBHC_DETECTION_DONE,
+ WCD_MBHC_ELECT_ISRC_EN,
WCD_MBHC_REG_FUNC_MAX,
};
@@ -141,6 +292,7 @@
WCD_MBHC_EVENT_PA_HPHL,
WCD_MBHC_EVENT_PA_HPHR,
};
+
struct wcd_mbhc_general_cfg {
u8 t_ldoh;
u8 t_bg_fast_settle;
@@ -295,56 +447,6 @@
u8 invert;
};
-#define WCD_MBHC_REGISTER(rid, rreg, rmask, rshift, rinvert) \
-{ .id = rid, .reg = rreg, .mask = rmask, .offset = rshift, .invert = rinvert }
-
-#define WCD_MBHC_RSC_LOCK(mbhc) \
-{ \
- pr_debug("%s: Acquiring BCL\n", __func__); \
- mutex_lock(&mbhc->codec_resource_lock); \
- pr_debug("%s: Acquiring BCL done\n", __func__); \
-}
-
-#define WCD_MBHC_RSC_UNLOCK(mbhc) \
-{ \
- pr_debug("%s: Release BCL\n", __func__); \
- mutex_unlock(&mbhc->codec_resource_lock); \
-}
-
-#define WCD_MBHC_RSC_ASSERT_LOCKED(mbhc) \
-{ \
- WARN_ONCE(!mutex_is_locked(&mbhc->codec_resource_lock), \
- "%s: BCL should have acquired\n", __func__); \
-}
-
-/*
- * Macros to update and read mbhc register bits. Check for
- * "0" before updating or reading the register, because it
- * is possible that one codec wants to write to that bit and
- * other codec does not.
- */
-#define WCD_MBHC_REG_UPDATE_BITS(function, val) \
-do { \
- if (mbhc->wcd_mbhc_regs[function].reg) { \
- snd_soc_update_bits(mbhc->codec, \
- mbhc->wcd_mbhc_regs[function].reg, \
- mbhc->wcd_mbhc_regs[function].mask, \
- val << (mbhc->wcd_mbhc_regs[function].offset)); \
- } \
-} while (0)
-
-#define WCD_MBHC_REG_READ(function, val) \
-do { \
- if (mbhc->wcd_mbhc_regs[function].reg) { \
- val = (((snd_soc_read(mbhc->codec, \
- mbhc->wcd_mbhc_regs[function].reg)) & \
- (mbhc->wcd_mbhc_regs[function].mask)) >> \
- (mbhc->wcd_mbhc_regs[function].offset)); \
- } else { \
- val = -EINVAL; \
- } \
-} while (0)
-
struct wcd_mbhc_cb {
int (*enable_mb_source)(struct wcd_mbhc *, bool);
void (*trim_btn_reg)(struct snd_soc_codec *);
@@ -388,6 +490,15 @@
bool (*hph_register_recovery)(struct wcd_mbhc *);
};
+struct wcd_mbhc_fn {
+ irqreturn_t (*wcd_mbhc_hs_ins_irq)(int irq, void *data);
+ irqreturn_t (*wcd_mbhc_hs_rem_irq)(int irq, void *data);
+ void (*wcd_mbhc_detect_plug_type)(struct wcd_mbhc *mbhc);
+ bool (*wcd_mbhc_detect_anc_plug_type)(struct wcd_mbhc *mbhc);
+ void (*wcd_cancel_hs_detect_plug)(struct wcd_mbhc *mbhc,
+ struct work_struct *work);
+};
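Each detection flavor fills this table once at init time; wcd_mbhc_init() then refuses to proceed if any mandatory hook is missing (the NULL check added earlier in this patch). A hedged sketch of what a variant's registration might look like — the handler names are stand-ins, since the real wcd_mbhc_legacy_init()/wcd_mbhc_adc_init() bodies live outside this hunk:

/* Hypothetical registration for one detection variant; the handlers
 * are declared in the variant's own file.
 */
static struct wcd_mbhc_fn example_legacy_fn = {
	.wcd_mbhc_hs_ins_irq		= example_hs_ins_irq,
	.wcd_mbhc_hs_rem_irq		= example_hs_rem_irq,
	.wcd_mbhc_detect_plug_type	= example_detect_plug_type,
	.wcd_mbhc_detect_anc_plug_type	= example_detect_anc_plug_type,
	.wcd_cancel_hs_detect_plug	= example_cancel_hs_detect_plug,
};

void wcd_mbhc_legacy_init(struct wcd_mbhc *mbhc)
{
	mbhc->mbhc_fn = &example_legacy_fn;
}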
+
struct wcd_mbhc {
/* Delayed work to report long button press */
struct delayed_work mbhc_btn_dwork;
@@ -417,6 +528,7 @@
bool is_extn_cable;
bool skip_imped_detection;
bool is_btn_already_regd;
+ bool extn_cable_hph_rem;
struct snd_soc_codec *codec;
/* Work to perform MBHC Firmware Read */
@@ -461,101 +573,20 @@
struct notifier_block psy_nb;
struct power_supply *usb_psy;
struct work_struct usbc_analog_work;
+
+ struct wcd_mbhc_fn *mbhc_fn;
};
-#define WCD_MBHC_CAL_SIZE(buttons, rload) ( \
- sizeof(struct wcd_mbhc_general_cfg) + \
- sizeof(struct wcd_mbhc_plug_detect_cfg) + \
- ((sizeof(s16) + sizeof(s16)) * buttons) + \
- sizeof(struct wcd_mbhc_plug_type_cfg) + \
- sizeof(struct wcd_mbhc_btn_detect_cfg) + \
- sizeof(struct wcd_mbhc_imped_detect_cfg) + \
- ((sizeof(u16) + sizeof(u16)) * rload) \
- )
-
-#define WCD_MBHC_CAL_GENERAL_PTR(cali) ( \
- (struct wcd_mbhc_general_cfg *) cali)
-#define WCD_MBHC_CAL_PLUG_DET_PTR(cali) ( \
- (struct wcd_mbhc_plug_detect_cfg *) \
- &(WCD_MBHC_CAL_GENERAL_PTR(cali)[1]))
-#define WCD_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
- (struct wcd_mbhc_plug_type_cfg *) \
- &(WCD_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
-#define WCD_MBHC_CAL_BTN_DET_PTR(cali) ( \
- (struct wcd_mbhc_btn_detect_cfg *) \
- &(WCD_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
-#define WCD_MBHC_CAL_IMPED_DET_PTR(cali) ( \
- (struct wcd_mbhc_imped_detect_cfg *) \
- (((void *)&WCD_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
- (WCD_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
- (sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
- sizeof(WCD_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
- )
-
-#define WCD_MBHC_CAL_MIN_SIZE ( \
- sizeof(struct wcd_mbhc_general_cfg) + \
- sizeof(struct wcd_mbhc_plug_detect_cfg) + \
- sizeof(struct wcd_mbhc_plug_type_cfg) + \
- sizeof(struct wcd_mbhc_btn_detect_cfg) + \
- sizeof(struct wcd_mbhc_imped_detect_cfg) + \
- (sizeof(u16)*2) \
- )
-
-#define WCD_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
- sizeof(struct wcd_mbhc_btn_detect_cfg) + \
- (cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
- sizeof(cfg_ptr->_v_btn_high[0]))))
-
-#define WCD_MBHC_CAL_IMPED_MIN_SZ ( \
- sizeof(struct wcd_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
-
-#define WCD_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
- sizeof(struct wcd_mbhc_imped_detect_cfg) + \
- (cfg_ptr->_n_rload * \
- (sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
-
-#ifdef CONFIG_SND_SOC_WCD_MBHC
-int wcd_mbhc_set_keycode(struct wcd_mbhc *mbhc);
-int wcd_mbhc_start(struct wcd_mbhc *mbhc,
- struct wcd_mbhc_config *mbhc_cfg);
-void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
-int wcd_mbhc_init(struct wcd_mbhc *mbhc, struct snd_soc_codec *codec,
- const struct wcd_mbhc_cb *mbhc_cb,
- const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
- struct wcd_mbhc_register *mbhc_reg,
- bool impedance_det_en);
-int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr);
-void wcd_mbhc_deinit(struct wcd_mbhc *mbhc);
-#else
-static inline void wcd_mbhc_stop(struct wcd_mbhc *mbhc)
-{
-}
-static inline int wcd_mbhc_init(struct wcd_mbhc *mbhc,
- struct snd_soc_codec *codec,
- const struct wcd_mbhc_cb *mbhc_cb,
- const struct wcd_mbhc_intr *mbhc_cdc_intr_ids,
- struct wcd_mbhc_register *mbhc_reg,
- bool impedance_det_en)
-{
- return 0;
-}
-static inline int wcd_mbhc_start(struct wcd_mbhc *mbhc,
- struct wcd_mbhc_config *mbhc_cfg)
-{
- return 0;
-}
-static inline int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc,
- uint32_t *zl,
- uint32_t *zr)
-{
- *zl = 0;
- *zr = 0;
- return -EINVAL;
-}
-static inline void wcd_mbhc_deinit(struct wcd_mbhc *mbhc)
-{
-}
-#endif
+void wcd_mbhc_find_plug_and_report(struct wcd_mbhc *mbhc,
+ enum wcd_mbhc_plug_type plug_type);
+void wcd_mbhc_hs_elec_irq(struct wcd_mbhc *mbhc, int irq_type, bool enable);
+void wcd_mbhc_elec_hs_report_unplug(struct wcd_mbhc *mbhc);
+bool wcd_swch_level_remove(struct wcd_mbhc *mbhc);
+void wcd_enable_curr_micbias(const struct wcd_mbhc *mbhc,
+ const enum wcd_mbhc_cs_mb_en_flag cs_mb_en);
+void wcd_mbhc_jack_report(struct wcd_mbhc *mbhc,
+ struct snd_soc_jack *jack, int status, int mask);
+int wcd_cancel_btn_work(struct wcd_mbhc *mbhc);
+int wcd_mbhc_get_button_mask(struct wcd_mbhc *mbhc);
#endif /* __WCD_MBHC_V2_H__ */
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 5ea0551..dedf4dc 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -46,6 +46,7 @@
#include "wcd9xxx-resmgr-v2.h"
#include "wcd_cpe_core.h"
#include "wcdcal-hwdep.h"
+#include "wcd-mbhc-v2-api.h"
#define TASHA_RX_PORT_START_NUMBER 16
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index 3d032f0..578c347 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -32,6 +32,7 @@
#include "wcd934x.h"
#include "wcd934x-mbhc.h"
#include "../wcdcal-hwdep.h"
+#include "../wcd-mbhc-v2-api.h"
#define TAVIL_ZDET_SUPPORTED true
/* Z value defined in milliohm */
@@ -113,7 +114,7 @@
WCD_MBHC_REGISTER("WCD_MBHC_PULLDOWN_CTRL",
0, 0, 0, 0),
WCD_MBHC_REGISTER("WCD_MBHC_ANC_DET_EN",
- WCD934X_ANA_MBHC_ZDET, 0x01, 0, 0),
+ WCD934X_MBHC_CTL_BCS, 0x02, 1, 0),
WCD_MBHC_REGISTER("WCD_MBHC_FSM_STATUS",
WCD934X_MBHC_STATUS_SPARE_1, 0x01, 0, 0),
WCD_MBHC_REGISTER("WCD_MBHC_MUX_CTL",
@@ -126,6 +127,21 @@
WCD934X_INTR_PIN1_STATUS0, 0x04, 2, 0),
WCD_MBHC_REGISTER("WCD_MBHC_HPHR_OCP_STATUS",
WCD934X_INTR_PIN1_STATUS0, 0x08, 3, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_ADC_EN",
+ WCD934X_MBHC_NEW_CTL_1, 0x08, 3, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_ADC_COMPLETE", WCD934X_MBHC_NEW_FSM_STATUS,
+ 0x40, 6, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_ADC_TIMEOUT", WCD934X_MBHC_NEW_FSM_STATUS,
+ 0x80, 7, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_ADC_RESULT", WCD934X_MBHC_NEW_ADC_RESULT,
+ 0xFF, 0, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_MICB2_VOUT", WCD934X_ANA_MICB2, 0x3F, 0, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_ADC_MODE",
+ WCD934X_MBHC_NEW_CTL_1, 0x10, 4, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_DETECTION_DONE",
+ WCD934X_MBHC_NEW_CTL_1, 0x04, 2, 0),
+ WCD_MBHC_REGISTER("WCD_MBHC_ELECT_ISRC_EN",
+ WCD934X_ANA_MBHC_ZDET, 0x02, 1, 0),
};
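Each WCD_MBHC_REGISTER entry packs (name, reg, mask, shift, invert), and the mask/shift pair must describe the same bit field: WCD_MBHC_ADC_COMPLETE above is bit 6 of WCD934X_MBHC_NEW_FSM_STATUS, hence mask 0x40 with shift 6. A small consistency-check sketch over such a table entry:

/* Sanity sketch: a mask must sit entirely at or above the offset,
 * i.e. shifting it down and back up must be lossless
 * (0x40 >> 6 << 6 == 0x40 for WCD_MBHC_ADC_COMPLETE).
 */
static bool example_mbhc_reg_consistent(const struct wcd_mbhc_register *r)
{
	return !r->reg || ((r->mask >> r->offset) << r->offset) == r->mask;
}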
static const struct wcd_mbhc_intr intr_ids = {
@@ -993,8 +1009,10 @@
__func__);
goto done;
}
- snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
- snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+ if (!WCD_MBHC_DETECTION) {
+ snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+ snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+ }
done:
return ret;
@@ -1025,8 +1043,9 @@
wcd934x_mbhc->fw_data = fw_data;
BLOCKING_INIT_NOTIFIER_HEAD(&wcd934x_mbhc->notifier);
- ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
- wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
+ ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb,
+ &intr_ids, wcd_mbhc_registers,
+ TAVIL_ZDET_SUPPORTED);
if (ret) {
dev_err(codec->dev, "%s: mbhc initialization failed\n",
__func__);
@@ -1050,8 +1069,10 @@
snd_soc_add_codec_controls(codec, hph_type_detect_controls,
ARRAY_SIZE(hph_type_detect_controls));
- snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
- snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+ if (!WCD_MBHC_DETECTION) {
+ snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
+ snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
+ }
return 0;
err:
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index fd5d1e0..e18fe9d 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -33,11 +33,9 @@
select SND_SOC_INTEL_SST_MATCH if ACPI
depends on (X86 || COMPILE_TEST)
-# firmware stuff depends DW_DMAC_CORE; since there is no depends-on from
-# the reverse selection, each machine driver needs to select
-# SND_SOC_INTEL_SST_FIRMWARE carefully depending on DW_DMAC_CORE
config SND_SOC_INTEL_SST_FIRMWARE
tristate
+ select DW_DMAC_CORE
config SND_SOC_INTEL_SST_ACPI
tristate
@@ -47,16 +45,18 @@
config SND_SOC_INTEL_HASWELL
tristate
+ select SND_SOC_INTEL_SST
select SND_SOC_INTEL_SST_FIRMWARE
config SND_SOC_INTEL_BAYTRAIL
tristate
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
config SND_SOC_INTEL_HASWELL_MACH
tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on DW_DMAC_CORE
- select SND_SOC_INTEL_SST
+ depends on DMADEVICES
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
@@ -99,9 +99,8 @@
config SND_SOC_INTEL_BYT_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
- select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
+ depends on DMADEVICES
+ depends on SND_SST_IPC_ACPI = n
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
help
@@ -112,9 +111,8 @@
config SND_SOC_INTEL_BYT_MAX98090_MACH
tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
- select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
+ depends on DMADEVICES
+ depends on SND_SST_IPC_ACPI = n
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
help
@@ -123,9 +121,8 @@
config SND_SOC_INTEL_BDW_RT5677_MACH
tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
- depends on X86_INTEL_LPSS && GPIOLIB && I2C && DW_DMAC
- depends on DW_DMAC_CORE=y
- select SND_SOC_INTEL_SST
+ depends on X86_INTEL_LPSS && GPIOLIB && I2C
+ depends on DMADEVICES
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5677
help
@@ -134,10 +131,8 @@
config SND_SOC_INTEL_BROADWELL_MACH
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
- depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
- I2C_DESIGNWARE_PLATFORM
- depends on DW_DMAC_CORE
- select SND_SOC_INTEL_SST
+ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+ depends on DMADEVICES
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 0a88537..0bfa688 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -400,6 +400,7 @@
static unsigned long cht_machine_id;
#define CHT_SURFACE_MACH 1
+#define BYT_THINKPAD_10 2
static int cht_surface_quirk_cb(const struct dmi_system_id *id)
{
@@ -407,6 +408,23 @@
return 1;
}
+static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
+{
+ cht_machine_id = BYT_THINKPAD_10;
+ return 1;
+}
+
+
+static const struct dmi_system_id byt_table[] = {
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20C3001VHH"),
+ },
+ },
+ { }
+};
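dmi_check_system(byt_table) walks this table and fires the callback of every matching entry, which is how byt_quirk() below learns it is running on the Thinkpad 10 and swaps in the cht-bsw-rt5672 machine. Covering another DMI identity is just one more entry; a hedged sketch (the product string is a placeholder, not a real device):

/* Hypothetical table extending the same quirk to another machine;
 * the DMI strings below are made up for illustration.
 */
static const struct dmi_system_id example_byt_table[] = {
	{
		.callback = byt_thinkpad10_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20C1EXAMPLE"),
		},
	},
	{ }	/* terminator required by dmi_check_system() */
};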
static const struct dmi_system_id cht_table[] = {
{
@@ -424,6 +442,10 @@
"10EC5640", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
&chv_platform_data };
+static struct sst_acpi_mach byt_thinkpad_10 = {
+ "10EC5640", "cht-bsw-rt5672", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
+ &byt_rvp_platform_data };
+
static struct sst_acpi_mach *cht_quirk(void *arg)
{
struct sst_acpi_mach *mach = arg;
@@ -436,8 +458,21 @@
return mach;
}
+static struct sst_acpi_mach *byt_quirk(void *arg)
+{
+ struct sst_acpi_mach *mach = arg;
+
+ dmi_check_system(byt_table);
+
+ if (cht_machine_id == BYT_THINKPAD_10)
+ return &byt_thinkpad_10;
+ else
+ return mach;
+}
+
+
static struct sst_acpi_mach sst_acpi_bytcr[] = {
- {"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
+ {"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", byt_quirk,
&byt_rvp_platform_data },
{"10EC5642", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
&byt_rvp_platform_data },
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index bff77a1..d5873ee 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -57,9 +57,7 @@
struct clk *mclk;
};
-static unsigned long byt_rt5640_quirk = BYT_RT5640_DMIC1_MAP |
- BYT_RT5640_DMIC_EN |
- BYT_RT5640_MCLK_EN;
+static unsigned long byt_rt5640_quirk = BYT_RT5640_MCLK_EN;
static void log_quirks(struct device *dev)
{
@@ -389,6 +387,16 @@
BYT_RT5640_SSP0_AIF1),
},
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ },
+ .driver_data = (unsigned long *)(BYT_RT5640_IN3_MAP |
+ BYT_RT5640_MCLK_EN |
+ BYT_RT5640_SSP0_AIF1),
+
+ },
{}
};
@@ -613,7 +621,7 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.platform_name = "sst-mfld-platform",
- .ignore_suspend = 1,
+ .nonatomic = true,
.dynamic = 1,
.dpcm_playback = 1,
.dpcm_capture = 1,
@@ -626,7 +634,6 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.platform_name = "sst-mfld-platform",
- .ignore_suspend = 1,
.nonatomic = true,
.dynamic = 1,
.dpcm_playback = 1,
@@ -653,6 +660,7 @@
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_rt5640_codec_fixup,
.ignore_suspend = 1,
+ .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = byt_rt5640_init,
@@ -738,6 +746,13 @@
if (res_info->acpi_ipc_irq_index == 0) {
byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
}
+
+ /* change defaults for Baytrail-CR capture */
+ byt_rt5640_quirk |= BYT_RT5640_IN1_MAP;
+ byt_rt5640_quirk |= BYT_RT5640_DIFF_MIC;
+ } else {
+ byt_rt5640_quirk |= (BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_DMIC_EN);
}
/* check quirks before creating card */
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 35f591e..eabff3a 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -235,7 +235,6 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.platform_name = "sst-mfld-platform",
- .ignore_suspend = 1,
.nonatomic = true,
.dynamic = 1,
.dpcm_playback = 1,
@@ -249,7 +248,6 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.platform_name = "sst-mfld-platform",
- .ignore_suspend = 1,
.nonatomic = true,
.dynamic = 1,
.dpcm_playback = 1,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 16c94c4..9052561 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -24,6 +24,9 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include <asm/platform_sst_audio.h>
+#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -45,6 +48,7 @@
struct snd_soc_jack jack;
struct cht_acpi_card *acpi_card;
char codec_name[16];
+ struct clk *mclk;
};
static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
@@ -65,6 +69,7 @@
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_soc_card *card = dapm->card;
struct snd_soc_dai *codec_dai;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
int ret;
codec_dai = cht_get_codec_dai(card);
@@ -73,19 +78,30 @@
return -EIO;
}
- if (!SND_SOC_DAPM_EVENT_OFF(event))
- return 0;
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ if (ctx->mclk) {
+ ret = clk_prepare_enable(ctx->mclk);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "could not configure MCLK state");
+ return ret;
+ }
+ }
+ } else {
+ /* Set codec sysclk source to its internal clock because codec PLL will
+ * be off when idle and MCLK will also be off when codec is
+ * runtime suspended. The codec needs a clock for jack detection and
+ * button press. MCLK is turned off by the clock framework or ACPI.
+ */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_RCCLK,
+ 48000 * 512, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
- /* Set codec sysclk source to its internal clock because codec PLL will
- * be off when idle and MCLK will also be off by ACPI when codec is
- * runtime suspended. Codec needs clock for jack detection and button
- * press.
- */
- ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_RCCLK,
- 0, SND_SOC_CLOCK_IN);
- if (ret < 0) {
- dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
- return ret;
+ if (ctx->mclk)
+ clk_disable_unprepare(ctx->mclk);
}
return 0;
@@ -97,7 +113,7 @@
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
- platform_clock_control, SND_SOC_DAPM_POST_PMD),
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
@@ -225,6 +241,26 @@
rt5645_set_jack_detect(codec, &ctx->jack, &ctx->jack, &ctx->jack);
+ if (ctx->mclk) {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+ * clock framework restrictions that do not allow
+ * disabling a clock that has not been enabled,
+ * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(ctx->mclk);
+ if (!ret)
+ clk_disable_unprepare(ctx->mclk);
+
+ ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ);
+
+ if (ret)
+ dev_err(runtime->dev, "unable to set MCLK rate\n");
+ }
return ret;
}
@@ -349,6 +385,18 @@
static char cht_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static bool is_valleyview(void)
+{
+ static const struct x86_cpu_id cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
+ {}
+ };
+
+ if (!x86_match_cpu(cpu_ids))
+ return false;
+ return true;
+}
+
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
@@ -358,22 +406,32 @@
struct sst_acpi_mach *mach;
const char *i2c_name = NULL;
int dai_index = 0;
+ bool found = false;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
if (!drv)
return -ENOMEM;
+ mach = (&pdev->dev)->platform_data;
+
for (i = 0; i < ARRAY_SIZE(snd_soc_cards); i++) {
- if (acpi_dev_found(snd_soc_cards[i].codec_id)) {
+ if (acpi_dev_found(snd_soc_cards[i].codec_id) &&
+ (!strncmp(snd_soc_cards[i].codec_id, mach->id, 8))) {
dev_dbg(&pdev->dev,
"found codec %s\n", snd_soc_cards[i].codec_id);
card = snd_soc_cards[i].soc_card;
drv->acpi_card = &snd_soc_cards[i];
+ found = true;
break;
}
}
+
+ if (!found) {
+ dev_err(&pdev->dev, "No matching HID found in supported list\n");
+ return -ENODEV;
+ }
+
card->dev = &pdev->dev;
- mach = card->dev->platform_data;
sprintf(drv->codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
/* set correct codec name */
@@ -391,6 +449,16 @@
cht_dailink[dai_index].codec_name = cht_rt5640_codec_name;
}
+ if (is_valleyview()) {
+ drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(drv->mclk)) {
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+ PTR_ERR(drv->mclk));
+ return PTR_ERR(drv->mclk);
+ }
+ }
+
snd_soc_card_set_drvdata(card, drv);
ret_val = devm_snd_soc_register_card(&pdev->dev, card);
if (ret_val) {
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index b5b1934..bef8a45 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -448,7 +448,7 @@
if (bc->set_params != SKL_PARAM_INIT)
continue;
- mconfig->formats_config.caps = (u32 *)&bc->params;
+ mconfig->formats_config.caps = (u32 *)bc->params;
mconfig->formats_config.caps_size = bc->size;
break;
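The one-character fix above matters because bc->params is already a pointer to the parameter blob; taking &bc->params made caps point at the pointer variable itself, so consumers read the struct's own memory instead of the blob. A minimal illustration of the difference (types are simplified stand-ins for the skl structures):

#include <stdint.h>

struct blob_holder {
	char *params;	/* points at the actual parameter data */
};

static const uint32_t *caps_from(struct blob_holder *bc)
{
	/* Correct: reinterpret the blob itself.
	 * Broken:  (uint32_t *)&bc->params would reinterpret the
	 *          pointer field, not the data it points to.
	 */
	return (const uint32_t *)bc->params;
}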
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index bc5f7e5..45c5479 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -351,7 +351,7 @@
static int msm8996_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
pr_debug("%s() ucontrol->value.integer.value[0] = %ld\n",
__func__, ucontrol->value.integer.value[0]);
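snd_kcontrol_chip() merely casts kcontrol->private_data, which ASoC does not point at the codec, so the old code dereferenced the wrong object; snd_soc_kcontrol_codec() resolves the codec that actually registered the control. A minimal handler sketch using the correct accessor (the register below is a placeholder, not a real WCD9335 address):

/* Sketch of a codec-level control handler with the right accessor. */
static int example_spk_get(struct snd_kcontrol *kcontrol,
			   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);

	/* 0x00 is a placeholder register for illustration only */
	ucontrol->value.integer.value[0] = !!snd_soc_read(codec, 0x00);
	return 0;
}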
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 51c27b7..b75ba98 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -5247,7 +5247,6 @@
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.dpcm_playback = 1,
- .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index e39e642..654806e 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -153,27 +153,27 @@
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_voice *voice;
- if (!strcmp("VoLTE", substream->pcm->id)) {
+ if (!strncmp("VoLTE", substream->pcm->id, 5)) {
voice = &voice_info[VOLTE_SESSION_INDEX];
pr_debug("%s: Open VoLTE Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("Voice2", substream->pcm->id)) {
+ } else if (!strncmp("Voice2", substream->pcm->id, 6)) {
voice = &voice_info[VOICE2_SESSION_INDEX];
pr_debug("%s: Open Voice2 Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("QCHAT", substream->pcm->id)) {
+ } else if (!strncmp("QCHAT", substream->pcm->id, 5)) {
voice = &voice_info[QCHAT_SESSION_INDEX];
pr_debug("%s: Open QCHAT Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("VoWLAN", substream->pcm->id)) {
+ } else if (!strncmp("VoWLAN", substream->pcm->id, 6)) {
voice = &voice_info[VOWLAN_SESSION_INDEX];
pr_debug("%s: Open VoWLAN Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("VoiceMMode1", substream->pcm->id)) {
+ } else if (!strncmp("VoiceMMode1", substream->pcm->id, 11)) {
voice = &voice_info[VOICEMMODE1_INDEX];
pr_debug("%s: Open VoiceMMode1 Substream Id=%s\n",
__func__, substream->pcm->id);
- } else if (!strcmp("VoiceMMode2", substream->pcm->id)) {
+ } else if (!strncmp("VoiceMMode2", substream->pcm->id, 11)) {
voice = &voice_info[VOICEMMODE2_INDEX];
pr_debug("%s: Open VoiceMMode2 Substream Id=%s\n",
__func__, substream->pcm->id);
@@ -394,12 +394,13 @@
struct snd_ctl_elem_value *ucontrol)
{
int ret;
- bool sidetone_enable = ucontrol->value.integer.value[0];
+ long value = ucontrol->value.integer.value[0];
+ bool sidetone_enable = value;
uint32_t session_id = ALL_SESSION_VSID;
- if (sidetone_enable < 0) {
- pr_err("%s: Invalid arguments sidetone enable %d\n",
- __func__, sidetone_enable);
+ if (value < 0) {
+ pr_err("%s: Invalid arguments sidetone enable %ld\n",
+ __func__, value);
ret = -EINVAL;
return ret;
}
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index f64074d..1c03d8c 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -335,7 +335,6 @@
.platform_name = "msm-pcm-hostless",
.dynamic = 1,
.dpcm_playback = 1,
- .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 6987949..304bf47 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -155,6 +155,21 @@
u32 index;
};
+enum pinctrl_pin_state {
+ STATE_DISABLE = 0, /* All pins are in sleep state */
+ STATE_MI2S_ACTIVE, /* MI2S = active, TDM = sleep */
+ STATE_TDM_ACTIVE, /* MI2S = sleep, TDM = active */
+};
+
+struct msm_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *mi2s_disable;
+ struct pinctrl_state *tdm_disable;
+ struct pinctrl_state *mi2s_active;
+ struct pinctrl_state *tdm_active;
+ enum pinctrl_pin_state curr_state;
+};
+
struct msm_asoc_mach_data {
u32 mclk_freq;
int us_euro_gpio; /* used by gpio driver API */
@@ -162,6 +177,7 @@
struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
struct snd_info_entry *codec_root;
+ struct msm_pinctrl_info pinctrl_info;
};
struct msm_asoc_wcd93xx_codec {
@@ -170,6 +186,9 @@
void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
};
+static const char *const pin_states[] = {"sleep", "i2s-active",
+ "tdm-active"};
+
enum {
TDM_0 = 0,
TDM_1,
@@ -397,7 +416,8 @@
"KHZ_88P2", "KHZ_96", "KHZ_176P4",
"KHZ_192", "KHZ_352P8", "KHZ_384"};
static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
- "KHZ_192"};
+ "KHZ_192", "KHZ_32", "KHZ_44P1",
+ "KHZ_88P2", "KHZ_176P4" };
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -508,6 +528,9 @@
.key_code[7] = 0,
.linein_th = 5000,
.moisture_en = true,
+ .mbhc_micbias = MIC_BIAS_2,
+ .anc_micbias = MIC_BIAS_2,
+ .enable_anc_mic_detect = false,
};
static struct snd_soc_dapm_route wcd_audio_paths[] = {
@@ -1464,6 +1487,22 @@
return idx;
switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 6;
+ break;
+
+ case SAMPLING_RATE_88P2KHZ:
+ sample_rate_val = 5;
+ break;
+
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 4;
+ break;
+
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 3;
+ break;
+
case SAMPLING_RATE_192KHZ:
sample_rate_val = 2;
break;
@@ -1494,6 +1533,18 @@
return idx;
switch (ucontrol->value.integer.value[0]) {
+ case 6:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 5:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+ break;
+ case 4:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 3:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+ break;
case 2:
ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
break;
@@ -3771,6 +3822,275 @@
return ret;
}
+static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
+ enum pinctrl_pin_state new_state)
+{
+ int ret = 0;
+ int curr_state = 0;
+
+ if (pinctrl_info == NULL) {
+ pr_err("%s: pinctrl_info is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ curr_state = pinctrl_info->curr_state;
+ pinctrl_info->curr_state = new_state;
+ pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
+ pin_states[curr_state], pin_states[pinctrl_info->curr_state]);
+
+ if (curr_state == pinctrl_info->curr_state) {
+ pr_debug("%s: Already in same state\n", __func__);
+ goto err;
+ }
+
+ if (curr_state != STATE_DISABLE &&
+ pinctrl_info->curr_state != STATE_DISABLE) {
+ pr_debug("%s: state already active cannot switch\n", __func__);
+ ret = -EIO;
+ goto err;
+ }
+
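+ /* Both active states drive the same pin group, so an active-to-active switch (rejected above) must go through STATE_DISABLE first */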
+ switch (pinctrl_info->curr_state) {
+ case STATE_MI2S_ACTIVE:
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->mi2s_active);
+ if (ret) {
+ pr_err("%s: MI2S state select failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ break;
+ case STATE_TDM_ACTIVE:
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->tdm_active);
+ if (ret) {
+ pr_err("%s: TDM state select failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ break;
+ case STATE_DISABLE:
+ if (curr_state == STATE_MI2S_ACTIVE) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->mi2s_disable);
+ } else {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->tdm_disable);
+ }
+ if (ret) {
+ pr_err("%s: state disable failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ break;
+ default:
+ pr_err("%s: TLMM pin state is invalid\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ return ret;
+}
+
+static void msm_release_pinctrl(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+ if (pinctrl_info->pinctrl) {
+ devm_pinctrl_put(pinctrl_info->pinctrl);
+ pinctrl_info->pinctrl = NULL;
+ }
+}
+
+static int msm_get_pinctrl(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = NULL;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ pinctrl_info = &pdata->pinctrl_info;
+
+ if (pinctrl_info == NULL) {
+ pr_err("%s: pinctrl_info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR_OR_NULL(pinctrl)) {
+ pr_err("%s: Unable to get pinctrl handle\n", __func__);
+ return -EINVAL;
+ }
+ pinctrl_info->pinctrl = pinctrl;
+
+ /* get all the states handles from Device Tree */
+ pinctrl_info->mi2s_disable = pinctrl_lookup_state(pinctrl,
+ "quat-mi2s-sleep");
+ if (IS_ERR(pinctrl_info->mi2s_disable)) {
+ pr_err("%s: could not get mi2s_disable pinstate\n", __func__);
+ goto err;
+ }
+ pinctrl_info->mi2s_active = pinctrl_lookup_state(pinctrl,
+ "quat-mi2s-active");
+ if (IS_ERR(pinctrl_info->mi2s_active)) {
+ pr_err("%s: could not get mi2s_active pinstate\n", __func__);
+ goto err;
+ }
+ pinctrl_info->tdm_disable = pinctrl_lookup_state(pinctrl,
+ "quat-tdm-sleep");
+ if (IS_ERR(pinctrl_info->tdm_disable)) {
+ pr_err("%s: could not get tdm_disable pinstate\n", __func__);
+ goto err;
+ }
+ pinctrl_info->tdm_active = pinctrl_lookup_state(pinctrl,
+ "quat-tdm-active");
+ if (IS_ERR(pinctrl_info->tdm_active)) {
+ pr_err("%s: could not get tdm_active pinstate\n",
+ __func__);
+ goto err;
+ }
+ /* Reset the TLMM pins to a default state */
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->mi2s_disable);
+ if (ret != 0) {
+ pr_err("%s: Disable TLMM pins failed with %d\n",
+ __func__, ret);
+ ret = -EIO;
+ goto err;
+ }
+ pinctrl_info->curr_state = STATE_DISABLE;
+
+ return 0;
+
+err:
+ devm_pinctrl_put(pinctrl);
+ pinctrl_info->pinctrl = NULL;
+ return -EINVAL;
+}
+
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ if (cpu_dai->id == AFE_PORT_ID_QUATERNARY_TDM_RX) {
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+ rate->min = rate->max =
+ tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+ } else if (cpu_dai->id == AFE_PORT_ID_SECONDARY_TDM_RX) {
+ channels->min = channels->max =
+ tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+ rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+ } else {
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+ __func__, cpu_dai->id, channels->max, rate->max,
+ params_format(params));
+
+ return 0;
+}
+
+static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ int channels, slot_width, slots;
+ unsigned int slot_mask;
+ unsigned int slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
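+ /* Slot offsets for the channel map, in bytes: one 32-bit (4-byte) slot per channel */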
+
+ pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+ slots = tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+ /* Set one mask bit per active slot, e.g. bits 0 and 1 for a two-slot config */
+ slot_mask = 0x0000FFFF >> (16 - slots);
+ slot_width = 32;
+ channels = slots;
+
+ pr_debug("%s: slot_width %d slots %d\n", __func__, slot_width, slots);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pr_debug("%s: slot_width %d\n", __func__, slot_width);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+ slots, slot_width);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm slot, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai,
+ 0, NULL, channels, slot_offset);
+ if (ret < 0) {
+ pr_err("%s: failed to set channel map, err:%d\n",
+ __func__, ret);
+ goto end;
+ }
+ } else {
+ pr_err("%s: invalid use case, err:%d\n",
+ __func__, ret);
+ }
+
+end:
+ return ret;
+}
+
+static int sdm845_tdm_snd_startup(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+ ret = msm_set_pinctrl(pinctrl_info, STATE_TDM_ACTIVE);
+ if (ret)
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static void sdm845_tdm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+ ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+ if (ret)
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+
+}
+
+static struct snd_soc_ops sdm845_tdm_be_ops = {
+ .hw_params = sdm845_tdm_snd_hw_params,
+ .startup = sdm845_tdm_snd_startup,
+ .shutdown = sdm845_tdm_snd_shutdown
+};
+
static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
{
int ret = 0;
@@ -3778,6 +4098,9 @@
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int index = cpu_dai->id;
unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
dev_dbg(rtd->card->dev,
"%s: substream = %s stream = %d, dai name %s, dai ID %d\n",
@@ -3791,6 +4114,14 @@
__func__, cpu_dai->id);
goto err;
}
+ if (index == QUAT_MI2S) {
+ ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+ if (ret) {
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+ goto err;
+ }
+ }
/*
* Mutex protection in case the same MI2S
* interface is used for both TX and RX, so
@@ -3843,6 +4174,9 @@
int ret;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int index = rtd->cpu_dai->id;
+ struct snd_soc_card *card = rtd->card;
+ struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+ struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
@@ -3861,6 +4195,13 @@
}
}
mutex_unlock(&mi2s_intf_conf[index].lock);
+
+ if (index == QUAT_MI2S) {
+ ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+ if (ret)
+ pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+ __func__, ret);
+ }
}
static struct snd_soc_ops msm_mi2s_be_ops = {
@@ -4655,6 +4996,42 @@
},
};
+static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
+ {
+ .name = MSM_DAILINK_NAME(ASM Loopback),
+ .stream_name = "MultiMedia6",
+ .cpu_dai_name = "MultiMedia6",
+ .platform_name = "msm-pcm-loopback",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ignore_suspend = 1,
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_pmdown_time = 1,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ },
+ {
+ .name = "USB Audio Hostless",
+ .stream_name = "USB Audio Hostless",
+ .cpu_dai_name = "USBAUDIO_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+};
+
static struct snd_soc_dai_link msm_common_be_dai_links[] = {
/* Backend AFE DAI Links */
{
@@ -4862,8 +5239,8 @@
.no_pcm = 1,
.dpcm_playback = 1,
.id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
- .be_hw_params_fixup = msm_be_hw_params_fixup,
- .ops = &msm_tdm_be_ops,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &sdm845_tdm_be_ops,
.ignore_suspend = 1,
},
{
@@ -5373,6 +5750,7 @@
static struct snd_soc_dai_link msm_tavil_snd_card_dai_links[
ARRAY_SIZE(msm_common_dai_links) +
ARRAY_SIZE(msm_tavil_fe_dai_links) +
+ ARRAY_SIZE(msm_common_misc_fe_dai_links) +
ARRAY_SIZE(msm_common_be_dai_links) +
ARRAY_SIZE(msm_tavil_be_dai_links) +
ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -5662,7 +6040,7 @@
{
struct snd_soc_card *card = NULL;
struct snd_soc_dai_link *dailink;
- int len_1, len_2, len_3;
+ int len_1, len_2, len_3, len_4;
int total_links;
const struct of_device_id *match;
@@ -5677,8 +6055,9 @@
card = &snd_soc_card_tavil_msm;
len_1 = ARRAY_SIZE(msm_common_dai_links);
len_2 = len_1 + ARRAY_SIZE(msm_tavil_fe_dai_links);
- len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
- total_links = len_3 + ARRAY_SIZE(msm_tavil_be_dai_links);
+ len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+ len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+ total_links = len_4 + ARRAY_SIZE(msm_tavil_be_dai_links);
memcpy(msm_tavil_snd_card_dai_links,
msm_common_dai_links,
sizeof(msm_common_dai_links));
@@ -5686,9 +6065,12 @@
msm_tavil_fe_dai_links,
sizeof(msm_tavil_fe_dai_links));
memcpy(msm_tavil_snd_card_dai_links + len_2,
+ msm_common_misc_fe_dai_links,
+ sizeof(msm_common_misc_fe_dai_links));
+ memcpy(msm_tavil_snd_card_dai_links + len_3,
msm_common_be_dai_links,
sizeof(msm_common_be_dai_links));
- memcpy(msm_tavil_snd_card_dai_links + len_3,
+ memcpy(msm_tavil_snd_card_dai_links + len_4,
msm_tavil_be_dai_links,
sizeof(msm_tavil_be_dai_links));
@@ -6186,14 +6568,19 @@
pdev->dev.of_node->full_name);
dev_dbg(&pdev->dev, "Jack type properties set to default");
} else {
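+ /* 5- and 6-pole jacks carry an extra ANC microphone, so enable its detection only for those */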
- if (!strcmp(mbhc_audio_jack_type, "4-pole-jack"))
+ if (!strcmp(mbhc_audio_jack_type, "4-pole-jack")) {
+ wcd_mbhc_cfg.enable_anc_mic_detect = false;
dev_dbg(&pdev->dev, "This hardware has 4 pole jack");
- else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack"))
+ } else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack")) {
+ wcd_mbhc_cfg.enable_anc_mic_detect = true;
dev_dbg(&pdev->dev, "This hardware has 5 pole jack");
- else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack"))
+ } else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack")) {
+ wcd_mbhc_cfg.enable_anc_mic_detect = true;
dev_dbg(&pdev->dev, "This hardware has 6 pole jack");
- else
+ } else {
+ wcd_mbhc_cfg.enable_anc_mic_detect = false;
dev_dbg(&pdev->dev, "Unknown value, set to default");
+ }
}
/*
* Parse US-Euro gpio info from DT. Report no error if us-euro
@@ -6219,6 +6606,17 @@
dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
ret);
+ /* Parse pinctrl info from devicetree */
+ ret = msm_get_pinctrl(pdev);
+ if (!ret) {
+ pr_debug("%s: pinctrl parsing successful\n", __func__);
+ } else {
+ dev_dbg(&pdev->dev,
+ "%s: Parsing pinctrl failed with %d. Cannot use Ports\n",
+ __func__, ret);
+ ret = 0;
+ }
+
msm_i2s_auxpcm_init(pdev);
is_initial_boot = true;
@@ -6230,6 +6628,7 @@
return 0;
err:
+ msm_release_pinctrl(pdev);
devm_kfree(&pdev->dev, pdata);
return ret;
}
@@ -6246,6 +6645,7 @@
}
msm_i2s_auxpcm_deinit();
+ msm_release_pinctrl(pdev);
snd_soc_unregister_card(card);
return 0;
}
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 687a8f8..15c9240 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -14,9 +14,11 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
@@ -92,6 +94,7 @@
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regmap;
+ struct reset_control *rst;
struct snd_dmaengine_dai_dma_data playback_dma_data;
};
@@ -585,9 +588,22 @@
return 0;
}
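+/* Per-compatible quirks, looked up via of_device_get_match_data() at probe time */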
+struct sun4i_i2s_quirks {
+ bool has_reset;
+};
+
+static const struct sun4i_i2s_quirks sun4i_a10_i2s_quirks = {
+ .has_reset = false,
+};
+
+static const struct sun4i_i2s_quirks sun6i_a31_i2s_quirks = {
+ .has_reset = true,
+};
+
static int sun4i_i2s_probe(struct platform_device *pdev)
{
struct sun4i_i2s *i2s;
+ const struct sun4i_i2s_quirks *quirks;
struct resource *res;
void __iomem *regs;
int irq, ret;
@@ -608,6 +624,12 @@
return irq;
}
+ quirks = of_device_get_match_data(&pdev->dev);
+ if (!quirks) {
+ dev_err(&pdev->dev, "Failed to determine the quirks to use\n");
+ return -ENODEV;
+ }
+
i2s->bus_clk = devm_clk_get(&pdev->dev, "apb");
if (IS_ERR(i2s->bus_clk)) {
dev_err(&pdev->dev, "Can't get our bus clock\n");
@@ -626,7 +648,24 @@
dev_err(&pdev->dev, "Can't get our mod clock\n");
return PTR_ERR(i2s->mod_clk);
}
-
+
+ if (quirks->has_reset) {
+ i2s->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(i2s->rst)) {
+ dev_err(&pdev->dev, "Failed to get reset control\n");
+ return PTR_ERR(i2s->rst);
+ }
+ }
+
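+ /* Deassert the reset line before setting up DMA and registering the DAI */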
+ if (!IS_ERR(i2s->rst)) {
+ ret = reset_control_deassert(i2s->rst);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to deassert the reset control\n");
+ return -EINVAL;
+ }
+ }
+
i2s->playback_dma_data.addr = res->start + SUN4I_I2S_FIFO_TX_REG;
i2s->playback_dma_data.maxburst = 4;
@@ -658,23 +697,37 @@
sun4i_i2s_runtime_suspend(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR(i2s->rst))
+ reset_control_assert(i2s->rst);
return ret;
}
static int sun4i_i2s_remove(struct platform_device *pdev)
{
+ struct sun4i_i2s *i2s = dev_get_drvdata(&pdev->dev);
+
snd_dmaengine_pcm_unregister(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
sun4i_i2s_runtime_suspend(&pdev->dev);
+ if (!IS_ERR(i2s->rst))
+ reset_control_assert(i2s->rst);
+
return 0;
}
static const struct of_device_id sun4i_i2s_match[] = {
- { .compatible = "allwinner,sun4i-a10-i2s", },
+ {
+ .compatible = "allwinner,sun4i-a10-i2s",
+ .data = &sun4i_a10_i2s_quirks,
+ },
+ {
+ .compatible = "allwinner,sun6i-a31-i2s",
+ .data = &sun6i_a31_i2s_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_i2s_match);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index db85d92..8279009 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -348,6 +348,16 @@
alts = &iface->altsetting[1];
goto add_sync_ep;
+ case USB_ID(0x2466, 0x8003):
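+ /* Fractal Audio Axe-Fx II */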
+ ep = 0x86;
+ iface = usb_ifnum_to_if(dev, 2);
+
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
+
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 93bb14e7..eb4b9f7 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1166,6 +1166,18 @@
return false;
}
+/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
+ * between PCM/DOP and native DSD mode
+ */
+static bool is_teac_50X_dac(unsigned int id)
+{
+ switch (id) {
+ case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+ return true;
+ }
+ return false;
+}
+
int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
struct audioformat *fmt)
{
@@ -1193,6 +1205,26 @@
break;
}
mdelay(20);
+ } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+ /* Vendor mode switch cmd is required. */
+ switch (fmt->altsetting) {
+ case 3: /* DSD mode (DSD_U32) requested */
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1, 1, NULL, 0);
+ if (err < 0)
+ return err;
+ break;
+
+ case 2: /* PCM or DOP mode (S32) requested */
+ case 1: /* PCM mode (S16) requested */
+ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 0, 1, NULL, 0);
+ if (err < 0)
+ return err;
+ break;
+ }
}
return 0;
}
@@ -1338,5 +1370,11 @@
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
}
+ /* TEAC devices with USB DAC functionality */
+ if (is_teac_50X_dac(chip->usb_id)) {
+ if (fp->altsetting == 3)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ }
+
return 0;
}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786d..4d28a9d 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- kvm->buses[bus_idx]->ioeventfd_count--;
+ if (kvm->buses[bus_idx])
+ kvm->buses[bus_idx]->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7f9ee29..f4c6d4f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -720,8 +720,11 @@
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- for (i = 0; i < KVM_NR_BUSES; i++)
- kvm_io_bus_destroy(kvm->buses[i]);
+ for (i = 0; i < KVM_NR_BUSES; i++) {
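+ /* a bus may already be NULL if kvm_io_bus_unregister_dev() had to drop it on allocation failure */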
+ if (kvm->buses[i])
+ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm->buses[i] = NULL;
+ }
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -3463,6 +3466,8 @@
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
@@ -3480,6 +3485,8 @@
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
/* First try the device referenced by cookie. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3530,6 +3537,8 @@
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
@@ -3542,6 +3551,9 @@
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
+ if (!bus)
+ return -ENOMEM;
+
/* exclude ioeventfd which is limited by maximum fd */
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
@@ -3561,37 +3573,41 @@
}
/* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int i, r;
+ int i;
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
- r = -ENOENT;
+ if (!bus)
+ return;
+
for (i = 0; i < bus->dev_count; i++)
if (bus->range[i].dev == dev) {
- r = 0;
break;
}
- if (r)
- return r;
+ if (i == bus->dev_count)
+ return;
new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
- if (!new_bus)
- return -ENOMEM;
+ if (!new_bus) {
+ pr_err("kvm: failed to shrink bus, removing it completely\n");
+ goto broken;
+ }
memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
new_bus->dev_count--;
memcpy(new_bus->range + i, bus->range + i + 1,
(new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+broken:
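+ /* new_bus may be NULL here; publishing NULL removes the whole bus */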
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
- return r;
}
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3604,6 +3620,8 @@
srcu_idx = srcu_read_lock(&kvm->srcu);
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ if (!bus)
+ goto out_unlock;
dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
if (dev_idx < 0)