/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
/*
 * Query request timeout for the fDeviceInit flag.
 * The fDeviceInit query response time for some devices is so large that
 * the default QUERY_REQ_TIMEOUT may not be enough for such devices.
 */
#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */

Sujit Reddy Thummae2933132014-05-26 10:59:12 +053070/* Task management command timeout */
71#define TM_CMD_TIMEOUT 100 /* msecs */
72
Yaniv Gardi64238fb2016-02-01 15:02:43 +020073/* maximum number of retries for a general UIC command */
74#define UFS_UIC_COMMAND_RETRIES 3
75
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +030076/* maximum number of link-startup retries */
77#define DME_LINKSTARTUP_RETRIES 3
78
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +020079/* Maximum retries for Hibern8 enter */
80#define UIC_HIBERN8_ENTER_RETRIES 3
81
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +030082/* maximum number of reset retries before giving up */
83#define MAX_HOST_RESET_RETRIES 5
84
Dolev Raviv68078d52013-07-30 00:35:58 +053085/* Expose the flag value from utp_upiu_query.value */
86#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
87
Seungwon Jeon7d568652013-08-31 21:40:20 +053088/* Interrupt aggregation default timeout, unit: 40us */
89#define INT_AGGR_DEF_TO 0x02
90
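/*
 * Statement-expression helper: evaluates to the return code of
 * ufshcd_enable_vreg()/ufshcd_disable_vreg() depending on @_on, e.g.
 * ret = ufshcd_toggle_vreg(dev, vreg, true);
 */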
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

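/* Maximum descriptor sizes, indexed by descriptor IDN used in Query requests */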
static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAX_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

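/* Helpers to set, test and clear the error-handling-in-progress flag */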
#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

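/*
 * Register (on first enable) or free the shared interrupt handler;
 * hba->is_irq_enabled tracks whether the IRQ is currently requested.
 */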
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if a device is present, 0 if no device is detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

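	/*
	 * Find a free slot and claim it atomically; retry if another
	 * context sets the same bit between the search and the locked set.
	 */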
	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success and a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
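	/*
	 * Drop the Device Present bit; the XOR with 0x07 yields 0 only
	 * when UTRLRDY, UTMRLRDY and UCRDY are all set.
	 */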
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument3.
 * Returns the attribute value read from the register.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 *			Setting the run-stop registers to 1 indicates to
 *			the host controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

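/**
 * ufshcd_ungate_work - work handler that turns the clocks back on and, if
 * the link was put into hibern8 while gating, exits hibern8 before
 * unblocking SCSI requests.
 * @work: pointer to the clk_gating.ungate_work member
 */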
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
				__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

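/**
 * ufshcd_gate_work - delayed work handler that gates the clocks once the
 * controller is idle (no outstanding requests, tasks or UIC commands),
 * optionally entering hibern8 and suspending devfreq scaling first.
 * @work: pointer to the clk_gating.gate_work member
 */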
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work, the gating state
	 * would be marked as REQ_CLKS_ON. In this case, keep the state
	 * as REQ_CLKS_ON, which anyway implies that clocks are off
	 * and a request to turn them on is pending. This keeps the
	 * state machine intact and ultimately prevents running the
	 * cancel work multiple times when new requests arrive before
	 * the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

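/* Account the elapsed busy window once no requests remain outstanding */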
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only on success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except that the caller handles the
 * locking. Must be called with mutex held and host_lock locked.
 * Returns 0 only on success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
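		/*
		 * Some controllers expect the PRDT length in bytes rather
		 * than in number of entries, hence the byte-granularity
		 * quirk handling below.
		 */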
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16)(sg_segments *
					sizeof(struct ufshcd_sg_entry)));
		else
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
 *			     for device management purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
 *			   for SCSI purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_SCSI;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301403/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03001404 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1405 * @upiu_wlun_id: UPIU W-LUN id
1406 *
1407 * Returns SCSI W-LUN id
1408 */
1409static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1410{
1411 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1412}
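/*
 * Editorial note, illustrative only (not part of the original driver):
 * a worked example of the two W-LUN mappings above, taking the usual
 * values UFS_UPIU_WLUN_ID = 0x80, UFS_UPIU_MAX_UNIT_NUM_ID = 0x7f and
 * SCSI_W_LUN_BASE = 0xc100. The UFS device W-LUN (UPIU id 0xD0) maps as:
 *
 *	ufshcd_upiu_wlun_to_scsi_wlun(0xD0) == (0xD0 & 0x7f) | 0xc100 == 0xc150
 *	ufshcd_scsi_to_upiu_lun(0xc150)     == (0xc150 & 0x7f) | 0x80  == 0xD0
 */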
1413
1414/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301415 * ufshcd_queuecommand - main entry point for SCSI requests
1416 * @host: SCSI host pointer
1417 * @cmd: command from SCSI Midlayer
1418 *
1419 * Returns 0 for success, non-zero in case of failure
1420 */
1421static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1422{
1423 struct ufshcd_lrb *lrbp;
1424 struct ufs_hba *hba;
1425 unsigned long flags;
1426 int tag;
1427 int err = 0;
1428
1429 hba = shost_priv(host);
1430
1431 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02001432 if (!ufshcd_valid_tag(hba, tag)) {
1433 dev_err(hba->dev,
1434 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
1435 __func__, tag, cmd, cmd->request);
1436 BUG();
1437 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301438
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301439 spin_lock_irqsave(hba->host->host_lock, flags);
1440 switch (hba->ufshcd_state) {
1441 case UFSHCD_STATE_OPERATIONAL:
1442 break;
Zang Leiganga17bddc2017-04-04 19:32:20 +00001443 case UFSHCD_STATE_EH_SCHEDULED:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301444 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301445 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301446 goto out_unlock;
1447 case UFSHCD_STATE_ERROR:
1448 set_host_byte(cmd, DID_ERROR);
1449 cmd->scsi_done(cmd);
1450 goto out_unlock;
1451 default:
1452 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
1453 __func__, hba->ufshcd_state);
1454 set_host_byte(cmd, DID_BAD_TARGET);
1455 cmd->scsi_done(cmd);
1456 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301457 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001458
1459 /* if error handling is in progress, don't issue commands */
1460 if (ufshcd_eh_in_progress(hba)) {
1461 set_host_byte(cmd, DID_ERROR);
1462 cmd->scsi_done(cmd);
1463 goto out_unlock;
1464 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301465 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301466
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301467 /* acquire the tag to make sure device cmds don't use it */
1468 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1469 /*
1470 * A device management command is using this tag; requeue the
1471 * request. Requeuing helps because the requeued request *may*
1472 * find a different free tag instead of waiting for the device
1473 * management command to complete.
1474 */
1475 err = SCSI_MLQUEUE_HOST_BUSY;
1476 goto out;
1477 }
1478
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001479 err = ufshcd_hold(hba, true);
1480 if (err) {
1481 err = SCSI_MLQUEUE_HOST_BUSY;
1482 clear_bit_unlock(tag, &hba->lrb_in_use);
1483 goto out;
1484 }
1485 WARN_ON(hba->clk_gating.state != CLKS_ON);
1486
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301487 lrbp = &hba->lrb[tag];
1488
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301489 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301490 lrbp->cmd = cmd;
1491 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
1492 lrbp->sense_buffer = cmd->sense_buffer;
1493 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03001494 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03001495 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301496
Joao Pinto300bb132016-05-11 12:21:27 +01001497 ufshcd_comp_scsi_upiu(hba, lrbp);
1498
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00001499 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301500 if (err) {
1501 lrbp->cmd = NULL;
1502 clear_bit_unlock(tag, &hba->lrb_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301503 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301504 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301505
1506 /* issue command to the controller */
1507 spin_lock_irqsave(hba->host->host_lock, flags);
1508 ufshcd_send_command(hba, tag);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05301509out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301510 spin_unlock_irqrestore(hba->host->host_lock, flags);
1511out:
1512 return err;
1513}
1514
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301515static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
1516 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
1517{
1518 lrbp->cmd = NULL;
1519 lrbp->sense_bufflen = 0;
1520 lrbp->sense_buffer = NULL;
1521 lrbp->task_tag = tag;
1522 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301523 lrbp->intr_cmd = true; /* No interrupt aggregation */
1524 hba->dev_cmd.type = cmd_type;
1525
Joao Pinto300bb132016-05-11 12:21:27 +01001526 return ufshcd_comp_devman_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301527}
1528
1529static int
1530ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
1531{
1532 int err = 0;
1533 unsigned long flags;
1534 u32 mask = 1 << tag;
1535
1536 /* clear outstanding transaction before retry */
1537 spin_lock_irqsave(hba->host->host_lock, flags);
1538 ufshcd_utrl_clear(hba, tag);
1539 spin_unlock_irqrestore(hba->host->host_lock, flags);
1540
1541 /*
1542 * wait for h/w to clear the corresponding bit in the doorbell.
1543 * max. wait is 1 sec.
1544 */
1545 err = ufshcd_wait_for_register(hba,
1546 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02001547 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301548
1549 return err;
1550}
1551
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001552static int
1553ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1554{
1555 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1556
1557 /* Get the UPIU response */
1558 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
1559 UPIU_RSP_CODE_OFFSET;
1560 return query_res->response;
1561}
1562
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301563/**
1564 * ufshcd_dev_cmd_completion() - handles device management command responses
1565 * @hba: per adapter instance
1566 * @lrbp: pointer to local reference block
1567 */
1568static int
1569ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1570{
1571 int resp;
1572 int err = 0;
1573
1574 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1575
1576 switch (resp) {
1577 case UPIU_TRANSACTION_NOP_IN:
1578 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
1579 err = -EINVAL;
1580 dev_err(hba->dev, "%s: unexpected response %x\n",
1581 __func__, resp);
1582 }
1583 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05301584 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03001585 err = ufshcd_check_query_response(hba, lrbp);
1586 if (!err)
1587 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05301588 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301589 case UPIU_TRANSACTION_REJECT_UPIU:
1590 /* TODO: handle Reject UPIU Response */
1591 err = -EPERM;
1592 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
1593 __func__);
1594 break;
1595 default:
1596 err = -EINVAL;
1597 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
1598 __func__, resp);
1599 break;
1600 }
1601
1602 return err;
1603}
1604
1605static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1606 struct ufshcd_lrb *lrbp, int max_timeout)
1607{
1608 int err = 0;
1609 unsigned long time_left;
1610 unsigned long flags;
1611
1612 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
1613 msecs_to_jiffies(max_timeout));
1614
1615 spin_lock_irqsave(hba->host->host_lock, flags);
1616 hba->dev_cmd.complete = NULL;
1617 if (likely(time_left)) {
1618 err = ufshcd_get_tr_ocs(lrbp);
1619 if (!err)
1620 err = ufshcd_dev_cmd_completion(hba, lrbp);
1621 }
1622 spin_unlock_irqrestore(hba->host->host_lock, flags);
1623
1624 if (!time_left) {
1625 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001626 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
1627 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301628 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02001629 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301630 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02001631 /*
1632 * in case of an error, after clearing the doorbell
1633 * we also need to clear the corresponding bit in
1634 * hba->outstanding_reqs
1635 */
1636 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301637 }
1638
1639 return err;
1640}
1641
1642/**
1643 * ufshcd_get_dev_cmd_tag - Get device management command tag
1644 * @hba: per-adapter instance
1645 * @tag_out: pointer to variable with available slot value
1646 *
1647 * Get a free slot and lock it until device management command
1648 * completes.
1649 *
1650 * Returns false if free slot is unavailable for locking, else
1651 * return true with the tag value in @tag_out.
1652 */
1653static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1654{
1655 int tag;
1656 bool ret = false;
1657 unsigned long tmp;
1658
1659 if (!tag_out)
1660 goto out;
1661
1662 do {
1663 tmp = ~hba->lrb_in_use;
1664 tag = find_last_bit(&tmp, hba->nutrs);
1665 if (tag >= hba->nutrs)
1666 goto out;
1667 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
1668
1669 *tag_out = tag;
1670 ret = true;
1671out:
1672 return ret;
1673}
1674
1675static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1676{
1677 clear_bit_unlock(tag, &hba->lrb_in_use);
1678}
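/*
 * Editorial note, illustrative only: ufshcd_get_dev_cmd_tag() searches from
 * the top of the tag space, presumably to stay clear of the low tags that
 * the block layer tends to hand out first. For example, with
 * hba->nutrs == 32 and hba->lrb_in_use == 0x0000000f:
 *
 *	tmp = ~hba->lrb_in_use  = 0xfffffff0
 *	find_last_bit(&tmp, 32) = 31	-> the device command gets tag 31
 */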
1679
1680/**
1681 * ufshcd_exec_dev_cmd - API for sending device management requests
1682 * @hba: UFS hba
1683 * @cmd_type: specifies the type (NOP, Query...)
1684 * @timeout: timeout in milliseconds
1685 *
Dolev Raviv68078d52013-07-30 00:35:58 +05301686 * NOTE: Since there is only one available tag for device management commands,
1687 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301688 */
1689static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1690 enum dev_cmd_type cmd_type, int timeout)
1691{
1692 struct ufshcd_lrb *lrbp;
1693 int err;
1694 int tag;
1695 struct completion wait;
1696 unsigned long flags;
1697
1698 /*
1699 * Get free slot, sleep if slots are unavailable.
1700 * Even though we use wait_event() which sleeps indefinitely,
1701 * the maximum wait time is bounded by SCSI request timeout.
1702 */
1703 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1704
1705 init_completion(&wait);
1706 lrbp = &hba->lrb[tag];
1707 WARN_ON(lrbp->cmd);
1708 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1709 if (unlikely(err))
1710 goto out_put_tag;
1711
1712 hba->dev_cmd.complete = &wait;
1713
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02001714 /* Make sure descriptors are ready before ringing the doorbell */
1715 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301716 spin_lock_irqsave(hba->host->host_lock, flags);
1717 ufshcd_send_command(hba, tag);
1718 spin_unlock_irqrestore(hba->host->host_lock, flags);
1719
1720 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1721
1722out_put_tag:
1723 ufshcd_put_dev_cmd_tag(hba, tag);
1724 wake_up(&hba->dev_cmd.tag_wq);
1725 return err;
1726}
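/*
 * Editorial note, illustrative only: a minimal sketch of how this helper is
 * used elsewhere in this file, e.g. to send a NOP OUT and wait for the NOP IN
 * response (NOP_OUT_TIMEOUT is the 30 msec timeout defined near the top of
 * this file):
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */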
1727
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301728/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001729 * ufshcd_init_query() - init the query response and request parameters
1730 * @hba: per-adapter instance
1731 * @request: address of the request pointer to be initialized
1732 * @response: address of the response pointer to be initialized
1733 * @opcode: operation to perform
1734 * @idn: the query idn to access
1735 * @index: LU number to access
1736 * @selector: query/flag/descriptor further identification
1737 */
1738static inline void ufshcd_init_query(struct ufs_hba *hba,
1739 struct ufs_query_req **request, struct ufs_query_res **response,
1740 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
1741{
1742 *request = &hba->dev_cmd.query.request;
1743 *response = &hba->dev_cmd.query.response;
1744 memset(*request, 0, sizeof(struct ufs_query_req));
1745 memset(*response, 0, sizeof(struct ufs_query_res));
1746 (*request)->upiu_req.opcode = opcode;
1747 (*request)->upiu_req.idn = idn;
1748 (*request)->upiu_req.index = index;
1749 (*request)->upiu_req.selector = selector;
1750}
1751
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02001752static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1753 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
1754{
1755 int ret;
1756 int retries;
1757
1758 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1759 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1760 if (ret)
1761 dev_dbg(hba->dev,
1762 "%s: failed with error %d, retries %d\n",
1763 __func__, ret, retries);
1764 else
1765 break;
1766 }
1767
1768 if (ret)
1769 dev_err(hba->dev,
1770 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1771 __func__, opcode, idn, ret, retries);
1772 return ret;
1773}
1774
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001775/**
Dolev Raviv68078d52013-07-30 00:35:58 +05301776 * ufshcd_query_flag() - API function for sending flag query requests
1777 * @hba: per-adapter instance
1778 * @opcode: flag query to perform
1779 * @idn: flag idn to access
1780 * @flag_res: the flag value after the query request completes
1781 *
1782 * Returns 0 for success, non-zero in case of failure
1783 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02001784int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05301785 enum flag_idn idn, bool *flag_res)
1786{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001787 struct ufs_query_req *request = NULL;
1788 struct ufs_query_res *response = NULL;
1789 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02001790 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05301791
1792 BUG_ON(!hba);
1793
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001794 ufshcd_hold(hba, false);
Dolev Raviv68078d52013-07-30 00:35:58 +05301795 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001796 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1797 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05301798
1799 switch (opcode) {
1800 case UPIU_QUERY_OPCODE_SET_FLAG:
1801 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1802 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1803 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1804 break;
1805 case UPIU_QUERY_OPCODE_READ_FLAG:
1806 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1807 if (!flag_res) {
1808 /* No dummy reads */
1809 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1810 __func__);
1811 err = -EINVAL;
1812 goto out_unlock;
1813 }
1814 break;
1815 default:
1816 dev_err(hba->dev,
1817 "%s: Expected query flag opcode but got = %d\n",
1818 __func__, opcode);
1819 err = -EINVAL;
1820 goto out_unlock;
1821 }
Dolev Raviv68078d52013-07-30 00:35:58 +05301822
Yaniv Gardie5ad4062016-02-01 15:02:41 +02001823 if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
1824 timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
1825
1826 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05301827
1828 if (err) {
1829 dev_err(hba->dev,
1830 "%s: Sending flag query for idn %d failed, err = %d\n",
1831 __func__, idn, err);
1832 goto out_unlock;
1833 }
1834
1835 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301836 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05301837 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1838
1839out_unlock:
1840 mutex_unlock(&hba->dev_cmd.lock);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001841 ufshcd_release(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05301842 return err;
1843}
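/*
 * Editorial note, illustrative only: reading a flag with this helper, e.g.
 * polling fDeviceInit the way ufshcd_complete_dev_init() does further down:
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */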
1844
1845/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301846 * ufshcd_query_attr - API function for sending attribute requests
1847 * @hba: per-adapter instance
1848 * @opcode: attribute opcode
1849 * @idn: attribute idn to access
1850 * @index: index field
1851 * @selector: selector field
1852 * @attr_val: the attribute value after the query request completes
1853 *
1854 * Returns 0 for success, non-zero in case of failure
1855 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05301856static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301857 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1858{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001859 struct ufs_query_req *request = NULL;
1860 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301861 int err;
1862
1863 BUG_ON(!hba);
1864
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001865 ufshcd_hold(hba, false);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301866 if (!attr_val) {
1867 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1868 __func__, opcode);
1869 err = -EINVAL;
1870 goto out;
1871 }
1872
1873 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001874 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1875 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301876
1877 switch (opcode) {
1878 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1879 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301880 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301881 break;
1882 case UPIU_QUERY_OPCODE_READ_ATTR:
1883 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1884 break;
1885 default:
1886 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1887 __func__, opcode);
1888 err = -EINVAL;
1889 goto out_unlock;
1890 }
1891
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001892 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301893
1894 if (err) {
1895 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1896 __func__, opcode, idn, err);
1897 goto out_unlock;
1898 }
1899
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301900 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301901
1902out_unlock:
1903 mutex_unlock(&hba->dev_cmd.lock);
1904out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001905 ufshcd_release(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301906 return err;
1907}
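/*
 * Editorial note, illustrative only: reading an attribute, e.g. the
 * background operations status (QUERY_ATTR_IDN_BKOPS_STATUS, used by the
 * BKOPS handling elsewhere in this driver):
 *
 *	u32 bkops_status;
 *	int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, &bkops_status);
 */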
1908
1909/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02001910 * ufshcd_query_attr_retry() - API function for sending query
1911 * attribute with retries
1912 * @hba: per-adapter instance
1913 * @opcode: attribute opcode
1914 * @idn: attribute idn to access
1915 * @index: index field
1916 * @selector: selector field
1917 * @attr_val: the attribute value after the query request
1918 * completes
1919 *
1920 * Returns 0 for success, non-zero in case of failure
1921 */
1922static int ufshcd_query_attr_retry(struct ufs_hba *hba,
1923 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
1924 u32 *attr_val)
1925{
1926 int ret = 0;
1927 u32 retries;
1928
1929 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1930 ret = ufshcd_query_attr(hba, opcode, idn, index,
1931 selector, attr_val);
1932 if (ret)
1933 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
1934 __func__, ret, retries);
1935 else
1936 break;
1937 }
1938
1939 if (ret)
1940 dev_err(hba->dev,
1941 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
1942 __func__, idn, ret, QUERY_REQ_RETRIES);
1943 return ret;
1944}
1945
Yaniv Gardia70e91b2016-03-10 17:37:14 +02001946static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001947 enum query_opcode opcode, enum desc_idn idn, u8 index,
1948 u8 selector, u8 *desc_buf, int *buf_len)
1949{
1950 struct ufs_query_req *request = NULL;
1951 struct ufs_query_res *response = NULL;
1952 int err;
1953
1954 BUG_ON(!hba);
1955
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001956 ufshcd_hold(hba, false);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001957 if (!desc_buf) {
1958 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1959 __func__, opcode);
1960 err = -EINVAL;
1961 goto out;
1962 }
1963
1964 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1965 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1966 __func__, *buf_len);
1967 err = -EINVAL;
1968 goto out;
1969 }
1970
1971 mutex_lock(&hba->dev_cmd.lock);
1972 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1973 selector);
1974 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03001975 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03001976
1977 switch (opcode) {
1978 case UPIU_QUERY_OPCODE_WRITE_DESC:
1979 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1980 break;
1981 case UPIU_QUERY_OPCODE_READ_DESC:
1982 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1983 break;
1984 default:
1985 dev_err(hba->dev,
1986 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1987 __func__, opcode);
1988 err = -EINVAL;
1989 goto out_unlock;
1990 }
1991
1992 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1993
1994 if (err) {
1995 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1996 __func__, opcode, idn, err);
1997 goto out_unlock;
1998 }
1999
2000 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002001 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002002
2003out_unlock:
2004 mutex_unlock(&hba->dev_cmd.lock);
2005out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002006 ufshcd_release(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002007 return err;
2008}
2009
2010/**
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002011 * ufshcd_query_descriptor_retry - API function for sending descriptor
2012 * requests
2013 * @hba: per-adapter instance
2014 * @opcode: descriptor opcode
2015 * @idn: descriptor idn to access
2016 * @index: index field
2017 * @selector: selector field
2018 * @desc_buf: the buffer that contains (or receives) the descriptor
2019 * @buf_len: length parameter passed to the device
2020 *
2021 * Returns 0 for success, non-zero in case of failure.
2022 * The buf_len parameter will contain, on return, the length parameter
2023 * received on the response.
2024 */
2025int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2026 enum query_opcode opcode, enum desc_idn idn, u8 index,
2027 u8 selector, u8 *desc_buf, int *buf_len)
2028{
2029 int err;
2030 int retries;
2031
2032 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2033 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2034 selector, desc_buf, buf_len);
2035 if (!err || err == -EINVAL)
2036 break;
2037 }
2038
2039 return err;
2040}
2041EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
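/*
 * Editorial note, illustrative only: reading a whole descriptor. Note that
 * buf_len is in/out: it is sent to the device and updated with the length
 * the device actually returned (QUERY_DESC_DEVICE_MAX_SIZE is assumed to be
 * the device descriptor size constant from ufs.h):
 *
 *	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
 *	int buf_len = QUERY_DESC_DEVICE_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *			QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, &buf_len);
 */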
2042
2043/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002044 * ufshcd_read_desc_param - read the specified descriptor parameter
2045 * @hba: Pointer to adapter instance
2046 * @desc_id: descriptor idn value
2047 * @desc_index: descriptor index
2048 * @param_offset: offset of the parameter to read
2049 * @param_read_buf: pointer to buffer where parameter would be read
2050 * @param_size: sizeof(param_read_buf)
2051 *
2052 * Return 0 in case of success, non-zero otherwise
2053 */
2054static int ufshcd_read_desc_param(struct ufs_hba *hba,
2055 enum desc_idn desc_id,
2056 int desc_index,
2057 u32 param_offset,
2058 u8 *param_read_buf,
2059 u32 param_size)
2060{
2061 int ret;
2062 u8 *desc_buf;
2063 u32 buff_len;
2064 bool is_kmalloc = true;
2065
2066 /* safety checks */
2067 if (desc_id >= QUERY_DESC_IDN_MAX)
2068 return -EINVAL;
2069
2070 buff_len = ufs_query_desc_max_size[desc_id];
2071 if ((param_offset + param_size) > buff_len)
2072 return -EINVAL;
2073
2074 if (!param_offset && (param_size == buff_len)) {
2075 /* memory space already available to hold full descriptor */
2076 desc_buf = param_read_buf;
2077 is_kmalloc = false;
2078 } else {
2079 /* allocate memory to hold full descriptor */
2080 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2081 if (!desc_buf)
2082 return -ENOMEM;
2083 }
2084
Yaniv Gardia70e91b2016-03-10 17:37:14 +02002085 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2086 desc_id, desc_index, 0, desc_buf,
2087 &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002088
2089 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
2090 (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
2091 ufs_query_desc_max_size[desc_id])
2092 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
2093 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
2094 __func__, desc_id, param_offset, buff_len, ret);
2095 if (!ret)
2096 ret = -EINVAL;
2097
2098 goto out;
2099 }
2100
2101 if (is_kmalloc)
2102 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2103out:
2104 if (is_kmalloc)
2105 kfree(desc_buf);
2106 return ret;
2107}
2108
2109static inline int ufshcd_read_desc(struct ufs_hba *hba,
2110 enum desc_idn desc_id,
2111 int desc_index,
2112 u8 *buf,
2113 u32 size)
2114{
2115 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2116}
2117
2118static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2119 u8 *buf,
2120 u32 size)
2121{
2122 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2123}
2124
Yaniv Gardib573d482016-03-10 17:37:09 +02002125int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2126{
2127 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2128}
2129EXPORT_SYMBOL(ufshcd_read_device_desc);
2130
2131/**
2132 * ufshcd_read_string_desc - read string descriptor
2133 * @hba: pointer to adapter instance
2134 * @desc_index: descriptor index
2135 * @buf: pointer to buffer where descriptor would be read
2136 * @size: size of buf
2137 * @ascii: if true convert from unicode to ascii characters
2138 *
2139 * Return 0 in case of success, non-zero otherwise
2140 */
2141int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2142 u32 size, bool ascii)
2143{
2144 int err = 0;
2145
2146 err = ufshcd_read_desc(hba,
2147 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2148
2149 if (err) {
2150 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2151 __func__, QUERY_REQ_RETRIES, err);
2152 goto out;
2153 }
2154
2155 if (ascii) {
2156 int desc_len;
2157 int ascii_len;
2158 int i;
2159 char *buff_ascii;
2160
2161 desc_len = buf[0];
2162 /* strip the header, halve the length (UTF-16 -> UTF-8) and add one byte for NUL */
2163 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2164 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2165 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2166 __func__);
2167 err = -ENOMEM;
2168 goto out;
2169 }
2170
2171 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2172 if (!buff_ascii) {
2173 err = -ENOMEM;
Tiezhu Yangfcbefc32016-06-25 12:35:22 +08002174 goto out;
Yaniv Gardib573d482016-03-10 17:37:09 +02002175 }
2176
2177 /*
2178 * the descriptor contains string in UTF16 format
2179 * we need to convert to utf-8 so it can be displayed
2180 */
2181 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2182 desc_len - QUERY_DESC_HDR_SIZE,
2183 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2184
2185 /* replace non-printable or non-ASCII characters with spaces */
2186 for (i = 0; i < ascii_len; i++)
2187 ufshcd_remove_non_printable(&buff_ascii[i]);
2188
2189 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2190 size - QUERY_DESC_HDR_SIZE);
2191 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2192 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Yaniv Gardib573d482016-03-10 17:37:09 +02002193 kfree(buff_ascii);
2194 }
2195out:
2196 return err;
2197}
2198EXPORT_SYMBOL(ufshcd_read_string_desc);
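/*
 * Editorial note, illustrative only: fetching a string descriptor in ASCII
 * form. model_index is a stand-in for an index previously read from the
 * device descriptor, and QUERY_DESC_STRING_MAX_SIZE is assumed to be the
 * string descriptor size constant from ufs.h:
 *
 *	u8 str[QUERY_DESC_STRING_MAX_SIZE];
 *	int err = ufshcd_read_string_desc(hba, model_index, str,
 *					  QUERY_DESC_STRING_MAX_SIZE, true);
 */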
2199
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002200/**
2201 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2202 * @hba: Pointer to adapter instance
2203 * @lun: lun id
2204 * @param_offset: offset of the parameter to read
2205 * @param_read_buf: pointer to buffer where parameter would be read
2206 * @param_size: sizeof(param_read_buf)
2207 *
2208 * Return 0 in case of success, non-zero otherwise
2209 */
2210static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2211 int lun,
2212 enum unit_desc_param param_offset,
2213 u8 *param_read_buf,
2214 u32 param_size)
2215{
2216 /*
2217 * Unit descriptors are only available for general purpose LUs (LUN id
2218 * from 0 to 7) and RPMB Well known LU.
2219 */
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002220 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03002221 return -EOPNOTSUPP;
2222
2223 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2224 param_offset, param_read_buf, param_size);
2225}
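/*
 * Editorial note, illustrative only: reading a single unit descriptor field,
 * e.g. the LU queue depth for a SCSI device's LUN:
 *
 *	u8 lun_qdepth;
 *	int ret = ufshcd_read_unit_desc_param(hba,
 *			ufshcd_scsi_to_upiu_lun(sdev->lun),
 *			UNIT_DESC_PARAM_LU_Q_DEPTH,
 *			&lun_qdepth, sizeof(lun_qdepth));
 */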
2226
2227/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302228 * ufshcd_memory_alloc - allocate memory for host memory space data structures
2229 * @hba: per adapter instance
2230 *
2231 * 1. Allocate DMA memory for Command Descriptor array
2232 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
2233 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2234 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2235 * (UTMRDL)
2236 * 4. Allocate memory for local reference block(lrb).
2237 *
2238 * Returns 0 for success, non-zero in case of failure
2239 */
2240static int ufshcd_memory_alloc(struct ufs_hba *hba)
2241{
2242 size_t utmrdl_size, utrdl_size, ucdl_size;
2243
2244 /* Allocate memory for UTP command descriptors */
2245 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002246 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2247 ucdl_size,
2248 &hba->ucdl_dma_addr,
2249 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302250
2251 /*
2252 * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
2253 * dmam_alloc_coherent() returns PAGE_SIZE-aligned memory, and if
2254 * hba->ucdl_dma_addr is aligned to PAGE_SIZE it is also aligned
2255 * to 128 bytes.
2256 */
2257 if (!hba->ucdl_base_addr ||
2258 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302259 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302260 "Command Descriptor Memory allocation failed\n");
2261 goto out;
2262 }
2263
2264 /*
2265 * Allocate memory for UTP Transfer descriptors
2266 * UFSHCI requires 1024 byte alignment of UTRD
2267 */
2268 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09002269 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2270 utrdl_size,
2271 &hba->utrdl_dma_addr,
2272 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302273 if (!hba->utrdl_base_addr ||
2274 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302275 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302276 "Transfer Descriptor Memory allocation failed\n");
2277 goto out;
2278 }
2279
2280 /*
2281 * Allocate memory for UTP Task Management descriptors
2282 * UFSHCI requires 1024 byte alignment of UTMRD
2283 */
2284 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09002285 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2286 utmrdl_size,
2287 &hba->utmrdl_dma_addr,
2288 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302289 if (!hba->utmrdl_base_addr ||
2290 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302291 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302292 "Task Management Descriptor Memory allocation failed\n");
2293 goto out;
2294 }
2295
2296 /* Allocate memory for local reference block */
Seungwon Jeon2953f852013-06-27 13:31:54 +09002297 hba->lrb = devm_kzalloc(hba->dev,
2298 hba->nutrs * sizeof(struct ufshcd_lrb),
2299 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302300 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302301 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302302 goto out;
2303 }
2304 return 0;
2305out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302306 return -ENOMEM;
2307}
2308
2309/**
2310 * ufshcd_host_memory_configure - configure local reference block with
2311 * memory offsets
2312 * @hba: per adapter instance
2313 *
2314 * Configure Host memory space
2315 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2316 * address.
2317 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2318 * and PRDT offset.
2319 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2320 * into local reference block.
2321 */
2322static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2323{
2324 struct utp_transfer_cmd_desc *cmd_descp;
2325 struct utp_transfer_req_desc *utrdlp;
2326 dma_addr_t cmd_desc_dma_addr;
2327 dma_addr_t cmd_desc_element_addr;
2328 u16 response_offset;
2329 u16 prdt_offset;
2330 int cmd_desc_size;
2331 int i;
2332
2333 utrdlp = hba->utrdl_base_addr;
2334 cmd_descp = hba->ucdl_base_addr;
2335
2336 response_offset =
2337 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2338 prdt_offset =
2339 offsetof(struct utp_transfer_cmd_desc, prd_table);
2340
2341 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2342 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2343
2344 for (i = 0; i < hba->nutrs; i++) {
2345 /* Configure UTRD with command descriptor base address */
2346 cmd_desc_element_addr =
2347 (cmd_desc_dma_addr + (cmd_desc_size * i));
2348 utrdlp[i].command_desc_base_addr_lo =
2349 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2350 utrdlp[i].command_desc_base_addr_hi =
2351 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2352
2353 /* Response UPIU and PRDT offsets are in bytes when UFSHCD_QUIRK_PRDT_BYTE_GRAN is set, in double words otherwise */
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00002354 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
2355 utrdlp[i].response_upiu_offset =
2356 cpu_to_le16(response_offset);
2357 utrdlp[i].prd_table_offset =
2358 cpu_to_le16(prdt_offset);
2359 utrdlp[i].response_upiu_length =
2360 cpu_to_le16(ALIGNED_UPIU_SIZE);
2361 } else {
2362 utrdlp[i].response_upiu_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302363 cpu_to_le16((response_offset >> 2));
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00002364 utrdlp[i].prd_table_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302365 cpu_to_le16((prdt_offset >> 2));
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00002366 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05302367 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00002368 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302369
2370 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302371 hba->lrb[i].ucd_req_ptr =
2372 (struct utp_upiu_req *)(cmd_descp + i);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302373 hba->lrb[i].ucd_rsp_ptr =
2374 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2375 hba->lrb[i].ucd_prdt_ptr =
2376 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2377 }
2378}
2379
2380/**
2381 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2382 * @hba: per adapter instance
2383 *
2384 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2385 * in order to initialize the Unipro link startup procedure.
2386 * Once the Unipro links are up, the device connected to the controller
2387 * is detected.
2388 *
2389 * Returns 0 on success, non-zero value on failure
2390 */
2391static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2392{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302393 struct uic_command uic_cmd = {0};
2394 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302395
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302396 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2397
2398 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2399 if (ret)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05302400 dev_err(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302401 "dme-link-startup: error code %d\n", ret);
2402 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302403}
2404
Yaniv Gardicad2e032015-03-31 17:37:14 +03002405static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2406{
2407 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2408 unsigned long min_sleep_time_us;
2409
2410 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2411 return;
2412
2413 /*
2414 * last_dme_cmd_tstamp will be 0 only for 1st call to
2415 * this function
2416 */
2417 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2418 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2419 } else {
2420 unsigned long delta =
2421 (unsigned long) ktime_to_us(
2422 ktime_sub(ktime_get(),
2423 hba->last_dme_cmd_tstamp));
2424
2425 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2426 min_sleep_time_us =
2427 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2428 else
2429 return; /* no more delay required */
2430 }
2431
2432 /* allow sleep for extra 50us if needed */
2433 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2434}
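/*
 * Editorial note, illustrative only: with the quirk set, if the previous DME
 * command completed 400 us ago then delta = 400 < 1000, so the function
 * sleeps for the remaining 600 us (up to 650 us); if more than 1000 us have
 * already passed it returns without sleeping.
 */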
2435
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302436/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302437 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2438 * @hba: per adapter instance
2439 * @attr_sel: uic command argument1
2440 * @attr_set: attribute set type as uic command argument2
2441 * @mib_val: setting value as uic command argument3
2442 * @peer: indicate whether peer or local
2443 *
2444 * Returns 0 on success, non-zero value on failure
2445 */
2446int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2447 u8 attr_set, u32 mib_val, u8 peer)
2448{
2449 struct uic_command uic_cmd = {0};
2450 static const char *const action[] = {
2451 "dme-set",
2452 "dme-peer-set"
2453 };
2454 const char *set = action[!!peer];
2455 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002456 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302457
2458 uic_cmd.command = peer ?
2459 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2460 uic_cmd.argument1 = attr_sel;
2461 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2462 uic_cmd.argument3 = mib_val;
2463
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002464 do {
2465 /* for peer attributes we retry upon failure */
2466 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2467 if (ret)
2468 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2469 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2470 } while (ret && peer && --retries);
2471
2472 if (!retries)
2473 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
2474 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2475 retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302476
2477 return ret;
2478}
2479EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
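/*
 * Editorial note, illustrative only: callers normally go through the
 * ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers from ufshcd.h rather than
 * calling ufshcd_dme_set_attr() directly, e.g. as the power mode code below
 * does:
 *
 *	int ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
 */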
2480
2481/**
2482 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2483 * @hba: per adapter instance
2484 * @attr_sel: uic command argument1
2485 * @mib_val: the value of the attribute as returned by the UIC command
2486 * @peer: indicate whether peer or local
2487 *
2488 * Returns 0 on success, non-zero value on failure
2489 */
2490int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2491 u32 *mib_val, u8 peer)
2492{
2493 struct uic_command uic_cmd = {0};
2494 static const char *const action[] = {
2495 "dme-get",
2496 "dme-peer-get"
2497 };
2498 const char *get = action[!!peer];
2499 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002500 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002501 struct ufs_pa_layer_attr orig_pwr_info;
2502 struct ufs_pa_layer_attr temp_pwr_info;
2503 bool pwr_mode_change = false;
2504
2505 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2506 orig_pwr_info = hba->pwr_info;
2507 temp_pwr_info = orig_pwr_info;
2508
2509 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2510 orig_pwr_info.pwr_rx == FAST_MODE) {
2511 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2512 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2513 pwr_mode_change = true;
2514 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2515 orig_pwr_info.pwr_rx == SLOW_MODE) {
2516 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2517 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2518 pwr_mode_change = true;
2519 }
2520 if (pwr_mode_change) {
2521 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2522 if (ret)
2523 goto out;
2524 }
2525 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302526
2527 uic_cmd.command = peer ?
2528 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2529 uic_cmd.argument1 = attr_sel;
2530
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002531 do {
2532 /* for peer attributes we retry upon failure */
2533 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2534 if (ret)
2535 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2536 get, UIC_GET_ATTR_ID(attr_sel), ret);
2537 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302538
Yaniv Gardi64238fb2016-02-01 15:02:43 +02002539 if (!retries)
2540 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
2541 get, UIC_GET_ATTR_ID(attr_sel), retries);
2542
2543 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302544 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03002545
2546 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2547 && pwr_mode_change)
2548 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05302549out:
2550 return ret;
2551}
2552EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
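/*
 * Editorial note, illustrative only: the matching read side, via the
 * ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers from ufshcd.h:
 *
 *	u32 max_rx_hs_gear;
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &max_rx_hs_gear);
 */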
2553
2554/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002555 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
2556 * state) and waits for it to take effect.
2557 *
2558 * @hba: per adapter instance
2559 * @cmd: UIC command to execute
2560 *
2561 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2562 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2563 * and device UniPro link, and hence their final completion is indicated by
2564 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2565 * addition to normal UIC command completion Status (UCCS). This function only
2566 * returns after the relevant status bits indicate the completion.
2567 *
2568 * Returns 0 on success, non-zero value on failure
2569 */
2570static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2571{
2572 struct completion uic_async_done;
2573 unsigned long flags;
2574 u8 status;
2575 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002576 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002577
2578 mutex_lock(&hba->uic_cmd_mutex);
2579 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002580 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002581
2582 spin_lock_irqsave(hba->host->host_lock, flags);
2583 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002584 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2585 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
2586 /*
2587 * Make sure UIC command completion interrupt is disabled before
2588 * issuing UIC command.
2589 */
2590 wmb();
2591 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002592 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002593 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2594 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002595 if (ret) {
2596 dev_err(hba->dev,
2597 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2598 cmd->command, cmd->argument3, ret);
2599 goto out;
2600 }
2601
2602 if (!wait_for_completion_timeout(hba->uic_async_done,
2603 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2604 dev_err(hba->dev,
2605 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2606 cmd->command, cmd->argument3);
2607 ret = -ETIMEDOUT;
2608 goto out;
2609 }
2610
2611 status = ufshcd_get_upmcrs(hba);
2612 if (status != PWR_LOCAL) {
2613 dev_err(hba->dev,
Kiwoong Kim73615422016-09-08 16:50:02 +09002614 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002615 cmd->command, status);
2616 ret = (status != PWR_OK) ? status : -1;
2617 }
2618out:
2619 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002620 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002621 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002622 if (reenable_intr)
2623 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002624 spin_unlock_irqrestore(hba->host->host_lock, flags);
2625 mutex_unlock(&hba->uic_cmd_mutex);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002626
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002627 return ret;
2628}
2629
2630/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302631 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2632 * using DME_SET primitives.
2633 * @hba: per adapter instance
2634 * @mode: power mode value
2635 *
2636 * Returns 0 on success, non-zero value on failure
2637 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05302638static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302639{
2640 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002641 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302642
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03002643 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2644 ret = ufshcd_dme_set(hba,
2645 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2646 if (ret) {
2647 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2648 __func__, ret);
2649 goto out;
2650 }
2651 }
2652
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302653 uic_cmd.command = UIC_CMD_DME_SET;
2654 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2655 uic_cmd.argument3 = mode;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002656 ufshcd_hold(hba, false);
2657 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2658 ufshcd_release(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302659
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03002660out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002661 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002662}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302663
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002664static int ufshcd_link_recovery(struct ufs_hba *hba)
2665{
2666 int ret;
2667 unsigned long flags;
2668
2669 spin_lock_irqsave(hba->host->host_lock, flags);
2670 hba->ufshcd_state = UFSHCD_STATE_RESET;
2671 ufshcd_set_eh_in_progress(hba);
2672 spin_unlock_irqrestore(hba->host->host_lock, flags);
2673
2674 ret = ufshcd_host_reset_and_restore(hba);
2675
2676 spin_lock_irqsave(hba->host->host_lock, flags);
2677 if (ret)
2678 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2679 ufshcd_clear_eh_in_progress(hba);
2680 spin_unlock_irqrestore(hba->host->host_lock, flags);
2681
2682 if (ret)
2683 dev_err(hba->dev, "%s: link recovery failed, err %d",
2684 __func__, ret);
2685
2686 return ret;
2687}
2688
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002689static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002690{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002691 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002692 struct uic_command uic_cmd = {0};
2693
2694 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002695 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002696
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002697 if (ret) {
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002698 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
2699 __func__, ret);
2700
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002701 /*
2702 * If link recovery fails then return an error so that the
2703 * caller doesn't retry the hibern8 enter again.
2704 */
2705 if (ufshcd_link_recovery(hba))
2706 ret = -ENOLINK;
2707 }
2708
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02002709 return ret;
2710}
2711
2712static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2713{
2714 int ret = 0, retries;
2715
2716 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
2717 ret = __ufshcd_uic_hibern8_enter(hba);
2718 if (!ret || ret == -ENOLINK)
2719 goto out;
2720 }
2721out:
2722 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002723}
2724
2725static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2726{
2727 struct uic_command uic_cmd = {0};
2728 int ret;
2729
2730 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2731 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302732 if (ret) {
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002733 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2734 __func__, ret);
2735 ret = ufshcd_link_recovery(hba);
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302736 }
2737
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302738 return ret;
2739}
2740
Yaniv Gardi50646362014-10-23 13:25:13 +03002741/**
2742 * ufshcd_init_pwr_info - set the POR (power on reset)
2743 * values in hba power info
2744 * @hba: per-adapter instance
2745 */
2746static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2747{
2748 hba->pwr_info.gear_rx = UFS_PWM_G1;
2749 hba->pwr_info.gear_tx = UFS_PWM_G1;
2750 hba->pwr_info.lane_rx = 1;
2751 hba->pwr_info.lane_tx = 1;
2752 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2753 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2754 hba->pwr_info.hs_rate = 0;
2755}
2756
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302757/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002758 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2759 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302760 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002761static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302762{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002763 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2764
2765 if (hba->max_pwr_info.is_valid)
2766 return 0;
2767
2768 pwr_info->pwr_tx = FASTAUTO_MODE;
2769 pwr_info->pwr_rx = FASTAUTO_MODE;
2770 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302771
2772 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002773 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2774 &pwr_info->lane_rx);
2775 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2776 &pwr_info->lane_tx);
2777
2778 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2779 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2780 __func__,
2781 pwr_info->lane_rx,
2782 pwr_info->lane_tx);
2783 return -EINVAL;
2784 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302785
2786 /*
2787 * First, get the maximum gear of HS speed.
2788 * If it is zero, the link has no HS gear capability; fall back
2789 * to the maximum gear of PWM speed.
2790 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002791 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2792 if (!pwr_info->gear_rx) {
2793 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2794 &pwr_info->gear_rx);
2795 if (!pwr_info->gear_rx) {
2796 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2797 __func__, pwr_info->gear_rx);
2798 return -EINVAL;
2799 }
2800 pwr_info->pwr_rx = SLOWAUTO_MODE;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302801 }
2802
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002803 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2804 &pwr_info->gear_tx);
2805 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302806 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002807 &pwr_info->gear_tx);
2808 if (!pwr_info->gear_tx) {
2809 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2810 __func__, pwr_info->gear_tx);
2811 return -EINVAL;
2812 }
2813 pwr_info->pwr_tx = SLOWAUTO_MODE;
2814 }
2815
2816 hba->max_pwr_info.is_valid = true;
2817 return 0;
2818}
2819
2820static int ufshcd_change_power_mode(struct ufs_hba *hba,
2821 struct ufs_pa_layer_attr *pwr_mode)
2822{
2823 int ret;
2824
2825 /* if already configured to the requested pwr_mode */
2826 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2827 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2828 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2829 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2830 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2831 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2832 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2833 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2834 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302835 }
2836
2837 /*
2838 * Configure attributes for power mode change with below.
2839 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2840 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2841 * - PA_HSSERIES
2842 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002843 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2844 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2845 pwr_mode->lane_rx);
2846 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2847 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302848 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002849 else
2850 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302851
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002852 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2853 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2854 pwr_mode->lane_tx);
2855 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2856 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302857 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002858 else
2859 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302860
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002861 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2862 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2863 pwr_mode->pwr_rx == FAST_MODE ||
2864 pwr_mode->pwr_tx == FAST_MODE)
2865 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2866 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302867
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002868 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2869 | pwr_mode->pwr_tx);
2870
2871 if (ret) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05302872 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002873 "%s: power mode change failed %d\n", __func__, ret);
2874 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02002875 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
2876 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03002877
2878 memcpy(&hba->pwr_info, pwr_mode,
2879 sizeof(struct ufs_pa_layer_attr));
2880 }
2881
2882 return ret;
2883}

/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = true;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}
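
/*
 * Illustrative sketch (not part of the driver): the iteration-bounded
 * poll above could equally be expressed as a time-bounded poll. This
 * hypothetical helper assumes the usual jiffies primitives are pulled
 * in by the existing includes.
 */
static int __maybe_unused ufshcd_example_poll_fdeviceinit(struct ufs_hba *hba,
					unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	bool flag_res = true;
	int err = 0;

	while (!err && flag_res) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		/* re-read fDeviceInit until the device clears it */
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
	}
	return err;
}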

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
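
/*
 * Illustrative sketch: the UTRL/UTMRL programming above splits one
 * 64-bit DMA address across a low/high register pair. A hypothetical
 * helper (not in this driver) makes the pattern explicit.
 */
static void __maybe_unused ufshcd_example_write_list_base(struct ufs_hba *hba,
					dma_addr_t base, u32 reg_lo, u32 reg_hi)
{
	ufshcd_writel(hba, lower_32_bits(base), reg_lo);	/* bits 31:0 */
	ufshcd_writel(hba, upper_32_bits(base), reg_hi);	/* bits 63:32 */
}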

/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 * @can_sleep: perform sleep or just spin
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1, can_sleep);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
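
/*
 * Illustrative sketch of what the register wait in ufshcd_hba_stop()
 * boils down to: re-read until the masked value matches. This is a
 * simplified model only; the real ufshcd_wait_for_register() also
 * takes an interval, a timeout and a spin-vs-sleep choice.
 */
static int __maybe_unused ufshcd_example_poll_reg(struct ufs_hba *hba,
					u32 reg, u32 mask, u32 val, int max_tries)
{
	while (max_tries--) {
		if ((ufshcd_readl(hba, reg) & mask) == val)
			return 0;	/* register reached expected value */
		usleep_range(10, 20);	/* back off between reads */
	}
	return -ETIMEDOUT;
}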

/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba, true);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state moves to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... give up */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}

/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read bLUQueueDepth value and activate scsi tagged command
 * queueing. For WLUN, queue depth is set to 1. For best-effort
 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
 * value that host can queue.
 */
static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
	int ret = 0;
	u8 lun_qdepth;
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	lun_qdepth = hba->nutrs;
	ret = ufshcd_read_unit_desc_param(hba,
					  ufshcd_scsi_to_upiu_lun(sdev->lun),
					  UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth,
					  sizeof(lun_qdepth));

	/* Some WLUNs don't support unit descriptor */
	if (ret == -EOPNOTSUPP)
		lun_qdepth = 1;
	else if (!lun_qdepth)
		/* eventually, we can figure out the real queue depth */
		lun_qdepth = hba->nutrs;
	else
		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);

	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
		__func__, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}

/*
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success, and the b_lu_write_protect status is
 * returned in the @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
static int ufshcd_get_lu_wp(struct ufs_hba *hba,
			    u8 lun,
			    u8 *b_lu_write_protect)
{
	int ret;

	if (!b_lu_write_protect)
		ret = -EINVAL;
	/*
	 * According to UFS device spec, RPMB LU can't be write
	 * protected so skip reading bLUWriteProtect parameter for
	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
	 */
	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
		ret = -ENOTSUPP;
	else
		ret = ufshcd_read_unit_desc_param(hba,
					  lun,
					  UNIT_DESC_PARAM_LU_WR_PROTECT,
					  b_lu_write_protect,
					  sizeof(*b_lu_write_protect));
	return ret;
}

/**
 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
 * status
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 *
 */
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
						    struct scsi_device *sdev)
{
	if (hba->dev_info.f_power_on_wp_en &&
	    !hba->dev_info.is_lu_power_on_wp) {
		u8 b_lu_write_protect;

		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
				      &b_lu_write_protect) &&
		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
			hba->dev_info.is_lu_power_on_wp = true;
	}
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	ufshcd_set_queue_depth(sdev);

	ufshcd_get_lu_power_on_wp_status(hba, sdev);

	return 0;
}

/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct ufs_hba *hba = shost_priv(sdev->host);

	if (depth > hba->nutrs)
		depth = hba->nutrs;
	return scsi_change_queue_depth(sdev, depth);
}

/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);

	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->sdev_ufs_device = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}

/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->output_param1);
		task_result = task_result & MASK_TM_SERVICE_RESP;
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ocs_value;
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
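
/*
 * Illustrative note: the result word composed above packs the host
 * byte, message byte and SCSI status into one int. This hypothetical
 * decoder shows the layout the SCSI midlayer expects.
 */
static void __maybe_unused ufshcd_example_decode_result(int result)
{
	u8 host_byte = (result >> 16) & 0xff;	/* e.g. DID_OK, DID_ERROR */
	u8 msg_byte = (result >> 8) & 0xff;	/* e.g. COMMAND_COMPLETE */
	u8 status_byte = result & 0xff;		/* e.g. SAM_STAT_GOOD */

	pr_debug("host=0x%x msg=0x%x status=0x%x\n",
		 host_byte, msg_byte, status_byte);
}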

/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
		complete(hba->uic_async_done);
}

/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	int result;
	int index;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			scsi_dma_unmap(cmd);
			cmd->result = result;
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			clear_bit_unlock(index, &hba->lrb_in_use);
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
			__ufshcd_release(hba);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
			if (hba->dev_cmd.complete)
				complete(hba->dev_cmd.complete);
		}
	}

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	ufshcd_clk_scaling_update_busy(hba);

	/* we might have freed some tags above */
	wake_up(&hba->dev_cmd.tag_wq);
}

/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	unsigned long completed_reqs;
	u32 tr_doorbell;

	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_reset_intr_aggr(hba);

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	__ufshcd_transfer_req_compl(hba, completed_reqs);
}
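
/*
 * Illustrative sketch of the doorbell arithmetic above: a tag that is
 * set in outstanding_reqs but already cleared in the doorbell belongs
 * to a finished command. E.g. outstanding = 0b1011 and doorbell =
 * 0b0001 gives completed = 0b1010, i.e. tags 1 and 3 are done.
 */
static unsigned long __maybe_unused
ufshcd_example_completed_mask(unsigned long outstanding, u32 doorbell)
{
	return outstanding ^ doorbell;	/* bits that dropped out of the DB */
}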

/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}

/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device from doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of the device moving into a critical state where it
 * becomes non-operable. Make sure to call ufshcd_enable_auto_bkops()
 * whenever the host is idle so that BKOPS are managed effectively without
 * any negative impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
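
/*
 * Illustrative usage (hypothetical caller, not in this file): keep
 * auto-BKOPS enabled only while the device reports at least
 * "performance impact" status; ufshcd_bkops_ctrl() performs the
 * threshold comparison internally.
 */
static int __maybe_unused ufshcd_example_bkops_on_perf_impact(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
}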

/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
 * error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;
	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;

	if (status & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

out:
	pm_runtime_put_sync(hba->dev);
	return;
}

/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
}

/**
 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 *				required to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * the DL NAC error.
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err) {
			err_handling = false;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}

/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;
	bool needs_reset = false;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		goto out;

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret)
			goto skip_err_handling;
	}
	if ((hba->saved_err & INT_FATAL_ERRORS) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
		needs_reset = true;

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will automatically get
	 * cleared after link startup.
	 */
	if (needs_reset)
		goto skip_pending_xfer_clear;

	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, tag)) {
			err_xfer = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			err_tm = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

lock_skip_pending_xfer_clear:
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	if (err_xfer || err_tm)
		needs_reset = true;

skip_pending_xfer_clear:
	/* Fatal errors need reset */
	if (needs_reset) {
		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;

		/*
		 * ufshcd_reset_and_restore() does the link reinitialization
		 * which will need at least one empty doorbell slot to send the
		 * device management commands (NOP and query commands).
		 * If there is no slot empty at this moment then free up last
		 * slot forcefully.
		 */
		if (hba->outstanding_reqs == max_doorbells)
			__ufshcd_transfer_req_compl(hba,
						    (1UL << (hba->nutrs - 1)));

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}

skip_err_handling:
	if (!needs_reset) {
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}

	ufshcd_clear_eh_in_progress(hba);

out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	scsi_unblock_requests(hba->host);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
	else if (hba->dev_quirks &
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
			hba->uic_error |=
				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
	}

	/* UIC NL/TL/DME errors need software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}

/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	enabled_intr_status =
		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (intr_status)
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

	if (enabled_intr_status) {
		ufshcd_sl_intr(hba, enabled_intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}
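
/*
 * Illustrative note on the masking in ufshcd_intr(): only bits raised
 * in INTERRUPT_STATUS *and* set in INTERRUPT_ENABLE are serviced,
 * while every raised bit is still cleared by the write-back so a
 * disabled source cannot keep the line asserted. Hypothetical helper:
 */
static u32 __maybe_unused ufshcd_example_pending_irqs(struct ufs_hba *hba)
{
	u32 status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	return status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
}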
4298
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304299static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
4300{
4301 int err = 0;
4302 u32 mask = 1 << tag;
4303 unsigned long flags;
4304
4305 if (!test_bit(tag, &hba->outstanding_tasks))
4306 goto out;
4307
4308 spin_lock_irqsave(hba->host->host_lock, flags);
4309 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
4310 spin_unlock_irqrestore(hba->host->host_lock, flags);
4311
4312 /* poll for max. 1 sec to clear door bell register by h/w */
4313 err = ufshcd_wait_for_register(hba,
4314 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02004315 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304316out:
4317 return err;
4318}
4319
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304320/**
4321 * ufshcd_issue_tm_cmd - issues task management commands to controller
4322 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304323 * @lun_id: LUN ID to which TM command is sent
4324 * @task_id: task ID to which the TM command is applicable
4325 * @tm_function: task management function opcode
4326 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304327 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304328 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304329 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304330static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
4331 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304332{
4333 struct utp_task_req_desc *task_req_descp;
4334 struct utp_upiu_task_req *task_req_upiup;
4335 struct Scsi_Host *host;
4336 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304337 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304338 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304339 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304340
4341 host = hba->host;
4342
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304343 /*
4344 * Get free slot, sleep if slots are unavailable.
4345 * Even though we use wait_event() which sleeps indefinitely,
4346 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
4347 */
4348 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004349 ufshcd_hold(hba, false);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304350
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304351 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304352 task_req_descp = hba->utmrdl_base_addr;
4353 task_req_descp += free_slot;
4354
4355 /* Configure task request descriptor */
4356 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
4357 task_req_descp->header.dword_2 =
4358 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
4359
4360 /* Configure task request UPIU */
4361 task_req_upiup =
4362 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304363 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304364 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304365 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304366 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304367 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304368 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004369 /*
4370	 * The host shall provide the same value for the LUN field in the basic
4371	 * header and for Input Parameter 1.
4372 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304373 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
4374 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304375
4376 /* send command to the controller */
4377 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02004378
4379 /* Make sure descriptors are ready before ringing the task doorbell */
4380 wmb();
4381
Seungwon Jeonb873a2752013-06-26 22:39:26 +05304382 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304383
4384 spin_unlock_irqrestore(host->host_lock, flags);
4385
4386 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304387 err = wait_event_timeout(hba->tm_wq,
4388 test_bit(free_slot, &hba->tm_condition),
4389 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304390 if (!err) {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304391 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
4392 __func__, tm_function);
4393 if (ufshcd_clear_tm_cmd(hba, free_slot))
4394	 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
4395 __func__, free_slot);
4396 err = -ETIMEDOUT;
4397 } else {
4398 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304399 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304400
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304401 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304402 ufshcd_put_tm_slot(hba, free_slot);
4403 wake_up(&hba->tm_tag_wq);
4404
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004405 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304406 return err;
4407}
4408
4409/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304410 * ufshcd_eh_device_reset_handler - device reset handler registered to
4411 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304412 * @cmd: SCSI command pointer
4413 *
4414 * Returns SUCCESS/FAILED
4415 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304416static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304417{
4418 struct Scsi_Host *host;
4419 struct ufs_hba *hba;
4420 unsigned int tag;
4421 u32 pos;
4422 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304423 u8 resp = 0xF;
4424 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304425 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304426
4427 host = cmd->device->host;
4428 hba = shost_priv(host);
4429 tag = cmd->request->tag;
4430
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304431 lrbp = &hba->lrb[tag];
4432 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
4433 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304434 if (!err)
4435 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304436 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304437 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304438
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304439 /* clear the commands that were pending for corresponding LUN */
4440 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
4441 if (hba->lrb[pos].lun == lrbp->lun) {
4442 err = ufshcd_clear_cmd(hba, pos);
4443 if (err)
4444 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304445 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304446 }
4447 spin_lock_irqsave(host->host_lock, flags);
4448 ufshcd_transfer_req_compl(hba);
4449 spin_unlock_irqrestore(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304450out:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304451 if (!err) {
4452 err = SUCCESS;
4453 } else {
4454 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4455 err = FAILED;
4456 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304457 return err;
4458}
4459
4460/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304461 * ufshcd_abort - abort a specific command
4462 * @cmd: SCSI command pointer
4463 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304464 * Abort the pending command in device by sending UFS_ABORT_TASK task management
4465 * command, and in the host controller by clearing the door-bell register. There
4466 * can be a race between the controller sending the command to the device and
4467 * the abort being issued. To avoid that, first issue UFS_QUERY_TASK to check
4468 * whether the command was actually issued, and only then try to abort it.
4469 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304470 * Returns SUCCESS/FAILED
4471 */
4472static int ufshcd_abort(struct scsi_cmnd *cmd)
4473{
4474 struct Scsi_Host *host;
4475 struct ufs_hba *hba;
4476 unsigned long flags;
4477 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304478 int err = 0;
4479 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304480 u8 resp = 0xF;
4481 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03004482 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304483
4484 host = cmd->device->host;
4485 hba = shost_priv(host);
4486 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02004487 if (!ufshcd_valid_tag(hba, tag)) {
4488 dev_err(hba->dev,
4489 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
4490 __func__, tag, cmd, cmd->request);
4491 BUG();
4492 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304493
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004494 ufshcd_hold(hba, false);
Dolev Ravive9d501b2014-07-01 12:22:37 +03004495 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02004496 /* If command is already aborted/completed, return SUCCESS */
4497 if (!(test_bit(tag, &hba->outstanding_reqs))) {
4498 dev_err(hba->dev,
4499 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
4500 __func__, tag, hba->outstanding_reqs, reg);
4501 goto out;
4502 }
4503
Dolev Ravive9d501b2014-07-01 12:22:37 +03004504 if (!(reg & (1 << tag))) {
4505 dev_err(hba->dev,
4506 "%s: cmd was completed, but without a notifying intr, tag = %d",
4507 __func__, tag);
4508 }
4509
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304510 lrbp = &hba->lrb[tag];
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304511 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
4512 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4513 UFS_QUERY_TASK, &resp);
4514 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
4515 /* cmd pending in the device */
4516 break;
4517 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304518 /*
4519 * cmd not pending in the device, check if it is
4520 * in transition.
4521 */
4522 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4523 if (reg & (1 << tag)) {
4524 /* sleep for max. 200us to stabilize */
4525 usleep_range(100, 200);
4526 continue;
4527 }
4528 /* command completed already */
4529 goto out;
4530 } else {
4531 if (!err)
4532 err = resp; /* service response error */
4533 goto out;
4534 }
4535 }
4536
4537 if (!poll_cnt) {
4538 err = -EBUSY;
4539 goto out;
4540 }
4541
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304542 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4543 UFS_ABORT_TASK, &resp);
4544 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304545 if (!err)
4546 err = resp; /* service response error */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304547 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304548 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304549
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304550 err = ufshcd_clear_cmd(hba, tag);
4551 if (err)
4552 goto out;
4553
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304554 scsi_dma_unmap(cmd);
4555
4556 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02004557 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304558 hba->lrb[tag].cmd = NULL;
4559 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304560
4561 clear_bit_unlock(tag, &hba->lrb_in_use);
4562 wake_up(&hba->dev_cmd.tag_wq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004563
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304564out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05304565 if (!err) {
4566 err = SUCCESS;
4567 } else {
4568 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
4569 err = FAILED;
4570 }
4571
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004572 /*
4573 * This ufshcd_release() corresponds to the original scsi cmd that got
4574 * aborted here (as we won't get any IRQ for it).
4575 */
4576 ufshcd_release(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304577 return err;
4578}
4579
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304580/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304581 * ufshcd_host_reset_and_restore - reset and restore host controller
4582 * @hba: per-adapter instance
4583 *
4584 * Note that host controller reset may issue DME_RESET to
4585 * local and remote (device) UniPro stack and the attributes
4586 * are reset to default state.
4587 *
4588 * Returns zero on success, non-zero on failure
4589 */
4590static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4591{
4592 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304593 unsigned long flags;
4594
4595 /* Reset the host controller */
4596 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02004597 ufshcd_hba_stop(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304598 spin_unlock_irqrestore(hba->host->host_lock, flags);
4599
4600 err = ufshcd_hba_enable(hba);
4601 if (err)
4602 goto out;
4603
4604 /* Establish the link again and restore the device */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004605 err = ufshcd_probe_hba(hba);
4606
4607 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304608 err = -EIO;
4609out:
4610 if (err)
4611 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
4612
4613 return err;
4614}
4615
4616/**
4617 * ufshcd_reset_and_restore - reset and re-initialize host/device
4618 * @hba: per-adapter instance
4619 *
4620 * Reset and recover device, host and re-establish link. This
4621 * is helpful to recover the communication in fatal error conditions.
4622 *
4623 * Returns zero on success, non-zero on failure
4624 */
4625static int ufshcd_reset_and_restore(struct ufs_hba *hba)
4626{
4627 int err = 0;
4628 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004629 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304630
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004631 do {
4632 err = ufshcd_host_reset_and_restore(hba);
4633 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304634
4635 /*
4636 * After reset the door-bell might be cleared, complete
4637 * outstanding requests in s/w here.
4638 */
4639 spin_lock_irqsave(hba->host->host_lock, flags);
4640 ufshcd_transfer_req_compl(hba);
4641 ufshcd_tmc_handler(hba);
4642 spin_unlock_irqrestore(hba->host->host_lock, flags);
4643
4644 return err;
4645}
4646
4647/**
4648 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
4649 * @cmd: SCSI command pointer
4650 *
4651 * Returns SUCCESS/FAILED
4652 */
4653static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
4654{
4655 int err;
4656 unsigned long flags;
4657 struct ufs_hba *hba;
4658
4659 hba = shost_priv(cmd->device->host);
4660
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004661 ufshcd_hold(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304662 /*
4663 * Check if there is any race with fatal error handling.
4664 * If so, wait for it to complete. Even though fatal error
4665 * handling does reset and restore in some cases, don't assume
4666 * anything out of it. We are just avoiding race here.
4667 */
4668 do {
4669 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304670 if (!(work_pending(&hba->eh_work) ||
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304671 hba->ufshcd_state == UFSHCD_STATE_RESET))
4672 break;
4673 spin_unlock_irqrestore(hba->host->host_lock, flags);
4674 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304675 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304676 } while (1);
4677
4678 hba->ufshcd_state = UFSHCD_STATE_RESET;
4679 ufshcd_set_eh_in_progress(hba);
4680 spin_unlock_irqrestore(hba->host->host_lock, flags);
4681
4682 err = ufshcd_reset_and_restore(hba);
4683
4684 spin_lock_irqsave(hba->host->host_lock, flags);
4685 if (!err) {
4686 err = SUCCESS;
4687 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4688 } else {
4689 err = FAILED;
4690 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4691 }
4692 ufshcd_clear_eh_in_progress(hba);
4693 spin_unlock_irqrestore(hba->host->host_lock, flags);
4694
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004695 ufshcd_release(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05304696 return err;
4697}
4698
4699/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03004700 * ufshcd_get_max_icc_level - calculate the ICC level
4701 * @sup_curr_uA: max. current supported by the regulator
4702 * @start_scan: row at the desc table to start scan from
4703 * @buff: power descriptor buffer
4704 *
4705 * Returns calculated max ICC level for specific regulator
4706 */
4707static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
4708{
4709 int i;
4710 int curr_uA;
4711 u16 data;
4712 u16 unit;
4713
4714 for (i = start_scan; i >= 0; i--) {
4715 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
4716 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
4717 ATTR_ICC_LVL_UNIT_OFFSET;
4718 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
4719 switch (unit) {
4720 case UFSHCD_NANO_AMP:
4721 curr_uA = curr_uA / 1000;
4722 break;
4723 case UFSHCD_MILI_AMP:
4724 curr_uA = curr_uA * 1000;
4725 break;
4726 case UFSHCD_AMP:
4727 curr_uA = curr_uA * 1000 * 1000;
4728 break;
4729 case UFSHCD_MICRO_AMP:
4730 default:
4731 break;
4732 }
4733 if (sup_curr_uA >= curr_uA)
4734 break;
4735 }
4736 if (i < 0) {
4737 i = 0;
4738		pr_err("%s: Couldn't find valid icc_level, falling back to %d", __func__, i);
4739 }
4740
4741 return (u32)i;
4742}
4743
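Each 16-bit entry of the power descriptor packs a unit code in the upper bits and a current value in the lower bits, and the loop above normalizes everything to microamps before comparing against the regulator limit. A self-contained sketch of that decode; the field layout and the unit encoding used here (2 = milliamp) are illustrative, the real masks are the ATTR_ICC_LVL_* definitions in the UFS headers:

#include <stdio.h>

#define EX_UNIT_OFFSET	14
#define EX_UNIT_MASK	(0x3 << EX_UNIT_OFFSET)
#define EX_VALUE_MASK	0x3ff

int main(void)
{
	unsigned short data = (0x2 << EX_UNIT_OFFSET) | 150;	/* "150 mA" */
	int curr_uA = data & EX_VALUE_MASK;

	switch ((data & EX_UNIT_MASK) >> EX_UNIT_OFFSET) {
	case 0:	curr_uA /= 1000; break;			/* nA -> uA */
	case 2:	curr_uA *= 1000; break;			/* mA -> uA */
	case 3:	curr_uA *= 1000 * 1000; break;		/* A  -> uA */
	default: break;					/* already uA */
	}
	printf("%d uA\n", curr_uA);			/* prints 150000 */
	return 0;
}
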
4744/**
4745 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
4746 * @hba: per-adapter instance
4747 * @desc_buf: power descriptor buffer to extract ICC levels from.
4748 * @len: length of desc_buf
4749 *
4750 * In case regulators are not initialized we'll return 0.
4751 * Returns calculated ICC level
4752 */
4753static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4754 u8 *desc_buf, int len)
4755{
4756 u32 icc_level = 0;
4757
4758 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4759 !hba->vreg_info.vccq2) {
4760 dev_err(hba->dev,
4761 "%s: Regulator capability was not set, actvIccLevel=%d",
4762 __func__, icc_level);
4763 goto out;
4764 }
4765
4766 if (hba->vreg_info.vcc)
4767 icc_level = ufshcd_get_max_icc_level(
4768 hba->vreg_info.vcc->max_uA,
4769 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4770 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4771
4772 if (hba->vreg_info.vccq)
4773 icc_level = ufshcd_get_max_icc_level(
4774 hba->vreg_info.vccq->max_uA,
4775 icc_level,
4776 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4777
4778 if (hba->vreg_info.vccq2)
4779 icc_level = ufshcd_get_max_icc_level(
4780 hba->vreg_info.vccq2->max_uA,
4781 icc_level,
4782 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4783out:
4784 return icc_level;
4785}
4786
4787static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4788{
4789 int ret;
4790 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
4791 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
4792
4793 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4794 if (ret) {
4795 dev_err(hba->dev,
4796 "%s: Failed reading power descriptor.len = %d ret = %d",
4797 __func__, buff_len, ret);
4798 return;
4799 }
4800
4801 hba->init_prefetch_data.icc_level =
4802 ufshcd_find_max_sup_active_icc_level(hba,
4803 desc_buf, buff_len);
4804 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4805 __func__, hba->init_prefetch_data.icc_level);
4806
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02004807 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4808 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4809 &hba->init_prefetch_data.icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03004810
4811 if (ret)
4812 dev_err(hba->dev,
4813 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4814			__func__, hba->init_prefetch_data.icc_level, ret);
4815
4816}
4817
4818/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004819 * ufshcd_scsi_add_wlus - Adds required W-LUs
4820 * @hba: per-adapter instance
4821 *
4822 * UFS device specification requires the UFS devices to support 4 well known
4823 * logical units:
4824 * "REPORT_LUNS" (address: 01h)
4825 * "UFS Device" (address: 50h)
4826 * "RPMB" (address: 44h)
4827 * "BOOT" (address: 30h)
4828 * UFS device's power management needs to be controlled by "POWER CONDITION"
4829 * field of SSU (START STOP UNIT) command. But this "power condition" field
4830 * will take effect only when it's sent to the "UFS device" well known logical unit,
4831 * hence we require the scsi_device instance to represent this logical unit in
4832 * order for the UFS host driver to send the SSU command for power management.
4833 *
4834 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
4835 * Block) LU so a user-space process can control this LU. User space may also
4836 * want to have access to BOOT LU.
4837 *
4838 * This function adds scsi device instances for all well known LUs
4839 * (except "REPORT LUNS" LU).
4840 *
4841 * Returns zero on success (all required W-LUs are added successfully),
4842 * non-zero error value on failure (if failed to add any of the required W-LU).
4843 */
4844static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4845{
4846 int ret = 0;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004847 struct scsi_device *sdev_rpmb;
4848 struct scsi_device *sdev_boot;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004849
4850 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4851 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4852 if (IS_ERR(hba->sdev_ufs_device)) {
4853 ret = PTR_ERR(hba->sdev_ufs_device);
4854 hba->sdev_ufs_device = NULL;
4855 goto out;
4856 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004857 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004858
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004859 sdev_boot = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004860 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004861 if (IS_ERR(sdev_boot)) {
4862 ret = PTR_ERR(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004863 goto remove_sdev_ufs_device;
4864 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004865 scsi_device_put(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004866
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004867 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004868 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004869 if (IS_ERR(sdev_rpmb)) {
4870 ret = PTR_ERR(sdev_rpmb);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004871 goto remove_sdev_boot;
4872 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004873 scsi_device_put(sdev_rpmb);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004874 goto out;
4875
4876remove_sdev_boot:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004877 scsi_remove_device(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004878remove_sdev_ufs_device:
4879 scsi_remove_device(hba->sdev_ufs_device);
4880out:
4881 return ret;
4882}
4883
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02004884static int ufs_get_device_info(struct ufs_hba *hba,
4885 struct ufs_device_info *card_data)
4886{
4887 int err;
4888 u8 model_index;
4889 u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
4890 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
4891
4892 err = ufshcd_read_device_desc(hba, desc_buf,
4893 QUERY_DESC_DEVICE_MAX_SIZE);
4894 if (err) {
4895 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
4896 __func__, err);
4897 goto out;
4898 }
4899
4900 /*
4901	 * The vendor ID (wManufacturerID) is stored in the descriptor in
4902	 * big-endian format
4903 */
4904 card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
4905 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
4906
4907 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
4908
4909 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
4910 QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
4911 if (err) {
4912 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
4913 __func__, err);
4914 goto out;
4915 }
4916
4917 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
4918 strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
4919 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
4920 MAX_MODEL_LEN));
4921
4922 /* Null terminate the model string */
4923 card_data->model[MAX_MODEL_LEN] = '\0';
4924
4925out:
4926 return err;
4927}
4928
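The device descriptor stores multi-byte fields big-endian, which is why the two manufacturer-ID bytes are combined by hand above rather than read as a native u16. A tiny standalone sketch with made-up byte values:

#include <stdio.h>

int main(void)
{
	unsigned char desc_buf[2] = { 0x01, 0xce };	/* hypothetical descriptor bytes */
	unsigned short wmanufacturerid = desc_buf[0] << 8 | desc_buf[1];

	printf("wManufacturerID = 0x%04x\n", wmanufacturerid);	/* prints 0x01ce */
	return 0;
}
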
4929void ufs_advertise_fixup_device(struct ufs_hba *hba)
4930{
4931 int err;
4932 struct ufs_dev_fix *f;
4933 struct ufs_device_info card_data;
4934
4935 card_data.wmanufacturerid = 0;
4936
4937 err = ufs_get_device_info(hba, &card_data);
4938 if (err) {
4939 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
4940 __func__, err);
4941 return;
4942 }
4943
4944 for (f = ufs_fixups; f->quirk; f++) {
4945 if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
4946 (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
4947 (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
4948 !strcmp(f->card.model, UFS_ANY_MODEL)))
4949 hba->dev_quirks |= f->quirk;
4950 }
4951}
4952
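ufs_fixups is a table of (vendor, model-prefix) -> quirk entries terminated by an empty entry; the loop above ORs in every quirk whose vendor and model pattern match the probed device. A toy, self-contained model of that match (the real table is built with the UFS_FIX() macro from ufs_quirks.h, whose exact shape may differ from this sketch):

#include <stdio.h>
#include <string.h>

#define ANY_VENDOR	0xffff			/* stands in for UFS_ANY_VENDOR */

struct fix { unsigned short vendor; const char *model; unsigned int quirk; };

int main(void)
{
	struct fix fixups[] = {
		{ 0x01ce, "",        0x1 },	/* hypothetical vendor-wide quirk */
		{ ANY_VENDOR, "XYZ", 0x2 },	/* hypothetical model-prefix quirk */
		{ 0, NULL, 0 },			/* terminator */
	};
	unsigned short vendor = 0x01ce;
	const char *model = "ABC999";
	unsigned int quirks = 0;
	struct fix *f;

	for (f = fixups; f->quirk; f++)
		if ((f->vendor == vendor || f->vendor == ANY_VENDOR) &&
		    !strncmp(model, f->model, strlen(f->model)))
			quirks |= f->quirk;

	printf("quirks = 0x%x\n", quirks);	/* prints 0x1 */
	return 0;
}
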
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03004953/**
Yaniv Gardi37113102016-03-10 17:37:16 +02004954 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
4955 * @hba: per-adapter instance
4956 *
4957 * PA_TActivate parameter can be tuned manually if UniPro version is less than
4958 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
4959 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
4960 * the hibern8 exit latency.
4961 *
4962 * Returns zero on success, non-zero error value on failure.
4963 */
4964static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
4965{
4966 int ret = 0;
4967 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
4968
4969 ret = ufshcd_dme_peer_get(hba,
4970 UIC_ARG_MIB_SEL(
4971 RX_MIN_ACTIVATETIME_CAPABILITY,
4972 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4973 &peer_rx_min_activatetime);
4974 if (ret)
4975 goto out;
4976
4977 /* make sure proper unit conversion is applied */
4978 tuned_pa_tactivate =
4979 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
4980 / PA_TACTIVATE_TIME_UNIT_US);
4981 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
4982 tuned_pa_tactivate);
4983
4984out:
4985 return ret;
4986}
4987
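The conversion above only changes units: the peer reports RX_MIN_ACTIVATETIME in coarse steps while PA_TACTIVATE is programmed in finer steps. A worked example, assuming (as the unipro.h constants suggest) 100 us steps on the capability side and 10 us steps on the PA_TACTIVATE side:

#include <stdio.h>

#define RX_MIN_ACTIVATETIME_UNIT_US	100	/* assumed step size */
#define PA_TACTIVATE_TIME_UNIT_US	10	/* assumed step size */

int main(void)
{
	unsigned int peer_rx_min_activatetime = 2;	/* 2 * 100 us = 200 us */
	unsigned int tuned_pa_tactivate =
		(peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) /
		PA_TACTIVATE_TIME_UNIT_US;

	printf("PA_TACTIVATE = %u (= 200 us)\n", tuned_pa_tactivate);	/* prints 20 */
	return 0;
}
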
4988/**
4989 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
4990 * @hba: per-adapter instance
4991 *
4992 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
4993 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
4994 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
4995 * This optimal value can help reduce the hibern8 exit latency.
4996 *
4997 * Returns zero on success, non-zero error value on failure.
4998 */
4999static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
5000{
5001 int ret = 0;
5002 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
5003 u32 max_hibern8_time, tuned_pa_hibern8time;
5004
5005 ret = ufshcd_dme_get(hba,
5006 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
5007 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
5008 &local_tx_hibern8_time_cap);
5009 if (ret)
5010 goto out;
5011
5012 ret = ufshcd_dme_peer_get(hba,
5013 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
5014 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
5015 &peer_rx_hibern8_time_cap);
5016 if (ret)
5017 goto out;
5018
5019 max_hibern8_time = max(local_tx_hibern8_time_cap,
5020 peer_rx_hibern8_time_cap);
5021 /* make sure proper unit conversion is applied */
5022 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
5023 / PA_HIBERN8_TIME_UNIT_US);
5024 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
5025 tuned_pa_hibern8time);
5026out:
5027 return ret;
5028}
5029
Subhash Jadavani3a87bcd2017-04-04 19:32:07 +00005030/**
5031 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
5032 * less than device PA_TACTIVATE time.
5033 * @hba: per-adapter instance
5034 *
5035 * Some UFS devices require host PA_TACTIVATE to be lower than device
5036 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
5037 * enabled for such devices.
5038 *
5039 * Returns zero on success, non-zero error value on failure.
5040 */
5041static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
5042{
5043 int ret = 0;
5044 u32 granularity, peer_granularity;
5045 u32 pa_tactivate, peer_pa_tactivate;
5046 u32 pa_tactivate_us, peer_pa_tactivate_us;
5047 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
5048
5049 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5050 &granularity);
5051 if (ret)
5052 goto out;
5053
5054 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
5055 &peer_granularity);
5056 if (ret)
5057 goto out;
5058
5059 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
5060 (granularity > PA_GRANULARITY_MAX_VAL)) {
5061 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
5062 __func__, granularity);
5063 return -EINVAL;
5064 }
5065
5066 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
5067 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
5068 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
5069 __func__, peer_granularity);
5070 return -EINVAL;
5071 }
5072
5073 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
5074 if (ret)
5075 goto out;
5076
5077 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
5078 &peer_pa_tactivate);
5079 if (ret)
5080 goto out;
5081
5082 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
5083 peer_pa_tactivate_us = peer_pa_tactivate *
5084 gran_to_us_table[peer_granularity - 1];
5085
5086 if (pa_tactivate_us > peer_pa_tactivate_us) {
5087 u32 new_peer_pa_tactivate;
5088
5089 new_peer_pa_tactivate = pa_tactivate_us /
5090 gran_to_us_table[peer_granularity - 1];
5091 new_peer_pa_tactivate++;
5092 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
5093 new_peer_pa_tactivate);
5094 }
5095
5096out:
5097 return ret;
5098}
5099
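Because host and device may use different PA_GRANULARITY settings, both PA_TACTIVATE values are first converted to microseconds via the lookup table before being compared; when the host value is larger, the device value is rounded up in device granules. A worked example with illustrative readings:

#include <stdio.h>

int main(void)
{
	unsigned char gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
	unsigned int granularity = 1, pa_tactivate = 64;	  /* host: 64 x 1 us */
	unsigned int peer_granularity = 4, peer_pa_tactivate = 2; /* dev: 2 x 16 us */
	unsigned int pa_us = pa_tactivate * gran_to_us_table[granularity - 1];
	unsigned int peer_us =
		peer_pa_tactivate * gran_to_us_table[peer_granularity - 1];

	if (pa_us > peer_us)	/* 64 us > 32 us: bump the device value */
		printf("new peer PA_TACTIVATE = %u\n",
		       pa_us / gran_to_us_table[peer_granularity - 1] + 1); /* 5 */
	return 0;
}
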
Yaniv Gardi37113102016-03-10 17:37:16 +02005100static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
5101{
5102 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
5103 ufshcd_tune_pa_tactivate(hba);
5104 ufshcd_tune_pa_hibern8time(hba);
5105 }
5106
5107 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
5108 /* set 1ms timeout for PA_TACTIVATE */
5109 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
Subhash Jadavani3a87bcd2017-04-04 19:32:07 +00005110
5111 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
5112 ufshcd_quirk_tune_host_pa_tactivate(hba);
Subhash Jadavanibedc6292017-04-04 19:32:13 +00005113
5114 ufshcd_vops_apply_dev_quirks(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02005115}
5116
5117/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005118 * ufshcd_probe_hba - probe hba to detect device and initialize
5119 * @hba: per-adapter instance
5120 *
5121 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305122 */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005123static int ufshcd_probe_hba(struct ufs_hba *hba)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305124{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305125 int ret;
5126
5127 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305128 if (ret)
5129 goto out;
5130
Yaniv Gardi50646362014-10-23 13:25:13 +03005131 ufshcd_init_pwr_info(hba);
5132
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005133 /* set the default level for urgent bkops */
5134 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5135 hba->is_urgent_bkops_lvl_checked = false;
5136
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005137 /* UniPro link is active now */
5138 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05305139
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305140 ret = ufshcd_verify_dev_init(hba);
5141 if (ret)
5142 goto out;
5143
Dolev Raviv68078d52013-07-30 00:35:58 +05305144 ret = ufshcd_complete_dev_init(hba);
5145 if (ret)
5146 goto out;
5147
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02005148 ufs_advertise_fixup_device(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02005149 ufshcd_tune_unipro_params(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02005150
5151 ret = ufshcd_set_vccq_rail_unused(hba,
5152 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5153 if (ret)
5154 goto out;
5155
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005156 /* UFS device is also active now */
5157 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305158 ufshcd_force_reset_auto_bkops(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005159 hba->wlun_dev_clr_ua = true;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305160
Dolev Raviv7eb584d2014-09-25 15:32:31 +03005161 if (ufshcd_get_max_pwr_mode(hba)) {
5162 dev_err(hba->dev,
5163 "%s: Failed getting max supported power mode\n",
5164 __func__);
5165 } else {
5166 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
5167 if (ret)
5168 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5169 __func__, ret);
5170 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005171
Yaniv Gardi53c12d02016-02-01 15:02:45 +02005172 /* set the state as operational after switching to desired gear */
5173 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005174 /*
5175 * If we are in error handling context or in power management callbacks
5176 * context, no need to scan the host
5177 */
5178 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5179 bool flag;
5180
5181 /* clear any previous UFS device information */
5182 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005183 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
5184 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005185 hba->dev_info.f_power_on_wp_en = flag;
5186
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005187 if (!hba->is_init_prefetch)
5188 ufshcd_init_icc_levels(hba);
5189
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03005190 /* Add required well known logical units to scsi mid layer */
5191 if (ufshcd_scsi_add_wlus(hba))
5192 goto out;
5193
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305194 scsi_scan_host(hba->host);
5195 pm_runtime_put_sync(hba->dev);
5196 }
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03005197
5198 if (!hba->is_init_prefetch)
5199 hba->is_init_prefetch = true;
5200
Sahitya Tummala856b3482014-09-25 15:32:34 +03005201 /* Resume devfreq after UFS device is detected */
5202 if (ufshcd_is_clkscaling_enabled(hba))
5203 devfreq_resume_device(hba->devfreq);
5204
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305205out:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005206 /*
5207 * If we failed to initialize the device or the device is not
5208 * present, turn off the power/clocks etc.
5209 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005210 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5211 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005212 ufshcd_hba_exit(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005213 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005214
5215 return ret;
5216}
5217
5218/**
5219 * ufshcd_async_scan - asynchronous execution for probing hba
5220 * @data: data pointer to pass to this function
5221 * @cookie: cookie data
5222 */
5223static void ufshcd_async_scan(void *data, async_cookie_t cookie)
5224{
5225 struct ufs_hba *hba = (struct ufs_hba *)data;
5226
5227 ufshcd_probe_hba(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305228}
5229
Yaniv Gardif550c652016-03-10 17:37:07 +02005230static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5231{
5232 unsigned long flags;
5233 struct Scsi_Host *host;
5234 struct ufs_hba *hba;
5235 int index;
5236 bool found = false;
5237
5238 if (!scmd || !scmd->device || !scmd->device->host)
5239 return BLK_EH_NOT_HANDLED;
5240
5241 host = scmd->device->host;
5242 hba = shost_priv(host);
5243 if (!hba)
5244 return BLK_EH_NOT_HANDLED;
5245
5246 spin_lock_irqsave(host->host_lock, flags);
5247
5248 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5249 if (hba->lrb[index].cmd == scmd) {
5250 found = true;
5251 break;
5252 }
5253 }
5254
5255 spin_unlock_irqrestore(host->host_lock, flags);
5256
5257 /*
5258 * Bypass SCSI error handling and reset the block layer timer if this
5259 * SCSI command was not actually dispatched to UFS driver, otherwise
5260 * let SCSI layer handle the error as usual.
5261 */
5262 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5263}
5264
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305265static struct scsi_host_template ufshcd_driver_template = {
5266 .module = THIS_MODULE,
5267 .name = UFSHCD,
5268 .proc_name = UFSHCD,
5269 .queuecommand = ufshcd_queuecommand,
5270 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09005271 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305272 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005273 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305274 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305275 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
5276 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Yaniv Gardif550c652016-03-10 17:37:07 +02005277 .eh_timed_out = ufshcd_eh_timed_out,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305278 .this_id = -1,
5279 .sg_tablesize = SG_ALL,
5280 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
5281 .can_queue = UFSHCD_CAN_QUEUE,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005282 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01005283 .track_queue_depth = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305284};
5285
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005286static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
5287 int ua)
5288{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005289 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005290
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005291 if (!vreg)
5292 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005293
Bjorn Andersson7b16a072015-02-11 19:35:28 -08005294 ret = regulator_set_load(vreg->reg, ua);
5295 if (ret < 0) {
5296 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
5297 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005298 }
5299
5300 return ret;
5301}
5302
5303static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
5304 struct ufs_vreg *vreg)
5305{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005306 if (!vreg)
5307 return 0;
5308 else if (vreg->unused)
5309 return 0;
5310 else
5311 return ufshcd_config_vreg_load(hba->dev, vreg,
5312 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005313}
5314
5315static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
5316 struct ufs_vreg *vreg)
5317{
Yaniv Gardi60f01872016-03-10 17:37:11 +02005318 if (!vreg)
5319 return 0;
5320 else if (vreg->unused)
5321 return 0;
5322 else
5323 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005324}
5325
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005326static int ufshcd_config_vreg(struct device *dev,
5327 struct ufs_vreg *vreg, bool on)
5328{
5329 int ret = 0;
Gustavo A. R. Silvaa248dc62017-11-20 08:12:29 -06005330 struct regulator *reg;
5331 const char *name;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005332 int min_uV, uA_load;
5333
5334 BUG_ON(!vreg);
5335
Gustavo A. R. Silvaa248dc62017-11-20 08:12:29 -06005336 reg = vreg->reg;
5337 name = vreg->name;
5338
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005339 if (regulator_count_voltages(reg) > 0) {
5340 min_uV = on ? vreg->min_uV : 0;
5341 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
5342 if (ret) {
5343 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
5344 __func__, name, ret);
5345 goto out;
5346 }
5347
5348 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005349 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
5350 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005351 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005352 }
5353out:
5354 return ret;
5355}
5356
5357static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
5358{
5359 int ret = 0;
5360
Yaniv Gardi60f01872016-03-10 17:37:11 +02005361 if (!vreg)
5362 goto out;
5363 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005364 goto out;
5365
5366 ret = ufshcd_config_vreg(dev, vreg, true);
5367 if (!ret)
5368 ret = regulator_enable(vreg->reg);
5369
5370 if (!ret)
5371 vreg->enabled = true;
5372 else
5373 dev_err(dev, "%s: %s enable failed, err=%d\n",
5374 __func__, vreg->name, ret);
5375out:
5376 return ret;
5377}
5378
5379static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
5380{
5381 int ret = 0;
5382
Yaniv Gardi60f01872016-03-10 17:37:11 +02005383 if (!vreg)
5384 goto out;
5385 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005386 goto out;
5387
5388 ret = regulator_disable(vreg->reg);
5389
5390 if (!ret) {
5391 /* ignore errors on applying disable config */
5392 ufshcd_config_vreg(dev, vreg, false);
5393 vreg->enabled = false;
5394 } else {
5395 dev_err(dev, "%s: %s disable failed, err=%d\n",
5396 __func__, vreg->name, ret);
5397 }
5398out:
5399 return ret;
5400}
5401
5402static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
5403{
5404 int ret = 0;
5405 struct device *dev = hba->dev;
5406 struct ufs_vreg_info *info = &hba->vreg_info;
5407
5408 if (!info)
5409 goto out;
5410
5411 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
5412 if (ret)
5413 goto out;
5414
5415 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
5416 if (ret)
5417 goto out;
5418
5419 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
5420 if (ret)
5421 goto out;
5422
5423out:
5424 if (ret) {
5425 ufshcd_toggle_vreg(dev, info->vccq2, false);
5426 ufshcd_toggle_vreg(dev, info->vccq, false);
5427 ufshcd_toggle_vreg(dev, info->vcc, false);
5428 }
5429 return ret;
5430}
5431
Raviv Shvili6a771a62014-09-25 15:32:24 +03005432static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
5433{
5434 struct ufs_vreg_info *info = &hba->vreg_info;
5435
5436 if (info)
5437 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
5438
5439 return 0;
5440}
5441
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005442static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
5443{
5444 int ret = 0;
5445
5446 if (!vreg)
5447 goto out;
5448
5449 vreg->reg = devm_regulator_get(dev, vreg->name);
5450 if (IS_ERR(vreg->reg)) {
5451 ret = PTR_ERR(vreg->reg);
5452 dev_err(dev, "%s: %s get failed, err=%d\n",
5453 __func__, vreg->name, ret);
5454 }
5455out:
5456 return ret;
5457}
5458
5459static int ufshcd_init_vreg(struct ufs_hba *hba)
5460{
5461 int ret = 0;
5462 struct device *dev = hba->dev;
5463 struct ufs_vreg_info *info = &hba->vreg_info;
5464
5465 if (!info)
5466 goto out;
5467
5468 ret = ufshcd_get_vreg(dev, info->vcc);
5469 if (ret)
5470 goto out;
5471
5472 ret = ufshcd_get_vreg(dev, info->vccq);
5473 if (ret)
5474 goto out;
5475
5476 ret = ufshcd_get_vreg(dev, info->vccq2);
5477out:
5478 return ret;
5479}
5480
Raviv Shvili6a771a62014-09-25 15:32:24 +03005481static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
5482{
5483 struct ufs_vreg_info *info = &hba->vreg_info;
5484
5485 if (info)
5486 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
5487
5488 return 0;
5489}
5490
Yaniv Gardi60f01872016-03-10 17:37:11 +02005491static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
5492{
5493 int ret = 0;
5494 struct ufs_vreg_info *info = &hba->vreg_info;
5495
5496 if (!info)
5497 goto out;
5498 else if (!info->vccq)
5499 goto out;
5500
5501 if (unused) {
5502 /* shut off the rail here */
5503 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
5504 /*
5505 * Mark this rail as no longer used, so it doesn't get enabled
5506 * later by mistake
5507 */
5508 if (!ret)
5509 info->vccq->unused = true;
5510 } else {
5511 /*
5512 * rail should have been already enabled hence just make sure
5513 * that unused flag is cleared.
5514 */
5515 info->vccq->unused = false;
5516 }
5517out:
5518 return ret;
5519}
5520
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005521static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5522 bool skip_ref_clk)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005523{
5524 int ret = 0;
5525 struct ufs_clk_info *clki;
5526 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005527 unsigned long flags;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005528
5529 if (!head || list_empty(head))
5530 goto out;
5531
5532 list_for_each_entry(clki, head, list) {
5533 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005534 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
5535 continue;
5536
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005537 if (on && !clki->enabled) {
5538 ret = clk_prepare_enable(clki->clk);
5539 if (ret) {
5540 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
5541 __func__, clki->name, ret);
5542 goto out;
5543 }
5544 } else if (!on && clki->enabled) {
5545 clk_disable_unprepare(clki->clk);
5546 }
5547 clki->enabled = on;
5548 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
5549 clki->name, on ? "en" : "dis");
5550 }
5551 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005552
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005553 ret = ufshcd_vops_setup_clocks(hba, on);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005554out:
5555 if (ret) {
5556 list_for_each_entry(clki, head, list) {
5557 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
5558 clk_disable_unprepare(clki->clk);
5559 }
Dolev Raviveda910e2014-10-23 13:25:16 +03005560 } else if (on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005561 spin_lock_irqsave(hba->host->host_lock, flags);
5562 hba->clk_gating.state = CLKS_ON;
5563 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005564 }
5565 return ret;
5566}
5567
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005568static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
5569{
5570 return __ufshcd_setup_clocks(hba, on, false);
5571}
5572
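The skip_ref_clk split exists for the suspend path: while the link is kept in Hibern8 the device-side reference clock must stay running, so only that clock is left on when gating. A toy model of that decision (the real call sites live in the PM code, outside this hunk, so treat this purely as a sketch):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void setup_clocks_model(bool on, bool skip_ref_clk)
{
	const char *clks[] = { "core_clk", "bus_clk", "ref_clk" };
	int i;

	for (i = 0; i < 3; i++) {
		if (skip_ref_clk && !strcmp(clks[i], "ref_clk"))
			continue;		/* leave ref_clk untouched */
		printf("%s -> %s\n", clks[i], on ? "on" : "off");
	}
}

int main(void)
{
	bool link_in_hibern8 = true;

	/* gate everything except ref_clk while the link sits in Hibern8 */
	setup_clocks_model(false, link_in_hibern8);
	return 0;
}
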
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005573static int ufshcd_init_clocks(struct ufs_hba *hba)
5574{
5575 int ret = 0;
5576 struct ufs_clk_info *clki;
5577 struct device *dev = hba->dev;
5578 struct list_head *head = &hba->clk_list_head;
5579
5580 if (!head || list_empty(head))
5581 goto out;
5582
5583 list_for_each_entry(clki, head, list) {
5584 if (!clki->name)
5585 continue;
5586
5587 clki->clk = devm_clk_get(dev, clki->name);
5588 if (IS_ERR(clki->clk)) {
5589 ret = PTR_ERR(clki->clk);
5590 dev_err(dev, "%s: %s clk get failed, %d\n",
5591 __func__, clki->name, ret);
5592 goto out;
5593 }
5594
5595 if (clki->max_freq) {
5596 ret = clk_set_rate(clki->clk, clki->max_freq);
5597 if (ret) {
5598 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5599 __func__, clki->name,
5600 clki->max_freq, ret);
5601 goto out;
5602 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03005603 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005604 }
5605 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
5606 clki->name, clk_get_rate(clki->clk));
5607 }
5608out:
5609 return ret;
5610}
5611
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005612static int ufshcd_variant_hba_init(struct ufs_hba *hba)
5613{
5614 int err = 0;
5615
5616 if (!hba->vops)
5617 goto out;
5618
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005619 err = ufshcd_vops_init(hba);
5620 if (err)
5621 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005622
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005623 err = ufshcd_vops_setup_regulators(hba, true);
5624 if (err)
5625 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005626
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005627 goto out;
5628
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005629out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005630 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005631out:
5632 if (err)
5633 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005634 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005635 return err;
5636}
5637
5638static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
5639{
5640 if (!hba->vops)
5641 return;
5642
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005643 ufshcd_vops_setup_clocks(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005644
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005645 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005646
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005647 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005648}
5649
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005650static int ufshcd_hba_init(struct ufs_hba *hba)
5651{
5652 int err;
5653
Raviv Shvili6a771a62014-09-25 15:32:24 +03005654 /*
5655 * Handle host controller power separately from the UFS device power
5656 * rails as it will help controlling the UFS host controller power
5657 * collapse easily which is different than UFS device power collapse.
5658 * Also, enable the host controller power before we go ahead with rest
5659 * of the initialization here.
5660 */
5661 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005662 if (err)
5663 goto out;
5664
Raviv Shvili6a771a62014-09-25 15:32:24 +03005665 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005666 if (err)
5667 goto out;
5668
Raviv Shvili6a771a62014-09-25 15:32:24 +03005669 err = ufshcd_init_clocks(hba);
5670 if (err)
5671 goto out_disable_hba_vreg;
5672
5673 err = ufshcd_setup_clocks(hba, true);
5674 if (err)
5675 goto out_disable_hba_vreg;
5676
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005677 err = ufshcd_init_vreg(hba);
5678 if (err)
5679 goto out_disable_clks;
5680
5681 err = ufshcd_setup_vreg(hba, true);
5682 if (err)
5683 goto out_disable_clks;
5684
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005685 err = ufshcd_variant_hba_init(hba);
5686 if (err)
5687 goto out_disable_vreg;
5688
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005689 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005690 goto out;
5691
5692out_disable_vreg:
5693 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03005694out_disable_clks:
5695 ufshcd_setup_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03005696out_disable_hba_vreg:
5697 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005698out:
5699 return err;
5700}
5701
5702static void ufshcd_hba_exit(struct ufs_hba *hba)
5703{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005704 if (hba->is_powered) {
5705 ufshcd_variant_hba_exit(hba);
5706 ufshcd_setup_vreg(hba, false);
5707 ufshcd_setup_clocks(hba, false);
5708 ufshcd_setup_hba_vreg(hba, false);
5709 hba->is_powered = false;
5710 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03005711}
5712
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005713static int
5714ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305715{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005716 unsigned char cmd[6] = {REQUEST_SENSE,
5717 0,
5718 0,
5719 0,
5720 SCSI_SENSE_BUFFERSIZE,
5721 0};
5722 char *buffer;
5723 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305724
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005725 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5726 if (!buffer) {
5727 ret = -ENOMEM;
5728 goto out;
5729 }
5730
5731 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
5732 SCSI_SENSE_BUFFERSIZE, NULL,
5733 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
5734 if (ret)
5735 pr_err("%s: failed with err %d\n", __func__, ret);
5736
5737 kfree(buffer);
5738out:
5739 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305740}
5741
5742/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005743 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
5744 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305745 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005746 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305747 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005748 * Returns 0 if requested power mode is set successfully
5749 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305750 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005751static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
5752 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305753{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005754 unsigned char cmd[6] = { START_STOP };
5755 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005756 struct scsi_device *sdp;
5757 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005758 int ret;
5759
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005760 spin_lock_irqsave(hba->host->host_lock, flags);
5761 sdp = hba->sdev_ufs_device;
5762 if (sdp) {
5763 ret = scsi_device_get(sdp);
5764 if (!ret && !scsi_device_online(sdp)) {
5765 ret = -ENODEV;
5766 scsi_device_put(sdp);
5767 }
5768 } else {
5769 ret = -ENODEV;
5770 }
5771 spin_unlock_irqrestore(hba->host->host_lock, flags);
5772
5773 if (ret)
5774 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005775
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305776 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005777 * If scsi commands fail, the scsi mid-layer schedules scsi error-
5778 * handling, which would wait for host to be resumed. Since we know
5779 * we are functional while we are here, skip host resume in error
5780 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305781 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005782 hba->host->eh_noresume = 1;
5783 if (hba->wlun_dev_clr_ua) {
5784 ret = ufshcd_send_request_sense(hba, sdp);
5785 if (ret)
5786 goto out;
5787 /* Unit attention condition is cleared now */
5788 hba->wlun_dev_clr_ua = false;
5789 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305790
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005791 cmd[4] = pwr_mode << 4;
5792
5793 /*
5794	 * This function is generally called from the power management
5795	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
5796	 * already suspended children.
5797 */
5798 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
5799 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
5800 if (ret) {
5801 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02005802 "START_STOP failed for power mode: %d, result %x\n",
5803 pwr_mode, ret);
Hannes Reinecke21045512015-01-08 07:43:46 +01005804 if (driver_byte(ret) & DRIVER_SENSE)
5805 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005806 }
5807
5808 if (!ret)
5809 hba->curr_dev_pwr_mode = pwr_mode;
5810out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005811 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005812 hba->host->eh_noresume = 0;
5813 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305814}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305815
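The only interesting byte in the START STOP UNIT CDB built above is byte 4, whose upper nibble carries the POWER CONDITION code; cmd[4] = pwr_mode << 4 is all the packing there is. A standalone sketch of that step (the numeric mode code is illustrative; the real values come from enum ufs_dev_pwr_mode):

#include <stdio.h>

int main(void)
{
	unsigned char cmd[6] = { 0x1b };	/* START_STOP opcode */
	unsigned int pwr_mode = 2;		/* e.g. a sleep mode code */

	cmd[4] = pwr_mode << 4;			/* POWER CONDITION, upper nibble */
	printf("cmd[4] = 0x%02x\n", cmd[4]);	/* prints 0x20 */
	return 0;
}
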
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005816static int ufshcd_link_state_transition(struct ufs_hba *hba,
5817 enum uic_link_state req_link_state,
5818 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305819{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005820 int ret = 0;
5821
5822 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305823 return 0;
5824
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005825 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
5826 ret = ufshcd_uic_hibern8_enter(hba);
5827 if (!ret)
5828 ufshcd_set_link_hibern8(hba);
5829 else
5830 goto out;
5831 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305832 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005833 * If autobkops is enabled, link can't be turned off because
5834 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305835 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005836 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
5837		   (!check_for_bkops || !hba->auto_bkops_enabled)) {
5839 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02005840		 * Let's make sure that the link is in low power mode; we do this
5841		 * currently by putting the link in Hibern8. Another way to put the
5842		 * link in low power mode is to send a DME endpoint reset to the
5843		 * device and then a DME reset to the local UniPro, but putting the
5844		 * link in Hibern8 is much faster.
5845 */
5846 ret = ufshcd_uic_hibern8_enter(hba);
5847 if (ret)
5848 goto out;
5849 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005850 * Change controller state to "reset state" which
5851 * should also put the link in off/reset state
5852 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02005853 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005854 /*
5855 * TODO: Check if we need any delay to make sure that
5856 * controller is reset
5857 */
5858 ufshcd_set_link_off(hba);
5859 }
5860
5861out:
5862 return ret;
5863}
5864
5865static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
5866{
5867 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02005868 * It seems some UFS devices may keep drawing more than sleep current
5869	 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
5870	 * To avoid this situation, add a 2ms delay before putting these UFS
5871	 * rails in LPM mode.
5872 */
5873 if (!ufshcd_is_link_active(hba) &&
5874 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
5875 usleep_range(2000, 2100);
5876
5877 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005878	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
5879	 * save some power.
5880	 *
5881	 * If the UFS device and link are in OFF state, all power supplies (VCC,
5882	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
5883	 * required. If the UFS link is inactive (Hibern8 or OFF state) and the
5884	 * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
5885 *
5886	 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
5887	 * anyway in a low power state, which saves some power.
5888 */
5889 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
5890 !hba->dev_info.is_lu_power_on_wp) {
5891 ufshcd_setup_vreg(hba, false);
5892 } else if (!ufshcd_is_ufs_dev_active(hba)) {
5893 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
5894 if (!ufshcd_is_link_active(hba)) {
5895 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
5896 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
5897 }
5898 }
5899}
5900
5901static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
5902{
5903 int ret = 0;
5904
5905 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
5906 !hba->dev_info.is_lu_power_on_wp) {
5907 ret = ufshcd_setup_vreg(hba, true);
5908 } else if (!ufshcd_is_ufs_dev_active(hba)) {
5909 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
5910 if (!ret && !ufshcd_is_link_active(hba)) {
5911 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5912 if (ret)
5913 goto vcc_disable;
5914 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5915 if (ret)
5916 goto vccq_lpm;
5917 }
5918 }
5919 goto out;
5920
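/*
 * Error unwind: undo the rail configuration in the reverse order of how it
 * was brought up above.
 */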
5921vccq_lpm:
5922 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
5923vcc_disable:
5924 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
5925out:
5926 return ret;
5927}
5928
5929static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
5930{
5931 if (ufshcd_is_link_off(hba))
5932 ufshcd_setup_hba_vreg(hba, false);
5933}
5934
5935static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
5936{
5937 if (ufshcd_is_link_off(hba))
5938 ufshcd_setup_hba_vreg(hba, true);
5939}
5940
5941/**
5942 * ufshcd_suspend - helper function for suspend operations
5943 * @hba: per adapter instance
5944 * @pm_op: desired low power operation type
5945 *
5946 * This function will try to put the UFS device and link into low power
5947 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
5948 * (System PM level).
5949 *
5950 * If this function is called during shutdown, it will make sure that
5951 * both the UFS device and the UFS link are powered off.
5952 *
5953 * NOTE: UFS device & link must be active before entering this function.
5954 *
5955 * Returns 0 for success and non-zero for failure
5956 */
5957static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
5958{
5959 int ret = 0;
5960 enum ufs_pm_level pm_lvl;
5961 enum ufs_dev_pwr_mode req_dev_pwr_mode;
5962 enum uic_link_state req_link_state;
5963
5964 hba->pm_op_in_progress = 1;
5965 if (!ufshcd_is_shutdown_pm(pm_op)) {
5966 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
5967 hba->rpm_lvl : hba->spm_lvl;
5968 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
5969 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
5970 } else {
5971 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
5972 req_link_state = UIC_LINK_OFF_STATE;
5973 }
5974
5975 /*
5976	 * If we can't transition into any of the low power modes,
5977	 * just gate the clocks.
5978 */
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03005979 ufshcd_hold(hba, false);
5980 hba->clk_gating.is_suspended = true;
5981
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005982 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
5983 req_link_state == UIC_LINK_ACTIVE_STATE) {
5984 goto disable_clks;
5985 }
5986
5987 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
5988 (req_link_state == hba->uic_link_state))
5989 goto out;
5990
5991	/* UFS device & link must be active before entering this function */
5992 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
5993 ret = -EINVAL;
5994 goto out;
5995 }
5996
5997 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03005998 if (ufshcd_can_autobkops_during_suspend(hba)) {
5999 /*
6000			 * The device is idle with no requests in the queue, so
6001			 * allow background operations if the bkops status shows
6002			 * that performance might be impacted.
6003 */
6004 ret = ufshcd_urgent_bkops(hba);
6005 if (ret)
6006 goto enable_gating;
6007 } else {
6008 /* make sure that auto bkops is disabled */
6009 ufshcd_disable_auto_bkops(hba);
6010 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006011 }
6012
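	/*
	 * For runtime PM, leave the device in its current power mode while
	 * auto-bkops is still enabled; system/shutdown PM always moves the
	 * device to the requested mode after disabling bkops.
	 */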
6013 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
6014 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
6015 !ufshcd_is_runtime_pm(pm_op))) {
6016 /* ensure that bkops is disabled */
6017 ufshcd_disable_auto_bkops(hba);
6018 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
6019 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006020 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006021 }
6022
6023 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
6024 if (ret)
6025 goto set_dev_active;
6026
6027 ufshcd_vreg_set_lpm(hba);
6028
6029disable_clks:
6030 /*
Sahitya Tummala856b3482014-09-25 15:32:34 +03006031	 * The clock scaling needs access to controller registers. Hence, wait
6032 * for pending clock scaling work to be done before clocks are
6033 * turned off.
6034 */
6035 if (ufshcd_is_clkscaling_enabled(hba)) {
6036 devfreq_suspend_device(hba->devfreq);
6037 hba->clk_scaling.window_start_t = 0;
6038 }
6039 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006040	 * Call vendor specific suspend callback. As these callbacks may access
6041	 * vendor specific host controller register space, call them while the
6042	 * host clocks are still ON.
6043 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006044 ret = ufshcd_vops_suspend(hba, pm_op);
6045 if (ret)
6046 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006047
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006048 ret = ufshcd_vops_setup_clocks(hba, false);
6049 if (ret)
6050 goto vops_resume;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006051
6052 if (!ufshcd_is_link_active(hba))
6053 ufshcd_setup_clocks(hba, false);
6054 else
6055 /* If link is active, device ref_clk can't be switched off */
6056 __ufshcd_setup_clocks(hba, false, true);
6057
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006058 hba->clk_gating.state = CLKS_OFF;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006059 /*
6060	 * Disable the host irq as there won't be any host controller
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006061	 * transaction expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006062 */
6063 ufshcd_disable_irq(hba);
6064 /* Put the host controller in low power mode if possible */
6065 ufshcd_hba_vreg_set_lpm(hba);
6066 goto out;
6067
6068vops_resume:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006069 ufshcd_vops_resume(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006070set_link_active:
6071 ufshcd_vreg_set_hpm(hba);
6072 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
6073 ufshcd_set_link_active(hba);
6074 else if (ufshcd_is_link_off(hba))
6075 ufshcd_host_reset_and_restore(hba);
6076set_dev_active:
6077 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
6078 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006079enable_gating:
6080 hba->clk_gating.is_suspended = false;
6081 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006082out:
6083 hba->pm_op_in_progress = 0;
6084 return ret;
6085}
6086
6087/**
6088 * ufshcd_resume - helper function for resume operations
6089 * @hba: per adapter instance
6090 * @pm_op: runtime PM or system PM
6091 *
6092 * This function basically brings the UFS device, UniPro link and controller
6093 * to active state.
6094 *
6095 * Returns 0 for success and non-zero for failure
6096 */
6097static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6098{
6099 int ret;
6100 enum uic_link_state old_link_state;
6101
6102 hba->pm_op_in_progress = 1;
6103 old_link_state = hba->uic_link_state;
6104
6105 ufshcd_hba_vreg_set_hpm(hba);
6106 /* Make sure clocks are enabled before accessing controller */
6107 ret = ufshcd_setup_clocks(hba, true);
6108 if (ret)
6109 goto out;
6110
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006111 /* enable the host irq as host controller would be active soon */
6112 ret = ufshcd_enable_irq(hba);
6113 if (ret)
6114 goto disable_irq_and_vops_clks;
6115
6116 ret = ufshcd_vreg_set_hpm(hba);
6117 if (ret)
6118 goto disable_irq_and_vops_clks;
6119
6120 /*
6121 * Call vendor specific resume callback. As these callbacks may access
6122	 * vendor specific host controller register space, call them when the
6123 * host clocks are ON.
6124 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006125 ret = ufshcd_vops_resume(hba, pm_op);
6126 if (ret)
6127 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006128
6129 if (ufshcd_is_link_hibern8(hba)) {
6130 ret = ufshcd_uic_hibern8_exit(hba);
6131 if (!ret)
6132 ufshcd_set_link_active(hba);
6133 else
6134 goto vendor_suspend;
6135 } else if (ufshcd_is_link_off(hba)) {
6136 ret = ufshcd_host_reset_and_restore(hba);
6137 /*
6138 * ufshcd_host_reset_and_restore() should have already
6139 * set the link state as active
6140 */
6141 if (ret || !ufshcd_is_link_active(hba))
6142 goto vendor_suspend;
6143 }
6144
6145 if (!ufshcd_is_ufs_dev_active(hba)) {
6146 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
6147 if (ret)
6148 goto set_old_link_state;
6149 }
6150
subhashj@codeaurora.orgcfb09f02016-12-22 18:41:22 -08006151 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
6152 ufshcd_enable_auto_bkops(hba);
6153 else
6154 /*
6155		 * If BKOPS is urgently needed at this moment then keep auto-bkops
6156		 * enabled, or else disable it.
6157 */
6158 ufshcd_urgent_bkops(hba);
6159
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006160 hba->clk_gating.is_suspended = false;
6161
Sahitya Tummala856b3482014-09-25 15:32:34 +03006162 if (ufshcd_is_clkscaling_enabled(hba))
6163 devfreq_resume_device(hba->devfreq);
6164
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006165 /* Schedule clock gating in case of no access to UFS device yet */
6166 ufshcd_release(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006167 goto out;
6168
6169set_old_link_state:
6170 ufshcd_link_state_transition(hba, old_link_state, 0);
6171vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02006172 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006173disable_vreg:
6174 ufshcd_vreg_set_lpm(hba);
6175disable_irq_and_vops_clks:
6176 ufshcd_disable_irq(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006177 ufshcd_setup_clocks(hba, false);
6178out:
6179 hba->pm_op_in_progress = 0;
6180 return ret;
6181}
6182
6183/**
6184 * ufshcd_system_suspend - system suspend routine
6185 * @hba: per adapter instance
6187 *
6188 * Check the description of ufshcd_suspend() function for more details.
6189 *
6190 * Returns 0 for success and non-zero for failure
6191 */
6192int ufshcd_system_suspend(struct ufs_hba *hba)
6193{
6194 int ret = 0;
6195
6196 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03006197 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006198
6199 if (pm_runtime_suspended(hba->dev)) {
6200 if (hba->rpm_lvl == hba->spm_lvl)
6201 /*
6202			 * There is a possibility that the device may still be
6203			 * in active state during runtime suspend.
6204 */
6205 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
6206 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
6207 goto out;
6208
6209 /*
6210		 * The UFS device and/or UFS link low power states during runtime
6211		 * suspend seem to be different than what is expected during
6212		 * system suspend. Hence runtime resume the device & link and
6213		 * let the system suspend low power states take effect.
6214		 * TODO: If resume takes a long time, we might optimize it in the
6215		 * future by not resuming everything if possible.
6216 */
6217 ret = ufshcd_runtime_resume(hba);
6218 if (ret)
6219 goto out;
6220 }
6221
6222 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
6223out:
Dolev Ravive7850602014-09-25 15:32:36 +03006224 if (!ret)
6225 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006226 return ret;
6227}
6228EXPORT_SYMBOL(ufshcd_system_suspend);
6229
6230/**
6231 * ufshcd_system_resume - system resume routine
6232 * @hba: per adapter instance
6233 *
6234 * Returns 0 for success and non-zero for failure
6235 */
6237int ufshcd_system_resume(struct ufs_hba *hba)
6238{
6239 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
6240 /*
6241 * Let the runtime resume take care of resuming
6242 * if runtime suspended.
6243 */
6244 return 0;
6245
6246 return ufshcd_resume(hba, UFS_SYSTEM_PM);
6247}
6248EXPORT_SYMBOL(ufshcd_system_resume);
6249
6250/**
6251 * ufshcd_runtime_suspend - runtime suspend routine
6252 * @hba: per adapter instance
6253 *
6254 * Check the description of ufshcd_suspend() function for more details.
6255 *
6256 * Returns 0 for success and non-zero for failure
6257 */
6258int ufshcd_runtime_suspend(struct ufs_hba *hba)
6259{
6260 if (!hba || !hba->is_powered)
6261 return 0;
6262
6263 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306264}
6265EXPORT_SYMBOL(ufshcd_runtime_suspend);
6266
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006267/**
6268 * ufshcd_runtime_resume - runtime resume routine
6269 * @hba: per adapter instance
6270 *
6271 * This function basically brings the UFS device, UniPro link and controller
6272 * to active state. The following operations are done in this function:
6273 *
6274 * 1. Turn on all the controller related clocks
6275 * 2. Bring the UniPro link out of Hibernate state
6276 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
6277 * to active state.
6278 * 4. If auto-bkops is enabled on the device, disable it.
6279 *
6280 * So the following would be the possible power state after this function
6281 * returns successfully:
6282 * S1: UFS device in Active state with VCC rail ON
6283 * UniPro link in Active state
6284 * All the UFS/UniPro controller clocks are ON
6285 *
6286 * Returns 0 for success and non-zero for failure
6287 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306288int ufshcd_runtime_resume(struct ufs_hba *hba)
6289{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006290 if (!hba || !hba->is_powered)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306291 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006292 else
6293 return ufshcd_resume(hba, UFS_RUNTIME_PM);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306294}
6295EXPORT_SYMBOL(ufshcd_runtime_resume);
6296
6297int ufshcd_runtime_idle(struct ufs_hba *hba)
6298{
6299 return 0;
6300}
6301EXPORT_SYMBOL(ufshcd_runtime_idle);
6302
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306303/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006304 * ufshcd_shutdown - shutdown routine
6305 * @hba: per adapter instance
6306 *
6307 * This function powers off both the UFS device and the UFS link.
6308 *
6309 * Returns 0 always to allow force shutdown even in case of errors.
6310 */
6311int ufshcd_shutdown(struct ufs_hba *hba)
6312{
6313 int ret = 0;
6314
6315 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
6316 goto out;
6317
6318 if (pm_runtime_suspended(hba->dev)) {
6319 ret = ufshcd_runtime_resume(hba);
6320 if (ret)
6321 goto out;
6322 }
6323
6324 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
6325out:
6326 if (ret)
6327 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
6328 /* allow force shutdown even in case of errors */
6329 return 0;
6330}
6331EXPORT_SYMBOL(ufshcd_shutdown);
6332
6333/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306334 * ufshcd_remove - de-allocate the SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306335 * data structure memory
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306336 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306337 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306338void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306339{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05306340 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306341 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05306342 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02006343 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306344
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306345 scsi_host_put(hba->host);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006346
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006347 ufshcd_exit_clk_gating(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006348 if (ufshcd_is_clkscaling_enabled(hba))
6349 devfreq_remove_device(hba->devfreq);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006350 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306351}
6352EXPORT_SYMBOL_GPL(ufshcd_remove);
6353
6354/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02006355 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
6356 * @hba: pointer to Host Bus Adapter (HBA)
6357 */
6358void ufshcd_dealloc_host(struct ufs_hba *hba)
6359{
6360 scsi_host_put(hba->host);
6361}
6362EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
6363
6364/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09006365 * ufshcd_set_dma_mask - Set dma mask based on the controller
6366 * addressing capability
6367 * @hba: per adapter instance
6368 *
6369 * Returns 0 for success, non-zero for failure
6370 */
6371static int ufshcd_set_dma_mask(struct ufs_hba *hba)
6372{
6373 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
6374 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
6375 return 0;
6376 }
6377 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
6378}
6379
6380/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006381 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306382 * @dev: pointer to device handle
6383 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306384 * Returns 0 on success, non-zero value on failure
6385 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006386int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306387{
6388 struct Scsi_Host *host;
6389 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006390 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306391
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306392 if (!dev) {
6393 dev_err(dev,
6394 "Invalid memory reference for dev is NULL\n");
6395 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306396 goto out_error;
6397 }
6398
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306399 host = scsi_host_alloc(&ufshcd_driver_template,
6400 sizeof(struct ufs_hba));
6401 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306402 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306403 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306404 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306405 }
6406 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306407 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306408 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006409 *hba_handle = hba;
6410
6411out_error:
6412 return err;
6413}
6414EXPORT_SYMBOL(ufshcd_alloc_host);
6415
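/**
 * ufshcd_scale_clks - scale all controller clocks up or down
 * @hba: per adapter instance
 * @scale_up: true to move each clock to its max_freq, false to its min_freq
 *
 * Vendor ops are notified with PRE_CHANGE before and POST_CHANGE after the
 * clock rates are changed.
 *
 * Returns 0 for success and non-zero for failure
 */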
Sahitya Tummala856b3482014-09-25 15:32:34 +03006416static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
6417{
6418 int ret = 0;
6419 struct ufs_clk_info *clki;
6420 struct list_head *head = &hba->clk_list_head;
6421
6422 if (!head || list_empty(head))
6423 goto out;
6424
Yaniv Gardif06fcc72015-10-28 13:15:51 +02006425 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
6426 if (ret)
6427 return ret;
6428
Sahitya Tummala856b3482014-09-25 15:32:34 +03006429 list_for_each_entry(clki, head, list) {
6430 if (!IS_ERR_OR_NULL(clki->clk)) {
6431 if (scale_up && clki->max_freq) {
6432 if (clki->curr_freq == clki->max_freq)
6433 continue;
6434 ret = clk_set_rate(clki->clk, clki->max_freq);
6435 if (ret) {
6436 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6437 __func__, clki->name,
6438 clki->max_freq, ret);
6439 break;
6440 }
6441 clki->curr_freq = clki->max_freq;
6442
6443 } else if (!scale_up && clki->min_freq) {
6444 if (clki->curr_freq == clki->min_freq)
6445 continue;
6446 ret = clk_set_rate(clki->clk, clki->min_freq);
6447 if (ret) {
6448 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6449 __func__, clki->name,
6450 clki->min_freq, ret);
6451 break;
6452 }
6453 clki->curr_freq = clki->min_freq;
6454 }
6455 }
6456 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
6457 clki->name, clk_get_rate(clki->clk));
6458 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02006459
6460 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
6461
Sahitya Tummala856b3482014-09-25 15:32:34 +03006462out:
6463 return ret;
6464}
6465
6466static int ufshcd_devfreq_target(struct device *dev,
6467 unsigned long *freq, u32 flags)
6468{
6469 int err = 0;
6470 struct ufs_hba *hba = dev_get_drvdata(dev);
6471
6472 if (!ufshcd_is_clkscaling_enabled(hba))
6473 return -EINVAL;
6474
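	/*
	 * Only the two extremes are handled here: UINT_MAX requests the max
	 * frequency of every clock, 0 requests the min; any other target
	 * frequency is ignored.
	 */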
6475 if (*freq == UINT_MAX)
6476 err = ufshcd_scale_clks(hba, true);
6477 else if (*freq == 0)
6478 err = ufshcd_scale_clks(hba, false);
6479
6480 return err;
6481}
6482
6483static int ufshcd_devfreq_get_dev_status(struct device *dev,
6484 struct devfreq_dev_status *stat)
6485{
6486 struct ufs_hba *hba = dev_get_drvdata(dev);
6487 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
6488 unsigned long flags;
6489
6490 if (!ufshcd_is_clkscaling_enabled(hba))
6491 return -EINVAL;
6492
6493 memset(stat, 0, sizeof(*stat));
6494
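	/*
	 * Report the busy/total time accumulated since the last call, then
	 * restart the window so that load is always measured over the most
	 * recent polling interval.
	 */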
6495 spin_lock_irqsave(hba->host->host_lock, flags);
6496 if (!scaling->window_start_t)
6497 goto start_window;
6498
6499 if (scaling->is_busy_started)
6500 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
6501 scaling->busy_start_t));
6502
6503 stat->total_time = jiffies_to_usecs((long)jiffies -
6504 (long)scaling->window_start_t);
6505 stat->busy_time = scaling->tot_busy_t;
6506start_window:
6507 scaling->window_start_t = jiffies;
6508 scaling->tot_busy_t = 0;
6509
6510 if (hba->outstanding_reqs) {
6511 scaling->busy_start_t = ktime_get();
6512 scaling->is_busy_started = true;
6513 } else {
6514 scaling->busy_start_t = ktime_set(0, 0);
6515 scaling->is_busy_started = false;
6516 }
6517 spin_unlock_irqrestore(hba->host->host_lock, flags);
6518 return 0;
6519}
6520
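/*
 * devfreq polls ufshcd_devfreq_get_dev_status() every 100ms and invokes
 * ufshcd_devfreq_target() whenever the governor picks a new frequency.
 */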
6521static struct devfreq_dev_profile ufs_devfreq_profile = {
6522 .polling_ms = 100,
6523 .target = ufshcd_devfreq_target,
6524 .get_dev_status = ufshcd_devfreq_get_dev_status,
6525};
6526
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006527/**
6528 * ufshcd_init - Driver initialization routine
6529 * @hba: per-adapter instance
6530 * @mmio_base: base register address
6531 * @irq: Interrupt line of device
6532 * Returns 0 on success, non-zero value on failure
6533 */
6534int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6535{
6536 int err;
6537 struct Scsi_Host *host = hba->host;
6538 struct device *dev = hba->dev;
6539
6540 if (!mmio_base) {
6541 dev_err(hba->dev,
6542 "Invalid memory reference for mmio_base is NULL\n");
6543 err = -ENODEV;
6544 goto out_error;
6545 }
6546
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306547 hba->mmio_base = mmio_base;
6548 hba->irq = irq;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306549
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006550 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03006551 if (err)
6552 goto out_error;
6553
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306554 /* Read capabilities registers */
6555 ufshcd_hba_capabilities(hba);
6556
6557 /* Get UFS version supported by the controller */
6558 hba->ufs_version = ufshcd_get_ufs_version(hba);
6559
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05306560 /* Get Interrupt bit mask per version */
6561 hba->intr_mask = ufshcd_get_intr_mask(hba);
6562
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09006563 err = ufshcd_set_dma_mask(hba);
6564 if (err) {
6565 dev_err(hba->dev, "set dma mask failed\n");
6566 goto out_disable;
6567 }
6568
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306569 /* Allocate memory for host memory space */
6570 err = ufshcd_memory_alloc(hba);
6571 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306572 dev_err(hba->dev, "Memory allocation failed\n");
6573 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306574 }
6575
6576 /* Configure LRB */
6577 ufshcd_host_memory_configure(hba);
6578
6579 host->can_queue = hba->nutrs;
6580 host->cmd_per_lun = hba->nutrs;
6581 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03006582 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306583 host->max_channel = UFSHCD_MAX_CHANNEL;
6584 host->unique_id = host->host_no;
6585 host->max_cmd_len = MAX_CDB_SIZE;
6586
Dolev Raviv7eb584d2014-09-25 15:32:31 +03006587 hba->max_pwr_info.is_valid = false;
6588
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306589	/* Initialize wait queues for task management */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306590 init_waitqueue_head(&hba->tm_wq);
6591 init_waitqueue_head(&hba->tm_tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306592
6593 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306594 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306595 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306596
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306597 /* Initialize UIC command mutex */
6598 mutex_init(&hba->uic_cmd_mutex);
6599
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306600 /* Initialize mutex for device management commands */
6601 mutex_init(&hba->dev_cmd.lock);
6602
6603 /* Initialize device management tag acquire wait queue */
6604 init_waitqueue_head(&hba->dev_cmd.tag_wq);
6605
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006606 ufshcd_init_clk_gating(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02006607
6608 /*
6609 * In order to avoid any spurious interrupt immediately after
6610 * registering UFS controller interrupt handler, clear any pending UFS
6611 * interrupt status and disable all the UFS interrupts.
6612 */
6613 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
6614 REG_INTERRUPT_STATUS);
6615 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
6616 /*
6617 * Make sure that UFS interrupts are disabled and any pending interrupt
6618 * status is cleared before registering UFS interrupt handler.
6619 */
6620 mb();
6621
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306622 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09006623 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306624 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306625 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006626 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006627 } else {
6628 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306629 }
6630
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306631 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306632 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306633 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006634 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306635 }
6636
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306637 /* Host controller enable */
6638 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306639 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306640 dev_err(hba->dev, "Host controller enable failed\n");
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306641 goto out_remove_scsi_host;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306642 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306643
Sahitya Tummala856b3482014-09-25 15:32:34 +03006644 if (ufshcd_is_clkscaling_enabled(hba)) {
6645 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
6646 "simple_ondemand", NULL);
6647 if (IS_ERR(hba->devfreq)) {
6648 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
6649 PTR_ERR(hba->devfreq));
Wei Yongjun73811c92016-09-28 14:49:42 +00006650 err = PTR_ERR(hba->devfreq);
Sahitya Tummala856b3482014-09-25 15:32:34 +03006651 goto out_remove_scsi_host;
6652 }
6653 /* Suspend devfreq until the UFS device is detected */
6654 devfreq_suspend_device(hba->devfreq);
6655 hba->clk_scaling.window_start_t = 0;
6656 }
6657
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05306658 /* Hold auto suspend until async scan completes */
6659 pm_runtime_get_sync(dev);
6660
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006661 /*
subhashj@codeaurora.orgc5fc9462017-04-04 19:32:20 +00006662	 * We are assuming that the device wasn't put into a sleep/power-down
6663	 * state during the boot stage, before the kernel took over.
6664 * This assumption helps avoid doing link startup twice during
6665 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006666 */
subhashj@codeaurora.orgc5fc9462017-04-04 19:32:20 +00006667 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006668
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306669 async_schedule(ufshcd_async_scan, hba);
6670
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306671 return 0;
6672
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306673out_remove_scsi_host:
6674 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006675exit_gating:
6676 ufshcd_exit_clk_gating(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306677out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006678 hba->is_irq_enabled = false;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306679 scsi_host_put(host);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03006680 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306681out_error:
6682 return err;
6683}
6684EXPORT_SYMBOL_GPL(ufshcd_init);
6685
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05306686MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
6687MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05306688MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306689MODULE_LICENSE("GPL");
6690MODULE_VERSION(UFSHCD_DRIVER_VERSION);