/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <scsi/ufs/ioctl.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/blkdev.h>
#include "ufshcd.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-debugfs.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#ifdef CONFIG_DEBUG_FS

static int ufshcd_tag_req_type(struct request *rq)
{
	int rq_type = TS_WRITE;

	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
		rq_type = TS_NOT_SUPPORTED;
	else if (rq->cmd_flags & REQ_PREFLUSH)
		rq_type = TS_FLUSH;
	else if (rq_data_dir(rq) == READ)
		rq_type = (rq->cmd_flags & REQ_URGENT) ?
			TS_URGENT_READ : TS_READ;
	else if (rq->cmd_flags & REQ_URGENT)
		rq_type = TS_URGENT_WRITE;

	return rq_type;
}

static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
{
	ufsdbg_set_err_state(hba);
	if (type < UFS_ERR_MAX)
		hba->ufs_stats.err_stats[type]++;
}

static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
	struct request *rq =
		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
	u64 **tag_stats = hba->ufs_stats.tag_stats;
	int rq_type;

	if (!hba->ufs_stats.enabled)
		return;

	tag_stats[tag][TS_TAG]++;
	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
		return;

	WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
	rq_type = ufshcd_tag_req_type(rq);
	if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
		tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
}

static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
		struct scsi_cmnd *cmd)
{
	struct request *rq = cmd ? cmd->request : NULL;

	if (rq && rq->cmd_type & REQ_TYPE_FS)
		hba->ufs_stats.q_depth--;
}

static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int rq_type;
	struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
	s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
		lrbp->issue_time_stamp);

	/* update general request statistics */
	if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
		hba->ufs_stats.req_stats[TS_TAG].min = delta;
	hba->ufs_stats.req_stats[TS_TAG].count++;
	hba->ufs_stats.req_stats[TS_TAG].sum += delta;
	if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
		hba->ufs_stats.req_stats[TS_TAG].max = delta;
	if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
		hba->ufs_stats.req_stats[TS_TAG].min = delta;

	rq_type = ufshcd_tag_req_type(rq);
	if (rq_type == TS_NOT_SUPPORTED)
		return;

	/* update request type specific statistics */
	if (hba->ufs_stats.req_stats[rq_type].count == 0)
		hba->ufs_stats.req_stats[rq_type].min = delta;
	hba->ufs_stats.req_stats[rq_type].count++;
	hba->ufs_stats.req_stats[rq_type].sum += delta;
	if (delta > hba->ufs_stats.req_stats[rq_type].max)
		hba->ufs_stats.req_stats[rq_type].max = delta;
	if (delta < hba->ufs_stats.req_stats[rq_type].min)
		hba->ufs_stats.req_stats[rq_type].min = delta;
}

static void
ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
{
	if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
		hba->ufs_stats.query_stats_arr[opcode][idn]++;
}

#else
static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
}

static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
		struct scsi_cmnd *cmd)
{
}

static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
{
}

static inline
void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
}

static inline
void ufshcd_update_query_stats(struct ufs_hba *hba,
			       enum query_opcode opcode, u8 idn)
{
}
#endif

#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default value of auto suspend is 3 seconds */
#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */

#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE	10
#define UFSHCD_CLK_GATING_DELAY_MS_PERF	50

/* IOCTL opcode for command - ufs set device read only */
#define UFS_IOCTL_BLKROSET	BLKROSET

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})

static void ufshcd_hex_dump(struct ufs_hba *hba, const char * const str,
			    const void *buf, size_t len)
{
	/*
	 * device name is expected to take up ~20 characters and "str" passed
	 * to this function is expected to be of ~10 characters so we would need
	 * ~30 characters string to hold the concatenation of these 2 strings.
	 */
	#define MAX_PREFIX_STR_SIZE 50
	char prefix_str[MAX_PREFIX_STR_SIZE] = {0};

	/* concatenate the device name and "str" */
	snprintf(prefix_str, MAX_PREFIX_STR_SIZE, "%s %s: ",
		 dev_name(hba->dev), str);
	print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET,
		       16, 4, buf, len, false);
}

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define DEFAULT_UFSHCD_DBG_PRINT_EN	UFSHCD_DBG_PRINT_ALL

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

static inline bool ufshcd_is_valid_pm_lvl(int lvl)
{
	if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
		return true;
	else
		return false;
}

static irqreturn_t ufshcd_intr(int irq, void *__hba);
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int ufshcd_enable_clocks(struct ufs_hba *hba);
static int ufshcd_disable_clocks(struct ufs_hba *hba,
				 bool is_gating_context);
static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
					      bool is_gating_context);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void ufshcd_release_all(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	unsigned long flags;
	bool unblock = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->scsi_block_reqs_cnt--;
	unblock = !hba->scsi_block_reqs_cnt;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (unblock)
		scsi_unblock_requests(hba->host);
}
EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);

static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (!hba->scsi_block_reqs_cnt++)
		scsi_block_requests(hba->host);
}

void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_scsi_block_requests(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL(ufshcd_scsi_block_requests);

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

#ifdef CONFIG_TRACEPOINTS
static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp;
	int transfer_len = -1;

	lrbp = &hba->lrb[tag];

	if (lrbp->cmd) { /* data phase exists */
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
			doorbell, transfer_len, intr, lba, opcode);
}

static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
					unsigned int tag, const char *str)
{
	if (trace_ufshcd_command_enabled())
		ufshcd_add_command_trace(hba, tag, str);
}
#else
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
					unsigned int tag, const char *str)
{
}
#endif

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
		return;

	if (!head || list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
	int i;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
		return;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
	}
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
		return;

	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platform, as the printed
	 * values may be from cache and not the most recent value.
	 * To know whether you are looking at an un-cached version verify
	 * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
	 * during platform/pci probe function.
	 */
	ufshcd_hex_dump(hba, "host regs", hba->mmio_base,
			UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	ufshcd_vops_dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
		return;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
			tag, (u64)lrbp->utrd_dma_addr);
		ufshcd_hex_dump(hba, "UPIU TRD", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump(hba, "UPIU REQ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump(hba, "UPIU RSP", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));
		prdt_length =
			le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries phys@0x%llx",
			tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
		if (pr_prdt)
			ufshcd_hex_dump(hba, "UPIU PRDT", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
		return;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
		ufshcd_hex_dump(hba, "TM TRD", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
				tag);
		ufshcd_hex_dump(hba, "TM REQ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
				tag);
		ufshcd_hex_dump(hba, "TM RSP", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
		return;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
		hba->clk_gating.state, hba->hibern8_on_idle.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_info.quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	char *names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
		return;

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	/* allow fall through */
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	/* allow fall through */
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	if (!ufshcd_is_crypto_supported(hba))
		intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_is_crypto_supported(hba))
		val |= CRYPTO_GENERAL_ENABLE;
	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

static const char *ufschd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	default:			return "UNKNOWN";
	}
}

static const char *ufschd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	default:			return "UNKNOWN";
	}
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}

/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns 0 if successful
 * Returns < 0 for any other errors
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		return ret;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret) {
		ufshcd_set_clk_freq(hba, !scale_up);
		return ret;
	}

	return ret;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_enable_clocks(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		__ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
	    hba->hibern8_on_idle.is_enabled)
		/*
		 * Hibern8 enter work (on Idle) needs clocks to be ON hence
		 * make sure that it is flushed before turning off the clocks.
		 */
		flush_delayed_work(&hba->hibern8_on_idle.enter_work);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
		ufshcd_disable_clocks(hba, true);
	else
		/* If link is active, device ref_clk can't be switched off */
		ufshcd_disable_clocks_skip_ref_clk(hba, true);

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba) || no_sched)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);

	schedule_delayed_work(&hba->clk_gating.gate_work,
			      msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba, bool no_sched)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba, no_sched);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

1400static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1401 struct device_attribute *attr, char *buf)
1402{
1403 struct ufs_hba *hba = dev_get_drvdata(dev);
1404
1405 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1406}
1407
1408static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1409 struct device_attribute *attr, const char *buf, size_t count)
1410{
1411 struct ufs_hba *hba = dev_get_drvdata(dev);
1412 unsigned long flags, value;
1413
1414 if (kstrtoul(buf, 0, &value))
1415 return -EINVAL;
1416
1417 spin_lock_irqsave(hba->host->host_lock, flags);
1418 hba->clk_gating.delay_ms = value;
1419 spin_unlock_irqrestore(hba->host->host_lock, flags);
1420 return count;
1421}
1422
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001423static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
1424 struct device_attribute *attr, char *buf)
1425{
1426 struct ufs_hba *hba = dev_get_drvdata(dev);
1427
1428 return snprintf(buf, PAGE_SIZE, "%lu\n",
1429 hba->clk_gating.delay_ms_pwr_save);
1430}
1431
1432static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
1433 struct device_attribute *attr, const char *buf, size_t count)
1434{
1435 struct ufs_hba *hba = dev_get_drvdata(dev);
1436 unsigned long flags, value;
1437
1438 if (kstrtoul(buf, 0, &value))
1439 return -EINVAL;
1440
1441 spin_lock_irqsave(hba->host->host_lock, flags);
1442
1443 hba->clk_gating.delay_ms_pwr_save = value;
1444 if (ufshcd_is_clkscaling_supported(hba) &&
1445 !hba->clk_scaling.is_scaled_up)
1446 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
1447
1448 spin_unlock_irqrestore(hba->host->host_lock, flags);
1449 return count;
1450}
1451
1452static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
1453 struct device_attribute *attr, char *buf)
1454{
1455 struct ufs_hba *hba = dev_get_drvdata(dev);
1456
1457 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
1458}
1459
1460static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
1461 struct device_attribute *attr, const char *buf, size_t count)
1462{
1463 struct ufs_hba *hba = dev_get_drvdata(dev);
1464 unsigned long flags, value;
1465
1466 if (kstrtoul(buf, 0, &value))
1467 return -EINVAL;
1468
1469 spin_lock_irqsave(hba->host->host_lock, flags);
1470
1471 hba->clk_gating.delay_ms_perf = value;
1472 if (ufshcd_is_clkscaling_supported(hba) &&
1473 hba->clk_scaling.is_scaled_up)
1474 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
1475
1476 spin_unlock_irqrestore(hba->host->host_lock, flags);
1477 return count;
1478}
1479
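/*
 * Sysfs interface to enable/disable clock gating: disabling takes an extra
 * active_reqs reference so gate_work is never scheduled, enabling drops
 * that reference again via ufshcd_release().
 */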
1480static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1481 struct device_attribute *attr, char *buf)
1482{
1483 struct ufs_hba *hba = dev_get_drvdata(dev);
1484
1485 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1486}
1487
1488static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1489 struct device_attribute *attr, const char *buf, size_t count)
1490{
1491 struct ufs_hba *hba = dev_get_drvdata(dev);
1492 unsigned long flags;
1493 u32 value;
1494
1495 if (kstrtou32(buf, 0, &value))
1496 return -EINVAL;
1497
1498 value = !!value;
1499 if (value == hba->clk_gating.is_enabled)
1500 goto out;
1501
1502 if (value) {
1503 ufshcd_release(hba, false);
1504 } else {
1505 spin_lock_irqsave(hba->host->host_lock, flags);
1506 hba->clk_gating.active_reqs++;
1507 spin_unlock_irqrestore(hba->host->host_lock, flags);
1508 }
1509
1510 hba->clk_gating.is_enabled = value;
1511out:
1512 return count;
1513}
1514
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001515static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1516{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001517 struct ufs_clk_gating *gating = &hba->clk_gating;
1518
1519 hba->clk_gating.state = CLKS_ON;
1520
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001521 if (!ufshcd_is_clkgating_allowed(hba))
1522 return;
1523
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001524 INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
1525 INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001526
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001527 gating->is_enabled = true;
1528
1529 /*
1530	 * Scheduling the delayed work after 1 jiffy would let it run any time
1531	 * from 0 ms to 1000/HZ ms, which is not desirable for the hibern8
1532	 * enter work as it may impact performance if it runs almost
1533	 * immediately. Hence make sure that the hibern8 enter work gets
1534	 * scheduled at least 2 jiffies later (any time between 1000/HZ ms
1535	 * and 2000/HZ ms).
1536 */
1537 gating->delay_ms_pwr_save = jiffies_to_msecs(
1538 max_t(unsigned long,
1539 msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
1540 2));
1541 gating->delay_ms_perf = jiffies_to_msecs(
1542 max_t(unsigned long,
1543 msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
1544 2));
1545
1546 /* start with performance mode */
1547 gating->delay_ms = gating->delay_ms_perf;
1548
1549 if (!ufshcd_is_clkscaling_supported(hba))
1550 goto scaling_not_supported;
1551
1552 gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
1553 gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
1554 sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
1555 gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
1556 gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
1557 if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
1558 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
1559
1560 gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
1561 gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
1562 sysfs_attr_init(&gating->delay_perf_attr.attr);
1563 gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
1564 gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
1565 if (device_create_file(hba->dev, &gating->delay_perf_attr))
1566 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
1567
1568 goto add_clkgate_enable;
1569
1570scaling_not_supported:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001571 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1572 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1573 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1574 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1575 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
1576 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1577 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001578
1579add_clkgate_enable:
1580 gating->enable_attr.show = ufshcd_clkgate_enable_show;
1581 gating->enable_attr.store = ufshcd_clkgate_enable_store;
1582 sysfs_attr_init(&gating->enable_attr.attr);
1583 gating->enable_attr.attr.name = "clkgate_enable";
1584 gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
1585 if (device_create_file(hba->dev, &gating->enable_attr))
1586 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001587}
1588
1589static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1590{
1591 if (!ufshcd_is_clkgating_allowed(hba))
1592 return;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001593 if (ufshcd_is_clkscaling_supported(hba)) {
1594 device_remove_file(hba->dev,
1595 &hba->clk_gating.delay_pwr_save_attr);
1596 device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
1597 } else {
1598 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1599 }
1600 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
Akinobu Mita97cd6802014-11-24 14:24:18 +09001601 cancel_work_sync(&hba->clk_gating.ungate_work);
1602 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001603}
1604
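/*
 * Program the controller's auto-hibern8 idle timer: @delay is expressed in
 * ms, as the 1 ms timer scale is selected along with the delay value.
 */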
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001605static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
1606{
1607 ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
1608 AUTO_HIBERN8_IDLE_TIMER_MASK,
1609 AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
1610 REG_AUTO_HIBERN8_IDLE_TIMER);
1611 /* Make sure the timer gets applied before further operations */
1612 mb();
1613}
1614
1615/**
1616 * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
1617 *
1618 * @hba: per adapter instance
1619 * @async: This indicates whether caller wants to exit hibern8 asynchronously.
1620 *
1621 * Exit from hibern8 mode and set the link as active.
1622 *
1623 * Return 0 on success, non-zero on failure.
1624 */
1625static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
1626{
1627 int rc = 0;
1628 unsigned long flags;
1629
1630 if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1631 goto out;
1632
1633 spin_lock_irqsave(hba->host->host_lock, flags);
1634 hba->hibern8_on_idle.active_reqs++;
1635
1636 if (ufshcd_eh_in_progress(hba)) {
1637 spin_unlock_irqrestore(hba->host->host_lock, flags);
1638 return 0;
1639 }
1640
1641start:
1642 switch (hba->hibern8_on_idle.state) {
1643 case HIBERN8_EXITED:
1644 break;
1645 case REQ_HIBERN8_ENTER:
1646 if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
1647 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1648 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1649 hba->hibern8_on_idle.state);
1650 break;
1651 }
1652 /*
1653		 * If we are here, the hibern8 enter work is either done or
1654		 * currently running. Hence, fall through to cancel the hibern8
1655		 * work and exit hibern8.
1656 */
1657 case HIBERN8_ENTERED:
1658 __ufshcd_scsi_block_requests(hba);
1659 hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
1660 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1661 hba->hibern8_on_idle.state);
1662 schedule_work(&hba->hibern8_on_idle.exit_work);
1663 /*
1664 * fall through to check if we should wait for this
1665 * work to be done or not.
1666 */
1667 case REQ_HIBERN8_EXIT:
1668 if (async) {
1669 rc = -EAGAIN;
1670 hba->hibern8_on_idle.active_reqs--;
1671 break;
1672 } else {
1673 spin_unlock_irqrestore(hba->host->host_lock, flags);
1674 flush_work(&hba->hibern8_on_idle.exit_work);
1675 /* Make sure state is HIBERN8_EXITED before returning */
1676 spin_lock_irqsave(hba->host->host_lock, flags);
1677 goto start;
1678 }
1679 default:
1680 dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
1681 __func__, hba->hibern8_on_idle.state);
1682 break;
1683 }
1684 spin_unlock_irqrestore(hba->host->host_lock, flags);
1685out:
1686 return rc;
1687}
1688
1689/* host lock must be held before calling this variant */
1690static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
1691{
1692 unsigned long delay_in_jiffies;
1693
1694 if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1695 return;
1696
1697 hba->hibern8_on_idle.active_reqs--;
1698 BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
1699
1700 if (hba->hibern8_on_idle.active_reqs
1701 || hba->hibern8_on_idle.is_suspended
1702 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1703 || hba->lrb_in_use || hba->outstanding_tasks
1704 || hba->active_uic_cmd || hba->uic_async_done
1705 || ufshcd_eh_in_progress(hba) || no_sched)
1706 return;
1707
1708 hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
1709 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1710 hba->hibern8_on_idle.state);
1711 /*
1712	 * Scheduling the delayed work after 1 jiffy would let it run any time
1713	 * from 0 ms to 1000/HZ ms, which is not desirable for the hibern8
1714	 * enter work as it may impact performance if it runs almost
1715	 * immediately. Hence make sure that the hibern8 enter work gets
1716	 * scheduled at least 2 jiffies later (any time between 1000/HZ ms
1717	 * and 2000/HZ ms).
1718 */
1719 delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
1720 if (delay_in_jiffies == 1)
1721 delay_in_jiffies++;
1722
1723 schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
1724 delay_in_jiffies);
1725}
1726
1727static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
1728{
1729 unsigned long flags;
1730
1731 spin_lock_irqsave(hba->host->host_lock, flags);
1732 __ufshcd_hibern8_release(hba, no_sched);
1733 spin_unlock_irqrestore(hba->host->host_lock, flags);
1734}
1735
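/*
 * Delayed work to put the link into hibern8 once the host has stayed idle
 * for hibern8_on_idle.delay_ms; bails out if the host became busy or is
 * suspended in the meantime.
 */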
1736static void ufshcd_hibern8_enter_work(struct work_struct *work)
1737{
1738 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1739 hibern8_on_idle.enter_work.work);
1740 unsigned long flags;
1741
1742 spin_lock_irqsave(hba->host->host_lock, flags);
1743 if (hba->hibern8_on_idle.is_suspended) {
1744 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1745 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1746 hba->hibern8_on_idle.state);
1747 goto rel_lock;
1748 }
1749
1750 if (hba->hibern8_on_idle.active_reqs
1751 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1752 || hba->lrb_in_use || hba->outstanding_tasks
1753 || hba->active_uic_cmd || hba->uic_async_done)
1754 goto rel_lock;
1755
1756 spin_unlock_irqrestore(hba->host->host_lock, flags);
1757
1758 if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
1759 /* Enter failed */
1760 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1761 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1762 hba->hibern8_on_idle.state);
1763 goto out;
1764 }
1765 ufshcd_set_link_hibern8(hba);
1766
1767 /*
1768	 * If someone is here to cancel this work, hibern8_on_idle.state would
1769	 * already be marked as REQ_HIBERN8_EXIT. In that case keep the state
1770	 * as REQ_HIBERN8_EXIT, which anyway implies that we are in hibern8
1771	 * and a request to exit from it is pending. This keeps the state
1772	 * machine intact and ultimately prevents the cancel work from being
1773	 * done multiple times when new requests arrive before the current
1774	 * cancel work is done.
1775 */
1776 spin_lock_irqsave(hba->host->host_lock, flags);
1777 if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
1778 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
1779 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1780 hba->hibern8_on_idle.state);
1781 }
1782rel_lock:
1783 spin_unlock_irqrestore(hba->host->host_lock, flags);
1784out:
1785 return;
1786}
1787
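/*
 * Work item to bring the link out of hibern8 and unblock the SCSI requests
 * that were held off while the exit was in progress.
 */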
1788static void ufshcd_hibern8_exit_work(struct work_struct *work)
1789{
1790 int ret;
1791 unsigned long flags;
1792 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1793 hibern8_on_idle.exit_work);
1794
1795 cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
1796
1797 spin_lock_irqsave(hba->host->host_lock, flags);
1798 if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
1799 || ufshcd_is_link_active(hba)) {
1800 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1801 spin_unlock_irqrestore(hba->host->host_lock, flags);
1802 goto unblock_reqs;
1803 }
1804 spin_unlock_irqrestore(hba->host->host_lock, flags);
1805
1806 /* Exit from hibern8 */
1807 if (ufshcd_is_link_hibern8(hba)) {
1808 ufshcd_hold(hba, false);
1809 ret = ufshcd_uic_hibern8_exit(hba);
1810 ufshcd_release(hba, false);
1811 if (!ret) {
1812 spin_lock_irqsave(hba->host->host_lock, flags);
1813 ufshcd_set_link_active(hba);
1814 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1815 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1816 hba->hibern8_on_idle.state);
1817 spin_unlock_irqrestore(hba->host->host_lock, flags);
1818 }
1819 }
1820unblock_reqs:
1821 ufshcd_scsi_unblock_requests(hba);
1822}
1823
1824static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
1825 struct device_attribute *attr, char *buf)
1826{
1827 struct ufs_hba *hba = dev_get_drvdata(dev);
1828
1829 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
1830}
1831
1832static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
1833 struct device_attribute *attr, const char *buf, size_t count)
1834{
1835 struct ufs_hba *hba = dev_get_drvdata(dev);
1836 unsigned long flags, value;
1837
1838 if (kstrtoul(buf, 0, &value))
1839 return -EINVAL;
1840
1841 spin_lock_irqsave(hba->host->host_lock, flags);
1842 hba->hibern8_on_idle.delay_ms = value;
1843 spin_unlock_irqrestore(hba->host->host_lock, flags);
1844
1845 /* Update auto hibern8 timer value if supported */
1846 if (ufshcd_is_auto_hibern8_supported(hba) &&
1847 hba->hibern8_on_idle.is_enabled)
1848 ufshcd_set_auto_hibern8_timer(hba,
1849 hba->hibern8_on_idle.delay_ms);
1850
1851 return count;
1852}
1853
1854static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
1855 struct device_attribute *attr, char *buf)
1856{
1857 struct ufs_hba *hba = dev_get_drvdata(dev);
1858
1859 return snprintf(buf, PAGE_SIZE, "%d\n",
1860 hba->hibern8_on_idle.is_enabled);
1861}
1862
1863static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
1864 struct device_attribute *attr, const char *buf, size_t count)
1865{
1866 struct ufs_hba *hba = dev_get_drvdata(dev);
1867 unsigned long flags;
1868 u32 value;
1869
1870 if (kstrtou32(buf, 0, &value))
1871 return -EINVAL;
1872
1873 value = !!value;
1874 if (value == hba->hibern8_on_idle.is_enabled)
1875 goto out;
1876
1877 /* Update auto hibern8 timer value if supported */
1878 if (ufshcd_is_auto_hibern8_supported(hba)) {
1879 ufshcd_set_auto_hibern8_timer(hba,
1880 value ? hba->hibern8_on_idle.delay_ms : value);
1881 goto update;
1882 }
1883
1884 if (value) {
1885 /*
1886 * As clock gating work would wait for the hibern8 enter work
1887 * to finish, clocks would remain on during hibern8 enter work.
1888 */
1889 ufshcd_hold(hba, false);
1890 ufshcd_release_all(hba);
1891 } else {
1892 spin_lock_irqsave(hba->host->host_lock, flags);
1893 hba->hibern8_on_idle.active_reqs++;
1894 spin_unlock_irqrestore(hba->host->host_lock, flags);
1895 }
1896
1897update:
1898 hba->hibern8_on_idle.is_enabled = value;
1899out:
1900 return count;
1901}
1902
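/*
 * Set up hibern8-on-idle: prefer the controller's auto-hibern8 timer when
 * it is supported, otherwise fall back to the SW enter/exit work items,
 * and expose the delay/enable sysfs attributes.
 */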
1903static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
1904{
1905 /* initialize the state variable here */
1906 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1907
1908 if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
1909 !ufshcd_is_auto_hibern8_supported(hba))
1910 return;
1911
1912 if (ufshcd_is_auto_hibern8_supported(hba)) {
1913 hba->hibern8_on_idle.state = AUTO_HIBERN8;
1914 /*
1915 * Disable SW hibern8 enter on idle in case
1916 * auto hibern8 is supported
1917 */
1918 hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
1919 } else {
1920 INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
1921 ufshcd_hibern8_enter_work);
1922 INIT_WORK(&hba->hibern8_on_idle.exit_work,
1923 ufshcd_hibern8_exit_work);
1924 }
1925
1926 hba->hibern8_on_idle.delay_ms = 10;
1927 hba->hibern8_on_idle.is_enabled = true;
1928
1929 hba->hibern8_on_idle.delay_attr.show =
1930 ufshcd_hibern8_on_idle_delay_show;
1931 hba->hibern8_on_idle.delay_attr.store =
1932 ufshcd_hibern8_on_idle_delay_store;
1933 sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
1934 hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
1935 hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
1936 if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
1937 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
1938
1939 hba->hibern8_on_idle.enable_attr.show =
1940 ufshcd_hibern8_on_idle_enable_show;
1941 hba->hibern8_on_idle.enable_attr.store =
1942 ufshcd_hibern8_on_idle_enable_store;
1943 sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
1944 hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
1945 hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
1946 if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
1947 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
1948}
1949
1950static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
1951{
1952 if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
1953 !ufshcd_is_auto_hibern8_supported(hba))
1954 return;
1955 device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
1956 device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
1957}
1958
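/*
 * ufshcd_hold_all()/ufshcd_release_all() grab/drop both the clock gating
 * and the hibern8-on-idle references in one call.
 */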
1959static void ufshcd_hold_all(struct ufs_hba *hba)
1960{
1961 ufshcd_hold(hba, false);
1962 ufshcd_hibern8_hold(hba, false);
1963}
1964
1965static void ufshcd_release_all(struct ufs_hba *hba)
1966{
1967 ufshcd_hibern8_release(hba, false);
1968 ufshcd_release(hba, false);
1969}
1970
Sahitya Tummala856b3482014-09-25 15:32:34 +03001971/* Must be called with host lock acquired */
1972static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1973{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001974 bool queue_resume_work = false;
1975
1976 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03001977 return;
1978
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001979 if (!hba->clk_scaling.active_reqs++)
1980 queue_resume_work = true;
1981
1982 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1983 return;
1984
1985 if (queue_resume_work)
1986 queue_work(hba->clk_scaling.workq,
1987 &hba->clk_scaling.resume_work);
1988
1989 if (!hba->clk_scaling.window_start_t) {
1990 hba->clk_scaling.window_start_t = jiffies;
1991 hba->clk_scaling.tot_busy_t = 0;
1992 hba->clk_scaling.is_busy_started = false;
1993 }
1994
Sahitya Tummala856b3482014-09-25 15:32:34 +03001995 if (!hba->clk_scaling.is_busy_started) {
1996 hba->clk_scaling.busy_start_t = ktime_get();
1997 hba->clk_scaling.is_busy_started = true;
1998 }
1999}
2000
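/*
 * Account the time spent servicing requests towards the clk scaling busy
 * window; the window is closed once no requests are outstanding.
 */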
2001static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2002{
2003 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2004
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002005 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03002006 return;
2007
2008 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2009 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2010 scaling->busy_start_t));
2011 scaling->busy_start_t = ktime_set(0, 0);
2012 scaling->is_busy_started = false;
2013 }
2014}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002015
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302016/**
2017 * ufshcd_send_command - Send SCSI or device management commands
2018 * @hba: per adapter instance
2019 * @task_tag: Task tag of the command
2020 */
2021static inline
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002022int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302023{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002024 int ret = 0;
2025
2026 hba->lrb[task_tag].issue_time_stamp = ktime_get();
2027 hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
Sahitya Tummala856b3482014-09-25 15:32:34 +03002028 ufshcd_clk_scaling_start_busy(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302029 __set_bit(task_tag, &hba->outstanding_reqs);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302030 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002031 /* Make sure that doorbell is committed immediately */
2032 wmb();
2033 ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
2034 ufshcd_update_tag_stats(hba, task_tag);
2035 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302036}
2037
2038/**
2039 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2040 * @lrb - pointer to local reference block
2041 */
2042static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2043{
2044 int len;
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05302045 if (lrbp->sense_buffer &&
2046 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002047 int len_to_copy;
2048
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302049 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002050 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2051
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302052 memcpy(lrbp->sense_buffer,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302053 lrbp->ucd_rsp_ptr->sr.sense_data,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002054 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302055 }
2056}
2057
2058/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302059 * ufshcd_copy_query_response() - Copy the Query Response and the data
2060 * descriptor
2061 * @hba: per adapter instance
2062 * @lrb - pointer to local reference block
2063 */
2064static
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002065int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Dolev Raviv68078d52013-07-30 00:35:58 +05302066{
2067 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2068
Dolev Raviv68078d52013-07-30 00:35:58 +05302069 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302070
Dolev Raviv68078d52013-07-30 00:35:58 +05302071 /* Get the descriptor */
2072 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002073 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
Dolev Raviv68078d52013-07-30 00:35:58 +05302074 GENERAL_UPIU_REQUEST_SIZE;
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002075 u16 resp_len;
2076 u16 buf_len;
Dolev Raviv68078d52013-07-30 00:35:58 +05302077
2078 /* data segment length */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002079 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302080 MASK_QUERY_DATA_SEG_LEN;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002081 buf_len = be16_to_cpu(
2082 hba->dev_cmd.query.request.upiu_req.length);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002083 if (likely(buf_len >= resp_len)) {
2084 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2085 } else {
2086 dev_warn(hba->dev,
2087 "%s: Response size is bigger than buffer",
2088 __func__);
2089 return -EINVAL;
2090 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302091 }
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002092
2093 return 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302094}
2095
2096/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302097 * ufshcd_hba_capabilities - Read controller capabilities
2098 * @hba: per adapter instance
2099 */
2100static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2101{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302102 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302103
2104 /* nutrs and nutmrs are 0 based values */
2105 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2106 hba->nutmrs =
2107 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2108}
2109
2110/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302111 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2112 * to accept UIC commands
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302113 * @hba: per adapter instance
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302114 * Return true on success, else false
2115 */
2116static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2117{
2118 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2119 return true;
2120 else
2121 return false;
2122}
2123
2124/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302125 * ufshcd_get_upmcrs - Get the power mode change request status
2126 * @hba: Pointer to adapter instance
2127 *
2128 * This function gets the UPMCRS field of HCS register
2129 * Returns value of UPMCRS field
2130 */
2131static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2132{
2133 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2134}
2135
2136/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302137 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2138 * @hba: per adapter instance
2139 * @uic_cmd: UIC command
2140 *
2141 * Mutex must be held.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302142 */
2143static inline void
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302144ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302145{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302146 WARN_ON(hba->active_uic_cmd);
2147
2148 hba->active_uic_cmd = uic_cmd;
2149
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302150 /* Write Args */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302151 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2152 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2153 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302154
2155 /* Write UIC Cmd */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302156 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302157 REG_UIC_COMMAND);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302158}
2159
2160/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302161 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2162 * @hba: per adapter instance
2163 * @uic_command: UIC command
2164 *
2165 * Must be called with mutex held.
2166 * Returns 0 only if success.
2167 */
2168static int
2169ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2170{
2171 int ret;
2172 unsigned long flags;
2173
2174 if (wait_for_completion_timeout(&uic_cmd->done,
2175 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2176 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2177 else
2178 ret = -ETIMEDOUT;
2179
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002180 if (ret)
2181 ufsdbg_set_err_state(hba);
2182
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302183 spin_lock_irqsave(hba->host->host_lock, flags);
2184 hba->active_uic_cmd = NULL;
2185 spin_unlock_irqrestore(hba->host->host_lock, flags);
2186
2187 return ret;
2188}
2189
2190/**
2191 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2192 * @hba: per adapter instance
2193 * @uic_cmd: UIC command
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002194 * @completion: initialize the completion only if this is set to true
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302195 *
2196 * Identical to ufshcd_send_uic_cmd() except for the mutex handling.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002197 * Must be called with the mutex held and host_lock locked.
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302198 * Returns 0 only if success.
2199 */
2200static int
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002201__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2202 bool completion)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302203{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302204 if (!ufshcd_ready_for_uic_cmd(hba)) {
2205 dev_err(hba->dev,
2206 "Controller not ready to accept UIC commands\n");
2207 return -EIO;
2208 }
2209
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002210 if (completion)
2211 init_completion(&uic_cmd->done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302212
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302213 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302214
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002215 return 0;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302216}
2217
2218/**
2219 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2220 * @hba: per adapter instance
2221 * @uic_cmd: UIC command
2222 *
2223 * Returns 0 only if success.
2224 */
2225static int
2226ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2227{
2228 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002229 unsigned long flags;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302230
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002231 ufshcd_hold_all(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302232 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002233 ufshcd_add_delay_before_dme_cmd(hba);
2234
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002235 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002236 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002237 spin_unlock_irqrestore(hba->host->host_lock, flags);
2238 if (!ret)
2239 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2240
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002241 ufshcd_save_tstamp_of_last_dme_cmd(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302242 mutex_unlock(&hba->uic_cmd_mutex);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002243 ufshcd_release_all(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302244
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002245 ufsdbg_error_inject_dispatcher(hba,
2246 ERR_INJECT_UIC, 0, &ret);
2247
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302248 return ret;
2249}
2250
2251/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302252 * ufshcd_map_sg - Map scatter-gather list to prdt
2253 * @lrbp - pointer to local reference block
2254 *
2255 * Returns 0 in case of success, non-zero value in case of failure
2256 */
2257static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
2258{
2259 struct ufshcd_sg_entry *prd_table;
2260 struct scatterlist *sg;
2261 struct scsi_cmnd *cmd;
2262 int sg_segments;
2263 int i;
2264
2265 cmd = lrbp->cmd;
2266 sg_segments = scsi_dma_map(cmd);
2267 if (sg_segments < 0)
2268 return sg_segments;
2269
2270 if (sg_segments) {
2271 lrbp->utr_descriptor_ptr->prd_table_length =
2272 cpu_to_le16((u16) (sg_segments));
2273
2274 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2275
2276 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2277 prd_table[i].size =
2278 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2279 prd_table[i].base_addr =
2280 cpu_to_le32(lower_32_bits(sg->dma_address));
2281 prd_table[i].upper_addr =
2282 cpu_to_le32(upper_32_bits(sg->dma_address));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002283 prd_table[i].reserved = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302284 }
2285 } else {
2286 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2287 }
2288
2289 return 0;
2290}
2291
2292/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302293 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302294 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302295 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302296 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302297static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302298{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302299 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2300
2301 if (hba->ufs_version == UFSHCI_VERSION_10) {
2302 u32 rw;
2303 rw = set & INTERRUPT_MASK_RW_VER_10;
2304 set = rw | ((set ^ intrs) & intrs);
2305 } else {
2306 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302307 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302308
2309 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2310}
2311
2312/**
2313 * ufshcd_disable_intr - disable interrupts
2314 * @hba: per adapter instance
2315 * @intrs: interrupt bits
2316 */
2317static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2318{
2319 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2320
2321 if (hba->ufs_version == UFSHCI_VERSION_10) {
2322 u32 rw;
2323 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2324 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2325 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2326
2327 } else {
2328 set &= ~intrs;
2329 }
2330
2331 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302332}
2333
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002334static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
2335 struct ufshcd_lrb *lrbp)
2336{
2337 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2338 u8 cc_index = 0;
2339 bool enable = false;
2340 u64 dun = 0;
2341 int ret;
2342
2343 /*
2344 * Call vendor specific code to get crypto info for this request:
2345 * enable, crypto config. index, DUN.
2346 * If bypass is set, don't bother setting the other fields.
2347 */
2348 ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
2349 if (ret) {
2350 if (ret != -EAGAIN) {
2351 dev_err(hba->dev,
2352 "%s: failed to setup crypto request (%d)\n",
2353 __func__, ret);
2354 }
2355
2356 return ret;
2357 }
2358
2359 if (!enable)
2360 goto out;
2361
2362 req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
2363 if (lrbp->cmd->request && lrbp->cmd->request->bio)
2364 dun = lrbp->cmd->request->bio->bi_iter.bi_sector;
2365
2366 req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
2367 req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
2368out:
2369 return 0;
2370}
2371
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302372/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302373 * ufshcd_prepare_req_desc_hdr() - Fills the UTP transfer request
2374 * descriptor header according to the request
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002375 * @hba: per adapter instance
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302376 * @lrbp: pointer to local reference block
2377 * @upiu_flags: flags required in the header
2378 * @cmd_dir: requests data direction
2379 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002380static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
2381 struct ufshcd_lrb *lrbp, u32 *upiu_flags,
2382 enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302383{
2384 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2385 u32 data_direction;
2386 u32 dword_0;
2387
2388 if (cmd_dir == DMA_FROM_DEVICE) {
2389 data_direction = UTP_DEVICE_TO_HOST;
2390 *upiu_flags = UPIU_CMD_FLAGS_READ;
2391 } else if (cmd_dir == DMA_TO_DEVICE) {
2392 data_direction = UTP_HOST_TO_DEVICE;
2393 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2394 } else {
2395 data_direction = UTP_NO_DATA_TRANSFER;
2396 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2397 }
2398
2399 dword_0 = data_direction | (lrbp->command_type
2400 << UPIU_COMMAND_TYPE_OFFSET);
2401 if (lrbp->intr_cmd)
2402 dword_0 |= UTP_REQ_DESC_INT_CMD;
2403
2404 /* Transfer request descriptor header fields */
2405 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002406 /* dword_1 is reserved, hence it is set to 0 */
2407 req_desc->header.dword_1 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302408 /*
2409	 * Assign an invalid value to the command status; the controller
2410	 * updates OCS with the actual command status on command
2411	 * completion.
2412 */
2413 req_desc->header.dword_2 =
2414 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002415 /* dword_3 is reserved, hence it is set to 0 */
2416 req_desc->header.dword_3 = 0;
Yaniv Gardi51047262016-02-01 15:02:38 +02002417
2418 req_desc->prd_table_length = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002419
2420 if (ufshcd_is_crypto_supported(hba))
2421 return ufshcd_prepare_crypto_utrd(hba, lrbp);
2422
2423 return 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302424}
2425
2426/**
2427 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2428 * for scsi commands
2429 * @lrbp - local reference block pointer
2430 * @upiu_flags - flags
2431 */
2432static
2433void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2434{
2435 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002436 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302437
2438 /* command descriptor fields */
2439 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2440 UPIU_TRANSACTION_COMMAND, upiu_flags,
2441 lrbp->lun, lrbp->task_tag);
2442 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2443 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2444
2445 /* Total EHS length and Data segment length will be zero */
2446 ucd_req_ptr->header.dword_2 = 0;
2447
2448 ucd_req_ptr->sc.exp_data_transfer_len =
2449 cpu_to_be32(lrbp->cmd->sdb.length);
2450
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002451 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002452 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002453 if (cdb_len < MAX_CDB_SIZE)
2454 memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
2455 (MAX_CDB_SIZE - cdb_len));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002456 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302457}
2458
Dolev Raviv68078d52013-07-30 00:35:58 +05302459/**
2460 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
2461 * for query requests
2462 * @hba: UFS hba
2463 * @lrbp: local reference block pointer
2464 * @upiu_flags: flags
2465 */
2466static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2467 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2468{
2469 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2470 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302471 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05302472 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2473
2474 /* Query request header */
2475 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2476 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2477 lrbp->lun, lrbp->task_tag);
2478 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2479 0, query->request.query_func, 0, 0);
2480
Zang Leigang68612852016-08-25 17:39:19 +08002481	/* Data segment length is only needed for WRITE_DESC */
2482 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2483 ucd_req_ptr->header.dword_2 =
2484 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2485 else
2486 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302487
2488 /* Copy the Query Request buffer as is */
2489 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2490 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302491
2492 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002493 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2494 memcpy(descp, query->descriptor, len);
2495
Yaniv Gardi51047262016-02-01 15:02:38 +02002496 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05302497}
2498
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302499static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2500{
2501 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2502
2503 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2504
2505 /* command descriptor fields */
2506 ucd_req_ptr->header.dword_0 =
2507 UPIU_HEADER_DWORD(
2508 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02002509 /* clear rest of the fields of basic header */
2510 ucd_req_ptr->header.dword_1 = 0;
2511 ucd_req_ptr->header.dword_2 = 0;
2512
2513 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302514}
2515
2516/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002517 * ufshcd_compose_upiu - form a UFS Protocol Information Unit (UPIU)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302518 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302519 * @lrb - pointer to local reference block
2520 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002521static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302522{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302523 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302524 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302525
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002526 switch (lrbp->command_type) {
2527 case UTP_CMD_TYPE_SCSI:
2528 if (likely(lrbp->cmd)) {
2529 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
2530 &upiu_flags, lrbp->cmd->sc_data_direction);
2531 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2532 } else {
2533 ret = -EINVAL;
2534 }
2535 break;
2536 case UTP_CMD_TYPE_DEV_MANAGE:
2537 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
2538 DMA_NONE);
2539 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2540 ufshcd_prepare_utp_query_req_upiu(
2541 hba, lrbp, upiu_flags);
2542 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2543 ufshcd_prepare_utp_nop_upiu(lrbp);
2544 else
2545 ret = -EINVAL;
2546 break;
2547 case UTP_CMD_TYPE_UFS:
2548 /* For UFS native command implementation */
2549 ret = -ENOTSUPP;
2550		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
2551 __func__);
2552 break;
2553 default:
2554 ret = -ENOTSUPP;
2555 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
2556 __func__, lrbp->command_type);
2557 break;
2558 } /* end of switch */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302559
2560 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302561}
2562
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002563/*
2564 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2565 * @scsi_lun: scsi LUN id
2566 *
2567 * Returns UPIU LUN id
2568 */
2569static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2570{
2571 if (scsi_is_wlun(scsi_lun))
2572 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2573 | UFS_UPIU_WLUN_ID;
2574 else
2575 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2576}
2577
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302578/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002579 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2580 * @scsi_lun: UPIU W-LUN id
2581 *
2582 * Returns SCSI W-LUN id
2583 */
2584static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2585{
2586 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2587}
2588
2589/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302590 * ufshcd_queuecommand - main entry point for SCSI requests
2591 * @host: SCSI host pointer
2592 * @cmd: command from SCSI Midlayer
2593 *
2594 * Returns 0 for success, non-zero in case of failure
2595 */
2596static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2597{
2598 struct ufshcd_lrb *lrbp;
2599 struct ufs_hba *hba;
2600 unsigned long flags;
2601 int tag;
2602 int err = 0;
2603
2604 hba = shost_priv(host);
2605
2606 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02002607 if (!ufshcd_valid_tag(hba, tag)) {
2608 dev_err(hba->dev,
2609 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2610 __func__, tag, cmd, cmd->request);
2611 BUG();
2612 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302613
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002614 if (!down_read_trylock(&hba->clk_scaling_lock))
2615 return SCSI_MLQUEUE_HOST_BUSY;
2616
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302617 spin_lock_irqsave(hba->host->host_lock, flags);
2618 switch (hba->ufshcd_state) {
2619 case UFSHCD_STATE_OPERATIONAL:
2620 break;
2621 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302622 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302623 goto out_unlock;
2624 case UFSHCD_STATE_ERROR:
2625 set_host_byte(cmd, DID_ERROR);
2626 cmd->scsi_done(cmd);
2627 goto out_unlock;
2628 default:
2629 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2630 __func__, hba->ufshcd_state);
2631 set_host_byte(cmd, DID_BAD_TARGET);
2632 cmd->scsi_done(cmd);
2633 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302634 }
Yaniv Gardi53c12d02016-02-01 15:02:45 +02002635
2636 /* if error handling is in progress, don't issue commands */
2637 if (ufshcd_eh_in_progress(hba)) {
2638 set_host_byte(cmd, DID_ERROR);
2639 cmd->scsi_done(cmd);
2640 goto out_unlock;
2641 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302642 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302643
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002644 hba->req_abort_count = 0;
2645
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302646 /* acquire the tag to make sure device cmds don't use it */
2647 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2648 /*
2649		 * A device management command is in progress; requeue this command.
2650		 * Requeuing the command helps in cases where the request *may*
2651		 * find a different tag instead of waiting for the device management
2652		 * command to complete.
2653 */
2654 err = SCSI_MLQUEUE_HOST_BUSY;
2655 goto out;
2656 }
2657
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002658 err = ufshcd_hold(hba, true);
2659 if (err) {
2660 err = SCSI_MLQUEUE_HOST_BUSY;
2661 clear_bit_unlock(tag, &hba->lrb_in_use);
2662 goto out;
2663 }
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07002664
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002665 if (ufshcd_is_clkgating_allowed(hba))
2666 WARN_ON(hba->clk_gating.state != CLKS_ON);
2667
2668 err = ufshcd_hibern8_hold(hba, true);
2669 if (err) {
2670 clear_bit_unlock(tag, &hba->lrb_in_use);
2671 err = SCSI_MLQUEUE_HOST_BUSY;
2672 ufshcd_release(hba, true);
2673 goto out;
2674 }
2675 if (ufshcd_is_hibern8_on_idle_allowed(hba))
2676 WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
2677
2678 /* Vote PM QoS for the request */
2679 ufshcd_vops_pm_qos_req_start(hba, cmd->request);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03002680
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07002681 /* IO svc time latency histogram */
2682 if (hba != NULL && cmd->request != NULL) {
2683 if (hba->latency_hist_enabled &&
2684 (cmd->request->cmd_type == REQ_TYPE_FS)) {
2685 cmd->request->lat_hist_io_start = ktime_get();
2686 cmd->request->lat_hist_enabled = 1;
2687 } else
2688 cmd->request->lat_hist_enabled = 0;
2689 }
2690
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302691 WARN_ON(hba->clk_gating.state != CLKS_ON);
2692
2693 lrbp = &hba->lrb[tag];
2694
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302695 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302696 lrbp->cmd = cmd;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002697 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302698 lrbp->sense_buffer = cmd->sense_buffer;
2699 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002700 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03002701 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002702 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2703 lrbp->req_abort_skip = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302704
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002705 /* form UPIU before issuing the command */
2706 err = ufshcd_compose_upiu(hba, lrbp);
2707 if (err) {
2708 if (err != -EAGAIN)
2709 dev_err(hba->dev,
2710 "%s: failed to compose upiu %d\n",
2711 __func__, err);
2712
2713 lrbp->cmd = NULL;
2714 clear_bit_unlock(tag, &hba->lrb_in_use);
2715 ufshcd_release_all(hba);
2716 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
2717 goto out;
2718 }
Joao Pinto300bb132016-05-11 12:21:27 +01002719
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302720 err = ufshcd_map_sg(lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302721 if (err) {
2722 lrbp->cmd = NULL;
2723 clear_bit_unlock(tag, &hba->lrb_in_use);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002724 ufshcd_release_all(hba);
2725 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302726 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302727 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302728
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002729 err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
2730 if (err) {
2731 if (err != -EAGAIN)
2732 dev_err(hba->dev,
2733 "%s: failed to configure crypto engine %d\n",
2734 __func__, err);
2735
2736 scsi_dma_unmap(lrbp->cmd);
2737 lrbp->cmd = NULL;
2738 clear_bit_unlock(tag, &hba->lrb_in_use);
2739 ufshcd_release_all(hba);
2740 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
2741
2742 goto out;
2743 }
2744
2745 /* Make sure descriptors are ready before ringing the doorbell */
2746 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302747 /* issue command to the controller */
2748 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002749
2750 err = ufshcd_send_command(hba, tag);
2751 if (err) {
2752 spin_unlock_irqrestore(hba->host->host_lock, flags);
2753 scsi_dma_unmap(lrbp->cmd);
2754 lrbp->cmd = NULL;
2755 clear_bit_unlock(tag, &hba->lrb_in_use);
2756 ufshcd_release_all(hba);
2757 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
2758 ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
2759 dev_err(hba->dev, "%s: failed sending command, %d\n",
2760 __func__, err);
2761 err = DID_ERROR;
2762 goto out;
2763 }
2764
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05302765out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302766 spin_unlock_irqrestore(hba->host->host_lock, flags);
2767out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002768 up_read(&hba->clk_scaling_lock);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302769 return err;
2770}
2771
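/*
 * Compose a device management (NOP/Query) request on the given tag: no SCSI
 * command or sense buffer is attached and interrupt aggregation is bypassed.
 */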
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302772static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2773 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2774{
2775 lrbp->cmd = NULL;
2776 lrbp->sense_bufflen = 0;
2777 lrbp->sense_buffer = NULL;
2778 lrbp->task_tag = tag;
2779 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002780 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302781 lrbp->intr_cmd = true; /* No interrupt aggregation */
2782 hba->dev_cmd.type = cmd_type;
2783
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002784 return ufshcd_compose_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302785}
2786
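/*
 * Clear the transfer request slot @tag in the controller and wait (up to
 * 1 second) for the corresponding doorbell bit to be cleared by the h/w.
 */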
2787static int
2788ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2789{
2790 int err = 0;
2791 unsigned long flags;
2792 u32 mask = 1 << tag;
2793
2794 /* clear outstanding transaction before retry */
2795 spin_lock_irqsave(hba->host->host_lock, flags);
2796 ufshcd_utrl_clear(hba, tag);
2797 spin_unlock_irqrestore(hba->host->host_lock, flags);
2798
2799 /*
2800	 * wait for h/w to clear the corresponding bit in the doorbell.
2801	 * max. wait is 1 sec.
2802 */
2803 err = ufshcd_wait_for_register(hba,
2804 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02002805 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302806
2807 return err;
2808}
2809
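/* Extract the query response result code from the response UPIU header */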
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002810static int
2811ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2812{
2813 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2814
2815 /* Get the UPIU response */
2816 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2817 UPIU_RSP_CODE_OFFSET;
2818 return query_res->response;
2819}
2820
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302821/**
2822 * ufshcd_dev_cmd_completion() - handles device management command responses
2823 * @hba: per adapter instance
2824 * @lrbp: pointer to local reference block
2825 */
2826static int
2827ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2828{
2829 int resp;
2830 int err = 0;
2831
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002832 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302833 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2834
2835 switch (resp) {
2836 case UPIU_TRANSACTION_NOP_IN:
2837 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2838 err = -EINVAL;
2839 dev_err(hba->dev, "%s: unexpected response %x\n",
2840 __func__, resp);
2841 }
2842 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05302843 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002844 err = ufshcd_check_query_response(hba, lrbp);
2845 if (!err)
2846 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05302847 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302848 case UPIU_TRANSACTION_REJECT_UPIU:
2849 /* TODO: handle Reject UPIU Response */
2850 err = -EPERM;
2851 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2852 __func__);
2853 break;
2854 default:
2855 err = -EINVAL;
2856 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2857 __func__, resp);
2858 break;
2859 }
2860
2861 return err;
2862}
2863
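/*
 * Wait for a device management command to complete; on timeout, try to
 * clear the command from the doorbell and return -ETIMEDOUT, or -EAGAIN if
 * the clear succeeded and the command may be retried.
 */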
2864static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2865 struct ufshcd_lrb *lrbp, int max_timeout)
2866{
2867 int err = 0;
2868 unsigned long time_left;
2869 unsigned long flags;
2870
2871 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2872 msecs_to_jiffies(max_timeout));
2873
2874 spin_lock_irqsave(hba->host->host_lock, flags);
2875 hba->dev_cmd.complete = NULL;
2876 if (likely(time_left)) {
2877 err = ufshcd_get_tr_ocs(lrbp);
2878 if (!err)
2879 err = ufshcd_dev_cmd_completion(hba, lrbp);
2880 }
2881 spin_unlock_irqrestore(hba->host->host_lock, flags);
2882
2883 if (!time_left) {
2884 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002885		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2886 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302887 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02002888 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302889 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02002890 /*
2891 * in case of an error, after clearing the doorbell,
2892 * we also need to clear the outstanding_request
2893 * field in hba
2894 */
2895 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302896 }
2897
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002898 if (err)
2899 ufsdbg_set_err_state(hba);
2900
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302901 return err;
2902}
2903
2904/**
2905 * ufshcd_get_dev_cmd_tag - Get device management command tag
2906 * @hba: per-adapter instance
2907 * @tag_out: pointer to variable with available slot value
2908 *
2909 * Get a free slot and lock it until device management command
2910 * completes.
2911 *
2912 * Returns false if free slot is unavailable for locking, else
2913 * return true with the tag value in @tag_out.
2914 */
2915static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2916{
2917 int tag;
2918 bool ret = false;
2919 unsigned long tmp;
2920
2921 if (!tag_out)
2922 goto out;
2923
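 /*
  * Scan from the highest free bit so device management commands use the
  * last tags first; retry if another context grabs the chosen bit between
  * the search and the atomic test_and_set_bit_lock() below.
  */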
2924 do {
2925 tmp = ~hba->lrb_in_use;
2926 tag = find_last_bit(&tmp, hba->nutrs);
2927 if (tag >= hba->nutrs)
2928 goto out;
2929 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2930
2931 *tag_out = tag;
2932 ret = true;
2933out:
2934 return ret;
2935}
2936
2937static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2938{
2939 clear_bit_unlock(tag, &hba->lrb_in_use);
2940}
2941
2942/**
2943 * ufshcd_exec_dev_cmd - API for sending device management requests
2944 * @hba - UFS hba
2945 * @cmd_type - specifies the type (NOP, Query...)
 2946 * @timeout - timeout in milliseconds
2947 *
Dolev Raviv68078d52013-07-30 00:35:58 +05302948 * NOTE: Since there is only one available tag for device management commands,
2949 * it is expected you hold the hba->dev_cmd.lock mutex.
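 *
 * For example, the query path below issues its request as:
 *   err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);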
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302950 */
2951static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2952 enum dev_cmd_type cmd_type, int timeout)
2953{
2954 struct ufshcd_lrb *lrbp;
2955 int err;
2956 int tag;
2957 struct completion wait;
2958 unsigned long flags;
2959
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002960 down_read(&hba->clk_scaling_lock);
2961
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302962 /*
2963 * Get free slot, sleep if slots are unavailable.
2964 * Even though we use wait_event() which sleeps indefinitely,
2965 * the maximum wait time is bounded by SCSI request timeout.
2966 */
2967 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2968
2969 init_completion(&wait);
2970 lrbp = &hba->lrb[tag];
2971 WARN_ON(lrbp->cmd);
2972 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2973 if (unlikely(err))
2974 goto out_put_tag;
2975
2976 hba->dev_cmd.complete = &wait;
2977
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02002978 /* Make sure descriptors are ready before ringing the doorbell */
2979 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302980 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002981 err = ufshcd_send_command(hba, tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302982 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002983 if (err) {
2984 dev_err(hba->dev, "%s: failed sending command, %d\n",
2985 __func__, err);
2986 goto out_put_tag;
2987 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302988 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2989
2990out_put_tag:
2991 ufshcd_put_dev_cmd_tag(hba, tag);
2992 wake_up(&hba->dev_cmd.tag_wq);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002993 up_read(&hba->clk_scaling_lock);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302994 return err;
2995}
2996
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302997/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002998 * ufshcd_init_query() - init the query response and request parameters
2999 * @hba: per-adapter instance
3000 * @request: address of the request pointer to be initialized
3001 * @response: address of the response pointer to be initialized
3002 * @opcode: operation to perform
3003 * @idn: flag idn to access
3004 * @index: LU number to access
3005 * @selector: query/flag/descriptor further identification
3006 */
3007static inline void ufshcd_init_query(struct ufs_hba *hba,
3008 struct ufs_query_req **request, struct ufs_query_res **response,
3009 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3010{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003011 int idn_t = (int)idn;
3012
3013 ufsdbg_error_inject_dispatcher(hba,
3014 ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
3015 idn = idn_t;
3016
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003017 *request = &hba->dev_cmd.query.request;
3018 *response = &hba->dev_cmd.query.response;
3019 memset(*request, 0, sizeof(struct ufs_query_req));
3020 memset(*response, 0, sizeof(struct ufs_query_res));
3021 (*request)->upiu_req.opcode = opcode;
3022 (*request)->upiu_req.idn = idn;
3023 (*request)->upiu_req.index = index;
3024 (*request)->upiu_req.selector = selector;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003025
3026 ufshcd_update_query_stats(hba, opcode, idn);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003027}
3028
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003029static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3030 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
3031{
3032 int ret;
3033 int retries;
3034
3035 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3036 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
3037 if (ret)
3038 dev_dbg(hba->dev,
3039 "%s: failed with error %d, retries %d\n",
3040 __func__, ret, retries);
3041 else
3042 break;
3043 }
3044
3045 if (ret)
3046 dev_err(hba->dev,
 3047 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3048 __func__, opcode, idn, ret, retries);
3049 return ret;
3050}
3051
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003052/**
Dolev Raviv68078d52013-07-30 00:35:58 +05303053 * ufshcd_query_flag() - API function for sending flag query requests
 3054 * @hba: per-adapter instance
 3055 * @opcode: flag query to perform
 3056 * @idn: flag idn to access
 3057 * @flag_res: the flag value after the query request completes
3058 *
3059 * Returns 0 for success, non-zero in case of failure
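 *
 * For example, a caller can poll the fDeviceInit flag with:
 *   err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *                           QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);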
3060 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003061int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05303062 enum flag_idn idn, bool *flag_res)
3063{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003064 struct ufs_query_req *request = NULL;
3065 struct ufs_query_res *response = NULL;
3066 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02003067 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05303068
3069 BUG_ON(!hba);
3070
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003071 ufshcd_hold_all(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05303072 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003073 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3074 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05303075
3076 switch (opcode) {
3077 case UPIU_QUERY_OPCODE_SET_FLAG:
3078 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3079 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3080 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3081 break;
3082 case UPIU_QUERY_OPCODE_READ_FLAG:
3083 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3084 if (!flag_res) {
3085 /* No dummy reads */
3086 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3087 __func__);
3088 err = -EINVAL;
3089 goto out_unlock;
3090 }
3091 break;
3092 default:
3093 dev_err(hba->dev,
3094 "%s: Expected query flag opcode but got = %d\n",
3095 __func__, opcode);
3096 err = -EINVAL;
3097 goto out_unlock;
3098 }
Dolev Raviv68078d52013-07-30 00:35:58 +05303099
Yaniv Gardie5ad4062016-02-01 15:02:41 +02003100 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05303101
3102 if (err) {
3103 dev_err(hba->dev,
3104 "%s: Sending flag query for idn %d failed, err = %d\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003105 __func__, request->upiu_req.idn, err);
Dolev Raviv68078d52013-07-30 00:35:58 +05303106 goto out_unlock;
3107 }
3108
3109 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303110 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05303111 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3112
3113out_unlock:
3114 mutex_unlock(&hba->dev_cmd.lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003115 ufshcd_release_all(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05303116 return err;
3117}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003118EXPORT_SYMBOL(ufshcd_query_flag);
Dolev Raviv68078d52013-07-30 00:35:58 +05303119
3120/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303121 * ufshcd_query_attr - API function for sending attribute requests
 3122 * @hba: per-adapter instance
 3123 * @opcode: attribute opcode
 3124 * @idn: attribute idn to access
 3125 * @index: index field
 3126 * @selector: selector field
 3127 * @attr_val: the attribute value after the query request completes
3128 *
3129 * Returns 0 for success, non-zero in case of failure
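 *
 * For example, a caller might read the active ICC level attribute with:
 *   err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *                           QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);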
3130*/
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003131int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303132 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3133{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003134 struct ufs_query_req *request = NULL;
3135 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303136 int err;
3137
3138 BUG_ON(!hba);
3139
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003140 ufshcd_hold_all(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303141 if (!attr_val) {
3142 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3143 __func__, opcode);
3144 err = -EINVAL;
3145 goto out;
3146 }
3147
3148 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003149 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3150 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303151
3152 switch (opcode) {
3153 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3154 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303155 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303156 break;
3157 case UPIU_QUERY_OPCODE_READ_ATTR:
3158 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3159 break;
3160 default:
3161 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3162 __func__, opcode);
3163 err = -EINVAL;
3164 goto out_unlock;
3165 }
3166
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003167 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303168
3169 if (err) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003170 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3171 __func__, opcode,
3172 request->upiu_req.idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303173 goto out_unlock;
3174 }
3175
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303176 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303177
3178out_unlock:
3179 mutex_unlock(&hba->dev_cmd.lock);
3180out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003181 ufshcd_release_all(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303182 return err;
3183}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003184EXPORT_SYMBOL(ufshcd_query_attr);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303185
3186/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003187 * ufshcd_query_attr_retry() - API function for sending query
3188 * attribute with retries
3189 * @hba: per-adapter instance
3190 * @opcode: attribute opcode
3191 * @idn: attribute idn to access
3192 * @index: index field
3193 * @selector: selector field
3194 * @attr_val: the attribute value after the query request
3195 * completes
3196 *
3197 * Returns 0 for success, non-zero in case of failure
3198*/
3199static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3200 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3201 u32 *attr_val)
3202{
3203 int ret = 0;
3204 u32 retries;
3205
3206 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3207 ret = ufshcd_query_attr(hba, opcode, idn, index,
3208 selector, attr_val);
3209 if (ret)
3210 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3211 __func__, ret, retries);
3212 else
3213 break;
3214 }
3215
3216 if (ret)
3217 dev_err(hba->dev,
 3218 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003219 __func__, idn, ret, retries);
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003220 return ret;
3221}
3222
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003223static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003224 enum query_opcode opcode, enum desc_idn idn, u8 index,
3225 u8 selector, u8 *desc_buf, int *buf_len)
3226{
3227 struct ufs_query_req *request = NULL;
3228 struct ufs_query_res *response = NULL;
3229 int err;
3230
3231 BUG_ON(!hba);
3232
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003233 ufshcd_hold_all(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003234 if (!desc_buf) {
3235 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3236 __func__, opcode);
3237 err = -EINVAL;
3238 goto out;
3239 }
3240
3241 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3242 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3243 __func__, *buf_len);
3244 err = -EINVAL;
3245 goto out;
3246 }
3247
3248 mutex_lock(&hba->dev_cmd.lock);
3249 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3250 selector);
3251 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003252 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003253
3254 switch (opcode) {
3255 case UPIU_QUERY_OPCODE_WRITE_DESC:
3256 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3257 break;
3258 case UPIU_QUERY_OPCODE_READ_DESC:
3259 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3260 break;
3261 default:
3262 dev_err(hba->dev,
3263 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3264 __func__, opcode);
3265 err = -EINVAL;
3266 goto out_unlock;
3267 }
3268
3269 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3270
3271 if (err) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003272 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3273 __func__, opcode,
3274 request->upiu_req.idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003275 goto out_unlock;
3276 }
3277
3278 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003279 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003280
3281out_unlock:
3282 mutex_unlock(&hba->dev_cmd.lock);
3283out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003284 ufshcd_release_all(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003285 return err;
3286}
3287
3288/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003289 * ufshcd_query_descriptor - API function for sending descriptor requests
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003290 * @hba: per-adapter instance
 3291 * @opcode: descriptor query opcode
 3292 * @idn: descriptor idn to access
 3293 * @index: index field
 3294 * @selector: selector field
 3295 * @desc_buf: the buffer that contains the descriptor
 3296 * @buf_len: length parameter passed to the device
3297 *
3298 * Returns 0 for success, non-zero in case of failure.
3299 * The buf_len parameter will contain, on return, the length parameter
3300 * received on the response.
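 *
 * For example, ufshcd_read_desc_param() below reads a whole descriptor with:
 *   ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *                           desc_id, desc_index, 0, desc_buf, &buff_len);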
3301 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003302int ufshcd_query_descriptor(struct ufs_hba *hba,
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003303 enum query_opcode opcode, enum desc_idn idn, u8 index,
3304 u8 selector, u8 *desc_buf, int *buf_len)
3305{
3306 int err;
3307 int retries;
3308
3309 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3310 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3311 selector, desc_buf, buf_len);
3312 if (!err || err == -EINVAL)
3313 break;
3314 }
3315
3316 return err;
3317}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003318EXPORT_SYMBOL(ufshcd_query_descriptor);
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003319
3320/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003321 * ufshcd_read_desc_param - read the specified descriptor parameter
3322 * @hba: Pointer to adapter instance
3323 * @desc_id: descriptor idn value
3324 * @desc_index: descriptor index
3325 * @param_offset: offset of the parameter to read
3326 * @param_read_buf: pointer to buffer where parameter would be read
3327 * @param_size: sizeof(param_read_buf)
3328 *
3329 * Return 0 in case of success, non-zero otherwise
3330 */
3331static int ufshcd_read_desc_param(struct ufs_hba *hba,
3332 enum desc_idn desc_id,
3333 int desc_index,
3334 u32 param_offset,
3335 u8 *param_read_buf,
3336 u32 param_size)
3337{
3338 int ret;
3339 u8 *desc_buf;
3340 u32 buff_len;
3341 bool is_kmalloc = true;
3342
3343 /* safety checks */
3344 if (desc_id >= QUERY_DESC_IDN_MAX)
3345 return -EINVAL;
3346
3347 buff_len = ufs_query_desc_max_size[desc_id];
3348 if ((param_offset + param_size) > buff_len)
3349 return -EINVAL;
3350
3351 if (!param_offset && (param_size == buff_len)) {
3352 /* memory space already available to hold full descriptor */
3353 desc_buf = param_read_buf;
3354 is_kmalloc = false;
3355 } else {
3356 /* allocate memory to hold full descriptor */
3357 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3358 if (!desc_buf)
3359 return -ENOMEM;
3360 }
3361
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003362 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
3363 desc_id, desc_index, 0, desc_buf,
3364 &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003365
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003366 if (ret) {
3367 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3368 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003369
3370 goto out;
3371 }
3372
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003373 /* Sanity check */
3374 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3375 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3376 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3377 ret = -EINVAL;
3378 goto out;
3379 }
3380
3381 /*
3382 * While reading variable size descriptors (like string descriptor),
3383 * some UFS devices may report the "LENGTH" (field in "Transaction
3384 * Specific fields" of Query Response UPIU) same as what was requested
3385 * in Query Request UPIU instead of reporting the actual size of the
3386 * variable size descriptor.
 3387 * It is safe to ignore the "LENGTH" field for variable size
 3388 * descriptors, as we can always derive the length of the descriptor
 3389 * from the descriptor header fields. Hence, impose the length
 3390 * match check only for fixed size descriptors (for which we always
 3391 * request the correct size as part of the Query Request UPIU).
3392 */
3393 if ((desc_id != QUERY_DESC_IDN_STRING) &&
3394 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3395 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3396 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3397 ret = -EINVAL;
3398 goto out;
3399 }
3400
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003401 if (is_kmalloc)
3402 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3403out:
3404 if (is_kmalloc)
3405 kfree(desc_buf);
3406 return ret;
3407}
3408
3409static inline int ufshcd_read_desc(struct ufs_hba *hba,
3410 enum desc_idn desc_id,
3411 int desc_index,
3412 u8 *buf,
3413 u32 size)
3414{
3415 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3416}
3417
3418static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3419 u8 *buf,
3420 u32 size)
3421{
3422 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3423}
3424
Yaniv Gardib573d482016-03-10 17:37:09 +02003425int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3426{
3427 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3428}
Yaniv Gardib573d482016-03-10 17:37:09 +02003429
3430/**
3431 * ufshcd_read_string_desc - read string descriptor
3432 * @hba: pointer to adapter instance
3433 * @desc_index: descriptor index
3434 * @buf: pointer to buffer where descriptor would be read
3435 * @size: size of buf
3436 * @ascii: if true convert from unicode to ascii characters
3437 *
3438 * Return 0 in case of success, non-zero otherwise
3439 */
3440int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
3441 u32 size, bool ascii)
3442{
3443 int err = 0;
3444
3445 err = ufshcd_read_desc(hba,
3446 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3447
3448 if (err) {
3449 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3450 __func__, QUERY_REQ_RETRIES, err);
3451 goto out;
3452 }
3453
3454 if (ascii) {
3455 int desc_len;
3456 int ascii_len;
3457 int i;
3458 char *buff_ascii;
3459
3460 desc_len = buf[0];
3461 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3462 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3463 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3464 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3465 __func__);
3466 err = -ENOMEM;
3467 goto out;
3468 }
3469
3470 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3471 if (!buff_ascii) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003472 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
3473 __func__, ascii_len);
Yaniv Gardib573d482016-03-10 17:37:09 +02003474 err = -ENOMEM;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003475 goto out_free_buff;
Yaniv Gardib573d482016-03-10 17:37:09 +02003476 }
3477
3478 /*
3479 * the descriptor contains string in UTF16 format
3480 * we need to convert to utf-8 so it can be displayed
3481 */
3482 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3483 desc_len - QUERY_DESC_HDR_SIZE,
3484 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3485
3486 /* replace non-printable or non-ASCII characters with spaces */
3487 for (i = 0; i < ascii_len; i++)
3488 ufshcd_remove_non_printable(&buff_ascii[i]);
3489
3490 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3491 size - QUERY_DESC_HDR_SIZE);
3492 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3493 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003494out_free_buff:
Yaniv Gardib573d482016-03-10 17:37:09 +02003495 kfree(buff_ascii);
3496 }
3497out:
3498 return err;
3499}
Yaniv Gardib573d482016-03-10 17:37:09 +02003500
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003501/**
3502 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3503 * @hba: Pointer to adapter instance
3504 * @lun: lun id
3505 * @param_offset: offset of the parameter to read
3506 * @param_read_buf: pointer to buffer where parameter would be read
3507 * @param_size: sizeof(param_read_buf)
3508 *
3509 * Return 0 in case of success, non-zero otherwise
3510 */
3511static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3512 int lun,
3513 enum unit_desc_param param_offset,
3514 u8 *param_read_buf,
3515 u32 param_size)
3516{
3517 /*
3518 * Unit descriptors are only available for general purpose LUs (LUN id
3519 * from 0 to 7) and RPMB Well known LU.
3520 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003521 if (!ufs_is_valid_unit_desc_lun(lun))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003522 return -EOPNOTSUPP;
3523
3524 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3525 param_offset, param_read_buf, param_size);
3526}
3527
3528/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303529 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3530 * @hba: per adapter instance
3531 *
3532 * 1. Allocate DMA memory for Command Descriptor array
 3533 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3534 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3535 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3536 * (UTMRDL)
3537 * 4. Allocate memory for local reference block(lrb).
3538 *
3539 * Returns 0 for success, non-zero in case of failure
3540 */
3541static int ufshcd_memory_alloc(struct ufs_hba *hba)
3542{
3543 size_t utmrdl_size, utrdl_size, ucdl_size;
3544
3545 /* Allocate memory for UTP command descriptors */
3546 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003547 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3548 ucdl_size,
3549 &hba->ucdl_dma_addr,
3550 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303551
3552 /*
3553 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
 3554 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
 3555 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
 3556 * be aligned to 128 bytes as well.
3557 */
3558 if (!hba->ucdl_base_addr ||
3559 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303560 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303561 "Command Descriptor Memory allocation failed\n");
3562 goto out;
3563 }
3564
3565 /*
3566 * Allocate memory for UTP Transfer descriptors
3567 * UFSHCI requires 1024 byte alignment of UTRD
3568 */
3569 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09003570 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3571 utrdl_size,
3572 &hba->utrdl_dma_addr,
3573 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303574 if (!hba->utrdl_base_addr ||
3575 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303576 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303577 "Transfer Descriptor Memory allocation failed\n");
3578 goto out;
3579 }
3580
3581 /*
3582 * Allocate memory for UTP Task Management descriptors
3583 * UFSHCI requires 1024 byte alignment of UTMRD
3584 */
3585 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09003586 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3587 utmrdl_size,
3588 &hba->utmrdl_dma_addr,
3589 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303590 if (!hba->utmrdl_base_addr ||
3591 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303592 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303593 "Task Management Descriptor Memory allocation failed\n");
3594 goto out;
3595 }
3596
3597 /* Allocate memory for local reference block */
Seungwon Jeon2953f852013-06-27 13:31:54 +09003598 hba->lrb = devm_kzalloc(hba->dev,
3599 hba->nutrs * sizeof(struct ufshcd_lrb),
3600 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303601 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05303602 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303603 goto out;
3604 }
3605 return 0;
3606out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303607 return -ENOMEM;
3608}
3609
3610/**
3611 * ufshcd_host_memory_configure - configure local reference block with
3612 * memory offsets
3613 * @hba: per adapter instance
3614 *
3615 * Configure Host memory space
3616 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3617 * address.
3618 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3619 * and PRDT offset.
3620 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3621 * into local reference block.
3622 */
3623static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3624{
3625 struct utp_transfer_cmd_desc *cmd_descp;
3626 struct utp_transfer_req_desc *utrdlp;
3627 dma_addr_t cmd_desc_dma_addr;
3628 dma_addr_t cmd_desc_element_addr;
3629 u16 response_offset;
3630 u16 prdt_offset;
3631 int cmd_desc_size;
3632 int i;
3633
3634 utrdlp = hba->utrdl_base_addr;
3635 cmd_descp = hba->ucdl_base_addr;
3636
3637 response_offset =
3638 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3639 prdt_offset =
3640 offsetof(struct utp_transfer_cmd_desc, prd_table);
3641
3642 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3643 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3644
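 /*
  * For each transfer request slot, point the UTRD at its command
  * descriptor and cache the virtual and DMA addresses of the request
  * UPIU, response UPIU and PRDT for later use.
  */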
3645 for (i = 0; i < hba->nutrs; i++) {
3646 /* Configure UTRD with command descriptor base address */
3647 cmd_desc_element_addr =
3648 (cmd_desc_dma_addr + (cmd_desc_size * i));
3649 utrdlp[i].command_desc_base_addr_lo =
3650 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3651 utrdlp[i].command_desc_base_addr_hi =
3652 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3653
3654 /* Response upiu and prdt offset should be in double words */
3655 utrdlp[i].response_upiu_offset =
3656 cpu_to_le16((response_offset >> 2));
3657 utrdlp[i].prd_table_offset =
3658 cpu_to_le16((prdt_offset >> 2));
3659 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05303660 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303661
3662 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003663 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3664 (i * sizeof(struct utp_transfer_req_desc));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303665 hba->lrb[i].ucd_req_ptr =
3666 (struct utp_upiu_req *)(cmd_descp + i);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003667 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303668 hba->lrb[i].ucd_rsp_ptr =
3669 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003670 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3671 response_offset;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303672 hba->lrb[i].ucd_prdt_ptr =
3673 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003674 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3675 prdt_offset;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303676 }
3677}
3678
3679/**
3680 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3681 * @hba: per adapter instance
3682 *
3683 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3684 * in order to initialize the Unipro link startup procedure.
3685 * Once the Unipro links are up, the device connected to the controller
3686 * is detected.
3687 *
3688 * Returns 0 on success, non-zero value on failure
3689 */
3690static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3691{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303692 struct uic_command uic_cmd = {0};
3693 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303694
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303695 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3696
3697 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3698 if (ret)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003699 dev_dbg(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05303700 "dme-link-startup: error code %d\n", ret);
3701 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303702}
3703
Yaniv Gardicad2e032015-03-31 17:37:14 +03003704static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3705{
3706 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3707 unsigned long min_sleep_time_us;
3708
3709 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3710 return;
3711
3712 /*
3713 * last_dme_cmd_tstamp will be 0 only for 1st call to
3714 * this function
3715 */
3716 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3717 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3718 } else {
3719 unsigned long delta =
3720 (unsigned long) ktime_to_us(
3721 ktime_sub(ktime_get(),
3722 hba->last_dme_cmd_tstamp));
3723
3724 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3725 min_sleep_time_us =
3726 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3727 else
3728 return; /* no more delay required */
3729 }
3730
3731 /* allow sleep for extra 50us if needed */
3732 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3733}
3734
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003735static inline void ufshcd_save_tstamp_of_last_dme_cmd(
3736 struct ufs_hba *hba)
3737{
3738 if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
3739 hba->last_dme_cmd_tstamp = ktime_get();
3740}
3741
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303742/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303743 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3744 * @hba: per adapter instance
3745 * @attr_sel: uic command argument1
3746 * @attr_set: attribute set type as uic command argument2
3747 * @mib_val: setting value as uic command argument3
3748 * @peer: indicate whether peer or local
3749 *
3750 * Returns 0 on success, non-zero value on failure
3751 */
3752int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3753 u8 attr_set, u32 mib_val, u8 peer)
3754{
3755 struct uic_command uic_cmd = {0};
3756 static const char *const action[] = {
3757 "dme-set",
3758 "dme-peer-set"
3759 };
3760 const char *set = action[!!peer];
3761 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003762 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303763
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003764 ufsdbg_error_inject_dispatcher(hba,
3765 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
3766
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303767 uic_cmd.command = peer ?
3768 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3769 uic_cmd.argument1 = attr_sel;
3770 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3771 uic_cmd.argument3 = mib_val;
3772
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003773 do {
3774 /* for peer attributes we retry upon failure */
3775 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3776 if (ret)
3777 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3778 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3779 } while (ret && peer && --retries);
3780
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003781 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003782 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003783 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3784 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303785
3786 return ret;
3787}
3788EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3789
3790/**
3791 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3792 * @hba: per adapter instance
3793 * @attr_sel: uic command argument1
3794 * @mib_val: the value of the attribute as returned by the UIC command
3795 * @peer: indicate whether peer or local
3796 *
3797 * Returns 0 on success, non-zero value on failure
3798 */
3799int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3800 u32 *mib_val, u8 peer)
3801{
3802 struct uic_command uic_cmd = {0};
3803 static const char *const action[] = {
3804 "dme-get",
3805 "dme-peer-get"
3806 };
3807 const char *get = action[!!peer];
3808 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003809 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003810 struct ufs_pa_layer_attr orig_pwr_info;
3811 struct ufs_pa_layer_attr temp_pwr_info;
3812 bool pwr_mode_change = false;
3813
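 /*
  * With UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE, peer DME attributes are
  * accessed while the link is in an AUTO power mode: temporarily switch
  * FAST/SLOW to FASTAUTO/SLOWAUTO for the access and restore the original
  * power mode afterwards.
  */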
3814 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3815 orig_pwr_info = hba->pwr_info;
3816 temp_pwr_info = orig_pwr_info;
3817
3818 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3819 orig_pwr_info.pwr_rx == FAST_MODE) {
3820 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3821 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3822 pwr_mode_change = true;
3823 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3824 orig_pwr_info.pwr_rx == SLOW_MODE) {
3825 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3826 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3827 pwr_mode_change = true;
3828 }
3829 if (pwr_mode_change) {
3830 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3831 if (ret)
3832 goto out;
3833 }
3834 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303835
3836 uic_cmd.command = peer ?
3837 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003838
3839 ufsdbg_error_inject_dispatcher(hba,
3840 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
3841
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303842 uic_cmd.argument1 = attr_sel;
3843
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003844 do {
3845 /* for peer attributes we retry upon failure */
3846 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3847 if (ret)
3848 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3849 get, UIC_GET_ATTR_ID(attr_sel), ret);
3850 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303851
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003852 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003853 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003854 get, UIC_GET_ATTR_ID(attr_sel),
3855 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02003856
3857 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303858 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03003859
3860 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3861 && pwr_mode_change)
3862 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05303863out:
3864 return ret;
3865}
3866EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3867
3868/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003869 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3870 * state) and waits for it to take effect.
3871 *
3872 * @hba: per adapter instance
3873 * @cmd: UIC command to execute
3874 *
3875 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 3876 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 3877 * and device UniPro links, and hence their final completion is indicated by
3878 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3879 * addition to normal UIC command completion Status (UCCS). This function only
3880 * returns after the relevant status bits indicate the completion.
3881 *
3882 * Returns 0 on success, non-zero value on failure
3883 */
3884static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3885{
3886 struct completion uic_async_done;
3887 unsigned long flags;
3888 u8 status;
3889 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003890 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003891
3892 mutex_lock(&hba->uic_cmd_mutex);
3893 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03003894 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003895
3896 spin_lock_irqsave(hba->host->host_lock, flags);
3897 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003898 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3899 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3900 /*
3901 * Make sure UIC command completion interrupt is disabled before
3902 * issuing UIC command.
3903 */
3904 wmb();
3905 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003906 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003907 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3908 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003909 if (ret) {
3910 dev_err(hba->dev,
3911 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3912 cmd->command, cmd->argument3, ret);
3913 goto out;
3914 }
3915
3916 if (!wait_for_completion_timeout(hba->uic_async_done,
3917 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3918 dev_err(hba->dev,
3919 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3920 cmd->command, cmd->argument3);
3921 ret = -ETIMEDOUT;
3922 goto out;
3923 }
3924
3925 status = ufshcd_get_upmcrs(hba);
3926 if (status != PWR_LOCAL) {
3927 dev_err(hba->dev,
Kiwoong Kim73615422016-09-08 16:50:02 +09003928 "pwr ctrl cmd 0x%x failed, host upmcrs: 0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003929 cmd->command, status);
3930 ret = (status != PWR_OK) ? status : -1;
3931 }
3932out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003933 if (ret)
3934 ufsdbg_set_err_state(hba);
3935
3936 ufshcd_save_tstamp_of_last_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003937 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003938 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003939 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02003940 if (reenable_intr)
3941 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03003942 spin_unlock_irqrestore(hba->host->host_lock, flags);
3943 mutex_unlock(&hba->uic_cmd_mutex);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003944 return ret;
3945}
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003946
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003947int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
3948{
3949 unsigned long flags;
3950 int ret = 0;
3951 u32 tm_doorbell;
3952 u32 tr_doorbell;
3953 bool timeout = false, do_last_check = false;
3954 ktime_t start;
3955
3956 ufshcd_hold_all(hba);
3957 spin_lock_irqsave(hba->host->host_lock, flags);
3958 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
3959 ret = -EBUSY;
3960 goto out;
3961 }
3962
3963 /*
3964 * Wait for all the outstanding tasks/transfer requests.
3965 * Verify by checking the doorbell registers are clear.
3966 */
3967 start = ktime_get();
3968 do {
3969 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
3970 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3971 if (!tm_doorbell && !tr_doorbell) {
3972 timeout = false;
3973 break;
3974 } else if (do_last_check) {
3975 break;
3976 }
3977
3978 spin_unlock_irqrestore(hba->host->host_lock, flags);
3979 schedule();
3980 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
3981 wait_timeout_us) {
3982 timeout = true;
3983 /*
3984 * We might have scheduled out for long time so make
3985 * sure to check if doorbells are cleared by this time
3986 * or not.
3987 */
3988 do_last_check = true;
3989 }
3990 spin_lock_irqsave(hba->host->host_lock, flags);
3991 } while (tm_doorbell || tr_doorbell);
3992
3993 if (timeout) {
3994 dev_err(hba->dev,
3995 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
3996 __func__, tm_doorbell, tr_doorbell);
3997 ret = -EBUSY;
3998 }
3999out:
4000 spin_unlock_irqrestore(hba->host->host_lock, flags);
4001 ufshcd_release_all(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004002 return ret;
4003}
4004
4005/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304006 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4007 * using DME_SET primitives.
4008 * @hba: per adapter instance
 4009 * @mode: power mode value
4010 *
4011 * Returns 0 on success, non-zero value on failure
4012 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05304013static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304014{
4015 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004016 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304017
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03004018 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4019 ret = ufshcd_dme_set(hba,
4020 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4021 if (ret) {
4022 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4023 __func__, ret);
4024 goto out;
4025 }
4026 }
4027
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304028 uic_cmd.command = UIC_CMD_DME_SET;
4029 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4030 uic_cmd.argument3 = mode;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004031 ufshcd_hold_all(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004032 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004033 ufshcd_release_all(hba);
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03004034out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004035 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004036}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304037
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004038static int ufshcd_link_recovery(struct ufs_hba *hba)
4039{
4040 int ret;
4041 unsigned long flags;
4042
4043 spin_lock_irqsave(hba->host->host_lock, flags);
4044 hba->ufshcd_state = UFSHCD_STATE_RESET;
4045 ufshcd_set_eh_in_progress(hba);
4046 spin_unlock_irqrestore(hba->host->host_lock, flags);
4047
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004048 ret = ufshcd_vops_full_reset(hba);
4049 if (ret)
4050 dev_warn(hba->dev,
4051 "full reset returned %d, trying to recover the link\n",
4052 ret);
4053
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004054 ret = ufshcd_host_reset_and_restore(hba);
4055
4056 spin_lock_irqsave(hba->host->host_lock, flags);
4057 if (ret)
4058 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4059 ufshcd_clear_eh_in_progress(hba);
4060 spin_unlock_irqrestore(hba->host->host_lock, flags);
4061
4062 if (ret)
4063 dev_err(hba->dev, "%s: link recovery failed, err %d",
4064 __func__, ret);
4065
4066 return ret;
4067}
4068
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004069static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004070{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004071 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004072 struct uic_command uic_cmd = {0};
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004073 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004074
4075 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004076 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004077 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4078 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004079
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004080 if (ret) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004081 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
4082 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004083 __func__, ret);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004084 /*
 4085 * If link recovery fails then return an error so that the caller
 4086 * doesn't retry the hibern8 enter again.
4087 */
4088 if (ufshcd_link_recovery(hba))
4089 ret = -ENOLINK;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004090 } else {
4091 dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
4092 ktime_to_us(ktime_get()));
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004093 }
4094
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004095 return ret;
4096}
4097
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004098int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004099{
4100 int ret = 0, retries;
4101
4102 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
4103 ret = __ufshcd_uic_hibern8_enter(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004104 if (!ret)
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004105 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004106 /* Unable to recover the link, so no point proceeding */
4107 if (ret == -ENOLINK)
4108 BUG();
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004109 }
4110out:
4111 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004112}
4113
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004114int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004115{
4116 struct uic_command uic_cmd = {0};
4117 int ret;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004118 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004119
4120 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4121 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004122 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4123 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4124
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304125 if (ret) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004126 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
4127 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004128 __func__, ret);
4129 ret = ufshcd_link_recovery(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004130 /* Unable to recover the link, so no point proceeding */
4131 if (ret)
4132 BUG();
4133 } else {
4134 dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
4135 ktime_to_us(ktime_get()));
4136 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4137 hba->ufs_stats.hibern8_exit_cnt++;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304138 }
4139
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304140 return ret;
4141}
4142
Yaniv Gardi50646362014-10-23 13:25:13 +03004143 /**
4144 * ufshcd_init_pwr_info - setting the POR (power on reset)
4145 * values in hba power info
4146 * @hba: per-adapter instance
4147 */
4148static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4149{
4150 hba->pwr_info.gear_rx = UFS_PWM_G1;
4151 hba->pwr_info.gear_tx = UFS_PWM_G1;
4152 hba->pwr_info.lane_rx = 1;
4153 hba->pwr_info.lane_tx = 1;
4154 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4155 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4156 hba->pwr_info.hs_rate = 0;
4157}
4158
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304159/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004160 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4161 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304162 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004163static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304164{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004165 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4166
4167 if (hba->max_pwr_info.is_valid)
4168 return 0;
4169
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004170 pwr_info->pwr_tx = FAST_MODE;
4171 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004172 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304173
4174 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004175 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4176 &pwr_info->lane_rx);
4177 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4178 &pwr_info->lane_tx);
4179
4180 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4181 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4182 __func__,
4183 pwr_info->lane_rx,
4184 pwr_info->lane_tx);
4185 return -EINVAL;
4186 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304187
4188 /*
4189 * First, get the maximum gears of HS speed.
4190 * If a zero value, it means there is no HSGEAR capability.
4191 * Then, get the maximum gears of PWM speed.
4192 */
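 /*
  * The gears read below are clamped to any platform-imposed limits
  * (hba->limit_{rx,tx}_{pwm,hs}_gear) when those limits are non-zero.
  */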
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004193 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4194 if (!pwr_info->gear_rx) {
4195 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4196 &pwr_info->gear_rx);
4197 if (!pwr_info->gear_rx) {
4198 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4199 __func__, pwr_info->gear_rx);
4200 return -EINVAL;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004201 } else {
4202 if (hba->limit_rx_pwm_gear > 0 &&
4203 (hba->limit_rx_pwm_gear < pwr_info->gear_rx))
4204 pwr_info->gear_rx = hba->limit_rx_pwm_gear;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004205 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004206 pwr_info->pwr_rx = SLOW_MODE;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004207 } else {
4208 if (hba->limit_rx_hs_gear > 0 &&
4209 (hba->limit_rx_hs_gear < pwr_info->gear_rx))
4210 pwr_info->gear_rx = hba->limit_rx_hs_gear;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304211 }
4212
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004213 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4214 &pwr_info->gear_tx);
4215 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304216 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004217 &pwr_info->gear_tx);
4218 if (!pwr_info->gear_tx) {
4219 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4220 __func__, pwr_info->gear_tx);
4221 return -EINVAL;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004222 } else {
4223 if (hba->limit_tx_pwm_gear > 0 &&
4224 (hba->limit_tx_pwm_gear < pwr_info->gear_tx))
4225 pwr_info->gear_tx = hba->limit_tx_pwm_gear;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004226 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004227 pwr_info->pwr_tx = SLOW_MODE;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004228 } else {
4229 if (hba->limit_tx_hs_gear > 0 &&
4230 (hba->limit_tx_hs_gear < pwr_info->gear_tx))
4231 pwr_info->gear_tx = hba->limit_tx_hs_gear;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004232 }
4233
4234 hba->max_pwr_info.is_valid = true;
4235 return 0;
4236}
4237
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004238int ufshcd_change_power_mode(struct ufs_hba *hba,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004239 struct ufs_pa_layer_attr *pwr_mode)
4240{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004241 int ret = 0;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004242
4243 /* if already configured to the requested pwr_mode */
4244 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4245 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4246 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4247 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4248 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4249 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4250 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4251 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4252 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304253 }
4254
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004255 ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
4256 if (ret)
4257 return ret;
4258
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304259 /*
4260 * Configure attributes for power mode change with below.
4261 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4262 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4263 * - PA_HSSERIES
4264 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004265 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4266 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4267 pwr_mode->lane_rx);
4268 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4269 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304270 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004271 else
4272 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304273
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004274 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4275 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4276 pwr_mode->lane_tx);
4277 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4278 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304279 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004280 else
4281 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304282
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004283 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4284 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4285 pwr_mode->pwr_rx == FAST_MODE ||
4286 pwr_mode->pwr_tx == FAST_MODE)
4287 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4288 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304289
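 /*
  * Program the data link layer timer values (FC0 protection, TC0 replay,
  * AFC0 request) that go with the power mode change, both as
  * PA_PWRMODEUSERDATA attributes and as local DME timer values.
  */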
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004290 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4291 DL_FC0ProtectionTimeOutVal_Default);
4292 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4293 DL_TC0ReplayTimeOutVal_Default);
4294 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4295 DL_AFC0ReqTimeOutVal_Default);
4296
4297 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4298 DL_FC0ProtectionTimeOutVal_Default);
4299 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4300 DL_TC0ReplayTimeOutVal_Default);
4301 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4302 DL_AFC0ReqTimeOutVal_Default);
4303
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004304 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4305 | pwr_mode->pwr_tx);
4306
4307 if (ret) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004308 ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304309 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004310 "%s: power mode change failed %d\n", __func__, ret);
4311 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004312 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4313 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004314
4315 memcpy(&hba->pwr_info, pwr_mode,
4316 sizeof(struct ufs_pa_layer_attr));
4317 }
4318
4319 return ret;
4320}
4321
4322/**
4323 * ufshcd_config_pwr_mode - configure a new power mode
4324 * @hba: per-adapter instance
4325 * @desired_pwr_mode: desired power configuration
4326 */
4327static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4328 struct ufs_pa_layer_attr *desired_pwr_mode)
4329{
4330 struct ufs_pa_layer_attr final_params = { 0 };
4331 int ret;
4332
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004333 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4334 desired_pwr_mode, &final_params);
4335
4336 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004337 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4338
4339 ret = ufshcd_change_power_mode(hba, &final_params);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004340 if (!ret)
4341 ufshcd_print_pwr_info(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304342
4343 return ret;
4344}
4345
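/*
 * Illustrative usage sketch (not part of the original sources): a caller
 * such as a vendor hook or a test path could request a new power mode by
 * filling a struct ufs_pa_layer_attr and handing it to
 * ufshcd_config_pwr_mode(). The gear/lane/rate values below are
 * hypothetical examples, not values mandated by this driver:
 *
 *	struct ufs_pa_layer_attr new_pwr = hba->pwr_info;
 *
 *	new_pwr.gear_rx = new_pwr.gear_tx = 3;
 *	new_pwr.lane_rx = new_pwr.lane_tx = 2;
 *	new_pwr.pwr_rx = new_pwr.pwr_tx = FAST_MODE;
 *	new_pwr.hs_rate = PA_HS_MODE_B;
 *	err = ufshcd_config_pwr_mode(hba, &new_pwr);
 */
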
4346/**
Dolev Raviv68078d52013-07-30 00:35:58 +05304347 * ufshcd_complete_dev_init() - checks device readiness
4348 * @hba: per-adapter instance
4349 *
4350 * Set fDeviceInit flag and poll until device toggles it.
4351 */
4352static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4353{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004354 int i;
4355 int err;
Dolev Raviv68078d52013-07-30 00:35:58 +05304356 bool flag_res = 1;
4357
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004358 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4359 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05304360 if (err) {
4361 dev_err(hba->dev,
4362 "%s setting fDeviceInit flag failed with error %d\n",
4363 __func__, err);
4364 goto out;
4365 }
4366
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004367 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4368 for (i = 0; i < 1000 && !err && flag_res; i++)
4369 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4370 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4371
Dolev Raviv68078d52013-07-30 00:35:58 +05304372 if (err)
4373 dev_err(hba->dev,
4374 "%s reading fDeviceInit flag failed with error %d\n",
4375 __func__, err);
4376 else if (flag_res)
4377 dev_err(hba->dev,
4378 "%s fDeviceInit was not cleared by the device\n",
4379 __func__);
4380
4381out:
4382 return err;
4383}
4384
4385/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304386 * ufshcd_make_hba_operational - Make UFS controller operational
4387 * @hba: per adapter instance
4388 *
4389 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004390 * 1. Enable required interrupts
4391 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02004392 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004393 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304394 *
4395 * Returns 0 on success, non-zero value on failure
4396 */
4397static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4398{
4399 int err = 0;
4400 u32 reg;
4401
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304402 /* Enable required interrupts */
4403 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4404
4405 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03004406 if (ufshcd_is_intr_aggr_allowed(hba))
4407 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4408 else
4409 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304410
4411 /* Configure UTRL and UTMRL base address registers */
4412 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4413 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4414 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4415 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4416 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4417 REG_UTP_TASK_REQ_LIST_BASE_L);
4418 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4419 REG_UTP_TASK_REQ_LIST_BASE_H);
4420
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304421 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02004422 * Make sure base address and interrupt setup are updated before
4423 * enabling the run/stop registers below.
4424 */
4425 wmb();
4426
4427 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304428 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304429 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004430 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304431 if (!(ufshcd_get_lists_status(reg))) {
4432 ufshcd_enable_run_stop_reg(hba);
4433 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304434 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304435 "Host controller not ready to process requests");
4436 err = -EIO;
4437 goto out;
4438 }
4439
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304440out:
4441 return err;
4442}
4443
4444/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02004445 * ufshcd_hba_stop - Send controller to reset state
4446 * @hba: per adapter instance
4447 * @can_sleep: perform sleep or just spin
4448 */
4449static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4450{
4451 int err;
4452
4453 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4454 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4455 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4456 10, 1, can_sleep);
4457 if (err)
4458 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4459}
4460
4461/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304462 * ufshcd_hba_enable - initialize the controller
4463 * @hba: per adapter instance
4464 *
4465 * The controller resets itself and controller firmware initialization
4466 * sequence kicks off. When controller is ready it will set
4467 * the Host Controller Enable bit to 1.
4468 *
4469 * Returns 0 on success, non-zero value on failure
4470 */
4471static int ufshcd_hba_enable(struct ufs_hba *hba)
4472{
4473 int retry;
4474
4475 /*
4476 * msleep of 1 and 5 used in this function might result in msleep(20),
4477 * but it was necessary to send the UFS FPGA to reset mode during
4478 * development and testing of this driver. msleep can be changed to
4479 * mdelay and retry count can be reduced based on the controller.
4480 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02004481 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304482 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02004483 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304484
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004485 /* UniPro link is disabled at this point */
4486 ufshcd_set_link_off(hba);
4487
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004488 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004489
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304490 /* start controller initialization sequence */
4491 ufshcd_hba_start(hba);
4492
4493 /*
4494 * To initialize a UFS host controller HCE bit must be set to 1.
4495 * During initialization the HCE bit value changes from 1->0->1.
4496 * When the host controller completes initialization sequence
4497 * it sets the value of HCE bit to 1. The same HCE bit is read back
4498 * to check if the controller has completed initialization sequence.
4499 * So without this delay the value HCE = 1, set in the previous
4500 * instruction might be read back.
4501 * This delay can be changed based on the controller.
4502 */
4503 msleep(1);
4504
4505 /* wait for the host controller to complete initialization */
4506 retry = 10;
4507 while (ufshcd_is_hba_active(hba)) {
4508 if (retry) {
4509 retry--;
4510 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304511 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304512 "Controller enable failed\n");
4513 return -EIO;
4514 }
4515 msleep(5);
4516 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004517
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004518 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004519 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004520
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004521 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004522
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304523 return 0;
4524}
4525
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004526static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4527{
4528 int tx_lanes, i, err = 0;
4529
4530 if (!peer)
4531 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4532 &tx_lanes);
4533 else
4534 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4535 &tx_lanes);
4536 for (i = 0; i < tx_lanes; i++) {
4537 if (!peer)
4538 err = ufshcd_dme_set(hba,
4539 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4540 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4541 0);
4542 else
4543 err = ufshcd_dme_peer_set(hba,
4544 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4545 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4546 0);
4547 if (err) {
4548 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4549 __func__, peer, i, err);
4550 break;
4551 }
4552 }
4553
4554 return err;
4555}
4556
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004557static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
4558{
4559 return ufshcd_disable_tx_lcc(hba, false);
4560}
4561
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004562static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4563{
4564 return ufshcd_disable_tx_lcc(hba, true);
4565}
4566
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304567/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304568 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304569 * @hba: per adapter instance
4570 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304571 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304572 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304573static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304574{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304575 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004576 int retries = DME_LINKSTARTUP_RETRIES;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004577 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304578
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004579 /*
4580 * If UFS device isn't active then we will have to issue link startup
4581 * 2 times to make sure the device state moves to active.
4582 */
4583 if (!ufshcd_is_ufs_dev_active(hba))
4584 link_startup_again = true;
4585
4586link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004587 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004588 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304589
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004590 ret = ufshcd_dme_link_startup(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004591 if (ret)
4592 ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004593
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004594 /* check if device is detected by inter-connect layer */
4595 if (!ret && !ufshcd_is_device_present(hba)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004596 ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004597 dev_err(hba->dev, "%s: Device not present\n", __func__);
4598 ret = -ENXIO;
4599 goto out;
4600 }
4601
4602 /*
4603 * DME link lost indication is only received when link is up,
4604 * but we can't be sure if the link is up until link startup
4605 * succeeds. So reset the local Uni-Pro and try again.
4606 */
4607 if (ret && ufshcd_hba_enable(hba))
4608 goto out;
4609 } while (ret && retries--);
4610
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304611 if (ret)
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03004612 /* failed to get the link up... retire */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304613 goto out;
4614
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004615 if (link_startup_again) {
4616 link_startup_again = false;
4617 retries = DME_LINKSTARTUP_RETRIES;
4618 goto link_startup;
4619 }
4620
4621 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4622 ufshcd_init_pwr_info(hba);
4623 ufshcd_print_pwr_info(hba);
4624
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03004625 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4626 ret = ufshcd_disable_device_tx_lcc(hba);
4627 if (ret)
4628 goto out;
4629 }
4630
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08004631 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004632 ret = ufshcd_disable_host_tx_lcc(hba);
4633 if (ret)
4634 goto out;
4635 }
4636
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004637 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004638 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4639 if (ret)
4640 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004641
4642 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304643out:
4644 if (ret)
4645 dev_err(hba->dev, "link startup failed %d\n", ret);
4646 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304647}
4648
4649/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304650 * ufshcd_verify_dev_init() - Verify device initialization
4651 * @hba: per-adapter instance
4652 *
4653 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4654 * device Transport Protocol (UTP) layer is ready after a reset.
4655 * If the UTP layer at the device side is not initialized, it may
4656 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4657 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4658 */
4659static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4660{
4661 int err = 0;
4662 int retries;
4663
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004664 ufshcd_hold_all(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304665 mutex_lock(&hba->dev_cmd.lock);
4666 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4667 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4668 NOP_OUT_TIMEOUT);
4669
4670 if (!err || err == -ETIMEDOUT)
4671 break;
4672
4673 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4674 }
4675 mutex_unlock(&hba->dev_cmd.lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004676 ufshcd_release_all(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304677
4678 if (err)
4679 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4680 return err;
4681}
4682
4683/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004684 * ufshcd_set_queue_depth - set lun queue depth
4685 * @sdev: pointer to SCSI device
4686 *
4687 * Read bLUQueueDepth value and activate scsi tagged command
4688 * queueing. For WLUN, queue depth is set to 1. For best-effort
4689 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4690 * value that host can queue.
4691 */
4692static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4693{
4694 int ret = 0;
4695 u8 lun_qdepth;
4696 struct ufs_hba *hba;
4697
4698 hba = shost_priv(sdev->host);
4699
4700 lun_qdepth = hba->nutrs;
4701 ret = ufshcd_read_unit_desc_param(hba,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004702 ufshcd_scsi_to_upiu_lun(sdev->lun),
4703 UNIT_DESC_PARAM_LU_Q_DEPTH,
4704 &lun_qdepth,
4705 sizeof(lun_qdepth));
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004706
4707 /* Some WLUN doesn't support unit descriptor */
4708 if (ret == -EOPNOTSUPP)
4709 lun_qdepth = 1;
4710 else if (!lun_qdepth)
4711 /* eventually, we can figure out the real queue depth */
4712 lun_qdepth = hba->nutrs;
4713 else
4714 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4715
4716 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4717 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004718 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004719}
4720
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004721/*
4722 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4723 * @hba: per-adapter instance
4724 * @lun: UFS device lun id
4725 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4726 *
4727 * Returns 0 in case of success and the b_lu_write_protect status is returned
4728 * in the @b_lu_write_protect parameter.
4729 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4730 * Returns -EINVAL in case of invalid parameters passed to this function.
4731 */
4732static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4733 u8 lun,
4734 u8 *b_lu_write_protect)
4735{
4736 int ret;
4737
4738 if (!b_lu_write_protect)
4739 ret = -EINVAL;
4740 /*
4741 * According to UFS device spec, RPMB LU can't be write
4742 * protected so skip reading bLUWriteProtect parameter for
4743 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4744 */
4745 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4746 ret = -ENOTSUPP;
4747 else
4748 ret = ufshcd_read_unit_desc_param(hba,
4749 lun,
4750 UNIT_DESC_PARAM_LU_WR_PROTECT,
4751 b_lu_write_protect,
4752 sizeof(*b_lu_write_protect));
4753 return ret;
4754}
4755
4756/**
4757 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4758 * status
4759 * @hba: per-adapter instance
4760 * @sdev: pointer to SCSI device
4761 *
4762 */
4763static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4764 struct scsi_device *sdev)
4765{
4766 if (hba->dev_info.f_power_on_wp_en &&
4767 !hba->dev_info.is_lu_power_on_wp) {
4768 u8 b_lu_write_protect;
4769
4770 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4771 &b_lu_write_protect) &&
4772 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4773 hba->dev_info.is_lu_power_on_wp = true;
4774 }
4775}
4776
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004777/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304778 * ufshcd_slave_alloc - handle initial SCSI device configurations
4779 * @sdev: pointer to SCSI device
4780 *
4781 * Returns 0 (success)
4782 */
4783static int ufshcd_slave_alloc(struct scsi_device *sdev)
4784{
4785 struct ufs_hba *hba;
4786
4787 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304788
4789 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4790 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304791
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05304792 /* allow SCSI layer to restart the device in case of errors */
4793 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004794
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03004795 /* REPORT SUPPORTED OPERATION CODES is not supported */
4796 sdev->no_report_opcodes = 1;
4797
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004798 /* WRITE_SAME command is not supported */
4799 sdev->no_write_same = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004800
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004801 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004802
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004803 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4804
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004805 return 0;
4806}
4807
4808/**
4809 * ufshcd_change_queue_depth - change queue depth
4810 * @sdev: pointer to SCSI device
4811 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004812 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004813 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004814 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004815static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03004816{
4817 struct ufs_hba *hba = shost_priv(sdev->host);
4818
4819 if (depth > hba->nutrs)
4820 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004821 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304822}
4823
4824/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004825 * ufshcd_slave_configure - adjust SCSI device configurations
4826 * @sdev: pointer to SCSI device
4827 */
4828static int ufshcd_slave_configure(struct scsi_device *sdev)
4829{
4830 struct request_queue *q = sdev->request_queue;
Subhash Jadavani5ea586f2016-08-17 19:08:09 -07004831 struct ufs_hba *hba = shost_priv(sdev->host);
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004832
4833 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4834 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4835
Subhash Jadavani5ea586f2016-08-17 19:08:09 -07004836 if (hba->scsi_cmd_timeout) {
4837 blk_queue_rq_timeout(q, hba->scsi_cmd_timeout * HZ);
4838 scsi_set_cmd_timeout_override(sdev, hba->scsi_cmd_timeout * HZ);
4839 }
4840
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004841 sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
4842 sdev->use_rpm_auto = 1;
4843
Akinobu Mitaeeda4742014-07-01 23:00:32 +09004844 return 0;
4845}
4846
4847/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304848 * ufshcd_slave_destroy - remove SCSI device configurations
4849 * @sdev: pointer to SCSI device
4850 */
4851static void ufshcd_slave_destroy(struct scsi_device *sdev)
4852{
4853 struct ufs_hba *hba;
4854
4855 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004856 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004857 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4858 unsigned long flags;
4859
4860 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03004861 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03004862 spin_unlock_irqrestore(hba->host->host_lock, flags);
4863 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304864}
4865
4866/**
4867 * ufshcd_task_req_compl - handle task management request completion
4868 * @hba: per adapter instance
4869 * @index: index of the completed request
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304870 * @resp: task management service response
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304871 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304872 * Returns non-zero value on error, zero on success
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304873 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304874static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304875{
4876 struct utp_task_req_desc *task_req_descp;
4877 struct utp_upiu_task_rsp *task_rsp_upiup;
4878 unsigned long flags;
4879 int ocs_value;
4880 int task_result;
4881
4882 spin_lock_irqsave(hba->host->host_lock, flags);
4883
4884 /* Clear completed tasks from outstanding_tasks */
4885 __clear_bit(index, &hba->outstanding_tasks);
4886
4887 task_req_descp = hba->utmrdl_base_addr;
4888 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4889
4890 if (ocs_value == OCS_SUCCESS) {
4891 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4892 task_req_descp[index].task_rsp_upiu;
Kiwoong Kim8794ee02016-09-09 08:22:22 +09004893 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4894 task_result = task_result & MASK_TM_SERVICE_RESP;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304895 if (resp)
4896 *resp = (u8)task_result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304897 } else {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304898 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4899 __func__, ocs_value);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304900 }
4901 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05304902
4903 return ocs_value;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304904}
4905
4906/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304907 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4908 * @lrb: pointer to local reference block of completed command
4909 * @scsi_status: SCSI command status
4910 *
4911 * Returns value base on SCSI command status
4912 */
4913static inline int
4914ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4915{
4916 int result = 0;
4917
4918 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304919 case SAM_STAT_CHECK_CONDITION:
4920 ufshcd_copy_sense_data(lrbp);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304921 case SAM_STAT_GOOD:
4922 result |= DID_OK << 16 |
4923 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304924 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304925 break;
4926 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304927 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304928 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05304929 ufshcd_copy_sense_data(lrbp);
4930 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304931 break;
4932 default:
4933 result |= DID_ERROR << 16;
4934 break;
4935 } /* end of switch */
4936
4937 return result;
4938}
4939
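/*
 * Note on the result word composed above (standard SCSI midlayer layout,
 * stated here for clarity): bits 0-7 hold the SCSI status byte, bits 8-15
 * the message byte and bits 16-23 the host byte. A CHECK CONDITION
 * response therefore ends up as
 *
 *	result = (DID_OK << 16) | (COMMAND_COMPLETE << 8) |
 *		 SAM_STAT_CHECK_CONDITION;
 *
 * which the midlayer later decodes with host_byte()/status_byte().
 */
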
4940/**
4941 * ufshcd_transfer_rsp_status - Get overall status of the response
4942 * @hba: per adapter instance
4943 * @lrb: pointer to local reference block of completed command
4944 *
4945 * Returns result of the command to notify SCSI midlayer
4946 */
4947static inline int
4948ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4949{
4950 int result = 0;
4951 int scsi_status;
4952 int ocs;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004953 bool print_prdt;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304954
4955 /* overall command status of utrd */
4956 ocs = ufshcd_get_tr_ocs(lrbp);
4957
4958 switch (ocs) {
4959 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304960 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004961 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304962 switch (result) {
4963 case UPIU_TRANSACTION_RESPONSE:
4964 /*
4965 * get the response UPIU result to extract
4966 * the SCSI command status
4967 */
4968 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4969
4970 /*
4971 * get the result based on SCSI status response
4972 * to notify the SCSI midlayer of the command status
4973 */
4974 scsi_status = result & MASK_SCSI_STATUS;
4975 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304976
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02004977 /*
4978 * Currently we are only supporting BKOPs exception
4979 * events hence we can ignore BKOPs exception event
4980 * during power management callbacks. BKOPs exception
4981 * event is not expected to be raised in runtime suspend
4982 * callback as it allows the urgent bkops.
4983 * During system suspend, we are anyway forcefully
4984 * disabling the bkops and if urgent bkops is needed
4985 * it will be enabled on system resume. Long term
4986 * solution could be to abort the system suspend if
4987 * UFS device needs urgent BKOPs.
4988 */
4989 if (!hba->pm_op_in_progress &&
4990 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05304991 schedule_work(&hba->eeh_work);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304992 break;
4993 case UPIU_TRANSACTION_REJECT_UPIU:
4994 /* TODO: handle Reject UPIU Response */
4995 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304996 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304997 "Reject UPIU not fully implemented\n");
4998 break;
4999 default:
5000 result = DID_ERROR << 16;
5001 dev_err(hba->dev,
5002 "Unexpected request response code = %x\n",
5003 result);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305004 break;
5005 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305006 break;
5007 case OCS_ABORTED:
5008 result |= DID_ABORT << 16;
5009 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305010 case OCS_INVALID_COMMAND_STATUS:
5011 result |= DID_REQUEUE << 16;
5012 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305013 case OCS_INVALID_CMD_TABLE_ATTR:
5014 case OCS_INVALID_PRDT_ATTR:
5015 case OCS_MISMATCH_DATA_BUF_SIZE:
5016 case OCS_MISMATCH_RESP_UPIU_SIZE:
5017 case OCS_PEER_COMM_FAILURE:
5018 case OCS_FATAL_ERROR:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005019 case OCS_DEVICE_FATAL_ERROR:
5020 case OCS_INVALID_CRYPTO_CONFIG:
5021 case OCS_GENERAL_CRYPTO_ERROR:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305022 default:
5023 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305024 dev_err(hba->dev,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005025 "OCS error from controller = %x for tag %d\n",
5026 ocs, lrbp->task_tag);
5027 ufshcd_print_host_regs(hba);
5028 ufshcd_print_host_state(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305029 break;
5030 } /* end of switch */
5031
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005032 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
5033 print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
5034 ocs == OCS_MISMATCH_DATA_BUF_SIZE);
5035 ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
5036 }
5037
5038 if ((host_byte(result) == DID_ERROR) ||
5039 (host_byte(result) == DID_ABORT))
5040 ufsdbg_set_err_state(hba);
5041
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305042 return result;
5043}
5044
5045/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305046 * ufshcd_uic_cmd_compl - handle completion of uic command
5047 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305048 * @intr_status: interrupt status generated by the controller
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305049 */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305050static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305051{
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305052 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305053 hba->active_uic_cmd->argument2 |=
5054 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05305055 hba->active_uic_cmd->argument3 =
5056 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305057 complete(&hba->active_uic_cmd->done);
5058 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305059
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005060 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
5061 complete(hba->uic_async_done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305062}
5063
5064/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005065 * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
5066 * @hba: per adapter instance
5067 * @result: error result to inform scsi layer about
5068 */
5069void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
5070{
5071 u8 index;
5072 struct ufshcd_lrb *lrbp;
5073 struct scsi_cmnd *cmd;
5074
5075 if (!hba->outstanding_reqs)
5076 return;
5077
5078 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5079 lrbp = &hba->lrb[index];
5080 cmd = lrbp->cmd;
5081 if (cmd) {
5082 ufshcd_cond_add_cmd_trace(hba, index, "failed");
5083 ufshcd_update_error_stats(hba,
5084 UFS_ERR_INT_FATAL_ERRORS);
5085 scsi_dma_unmap(cmd);
5086 cmd->result = result;
5087 /* Clear pending transfer requests */
5088 ufshcd_clear_cmd(hba, index);
5089 ufshcd_outstanding_req_clear(hba, index);
5090 clear_bit_unlock(index, &hba->lrb_in_use);
5091 lrbp->complete_time_stamp = ktime_get();
5092 update_req_stats(hba, lrbp);
5093 /* Mark completed command as NULL in LRB */
5094 lrbp->cmd = NULL;
5095 ufshcd_release_all(hba);
5096 if (cmd->request) {
5097 /*
5098 * As we are accessing the "request" structure,
5099 * this must be called before calling
5100 * ->scsi_done() callback.
5101 */
5102 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5103 true);
5104 ufshcd_vops_crypto_engine_cfg_end(hba,
5105 lrbp, cmd->request);
5106 }
5107 /* Do not touch lrbp after scsi done */
5108 cmd->scsi_done(cmd);
5109 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5110 if (hba->dev_cmd.complete) {
5111 ufshcd_cond_add_cmd_trace(hba, index,
5112 "dev_failed");
5113 ufshcd_outstanding_req_clear(hba, index);
5114 complete(hba->dev_cmd.complete);
5115 }
5116 }
5117 if (ufshcd_is_clkscaling_supported(hba))
5118 hba->clk_scaling.active_reqs--;
5119 }
5120}
5121
5122/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005123 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305124 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005125 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305126 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005127static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5128 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305129{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305130 struct ufshcd_lrb *lrbp;
5131 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305132 int result;
5133 int index;
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07005134 struct request *req;
Dolev Ravive9d501b2014-07-01 12:22:37 +03005135
Dolev Ravive9d501b2014-07-01 12:22:37 +03005136 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5137 lrbp = &hba->lrb[index];
5138 cmd = lrbp->cmd;
5139 if (cmd) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005140 ufshcd_cond_add_cmd_trace(hba, index, "complete");
5141 ufshcd_update_tag_stats_completion(hba, cmd);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005142 result = ufshcd_transfer_rsp_status(hba, lrbp);
5143 scsi_dma_unmap(cmd);
5144 cmd->result = result;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005145 clear_bit_unlock(index, &hba->lrb_in_use);
5146 lrbp->complete_time_stamp = ktime_get();
5147 update_req_stats(hba, lrbp);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005148 /* Mark completed command as NULL in LRB */
5149 lrbp->cmd = NULL;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005150 __ufshcd_release(hba, false);
5151 __ufshcd_hibern8_release(hba, false);
5152 if (cmd->request) {
5153 /*
5154 * As we are accessing the "request" structure,
5155 * this must be called before calling
5156 * ->scsi_done() callback.
5157 */
5158 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5159 false);
5160 ufshcd_vops_crypto_engine_cfg_end(hba,
5161 lrbp, cmd->request);
5162 }
5163
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07005164 req = cmd->request;
5165 if (req) {
5166 /* Update IO svc time latency histogram */
5167 if (req->lat_hist_enabled) {
5168 ktime_t completion;
5169 u_int64_t delta_us;
5170
5171 completion = ktime_get();
5172 delta_us = ktime_us_delta(completion,
5173 req->lat_hist_io_start);
5174 /* rq_data_dir() => true if WRITE */
5175 blk_update_latency_hist(&hba->io_lat_s,
5176 (rq_data_dir(req) == READ),
5177 delta_us);
5178 }
5179 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03005180 /* Do not touch lrbp after scsi done */
5181 cmd->scsi_done(cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005182 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5183 if (hba->dev_cmd.complete) {
5184 ufshcd_cond_add_cmd_trace(hba, index,
5185 "dev_complete");
Dolev Ravive9d501b2014-07-01 12:22:37 +03005186 complete(hba->dev_cmd.complete);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005187 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03005188 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005189 if (ufshcd_is_clkscaling_supported(hba))
5190 hba->clk_scaling.active_reqs--;
Dolev Ravive9d501b2014-07-01 12:22:37 +03005191 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305192
5193 /* clear corresponding bits of completed commands */
5194 hba->outstanding_reqs ^= completed_reqs;
5195
Sahitya Tummala856b3482014-09-25 15:32:34 +03005196 ufshcd_clk_scaling_update_busy(hba);
5197
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305198 /* we might have free'd some tags above */
5199 wake_up(&hba->dev_cmd.tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305200}
5201
5202/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005203 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5204 * @hba: per adapter instance
5205 */
5206static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
5207{
5208 unsigned long completed_reqs;
5209 u32 tr_doorbell;
5210
5211 /* Resetting interrupt aggregation counters first and reading the
5212 * DOOR_BELL afterward allows us to handle all the completed requests.
5213 * In order to prevent starvation of other interrupts, the DB is read once
5214 * after reset. The down side of this solution is the possibility of
5215 * false interrupt if device completes another request after resetting
5216 * aggregation and before reading the DB.
5217 */
5218 if (ufshcd_is_intr_aggr_allowed(hba))
5219 ufshcd_reset_intr_aggr(hba);
5220
5221 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
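	/*
	 * outstanding_reqs has a bit set for every issued request while the
	 * doorbell keeps a bit set only while a request is still pending, so
	 * XOR-ing the two leaves exactly the completed slots, e.g.
	 * outstanding 0b1111 with doorbell 0b0101 gives completed 0b1010.
	 */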
5222 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5223
5224 __ufshcd_transfer_req_compl(hba, completed_reqs);
5225}
5226
5227/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305228 * ufshcd_disable_ee - disable exception event
5229 * @hba: per-adapter instance
5230 * @mask: exception event to disable
5231 *
5232 * Disables exception event in the device so that the EVENT_ALERT
5233 * bit is not set.
5234 *
5235 * Returns zero on success, non-zero error value on failure.
5236 */
5237static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5238{
5239 int err = 0;
5240 u32 val;
5241
5242 if (!(hba->ee_ctrl_mask & mask))
5243 goto out;
5244
5245 val = hba->ee_ctrl_mask & ~mask;
5246 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005247 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305248 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5249 if (!err)
5250 hba->ee_ctrl_mask &= ~mask;
5251out:
5252 return err;
5253}
5254
5255/**
5256 * ufshcd_enable_ee - enable exception event
5257 * @hba: per-adapter instance
5258 * @mask: exception event to enable
5259 *
5260 * Enable corresponding exception event in the device to allow
5261 * device to alert host in critical scenarios.
5262 *
5263 * Returns zero on success, non-zero error value on failure.
5264 */
5265static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5266{
5267 int err = 0;
5268 u32 val;
5269
5270 if (hba->ee_ctrl_mask & mask)
5271 goto out;
5272
5273 val = hba->ee_ctrl_mask | mask;
5274 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005275 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305276 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5277 if (!err)
5278 hba->ee_ctrl_mask |= mask;
5279out:
5280 return err;
5281}
5282
5283/**
5284 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5285 * @hba: per-adapter instance
5286 *
5287 * Allow device to manage background operations on its own. Enabling
5288 * this might lead to inconsistent latencies during normal data transfers
5289 * as the device is allowed to manage its own way of handling background
5290 * operations.
5291 *
5292 * Returns zero on success, non-zero on failure.
5293 */
5294static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5295{
5296 int err = 0;
5297
5298 if (hba->auto_bkops_enabled)
5299 goto out;
5300
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005301 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305302 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5303 if (err) {
5304 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5305 __func__, err);
5306 goto out;
5307 }
5308
5309 hba->auto_bkops_enabled = true;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005310 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305311
5312 /* No need of URGENT_BKOPS exception from the device */
5313 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5314 if (err)
5315 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5316 __func__, err);
5317out:
5318 return err;
5319}
5320
5321/**
5322 * ufshcd_disable_auto_bkops - block device in doing background operations
5323 * @hba: per-adapter instance
5324 *
5325 * Disabling background operations improves command response latency but
5326 * has the drawback of the device moving into a critical state where it is
5327 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5328 * host is idle so that BKOPS are managed effectively without any negative
5329 * impacts.
5330 *
5331 * Returns zero on success, non-zero on failure.
5332 */
5333static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5334{
5335 int err = 0;
5336
5337 if (!hba->auto_bkops_enabled)
5338 goto out;
5339
5340 /*
5341 * If host assisted BKOPs is to be enabled, make sure
5342 * urgent bkops exception is allowed.
5343 */
5344 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5345 if (err) {
5346 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5347 __func__, err);
5348 goto out;
5349 }
5350
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005351 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305352 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5353 if (err) {
5354 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5355 __func__, err);
5356 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5357 goto out;
5358 }
5359
5360 hba->auto_bkops_enabled = false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005361 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305362out:
5363 return err;
5364}
5365
5366/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005367 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305368 * @hba: per adapter instance
5369 *
5370 * After a device reset the device may toggle the BKOPS_EN flag
5371 * to its default value. The s/w tracking variables should be updated
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005372 * as well. This function changes the auto-bkops state based on
5373 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305374 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005375static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305376{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005377 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5378 hba->auto_bkops_enabled = false;
5379 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5380 ufshcd_enable_auto_bkops(hba);
5381 } else {
5382 hba->auto_bkops_enabled = true;
5383 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5384 ufshcd_disable_auto_bkops(hba);
5385 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305386}
5387
5388static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5389{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005390 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305391 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5392}
5393
5394/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005395 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5396 * @hba: per-adapter instance
5397 * @status: bkops_status value
5398 *
5399 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5400 * flag in the device to permit background operations if the device
5401 * bkops_status is greater than or equal to "status" argument passed to
5402 * this function, disable otherwise.
5403 *
5404 * Returns 0 for success, non-zero in case of failure.
5405 *
5406 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5407 * to know whether auto bkops is enabled or disabled after this function
5408 * returns control to it.
5409 */
5410static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5411 enum bkops_status status)
5412{
5413 int err;
5414 u32 curr_status = 0;
5415
5416 err = ufshcd_get_bkops_status(hba, &curr_status);
5417 if (err) {
5418 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5419 __func__, err);
5420 goto out;
5421 } else if (curr_status > BKOPS_STATUS_MAX) {
5422 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5423 __func__, curr_status);
5424 err = -EINVAL;
5425 goto out;
5426 }
5427
5428 if (curr_status >= status)
5429 err = ufshcd_enable_auto_bkops(hba);
5430 else
5431 err = ufshcd_disable_auto_bkops(hba);
5432out:
5433 return err;
5434}
5435
5436/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305437 * ufshcd_urgent_bkops - handle urgent bkops exception event
5438 * @hba: per-adapter instance
5439 *
5440 * Enable fBackgroundOpsEn flag in the device to permit background
5441 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005442 *
5443 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
5444 * and negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305445 */
5446static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5447{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005448 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305449}
5450
5451static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5452{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005453 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305454 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5455}
5456
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005457static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5458{
5459 int err;
5460 u32 curr_status = 0;
5461
5462 if (hba->is_urgent_bkops_lvl_checked)
5463 goto enable_auto_bkops;
5464
5465 err = ufshcd_get_bkops_status(hba, &curr_status);
5466 if (err) {
5467 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5468 __func__, err);
5469 goto out;
5470 }
5471
5472 /*
5473 * We are seeing that some devices are raising the urgent bkops
5474 * exception events even when BKOPS status doesn't indicate performance
5475 * impacted or critical. Handle these devices by determining their urgent
5476 * bkops status at runtime.
5477 */
5478 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5479 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5480 __func__, curr_status);
5481 /* update the current status as the urgent bkops level */
5482 hba->urgent_bkops_lvl = curr_status;
5483 hba->is_urgent_bkops_lvl_checked = true;
5484 }
5485
5486enable_auto_bkops:
5487 err = ufshcd_enable_auto_bkops(hba);
5488out:
5489 if (err < 0)
5490 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5491 __func__, err);
5492}
5493
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305494/**
5495 * ufshcd_exception_event_handler - handle exceptions raised by device
5496 * @work: pointer to work data
5497 *
5498 * Read bExceptionEventStatus attribute from the device and handle the
5499 * exception event accordingly.
5500 */
5501static void ufshcd_exception_event_handler(struct work_struct *work)
5502{
5503 struct ufs_hba *hba;
5504 int err;
5505 u32 status = 0;
5506 hba = container_of(work, struct ufs_hba, eeh_work);
5507
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305508 pm_runtime_get_sync(hba->dev);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005509 ufshcd_scsi_block_requests(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305510 err = ufshcd_get_ee_status(hba, &status);
5511 if (err) {
5512 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5513 __func__, err);
5514 goto out;
5515 }
5516
5517 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02005518
5519 if (status & MASK_EE_URGENT_BKOPS)
5520 ufshcd_bkops_exception_event_handler(hba);
5521
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305522out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005523 ufshcd_scsi_unblock_requests(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305524 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305525 return;
5526}
5527
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005528/* Complete requests that have door-bell cleared */
5529static void ufshcd_complete_requests(struct ufs_hba *hba)
5530{
5531 ufshcd_transfer_req_compl(hba);
5532 ufshcd_tmc_handler(hba);
5533}
5534
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305535/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02005536 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5537 * required to recover from the DL NAC errors or not.
5538 * @hba: per-adapter instance
5539 *
5540 * Returns true if error handling is required, false otherwise
5541 */
5542static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5543{
5544 unsigned long flags;
5545 bool err_handling = true;
5546
5547 spin_lock_irqsave(hba->host->host_lock, flags);
5548 /*
5549 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
5550 * device fatal error and/or DL NAC & REPLAY timeout errors.
5551 */
5552 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5553 goto out;
5554
5555 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5556 ((hba->saved_err & UIC_ERROR) &&
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005557 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
5558 /*
5559 * we have to do error recovery but at least silence the error
5560 * logs.
5561 */
5562 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005563 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005564 }
Yaniv Gardi583fa622016-03-10 17:37:13 +02005565
5566 if ((hba->saved_err & UIC_ERROR) &&
5567 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5568 int err;
5569 /*
5570 * wait for 50ms to see if we can get any other errors or not.
5571 */
5572 spin_unlock_irqrestore(hba->host->host_lock, flags);
5573 msleep(50);
5574 spin_lock_irqsave(hba->host->host_lock, flags);
5575
5576 /*
5577 * now check whether we have received any other severe errors besides
5578 * the DL NAC error.
5579 */
5580 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5581 ((hba->saved_err & UIC_ERROR) &&
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005582 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
5583 if (((hba->saved_err & INT_FATAL_ERRORS) ==
5584 DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
5585 ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
5586 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005587 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005588 }
Yaniv Gardi583fa622016-03-10 17:37:13 +02005589
5590 /*
5591 * As DL NAC is the only error received so far, send out NOP
5592 * command to confirm if link is still active or not.
5593 * - If we don't get any response then do error recovery.
5594 * - If we get response then clear the DL NAC error bit.
5595 */
5596
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005597 /* silence the error logs from NOP command */
5598 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005599 spin_unlock_irqrestore(hba->host->host_lock, flags);
5600 err = ufshcd_verify_dev_init(hba);
5601 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005602 hba->silence_err_logs = false;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005603
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005604 if (err) {
5605 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005606 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005607 }
Yaniv Gardi583fa622016-03-10 17:37:13 +02005608
5609 /* Link seems to be alive hence ignore the DL NAC errors */
5610 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5611 hba->saved_err &= ~UIC_ERROR;
5612 /* clear NAC error */
5613 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5614 if (!hba->saved_uic_err) {
5615 err_handling = false;
5616 goto out;
5617 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005618 /*
5619 * there seem to be some errors other than NAC, so do error
5620 * recovery
5621 */
5622 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02005623 }
5624out:
5625 spin_unlock_irqrestore(hba->host->host_lock, flags);
5626 return err_handling;
5627}
5628
5629/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305630 * ufshcd_err_handler - handle UFS errors that require s/w attention
5631 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305632 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305633static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305634{
5635 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305636 unsigned long flags;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005637 bool err_xfer = false, err_tm = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305638 int err = 0;
5639 int tag;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005640 bool needs_reset = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305641
5642 hba = container_of(work, struct ufs_hba, eh_work);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305643
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005644 ufsdbg_set_err_state(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305645 pm_runtime_get_sync(hba->dev);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005646 ufshcd_hold_all(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305647
5648 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005649 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305650 goto out;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305651
5652 hba->ufshcd_state = UFSHCD_STATE_RESET;
5653 ufshcd_set_eh_in_progress(hba);
5654
5655 /* Complete requests that have door-bell cleared by h/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005656 ufshcd_complete_requests(hba);
Yaniv Gardi583fa622016-03-10 17:37:13 +02005657
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08005658 if (hba->dev_info.quirks &
5659 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
Yaniv Gardi583fa622016-03-10 17:37:13 +02005660 bool ret;
5661
5662 spin_unlock_irqrestore(hba->host->host_lock, flags);
5663 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5664 ret = ufshcd_quirk_dl_nac_errors(hba);
5665 spin_lock_irqsave(hba->host->host_lock, flags);
5666 if (!ret)
5667 goto skip_err_handling;
5668 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005669
5670 /*
5671 * Dump controller state before resetting. Transfer requests state
5672 * will be dumped as part of the request completion.
5673 */
5674 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5675 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
5676 __func__, hba->saved_err, hba->saved_uic_err);
5677 if (!hba->silence_err_logs) {
5678 ufshcd_print_host_regs(hba);
5679 ufshcd_print_host_state(hba);
5680 ufshcd_print_pwr_info(hba);
5681 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5682 }
5683 }
5684
5685 if ((hba->saved_err & INT_FATAL_ERRORS) || hba->saved_ce_err ||
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005686 ((hba->saved_err & UIC_ERROR) &&
5687 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5688 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5689 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5690 needs_reset = true;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305691
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005692 /*
5693 * if host reset is required then skip clearing the pending
5694 * transfers forcefully because they will automatically get
5695 * cleared after link startup.
5696 */
5697 if (needs_reset)
5698 goto skip_pending_xfer_clear;
5699
5700 /* release lock as clear command might sleep */
5701 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305702 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005703 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5704 if (ufshcd_clear_cmd(hba, tag)) {
5705 err_xfer = true;
5706 goto lock_skip_pending_xfer_clear;
5707 }
5708 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305709
5710 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005711 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5712 if (ufshcd_clear_tm_cmd(hba, tag)) {
5713 err_tm = true;
5714 goto lock_skip_pending_xfer_clear;
5715 }
5716 }
5717
5718lock_skip_pending_xfer_clear:
5719 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305720
5721 /* Complete the requests that are cleared by s/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005722 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305723
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005724 if (err_xfer || err_tm)
5725 needs_reset = true;
5726
5727skip_pending_xfer_clear:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305728 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005729 if (needs_reset) {
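 /* bitmask with one bit set for every supported transfer request slot */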
5730 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5731
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005732 if (hba->saved_err & INT_FATAL_ERRORS)
5733 ufshcd_update_error_stats(hba,
5734 UFS_ERR_INT_FATAL_ERRORS);
5735 if (hba->saved_ce_err)
5736 ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
5737
5738 if (hba->saved_err & UIC_ERROR)
5739 ufshcd_update_error_stats(hba,
5740 UFS_ERR_INT_UIC_ERROR);
5741
5742 if (err_xfer || err_tm)
5743 ufshcd_update_error_stats(hba,
5744 UFS_ERR_CLEAR_PEND_XFER_TM);
5745
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005746 /*
5747 * ufshcd_reset_and_restore() does the link reinitialization
5748 * which will need at least one empty doorbell slot to send the
5749 * device management commands (NOP and query commands).
5750 * If no slot is empty at this moment, forcefully free up the
5751 * last slot.
5752 */
5753 if (hba->outstanding_reqs == max_doorbells)
5754 __ufshcd_transfer_req_compl(hba,
5755 (1UL << (hba->nutrs - 1)));
5756
5757 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305758 err = ufshcd_reset_and_restore(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005759 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305760 if (err) {
5761 dev_err(hba->dev, "%s: reset and restore failed\n",
5762 __func__);
5763 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5764 }
5765 /*
5766 * Inform scsi mid-layer that we did reset and allow to handle
5767 * Unit Attention properly.
5768 */
5769 scsi_report_bus_reset(hba->host, 0);
5770 hba->saved_err = 0;
5771 hba->saved_uic_err = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005772 hba->saved_ce_err = 0;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305773 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005774
Yaniv Gardi583fa622016-03-10 17:37:13 +02005775skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005776 if (!needs_reset) {
5777 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5778 if (hba->saved_err || hba->saved_uic_err)
5779 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5780 __func__, hba->saved_err, hba->saved_uic_err);
5781 }
5782
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005783 hba->silence_err_logs = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305784 ufshcd_clear_eh_in_progress(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305785out:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005786 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005787 ufshcd_scsi_unblock_requests(hba);
5788 ufshcd_release_all(hba);
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05305789 pm_runtime_put_sync(hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305790}
5791
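 /*
 * Record a UIC error register value and its timestamp in a small circular
 * history buffer (UIC_ERR_REG_HIST_LENGTH entries) for later debugging.
 */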
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005792static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5793 u32 reg)
5794{
5795 reg_hist->reg[reg_hist->pos] = reg;
5796 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5797 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5798}
5799
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305800/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305801 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5802 * @hba: per-adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305803 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305804static void ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305805{
5806 u32 reg;
5807
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005808 /* PHY layer lane error */
5809 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5810 /* Ignore LINERESET indication, as this is not an error */
5811 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5812 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5813 /*
5814 * To know whether this error is fatal or not, DB timeout
5815 * must be checked but this error is handled separately.
5816 */
5817 dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
5818 __func__, reg);
5819 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5820 }
5821
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305822 /* PA_INIT_ERROR is fatal and needs UIC reset */
5823 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005824 if (reg)
5825 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5826
5827 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305828 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08005829 } else if (hba->dev_info.quirks &
Yaniv Gardi583fa622016-03-10 17:37:13 +02005830 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5831 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5832 hba->uic_error |=
5833 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5834 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5835 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5836 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305837
5838 /* UIC NL/TL/DME errors needs software retry */
5839 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005840 if (reg) {
5841 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305842 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005843 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305844
5845 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005846 if (reg) {
5847 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305848 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005849 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305850
5851 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005852 if (reg) {
5853 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305854 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005855 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305856
5857 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5858 __func__, hba->uic_error);
5859}
5860
5861/**
5862 * ufshcd_check_errors - Check for errors that need s/w attention
5863 * @hba: per-adapter instance
5864 */
5865static void ufshcd_check_errors(struct ufs_hba *hba)
5866{
5867 bool queue_eh_work = false;
5868
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005869 if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305870 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305871
5872 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305873 hba->uic_error = 0;
5874 ufshcd_update_uic_error(hba);
5875 if (hba->uic_error)
5876 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305877 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305878
5879 if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005880 /*
5881 * Update the transfer error masks to sticky bits; do this
5882 * irrespective of the current ufshcd_state.
5883 */
5884 hba->saved_err |= hba->errors;
5885 hba->saved_uic_err |= hba->uic_error;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005886 hba->saved_ce_err |= hba->ce_error;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005887
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305888 /* handle fatal errors only when link is functional */
5889 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5890 /* block commands from scsi mid-layer */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005891 __ufshcd_scsi_block_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305892
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305893 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5894 schedule_work(&hba->eh_work);
5895 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05305896 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305897 /*
5898 * if (!queue_eh_work) -
5899 * Other errors are either non-fatal, where the host recovers
5900 * by itself without s/w intervention, or errors that will be
5901 * handled by the SCSI core layer.
5902 */
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305903}
5904
5905/**
5906 * ufshcd_tmc_handler - handle task management function completion
5907 * @hba: per adapter instance
5908 */
5909static void ufshcd_tmc_handler(struct ufs_hba *hba)
5910{
5911 u32 tm_doorbell;
5912
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305913 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
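 /* slots still marked in outstanding_tasks but cleared in the doorbell have completed */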
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305914 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305915 wake_up(&hba->tm_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305916}
5917
5918/**
5919 * ufshcd_sl_intr - Interrupt service routine
5920 * @hba: per adapter instance
5921 * @intr_status: contains interrupts generated by the controller
5922 */
5923static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5924{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005925 ufsdbg_error_inject_dispatcher(hba,
5926 ERR_INJECT_INTR, intr_status, &intr_status);
5927
5928 ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
5929
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305930 hba->errors = UFSHCD_ERROR_MASK & intr_status;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005931 if (hba->errors || hba->ce_error)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305932 ufshcd_check_errors(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305933
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305934 if (intr_status & UFSHCD_UIC_MASK)
5935 ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305936
5937 if (intr_status & UTP_TASK_REQ_COMPL)
5938 ufshcd_tmc_handler(hba);
5939
5940 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5941 ufshcd_transfer_req_compl(hba);
5942}
5943
5944/**
5945 * ufshcd_intr - Main interrupt service routine
5946 * @irq: irq number
5947 * @__hba: pointer to adapter instance
5948 *
5949 * Returns IRQ_HANDLED - If interrupt is valid
5950 * IRQ_NONE - If invalid interrupt
5951 */
5952static irqreturn_t ufshcd_intr(int irq, void *__hba)
5953{
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005954 u32 intr_status, enabled_intr_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305955 irqreturn_t retval = IRQ_NONE;
5956 struct ufs_hba *hba = __hba;
5957
5958 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05305959 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
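 /* only the interrupt sources that are currently enabled are serviced below */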
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005960 enabled_intr_status =
5961 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305962
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005963 if (intr_status)
Seungwon Jeon261ea452013-06-26 22:39:28 +05305964 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02005965
5966 if (enabled_intr_status) {
5967 ufshcd_sl_intr(hba, enabled_intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305968 retval = IRQ_HANDLED;
5969 }
5970 spin_unlock(hba->host->host_lock);
5971 return retval;
5972}
5973
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305974static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5975{
5976 int err = 0;
5977 u32 mask = 1 << tag;
5978 unsigned long flags;
5979
5980 if (!test_bit(tag, &hba->outstanding_tasks))
5981 goto out;
5982
5983 spin_lock_irqsave(hba->host->host_lock, flags);
5984 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
5985 spin_unlock_irqrestore(hba->host->host_lock, flags);
5986
5987 /* poll for max. 1 sec to clear door bell register by h/w */
5988 err = ufshcd_wait_for_register(hba,
5989 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02005990 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305991out:
5992 return err;
5993}
5994
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305995/**
5996 * ufshcd_issue_tm_cmd - issues task management commands to controller
5997 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305998 * @lun_id: LUN ID to which TM command is sent
5999 * @task_id: task ID to which the TM command is applicable
6000 * @tm_function: task management function opcode
6001 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306002 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306003 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306004 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306005static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6006 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306007{
6008 struct utp_task_req_desc *task_req_descp;
6009 struct utp_upiu_task_req *task_req_upiup;
6010 struct Scsi_Host *host;
6011 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306012 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306013 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306014 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306015
6016 host = hba->host;
6017
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306018 /*
6019 * Get free slot, sleep if slots are unavailable.
6020 * Even though we use wait_event() which sleeps indefinitely,
6021 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6022 */
6023 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006024 ufshcd_hold_all(hba);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306025
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306026 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306027 task_req_descp = hba->utmrdl_base_addr;
6028 task_req_descp += free_slot;
6029
6030 /* Configure task request descriptor */
6031 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6032 task_req_descp->header.dword_2 =
6033 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6034
6035 /* Configure task request UPIU */
6036 task_req_upiup =
6037 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
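 /* TM tags are allocated after the nutrs transfer request tags, so the two ranges never overlap */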
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306038 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306039 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306040 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306041 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306042 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306043 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03006044 /*
6045 * The host shall provide the same value for LUN field in the basic
6046 * header and for Input Parameter.
6047 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306048 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
6049 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306050
6051 /* send command to the controller */
6052 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02006053
6054 /* Make sure descriptors are ready before ringing the task doorbell */
6055 wmb();
6056
Seungwon Jeonb873a2752013-06-26 22:39:26 +05306057 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006058 /* Make sure that doorbell is committed immediately */
6059 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306060
6061 spin_unlock_irqrestore(host->host_lock, flags);
6062
6063 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306064 err = wait_event_timeout(hba->tm_wq,
6065 test_bit(free_slot, &hba->tm_condition),
6066 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306067 if (!err) {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306068 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6069 __func__, tm_function);
6070 if (ufshcd_clear_tm_cmd(hba, free_slot))
6071 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6072 __func__, free_slot);
6073 err = -ETIMEDOUT;
6074 } else {
6075 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306076 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306077
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306078 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306079 ufshcd_put_tm_slot(hba, free_slot);
6080 wake_up(&hba->tm_tag_wq);
6081
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006082 ufshcd_release_all(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306083 return err;
6084}
6085
6086/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306087 * ufshcd_eh_device_reset_handler - device reset handler registered to
6088 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306089 * @cmd: SCSI command pointer
6090 *
6091 * Returns SUCCESS/FAILED
6092 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306093static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306094{
6095 struct Scsi_Host *host;
6096 struct ufs_hba *hba;
6097 unsigned int tag;
6098 u32 pos;
6099 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306100 u8 resp = 0xF;
6101 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306102 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306103
6104 host = cmd->device->host;
6105 hba = shost_priv(host);
6106 tag = cmd->request->tag;
6107
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306108 lrbp = &hba->lrb[tag];
6109 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6110 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306111 if (!err)
6112 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306113 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306114 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306115
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306116 /* clear the commands that were pending for corresponding LUN */
6117 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6118 if (hba->lrb[pos].lun == lrbp->lun) {
6119 err = ufshcd_clear_cmd(hba, pos);
6120 if (err)
6121 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306122 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306123 }
6124 spin_lock_irqsave(host->host_lock, flags);
6125 ufshcd_transfer_req_compl(hba);
6126 spin_unlock_irqrestore(host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006127
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306128out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006129 hba->req_abort_count = 0;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306130 if (!err) {
6131 err = SUCCESS;
6132 } else {
6133 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6134 err = FAILED;
6135 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306136 return err;
6137}
6138
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006139static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6140{
6141 struct ufshcd_lrb *lrbp;
6142 int tag;
6143
6144 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6145 lrbp = &hba->lrb[tag];
6146 lrbp->req_abort_skip = true;
6147 }
6148}
6149
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306150/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306151 * ufshcd_abort - abort a specific command
6152 * @cmd: SCSI command pointer
6153 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306154 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6155 * management command, and in the host controller by clearing the door-bell
6156 * register. There can be a race where the controller sends the command to the
6157 * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK to
6158 * check whether the command was really issued and only then try to abort it.
6159 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306160 * Returns SUCCESS/FAILED
6161 */
6162static int ufshcd_abort(struct scsi_cmnd *cmd)
6163{
6164 struct Scsi_Host *host;
6165 struct ufs_hba *hba;
6166 unsigned long flags;
6167 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306168 int err = 0;
6169 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306170 u8 resp = 0xF;
6171 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03006172 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306173
6174 host = cmd->device->host;
6175 hba = shost_priv(host);
6176 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02006177 if (!ufshcd_valid_tag(hba, tag)) {
6178 dev_err(hba->dev,
6179 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6180 __func__, tag, cmd, cmd->request);
6181 BUG();
6182 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306183
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006184 lrbp = &hba->lrb[tag];
6185
6186 ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
6187
6188 /*
6189 * Task abort to the device W-LUN is illegal. When this command
6190 * fails due to that spec violation, the next step of SCSI error
6191 * handling will be to send a LU reset which, again, is a spec violation.
6192 * To avoid these unnecessary/illegal steps we skip to the last error
6193 * handling stage: reset and restore.
6194 */
6195 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6196 return ufshcd_eh_host_reset_handler(cmd);
6197
6198 ufshcd_hold_all(hba);
Dolev Ravive9d501b2014-07-01 12:22:37 +03006199 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02006200 /* If command is already aborted/completed, return SUCCESS */
6201 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6202 dev_err(hba->dev,
6203 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6204 __func__, tag, hba->outstanding_reqs, reg);
6205 goto out;
6206 }
6207
Dolev Ravive9d501b2014-07-01 12:22:37 +03006208 if (!(reg & (1 << tag))) {
6209 dev_err(hba->dev,
6210 "%s: cmd was completed, but without a notifying intr, tag = %d",
6211 __func__, tag);
6212 }
6213
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006214 /* Print Transfer Request of aborted task */
6215 dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
6216
6217 /*
6218 * Print detailed info about aborted request.
6219 * As more than one request might get aborted at the same time,
6220 * print full information only for the first aborted request in order
6221 * to reduce repeated printouts. For other aborted requests only print
6222 * basic details.
6223 */
6224 scsi_print_command(cmd);
6225 if (!hba->req_abort_count) {
6226 ufshcd_print_host_regs(hba);
6227 ufshcd_print_host_state(hba);
6228 ufshcd_print_pwr_info(hba);
6229 ufshcd_print_trs(hba, 1 << tag, true);
6230 } else {
6231 ufshcd_print_trs(hba, 1 << tag, false);
6232 }
6233 hba->req_abort_count++;
6234
6235
6236 /* Skip task abort in case previous aborts failed and report failure */
6237 if (lrbp->req_abort_skip) {
6238 err = -EIO;
6239 goto out;
6240 }
6241
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306242 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6243 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6244 UFS_QUERY_TASK, &resp);
6245 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6246 /* cmd pending in the device */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006247 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
6248 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306249 break;
6250 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306251 /*
6252 * cmd not pending in the device, check if it is
6253 * in transition.
6254 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006255 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
6256 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306257 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6258 if (reg & (1 << tag)) {
6259 /* sleep for max. 200us to stabilize */
6260 usleep_range(100, 200);
6261 continue;
6262 }
6263 /* command completed already */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006264 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
6265 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306266 goto out;
6267 } else {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006268 dev_err(hba->dev,
6269 "%s: no response from device. tag = %d, err %d",
6270 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306271 if (!err)
6272 err = resp; /* service response error */
6273 goto out;
6274 }
6275 }
6276
6277 if (!poll_cnt) {
6278 err = -EBUSY;
6279 goto out;
6280 }
6281
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306282 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6283 UFS_ABORT_TASK, &resp);
6284 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006285 if (!err) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306286 err = resp; /* service response error */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006287 dev_err(hba->dev, "%s: issued. tag = %d, err %d",
6288 __func__, tag, err);
6289 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306290 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306291 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306292
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306293 err = ufshcd_clear_cmd(hba, tag);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006294 if (err) {
6295 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
6296 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306297 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006298 }
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306299
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306300 scsi_dma_unmap(cmd);
6301
6302 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02006303 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306304 hba->lrb[tag].cmd = NULL;
6305 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306306
6307 clear_bit_unlock(tag, &hba->lrb_in_use);
6308 wake_up(&hba->dev_cmd.tag_wq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006309
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306310out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306311 if (!err) {
6312 err = SUCCESS;
6313 } else {
6314 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006315 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306316 err = FAILED;
6317 }
6318
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006319 /*
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006320 * This ufshcd_release_all() corresponds to the original scsi cmd that
6321 * got aborted here (as we won't get any IRQ for it).
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03006322 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006323 ufshcd_release_all(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306324 return err;
6325}
6326
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306327/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306328 * ufshcd_host_reset_and_restore - reset and restore host controller
6329 * @hba: per-adapter instance
6330 *
6331 * Note that host controller reset may issue DME_RESET to
6332 * the local and remote (device) UniPro stacks, and the attributes
6333 * are reset to their default state.
6334 *
6335 * Returns zero on success, non-zero on failure
6336 */
6337static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6338{
6339 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306340 unsigned long flags;
6341
6342 /* Reset the host controller */
6343 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02006344 ufshcd_hba_stop(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306345 spin_unlock_irqrestore(hba->host->host_lock, flags);
6346
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006347 /* scale up clocks to max frequency before full reinitialization */
6348 ufshcd_set_clk_freq(hba, true);
6349
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306350 err = ufshcd_hba_enable(hba);
6351 if (err)
6352 goto out;
6353
6354 /* Establish the link again and restore the device */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006355 err = ufshcd_probe_hba(hba);
6356
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006357 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306358 err = -EIO;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006359 goto out;
6360 }
6361
6362 if (!err) {
6363 err = ufshcd_vops_crypto_engine_reset(hba);
6364 if (err) {
6365 dev_err(hba->dev,
6366 "%s: failed to reset crypto engine %d\n",
6367 __func__, err);
6368 goto out;
6369 }
6370 }
6371
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306372out:
6373 if (err)
6374 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6375
6376 return err;
6377}
6378
6379/**
6380 * ufshcd_reset_and_restore - reset and re-initialize host/device
6381 * @hba: per-adapter instance
6382 *
6383 * Reset and recover device, host and re-establish link. This
6384 * is helpful to recover the communication in fatal error conditions.
6385 *
6386 * Returns zero on success, non-zero on failure
6387 */
6388static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6389{
6390 int err = 0;
6391 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006392 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306393
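 /* retry the full host reset and probe sequence until it succeeds or the retries are exhausted */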
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006394 do {
6395 err = ufshcd_host_reset_and_restore(hba);
6396 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306397
6398 /*
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006399 * There is no point in proceeding if we still fail
6400 * to recover after multiple retries.
6401 */
6402 if (err)
6403 BUG();
6404 /*
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306405 * After reset the door-bell might be cleared, complete
6406 * outstanding requests in s/w here.
6407 */
6408 spin_lock_irqsave(hba->host->host_lock, flags);
6409 ufshcd_transfer_req_compl(hba);
6410 ufshcd_tmc_handler(hba);
6411 spin_unlock_irqrestore(hba->host->host_lock, flags);
6412
6413 return err;
6414}
6415
6416/**
6417 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6418 * @cmd - SCSI command pointer
6419 *
6420 * Returns SUCCESS/FAILED
6421 */
6422static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6423{
6424 int err;
6425 unsigned long flags;
6426 struct ufs_hba *hba;
6427
6428 hba = shost_priv(cmd->device->host);
6429
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006430 ufshcd_hold_all(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306431 /*
6432 * Check if there is any race with fatal error handling.
6433 * If so, wait for it to complete. Even though fatal error
6434 * handling does reset and restore in some cases, don't assume
6435 * anything out of it. We are just avoiding race here.
6436 */
6437 do {
6438 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306439 if (!(work_pending(&hba->eh_work) ||
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306440 hba->ufshcd_state == UFSHCD_STATE_RESET))
6441 break;
6442 spin_unlock_irqrestore(hba->host->host_lock, flags);
6443 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306444 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306445 } while (1);
6446
6447 hba->ufshcd_state = UFSHCD_STATE_RESET;
6448 ufshcd_set_eh_in_progress(hba);
6449 spin_unlock_irqrestore(hba->host->host_lock, flags);
6450
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006451 ufshcd_update_error_stats(hba, UFS_ERR_EH);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306452 err = ufshcd_reset_and_restore(hba);
6453
6454 spin_lock_irqsave(hba->host->host_lock, flags);
6455 if (!err) {
6456 err = SUCCESS;
6457 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6458 } else {
6459 err = FAILED;
6460 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6461 }
6462 ufshcd_clear_eh_in_progress(hba);
6463 spin_unlock_irqrestore(hba->host->host_lock, flags);
6464
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006465 ufshcd_release_all(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306466 return err;
6467}
6468
6469/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006470 * ufshcd_get_max_icc_level - calculate the ICC level
6471 * @sup_curr_uA: max. current supported by the regulator
6472 * @start_scan: row at the desc table to start scan from
6473 * @buff: power descriptor buffer
6474 *
6475 * Returns calculated max ICC level for specific regulator
6476 */
6477static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6478{
6479 int i;
6480 int curr_uA;
6481 u16 data;
6482 u16 unit;
6483
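 /*
 * Each power descriptor entry is a big-endian 16-bit value packing a unit
 * field and a current value; scan downwards from start_scan until an entry
 * fits within the regulator's maximum supported current.
 */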
6484 for (i = start_scan; i >= 0; i--) {
6485 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
6486 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6487 ATTR_ICC_LVL_UNIT_OFFSET;
6488 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6489 switch (unit) {
6490 case UFSHCD_NANO_AMP:
6491 curr_uA = curr_uA / 1000;
6492 break;
6493 case UFSHCD_MILI_AMP:
6494 curr_uA = curr_uA * 1000;
6495 break;
6496 case UFSHCD_AMP:
6497 curr_uA = curr_uA * 1000 * 1000;
6498 break;
6499 case UFSHCD_MICRO_AMP:
6500 default:
6501 break;
6502 }
6503 if (sup_curr_uA >= curr_uA)
6504 break;
6505 }
6506 if (i < 0) {
6507 i = 0;
6508 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6509 }
6510
6511 return (u32)i;
6512}
6513
6514/**
Subhash Jadavania8d1ba32016-12-12 18:19:21 -08006515 * ufshcd_find_max_sup_active_icc_level - find the max ICC level
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006516 * In case regulators are not initialized, we'll return 0
6517 * @hba: per-adapter instance
6518 * @desc_buf: power descriptor buffer to extract ICC levels from.
6519 * @len: length of desc_buff
6520 *
Subhash Jadavania8d1ba32016-12-12 18:19:21 -08006521 * Returns calculated max ICC level
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006522 */
6523static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6524 u8 *desc_buf, int len)
6525{
6526 u32 icc_level = 0;
6527
Subhash Jadavania8d1ba32016-12-12 18:19:21 -08006528 /*
6529 * The VCCQ rail is optional for removable UFS cards, and most
6530 * vendors don't use this rail for embedded UFS devices either. So it
6531 * is normal that the VCCQ rail may not be provided for a given platform.
6532 */
6533 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq2) {
6534 dev_err(hba->dev, "%s: Regulator capability was not set, bActiveICCLevel=%d\n",
6535 __func__, icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006536 goto out;
6537 }
6538
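 /* each supplied rail further constrains the level; start the next scan from the level found so far */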
6539 if (hba->vreg_info.vcc)
6540 icc_level = ufshcd_get_max_icc_level(
6541 hba->vreg_info.vcc->max_uA,
6542 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6543 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6544
6545 if (hba->vreg_info.vccq)
6546 icc_level = ufshcd_get_max_icc_level(
6547 hba->vreg_info.vccq->max_uA,
6548 icc_level,
6549 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6550
6551 if (hba->vreg_info.vccq2)
6552 icc_level = ufshcd_get_max_icc_level(
6553 hba->vreg_info.vccq2->max_uA,
6554 icc_level,
6555 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6556out:
6557 return icc_level;
6558}
6559
Subhash Jadavani8a93dbd2016-12-12 17:59:44 -08006560static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006561{
6562 int ret;
6563 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
6564 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
Subhash Jadavani35732e52016-12-09 16:09:42 -08006565 u32 icc_level;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006566
6567 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6568 if (ret) {
6569 dev_err(hba->dev,
6570 "%s: Failed reading power descriptor.len = %d ret = %d",
6571 __func__, buff_len, ret);
6572 return;
6573 }
6574
Subhash Jadavani35732e52016-12-09 16:09:42 -08006575 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
6576 buff_len);
6577 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006578
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02006579 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Subhash Jadavani35732e52016-12-09 16:09:42 -08006580 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006581
6582 if (ret)
6583 dev_err(hba->dev,
6584 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
Subhash Jadavani35732e52016-12-09 16:09:42 -08006585 __func__, icc_level, ret);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03006586}
6587
6588/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006589 * ufshcd_scsi_add_wlus - Adds required W-LUs
6590 * @hba: per-adapter instance
6591 *
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006592 * UFS devices can support up to 4 well known logical units:
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006593 * "REPORT_LUNS" (address: 01h)
6594 * "UFS Device" (address: 50h)
6595 * "RPMB" (address: 44h)
6596 * "BOOT" (address: 30h)
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006597 *
6598 * "REPORT_LUNS" & "UFS Device" are mandatory for all device classes (see
6599 * "bDeviceSubClass" parameter of device descriptor) while "BOOT" is supported
6600 * only for bootable devices. "RPMB" is only supported with embedded devices.
6601 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006602 * UFS device's power management needs to be controlled by "POWER CONDITION"
6603 * field of SSU (START STOP UNIT) command. But this "power condition" field
6604 * will take effect only when it's sent to the "UFS device" well known logical unit,
6605 * hence we require the scsi_device instance to represent this logical unit in
6606 * order for the UFS host driver to send the SSU command for power management.
6607 *
6608 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6609 * Block) LU so that a user space process can control this LU. User space may also
6610 * want to have access to BOOT LU.
6611 *
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006612 * This function tries to add scsi device instances for all of the well known
6613 * LUs (except the "REPORT LUNS" LU), depending on the device class.
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006614 *
6615 * Returns zero on success (all required W-LUs are added successfully),
6616 * non-zero error value on failure (if failed to add any of the required W-LU).
6617 */
6618static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6619{
6620 int ret = 0;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006621 struct scsi_device *sdev_rpmb;
6622 struct scsi_device *sdev_boot;
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006623 bool is_bootable_dev = false;
6624 bool is_embedded_dev = false;
6625
6626 if ((hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_BOOTABLE) ||
6627 (hba->dev_info.b_device_sub_class == UFS_DEV_REMOVABLE_BOOTABLE))
6628 is_bootable_dev = true;
6629
6630 if ((hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_BOOTABLE) ||
6631 (hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_NON_BOOTABLE))
6632 is_embedded_dev = true;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006633
6634 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6635 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6636 if (IS_ERR(hba->sdev_ufs_device)) {
6637 ret = PTR_ERR(hba->sdev_ufs_device);
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006638 dev_err(hba->dev, "%s: failed adding DEVICE_WLUN. ret %d\n",
6639 __func__, ret);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006640 hba->sdev_ufs_device = NULL;
6641 goto out;
6642 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03006643 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006644
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006645 if (is_bootable_dev) {
6646 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6647 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN),
6648 NULL);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006649
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006650 if (IS_ERR(sdev_boot)) {
6651 ret = PTR_ERR(sdev_boot);
6652 dev_err(hba->dev, "%s: failed adding BOOT_WLUN. ret %d\n",
6653 __func__, ret);
6654 goto remove_sdev_ufs_device;
6655 }
6656 scsi_device_put(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006657 }
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006658
6659 if (is_embedded_dev) {
6660 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6661 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN),
6662 NULL);
6663 if (IS_ERR(sdev_rpmb)) {
6664 ret = PTR_ERR(sdev_rpmb);
6665 dev_err(hba->dev, "%s: failed adding RPMB_WLUN. ret %d\n",
6666 __func__, ret);
6667 goto remove_sdev_boot;
6668 }
6669 scsi_device_put(sdev_rpmb);
6670 }
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006671 goto out;
6672
6673remove_sdev_boot:
Subhash Jadavani2df121a2016-12-15 18:27:31 -08006674 if (is_bootable_dev)
6675 scsi_remove_device(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03006676remove_sdev_ufs_device:
6677 scsi_remove_device(hba->sdev_ufs_device);
6678out:
6679 return ret;
6680}
6681
6682/**
Yaniv Gardi37113102016-03-10 17:37:16 +02006683 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6684 * @hba: per-adapter instance
6685 *
6686 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6687 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6688 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6689 * the hibern8 exit latency.
6690 *
6691 * Returns zero on success, non-zero error value on failure.
6692 */
6693static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6694{
6695 int ret = 0;
6696 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6697
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006698 if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
6699 return 0;
6700
Yaniv Gardi37113102016-03-10 17:37:16 +02006701 ret = ufshcd_dme_peer_get(hba,
6702 UIC_ARG_MIB_SEL(
6703 RX_MIN_ACTIVATETIME_CAPABILITY,
6704 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6705 &peer_rx_min_activatetime);
6706 if (ret)
6707 goto out;
6708
6709 /* make sure proper unit conversion is applied */
6710 tuned_pa_tactivate =
6711 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6712 / PA_TACTIVATE_TIME_UNIT_US);
6713 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6714 tuned_pa_tactivate);
6715
6716out:
6717 return ret;
6718}
6719
6720/**
6721 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6722 * @hba: per-adapter instance
6723 *
6724 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6725 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6726 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6727 * This optimal value can help reduce the hibern8 exit latency.
6728 *
6729 * Returns zero on success, non-zero error value on failure.
6730 */
6731static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6732{
6733 int ret = 0;
6734 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6735 u32 max_hibern8_time, tuned_pa_hibern8time;
6736
6737 ret = ufshcd_dme_get(hba,
6738 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6739 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6740 &local_tx_hibern8_time_cap);
6741 if (ret)
6742 goto out;
6743
6744 ret = ufshcd_dme_peer_get(hba,
6745 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6746 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6747 &peer_rx_hibern8_time_cap);
6748 if (ret)
6749 goto out;
6750
6751 max_hibern8_time = max(local_tx_hibern8_time_cap,
6752 peer_rx_hibern8_time_cap);
6753 /* make sure proper unit conversion is applied */
6754 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6755 / PA_HIBERN8_TIME_UNIT_US);
6756 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6757 tuned_pa_hibern8time);
6758out:
6759 return ret;
6760}
6761
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006762/**
6763 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6764 * less than device PA_TACTIVATE time.
6765 * @hba: per-adapter instance
6766 *
6767 * Some UFS devices require host PA_TACTIVATE to be lower than device
6768 * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
6769 * for such devices.
6770 *
6771 * Returns zero on success, non-zero error value on failure.
6772 */
6773static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6774{
6775 int ret = 0;
6776 u32 granularity, peer_granularity;
6777 u32 pa_tactivate, peer_pa_tactivate;
6778 u32 pa_tactivate_us, peer_pa_tactivate_us;
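 /* microseconds per granularity unit, indexed by PA_GRANULARITY - 1 */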
6779 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6780
6781 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6782 &granularity);
6783 if (ret)
6784 goto out;
6785
6786 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6787 &peer_granularity);
6788 if (ret)
6789 goto out;
6790
6791 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6792 (granularity > PA_GRANULARITY_MAX_VAL)) {
6793 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6794 __func__, granularity);
6795 return -EINVAL;
6796 }
6797
6798 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6799 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6800 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6801 __func__, peer_granularity);
6802 return -EINVAL;
6803 }
6804
6805 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6806 if (ret)
6807 goto out;
6808
6809 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6810 &peer_pa_tactivate);
6811 if (ret)
6812 goto out;
6813
6814 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6815 peer_pa_tactivate_us = peer_pa_tactivate *
6816 gran_to_us_table[peer_granularity - 1];
6817
6818 if (pa_tactivate_us > peer_pa_tactivate_us) {
6819 u32 new_peer_pa_tactivate;
6820
6821 new_peer_pa_tactivate = pa_tactivate_us /
6822 gran_to_us_table[peer_granularity - 1];
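 /* round up so the device PA_TACTIVATE is not shorter than the host's after integer division */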
6823 new_peer_pa_tactivate++;
6824 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6825 new_peer_pa_tactivate);
6826 }
6827
6828out:
6829 return ret;
6830}
6831
Yaniv Gardi37113102016-03-10 17:37:16 +02006832static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6833{
6834 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6835 ufshcd_tune_pa_tactivate(hba);
6836 ufshcd_tune_pa_hibern8time(hba);
6837 }
6838
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08006839 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
Yaniv Gardi37113102016-03-10 17:37:16 +02006840 /* set 1ms timeout for PA_TACTIVATE */
6841 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006842
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08006843 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006844 ufshcd_quirk_tune_host_pa_tactivate(hba);
6845
6846 ufshcd_vops_apply_dev_quirks(hba);
6847}
6848
6849static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6850{
6851 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6852
6853 hba->ufs_stats.hibern8_exit_cnt = 0;
6854 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6855
6856 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6857 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6858 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6859 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6860 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6861
6862 hba->req_abort_count = 0;
6863}
6864
6865static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
6866{
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08006867 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006868 if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
6869 UIC_LINK_OFF_STATE) {
6870 hba->rpm_lvl =
6871 ufs_get_desired_pm_lvl_for_dev_link_state(
6872 UFS_SLEEP_PWR_MODE,
6873 UIC_LINK_HIBERN8_STATE);
6874 dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
6875 hba->rpm_lvl);
6876 }
6877 if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
6878 UIC_LINK_OFF_STATE) {
6879 hba->spm_lvl =
6880 ufs_get_desired_pm_lvl_for_dev_link_state(
6881 UFS_SLEEP_PWR_MODE,
6882 UIC_LINK_HIBERN8_STATE);
6883 dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
6884 hba->spm_lvl);
6885 }
6886 }
Yaniv Gardi37113102016-03-10 17:37:16 +02006887}
6888
6889/**
Subhash Jadavani88f99992016-12-13 15:52:21 -08006890 * ufshcd_set_dev_ref_clk - set the device bRefClkFreq
6891 * @hba: per-adapter instance
6892 *
6893 * Read the current value of the bRefClkFreq attribute from the device and
6894 * update it if the host is supplying a different reference clock frequency
6895 * than the one mentioned in the bRefClkFreq attribute.
6896 *
6897 * Returns zero on success, non-zero error value on failure.
6898 */
6899static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6900{
6901 int err = 0;
6902 int ref_clk = -1;
6903 static const char * const ref_clk_freqs[] = {"19.2 MHz", "26 MHz",
6904 "38.4 MHz", "52 MHz"};
6905
6906 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6907 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6908
6909 if (err) {
6910 dev_err(hba->dev, "%s: failed reading bRefClkFreq. err = %d\n",
6911 __func__, err);
6912 goto out;
6913 }
6914
6915 if ((ref_clk < 0) || (ref_clk > REF_CLK_FREQ_52_MHZ)) {
6916 dev_err(hba->dev, "%s: invalid ref_clk setting = %d\n",
6917 __func__, ref_clk);
6918 err = -EINVAL;
6919 goto out;
6920 }
6921
6922 if (ref_clk == hba->dev_ref_clk_freq)
6923 goto out; /* nothing to update */
6924
6925 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6926 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0,
6927 &hba->dev_ref_clk_freq);
6928
6929 if (err)
6930 dev_err(hba->dev, "%s: bRefClkFreq setting to %s failed\n",
6931 __func__, ref_clk_freqs[hba->dev_ref_clk_freq]);
6932 else
6933 /*
6934 * It is good to print this out here to debug any later failures
6935 * related to gear switch.
6936 */
6937 dev_info(hba->dev, "%s: bRefClkFreq setting to %s succeeded\n",
6938 __func__, ref_clk_freqs[hba->dev_ref_clk_freq]);
6939
6940out:
6941 return err;
6942}
6943
Subhash Jadavani344c16c2016-12-15 17:09:35 -08006944static int ufs_read_device_desc_data(struct ufs_hba *hba)
6945{
6946 int err;
6947 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
6948
6949 err = ufshcd_read_device_desc(hba, desc_buf, sizeof(desc_buf));
6950 if (err)
6951 return err;
6952
6953 /*
6954 * getting vendor (manufacturerID) and Bank Index in big endian
6955 * format
6956 */
6957 hba->dev_info.w_manufacturer_id =
6958 desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6959 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6960 hba->dev_info.b_device_sub_class =
6961 desc_buf[DEVICE_DESC_PARAM_DEVICE_SUB_CLASS];
6962 hba->dev_info.i_product_name = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6963
6964 return 0;
6965}
6966
Subhash Jadavani88f99992016-12-13 15:52:21 -08006967/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006968 * ufshcd_probe_hba - probe hba to detect device and initialize
6969 * @hba: per-adapter instance
6970 *
6971 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306972 */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03006973static int ufshcd_probe_hba(struct ufs_hba *hba)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306974{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306975 int ret;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006976 ktime_t start = ktime_get();
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05306977
6978 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306979 if (ret)
6980 goto out;
6981
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006982 /* Enable auto hibern8 if supported */
6983 if (ufshcd_is_auto_hibern8_supported(hba))
6984 ufshcd_set_auto_hibern8_timer(hba,
6985 hba->hibern8_on_idle.delay_ms);
Yaniv Gardi50646362014-10-23 13:25:13 +03006986
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006987 /* Debug counters initialization */
6988 ufshcd_clear_dbg_ufs_stats(hba);
Yaniv Gardiafdfff52016-03-10 17:37:15 +02006989 /* set the default level for urgent bkops */
6990 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6991 hba->is_urgent_bkops_lvl_checked = false;
6992
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006993 /* UniPro link is active now */
6994 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05306995
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306996 ret = ufshcd_verify_dev_init(hba);
6997 if (ret)
6998 goto out;
6999
Dolev Raviv68078d52013-07-30 00:35:58 +05307000 ret = ufshcd_complete_dev_init(hba);
7001 if (ret)
7002 goto out;
7003
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007004 /* clear any previous UFS device information */
7005 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7006
Subhash Jadavani344c16c2016-12-15 17:09:35 -08007007 /* cache important parameters from device descriptor for later use */
7008 ret = ufs_read_device_desc_data(hba);
7009 if (ret)
7010 goto out;
7011
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007012 ufs_advertise_fixup_device(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02007013 ufshcd_tune_unipro_params(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02007014
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007015 ufshcd_apply_pm_quirks(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02007016 ret = ufshcd_set_vccq_rail_unused(hba,
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08007017 (hba->dev_info.quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
Yaniv Gardi60f01872016-03-10 17:37:11 +02007018 if (ret)
7019 goto out;
7020
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007021 /* UFS device is also active now */
7022 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307023 ufshcd_force_reset_auto_bkops(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307024
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007025 if (ufshcd_get_max_pwr_mode(hba)) {
7026 dev_err(hba->dev,
7027 "%s: Failed getting max supported power mode\n",
7028 __func__);
7029 } else {
Subhash Jadavani88f99992016-12-13 15:52:21 -08007030 /*
7031 * Set the right value to bRefClkFreq before attempting to
7032 * switch to HS gears.
7033 */
7034 ufshcd_set_dev_ref_clk(hba);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007035 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007036 if (ret) {
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007037 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7038 __func__, ret);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007039 goto out;
7040 }
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007041 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007042
Subhash Jadavani8a93dbd2016-12-12 17:59:44 -08007043 /*
7044 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7045 * and for removable UFS card as well, hence always set the parameter.
7046 * Note: Error handler may issue the device reset hence resetting
7047 * bActiveICCLevel as well so it is always safe to set this here.
7048 */
7049 ufshcd_set_active_icc_lvl(hba);
7050
Yaniv Gardi53c12d02016-02-01 15:02:45 +02007051 /* set the state as operational after switching to desired gear */
7052 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007053 /*
7054 * If we are in error handling context or in power management callbacks
7055 * context, no need to scan the host
7056 */
7057 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7058 bool flag;
7059
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02007060 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7061 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007062 hba->dev_info.f_power_on_wp_en = flag;
7063
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007064 /* Add required well known logical units to scsi mid layer */
7065 if (ufshcd_scsi_add_wlus(hba))
7066 goto out;
7067
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307068 scsi_scan_host(hba->host);
7069 pm_runtime_put_sync(hba->dev);
7070 }
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007071
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007072 /* Resume devfreq after UFS device is detected */
7073 if (ufshcd_is_clkscaling_supported(hba)) {
7074 memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info,
7075 sizeof(struct ufs_pa_layer_attr));
7076 hba->clk_scaling.saved_pwr_info.is_valid = true;
7077 hba->clk_scaling.is_scaled_up = true;
7078 ufshcd_resume_clkscaling(hba);
7079 hba->clk_scaling.is_allowed = true;
7080 }
7081
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307082out:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007083 /*
7084 * If we failed to initialize the device or the device is not
7085 * present, turn off the power/clocks etc.
7086 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007087 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7088 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007089 ufshcd_hba_exit(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007090 }
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007091
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007092 trace_ufshcd_init(dev_name(hba->dev), ret,
7093 ktime_to_us(ktime_sub(ktime_get(), start)),
7094 hba->curr_dev_pwr_mode, hba->uic_link_state);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007095 return ret;
7096}
7097
7098/**
7099 * ufshcd_async_scan - asynchronous execution for probing hba
7100 * @data: data pointer to pass to this function
7101 * @cookie: cookie data
7102 */
7103static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7104{
7105 struct ufs_hba *hba = (struct ufs_hba *)data;
7106
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007107 /*
7108	 * Don't allow clock gating and hibern8 entry, so that device
7109	 * detection is faster.
7110 */
7111 ufshcd_hold_all(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007112 ufshcd_probe_hba(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007113 ufshcd_release_all(hba);
7114}
7115
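For context, a minimal sketch (not part of this file's flow) of how the asynchronous scan above is expected to be kicked off from the controller init path in ufshcd_init(); async_schedule() comes from <linux/async.h>, which is already included, and the wrapper name below is hypothetical.

/*
 * Illustrative only: defer device detection so host registration does not
 * block on link bring-up; ufshcd_async_scan() then runs in a worker context
 * with clock gating and hibern8 held off as shown above.
 */
static void example_kick_async_scan(struct ufs_hba *hba)
{
	async_schedule(ufshcd_async_scan, hba);
}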
7116/**
7117 * ufshcd_query_ioctl - perform user read queries
7118 * @hba: per-adapter instance
7119 * @lun: used for lun specific queries
7120 * @buffer: user space buffer for reading and submitting query data and params
7121 * @return: 0 for success, negative error code otherwise
7122 *
7123 * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
7124 * It will read the opcode, idn and buf_size parameters and put the
7125 * response in the buffer field while updating the used size in buf_size.
7126 */
7127static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
7128{
7129 struct ufs_ioctl_query_data *ioctl_data;
7130 int err = 0;
7131 int length = 0;
7132 void *data_ptr;
7133 bool flag;
7134 u32 att;
7135 u8 index;
7136 u8 *desc = NULL;
7137
7138 ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
7139 if (!ioctl_data) {
7140 dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
7141 sizeof(struct ufs_ioctl_query_data));
7142 err = -ENOMEM;
7143 goto out;
7144 }
7145
7146 /* extract params from user buffer */
7147 err = copy_from_user(ioctl_data, buffer,
7148 sizeof(struct ufs_ioctl_query_data));
7149 if (err) {
7150 dev_err(hba->dev,
7151 "%s: Failed copying buffer from user, err %d\n",
7152 __func__, err);
7153 goto out_release_mem;
7154 }
7155
7156 /* verify legal parameters & send query */
7157 switch (ioctl_data->opcode) {
7158 case UPIU_QUERY_OPCODE_READ_DESC:
7159 switch (ioctl_data->idn) {
7160 case QUERY_DESC_IDN_DEVICE:
7161 case QUERY_DESC_IDN_CONFIGURAION:
7162 case QUERY_DESC_IDN_INTERCONNECT:
7163 case QUERY_DESC_IDN_GEOMETRY:
7164 case QUERY_DESC_IDN_POWER:
7165 index = 0;
7166 break;
7167 case QUERY_DESC_IDN_UNIT:
7168 if (!ufs_is_valid_unit_desc_lun(lun)) {
7169 dev_err(hba->dev,
7170 "%s: No unit descriptor for lun 0x%x\n",
7171 __func__, lun);
7172 err = -EINVAL;
7173 goto out_release_mem;
7174 }
7175 index = lun;
7176 break;
7177 default:
7178 goto out_einval;
7179 }
7180 length = min_t(int, QUERY_DESC_MAX_SIZE,
7181 ioctl_data->buf_size);
7182 desc = kzalloc(length, GFP_KERNEL);
7183 if (!desc) {
7184 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
7185 __func__, length);
7186 err = -ENOMEM;
7187 goto out_release_mem;
7188 }
7189 err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
7190 ioctl_data->idn, index, 0, desc, &length);
7191 break;
7192 case UPIU_QUERY_OPCODE_READ_ATTR:
7193 switch (ioctl_data->idn) {
7194 case QUERY_ATTR_IDN_BOOT_LU_EN:
7195 case QUERY_ATTR_IDN_POWER_MODE:
7196 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
7197 case QUERY_ATTR_IDN_OOO_DATA_EN:
7198 case QUERY_ATTR_IDN_BKOPS_STATUS:
7199 case QUERY_ATTR_IDN_PURGE_STATUS:
7200 case QUERY_ATTR_IDN_MAX_DATA_IN:
7201 case QUERY_ATTR_IDN_MAX_DATA_OUT:
7202 case QUERY_ATTR_IDN_REF_CLK_FREQ:
7203 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
7204 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
7205 case QUERY_ATTR_IDN_EE_CONTROL:
7206 case QUERY_ATTR_IDN_EE_STATUS:
7207 case QUERY_ATTR_IDN_SECONDS_PASSED:
7208 index = 0;
7209 break;
7210 case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
7211 case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
7212 index = lun;
7213 break;
7214 default:
7215 goto out_einval;
7216 }
7217 err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
7218 index, 0, &att);
7219 break;
7220
7221 case UPIU_QUERY_OPCODE_WRITE_ATTR:
7222 err = copy_from_user(&att,
7223 buffer + sizeof(struct ufs_ioctl_query_data),
7224 sizeof(u32));
7225 if (err) {
7226 dev_err(hba->dev,
7227 "%s: Failed copying buffer from user, err %d\n",
7228 __func__, err);
7229 goto out_release_mem;
7230 }
7231
7232 switch (ioctl_data->idn) {
7233 case QUERY_ATTR_IDN_BOOT_LU_EN:
7234 index = 0;
7235 if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
7236 dev_err(hba->dev,
7237 "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
7238 __func__, ioctl_data->opcode,
7239 (unsigned int)ioctl_data->idn, att);
7240 err = -EINVAL;
7241 goto out_release_mem;
7242 }
7243 break;
7244 default:
7245 goto out_einval;
7246 }
7247 err = ufshcd_query_attr(hba, ioctl_data->opcode,
7248 ioctl_data->idn, index, 0, &att);
7249 break;
7250
7251 case UPIU_QUERY_OPCODE_READ_FLAG:
7252 switch (ioctl_data->idn) {
7253 case QUERY_FLAG_IDN_FDEVICEINIT:
7254 case QUERY_FLAG_IDN_PERMANENT_WPE:
7255 case QUERY_FLAG_IDN_PWR_ON_WPE:
7256 case QUERY_FLAG_IDN_BKOPS_EN:
7257 case QUERY_FLAG_IDN_PURGE_ENABLE:
7258 case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
7259 case QUERY_FLAG_IDN_BUSY_RTC:
7260 break;
7261 default:
7262 goto out_einval;
7263 }
7264 err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
7265 ioctl_data->idn, &flag);
7266 break;
7267 default:
7268 goto out_einval;
7269 }
7270
7271 if (err) {
7272 dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
7273 ioctl_data->idn);
7274 goto out_release_mem;
7275 }
7276
7277 /*
7278 * copy response data
7279	 * As we might end up reading less data than what is specified in
7280	 * "ioctl_data->buf_size", update "ioctl_data->buf_size" to the
7281	 * number of bytes actually read.
7282 */
7283 switch (ioctl_data->opcode) {
7284 case UPIU_QUERY_OPCODE_READ_DESC:
7285 ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
7286 data_ptr = desc;
7287 break;
7288 case UPIU_QUERY_OPCODE_READ_ATTR:
7289 ioctl_data->buf_size = sizeof(u32);
7290 data_ptr = &att;
7291 break;
7292 case UPIU_QUERY_OPCODE_READ_FLAG:
7293 ioctl_data->buf_size = 1;
7294 data_ptr = &flag;
7295 break;
7296 case UPIU_QUERY_OPCODE_WRITE_ATTR:
7297 goto out_release_mem;
7298 default:
7299 goto out_einval;
7300 }
7301
7302 /* copy to user */
7303 err = copy_to_user(buffer, ioctl_data,
7304 sizeof(struct ufs_ioctl_query_data));
7305 if (err)
7306 dev_err(hba->dev, "%s: Failed copying back to user.\n",
7307 __func__);
7308 err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
7309 data_ptr, ioctl_data->buf_size);
7310 if (err)
7311 dev_err(hba->dev, "%s: err %d copying back to user.\n",
7312 __func__, err);
7313 goto out_release_mem;
7314
7315out_einval:
7316 dev_err(hba->dev,
7317 "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
7318 __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
7319 err = -EINVAL;
7320out_release_mem:
7321 kfree(ioctl_data);
7322 kfree(desc);
7323out:
7324 return err;
7325}
7326
7327/**
7328 * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
7329 * @dev: scsi device required for per LUN queries
7330 * @cmd: command opcode
7331 * @buffer: user space buffer for transferring data
7332 *
7333 * Supported commands:
7334 * UFS_IOCTL_QUERY
7335 */
7336static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
7337{
7338 struct ufs_hba *hba = shost_priv(dev->host);
7339 int err = 0;
7340
7341 BUG_ON(!hba);
7342 if (!buffer) {
7343 dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
7344 return -EINVAL;
7345 }
7346
7347 switch (cmd) {
7348 case UFS_IOCTL_QUERY:
7349 pm_runtime_get_sync(hba->dev);
7350 err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
7351 buffer);
7352 pm_runtime_put_sync(hba->dev);
7353 break;
7354 default:
7355 err = -ENOIOCTLCMD;
7356 dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
7357 cmd);
7358 break;
7359 }
7360
7361 return err;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307362}
7363
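A hedged userspace sketch of driving UFS_IOCTL_QUERY through a UFS-backed SCSI device node. The uapi header paths and the ufs_ioctl_query_data field layout (opcode, idn, buf_size, with response bytes following the header) are assumptions inferred from the handler above, not verified against a specific kernel tree.

/* Hypothetical userspace example: read the bBootLunEn attribute. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/ufs/ioctl.h>	/* assumed uapi location of UFS_IOCTL_QUERY */
#include <scsi/ufs/ufs.h>	/* assumed uapi location of opcode/idn values */

int main(int argc, char **argv)
{
	const char *node = (argc > 1) ? argv[1] : "/dev/sda";
	uint8_t buf[sizeof(struct ufs_ioctl_query_data) + sizeof(uint32_t)]
		__attribute__((aligned(8)));
	struct ufs_ioctl_query_data *qd = (struct ufs_ioctl_query_data *)buf;
	uint32_t attr = 0;
	int fd = open(node, O_RDONLY);

	if (fd < 0)
		return 1;

	memset(buf, 0, sizeof(buf));
	qd->opcode = UPIU_QUERY_OPCODE_READ_ATTR;
	qd->idn = QUERY_ATTR_IDN_BOOT_LU_EN;
	qd->buf_size = sizeof(uint32_t);

	if (ioctl(fd, UFS_IOCTL_QUERY, qd) < 0) {
		perror("UFS_IOCTL_QUERY");
		close(fd);
		return 1;
	}

	/* response bytes follow the header; buf_size is updated by the driver */
	memcpy(&attr, qd + 1, sizeof(attr));
	printf("bBootLunEn = %u (buf_size = %u)\n", attr,
	       (unsigned int)qd->buf_size);
	close(fd);
	return 0;
}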
Yaniv Gardif550c652016-03-10 17:37:07 +02007364static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
7365{
7366 unsigned long flags;
7367 struct Scsi_Host *host;
7368 struct ufs_hba *hba;
7369 int index;
7370 bool found = false;
7371
7372 if (!scmd || !scmd->device || !scmd->device->host)
7373 return BLK_EH_NOT_HANDLED;
7374
7375 host = scmd->device->host;
7376 hba = shost_priv(host);
7377 if (!hba)
7378 return BLK_EH_NOT_HANDLED;
7379
7380 spin_lock_irqsave(host->host_lock, flags);
7381
7382 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7383 if (hba->lrb[index].cmd == scmd) {
7384 found = true;
7385 break;
7386 }
7387 }
7388
7389 spin_unlock_irqrestore(host->host_lock, flags);
7390
7391 /*
7392 * Bypass SCSI error handling and reset the block layer timer if this
7393 * SCSI command was not actually dispatched to UFS driver, otherwise
7394 * let SCSI layer handle the error as usual.
7395 */
7396 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
7397}
7398
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307399static struct scsi_host_template ufshcd_driver_template = {
7400 .module = THIS_MODULE,
7401 .name = UFSHCD,
7402 .proc_name = UFSHCD,
7403 .queuecommand = ufshcd_queuecommand,
7404 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09007405 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307406 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03007407 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307408 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307409 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7410 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Yaniv Gardif550c652016-03-10 17:37:07 +02007411 .eh_timed_out = ufshcd_eh_timed_out,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007412 .ioctl = ufshcd_ioctl,
7413#ifdef CONFIG_COMPAT
7414 .compat_ioctl = ufshcd_ioctl,
7415#endif
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307416 .this_id = -1,
7417 .sg_tablesize = SG_ALL,
7418 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7419 .can_queue = UFSHCD_CAN_QUEUE,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007420 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01007421 .track_queue_depth = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307422};
7423
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007424static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7425 int ua)
7426{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08007427 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007428
Bjorn Andersson7b16a072015-02-11 19:35:28 -08007429 if (!vreg)
7430 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007431
Bjorn Andersson7b16a072015-02-11 19:35:28 -08007432 ret = regulator_set_load(vreg->reg, ua);
7433 if (ret < 0) {
7434 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7435 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007436 }
7437
7438 return ret;
7439}
7440
7441static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7442 struct ufs_vreg *vreg)
7443{
Yaniv Gardi60f01872016-03-10 17:37:11 +02007444 if (!vreg)
7445 return 0;
7446 else if (vreg->unused)
7447 return 0;
7448 else
7449 return ufshcd_config_vreg_load(hba->dev, vreg,
7450 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007451}
7452
7453static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7454 struct ufs_vreg *vreg)
7455{
Yaniv Gardi60f01872016-03-10 17:37:11 +02007456 if (!vreg)
7457 return 0;
7458 else if (vreg->unused)
7459 return 0;
7460 else
7461 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007462}
7463
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007464static int ufshcd_config_vreg(struct device *dev,
7465 struct ufs_vreg *vreg, bool on)
7466{
7467 int ret = 0;
7468 struct regulator *reg = vreg->reg;
7469 const char *name = vreg->name;
7470 int min_uV, uA_load;
7471
7472 BUG_ON(!vreg);
7473
7474 if (regulator_count_voltages(reg) > 0) {
7475 min_uV = on ? vreg->min_uV : 0;
7476 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7477 if (ret) {
7478 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
7479 __func__, name, ret);
7480 goto out;
7481 }
7482
7483 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007484 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7485 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007486 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007487 }
7488out:
7489 return ret;
7490}
7491
7492static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7493{
7494 int ret = 0;
7495
Yaniv Gardi60f01872016-03-10 17:37:11 +02007496 if (!vreg)
7497 goto out;
7498 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007499 goto out;
7500
7501 ret = ufshcd_config_vreg(dev, vreg, true);
7502 if (!ret)
7503 ret = regulator_enable(vreg->reg);
7504
7505 if (!ret)
7506 vreg->enabled = true;
7507 else
7508 dev_err(dev, "%s: %s enable failed, err=%d\n",
7509 __func__, vreg->name, ret);
7510out:
7511 return ret;
7512}
7513
7514static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7515{
7516 int ret = 0;
7517
Yaniv Gardi60f01872016-03-10 17:37:11 +02007518 if (!vreg)
7519 goto out;
7520 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007521 goto out;
7522
7523 ret = regulator_disable(vreg->reg);
7524
7525 if (!ret) {
7526 /* ignore errors on applying disable config */
7527 ufshcd_config_vreg(dev, vreg, false);
7528 vreg->enabled = false;
7529 } else {
7530 dev_err(dev, "%s: %s disable failed, err=%d\n",
7531 __func__, vreg->name, ret);
7532 }
7533out:
7534 return ret;
7535}
7536
7537static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7538{
7539 int ret = 0;
7540 struct device *dev = hba->dev;
7541 struct ufs_vreg_info *info = &hba->vreg_info;
7542
7543 if (!info)
7544 goto out;
7545
7546 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7547 if (ret)
7548 goto out;
7549
7550 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7551 if (ret)
7552 goto out;
7553
7554 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7555 if (ret)
7556 goto out;
7557
7558out:
7559 if (ret) {
7560 ufshcd_toggle_vreg(dev, info->vccq2, false);
7561 ufshcd_toggle_vreg(dev, info->vccq, false);
7562 ufshcd_toggle_vreg(dev, info->vcc, false);
7563 }
7564 return ret;
7565}
7566
Raviv Shvili6a771a62014-09-25 15:32:24 +03007567static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7568{
7569 struct ufs_vreg_info *info = &hba->vreg_info;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007570 int ret = 0;
Raviv Shvili6a771a62014-09-25 15:32:24 +03007571
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007572 if (info->vdd_hba) {
7573 ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
Raviv Shvili6a771a62014-09-25 15:32:24 +03007574
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007575 if (!ret)
7576 ufshcd_vops_update_sec_cfg(hba, on);
7577 }
7578
7579 return ret;
Raviv Shvili6a771a62014-09-25 15:32:24 +03007580}
7581
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007582static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7583{
7584 int ret = 0;
7585
7586 if (!vreg)
7587 goto out;
7588
7589 vreg->reg = devm_regulator_get(dev, vreg->name);
7590 if (IS_ERR(vreg->reg)) {
7591 ret = PTR_ERR(vreg->reg);
7592 dev_err(dev, "%s: %s get failed, err=%d\n",
7593 __func__, vreg->name, ret);
7594 }
7595out:
7596 return ret;
7597}
7598
7599static int ufshcd_init_vreg(struct ufs_hba *hba)
7600{
7601 int ret = 0;
7602 struct device *dev = hba->dev;
7603 struct ufs_vreg_info *info = &hba->vreg_info;
7604
7605 if (!info)
7606 goto out;
7607
7608 ret = ufshcd_get_vreg(dev, info->vcc);
7609 if (ret)
7610 goto out;
7611
7612 ret = ufshcd_get_vreg(dev, info->vccq);
7613 if (ret)
7614 goto out;
7615
7616 ret = ufshcd_get_vreg(dev, info->vccq2);
7617out:
7618 return ret;
7619}
7620
Raviv Shvili6a771a62014-09-25 15:32:24 +03007621static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7622{
7623 struct ufs_vreg_info *info = &hba->vreg_info;
7624
7625 if (info)
7626 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7627
7628 return 0;
7629}
7630
Yaniv Gardi60f01872016-03-10 17:37:11 +02007631static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
7632{
7633 int ret = 0;
7634 struct ufs_vreg_info *info = &hba->vreg_info;
7635
7636 if (!info)
7637 goto out;
7638 else if (!info->vccq)
7639 goto out;
7640
7641 if (unused) {
7642 /* shut off the rail here */
7643 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7644 /*
7645 * Mark this rail as no longer used, so it doesn't get enabled
7646 * later by mistake
7647 */
7648 if (!ret)
7649 info->vccq->unused = true;
7650 } else {
7651 /*
7652 * rail should have been already enabled hence just make sure
7653 * that unused flag is cleared.
7654 */
7655 info->vccq->unused = false;
7656 }
7657out:
7658 return ret;
7659}
7660
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007661static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7662 bool skip_ref_clk, bool is_gating_context)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007663{
7664 int ret = 0;
7665 struct ufs_clk_info *clki;
7666 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007667 unsigned long flags;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007668 ktime_t start = ktime_get();
7669 bool clk_state_changed = false;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007670
7671 if (!head || list_empty(head))
7672 goto out;
7673
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007674 /*
7675 * vendor specific setup_clocks ops may depend on clocks managed by
7676	 * this standard driver, hence call the vendor specific setup_clocks
7677 * before disabling the clocks managed here.
7678 */
7679 if (!on) {
7680 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
7681 if (ret)
7682 return ret;
7683 }
7684
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007685 list_for_each_entry(clki, head, list) {
7686 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007687 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7688 continue;
7689
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007690 clk_state_changed = on ^ clki->enabled;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007691 if (on && !clki->enabled) {
7692 ret = clk_prepare_enable(clki->clk);
7693 if (ret) {
7694 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7695 __func__, clki->name, ret);
7696 goto out;
7697 }
7698 } else if (!on && clki->enabled) {
7699 clk_disable_unprepare(clki->clk);
7700 }
7701 clki->enabled = on;
7702 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7703 clki->name, on ? "en" : "dis");
7704 }
7705 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007706
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007707 /*
7708 * vendor specific setup_clocks ops may depend on clocks managed by
7709	 * this standard driver, hence call the vendor specific setup_clocks
7710 * after enabling the clocks managed here.
7711 */
7712 if (on)
7713 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
7714
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007715out:
7716 if (ret) {
7717 list_for_each_entry(clki, head, list) {
7718 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7719 clk_disable_unprepare(clki->clk);
7720 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007721 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007722 spin_lock_irqsave(hba->host->host_lock, flags);
7723 hba->clk_gating.state = CLKS_ON;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007724 trace_ufshcd_clk_gating(dev_name(hba->dev),
7725 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007726 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007727 /* restore the secure configuration as clocks are enabled */
7728 ufshcd_vops_update_sec_cfg(hba, true);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007729 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007730
7731 if (clk_state_changed)
7732 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7733 (on ? "on" : "off"),
7734 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007735 return ret;
7736}
7737
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007738static int ufshcd_enable_clocks(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007739{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007740 return ufshcd_setup_clocks(hba, true, false, false);
7741}
7742
7743static int ufshcd_disable_clocks(struct ufs_hba *hba,
7744 bool is_gating_context)
7745{
7746 return ufshcd_setup_clocks(hba, false, false, is_gating_context);
7747}
7748
7749static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
7750 bool is_gating_context)
7751{
7752 return ufshcd_setup_clocks(hba, false, true, is_gating_context);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007753}
7754
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007755static int ufshcd_init_clocks(struct ufs_hba *hba)
7756{
7757 int ret = 0;
7758 struct ufs_clk_info *clki;
7759 struct device *dev = hba->dev;
7760 struct list_head *head = &hba->clk_list_head;
7761
7762 if (!head || list_empty(head))
7763 goto out;
7764
7765 list_for_each_entry(clki, head, list) {
7766 if (!clki->name)
7767 continue;
7768
7769 clki->clk = devm_clk_get(dev, clki->name);
7770 if (IS_ERR(clki->clk)) {
7771 ret = PTR_ERR(clki->clk);
7772 dev_err(dev, "%s: %s clk get failed, %d\n",
7773 __func__, clki->name, ret);
7774 goto out;
7775 }
7776
7777 if (clki->max_freq) {
7778 ret = clk_set_rate(clki->clk, clki->max_freq);
7779 if (ret) {
7780 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7781 __func__, clki->name,
7782 clki->max_freq, ret);
7783 goto out;
7784 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03007785 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007786 }
7787 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7788 clki->name, clk_get_rate(clki->clk));
7789 }
7790out:
7791 return ret;
7792}
7793
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007794static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7795{
7796 int err = 0;
7797
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007798 if (!hba->var || !hba->var->vops)
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007799 goto out;
7800
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007801 err = ufshcd_vops_init(hba);
7802 if (err)
7803 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007804
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007805 err = ufshcd_vops_setup_regulators(hba, true);
7806 if (err)
7807 goto out_exit;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007808
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007809 goto out;
7810
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007811out_exit:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007812 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007813out:
7814 if (err)
7815 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007816 __func__, ufshcd_get_var_name(hba), err);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007817 return err;
7818}
7819
7820static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7821{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007822 if (!hba->var || !hba->var->vops)
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007823 return;
7824
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007825 ufshcd_vops_setup_regulators(hba, false);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007826
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02007827 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03007828}
7829
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007830static int ufshcd_hba_init(struct ufs_hba *hba)
7831{
7832 int err;
7833
Raviv Shvili6a771a62014-09-25 15:32:24 +03007834 /*
7835 * Handle host controller power separately from the UFS device power
7836	 * rails as it helps in controlling the UFS host controller power
7837	 * collapse easily, which is different from UFS device power collapse.
7838 * Also, enable the host controller power before we go ahead with rest
7839 * of the initialization here.
7840 */
7841 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007842 if (err)
7843 goto out;
7844
Raviv Shvili6a771a62014-09-25 15:32:24 +03007845 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007846 if (err)
7847 goto out;
7848
Raviv Shvili6a771a62014-09-25 15:32:24 +03007849 err = ufshcd_init_clocks(hba);
7850 if (err)
7851 goto out_disable_hba_vreg;
7852
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007853 err = ufshcd_enable_clocks(hba);
Raviv Shvili6a771a62014-09-25 15:32:24 +03007854 if (err)
7855 goto out_disable_hba_vreg;
7856
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007857 err = ufshcd_init_vreg(hba);
7858 if (err)
7859 goto out_disable_clks;
7860
7861 err = ufshcd_setup_vreg(hba, true);
7862 if (err)
7863 goto out_disable_clks;
7864
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007865 err = ufshcd_variant_hba_init(hba);
7866 if (err)
7867 goto out_disable_vreg;
7868
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007869 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007870 goto out;
7871
7872out_disable_vreg:
7873 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03007874out_disable_clks:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007875 ufshcd_disable_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03007876out_disable_hba_vreg:
7877 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007878out:
7879 return err;
7880}
7881
7882static void ufshcd_hba_exit(struct ufs_hba *hba)
7883{
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007884 if (hba->is_powered) {
7885 ufshcd_variant_hba_exit(hba);
7886 ufshcd_setup_vreg(hba, false);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007887 if (ufshcd_is_clkscaling_supported(hba)) {
7888 ufshcd_suspend_clkscaling(hba);
7889 destroy_workqueue(hba->clk_scaling.workq);
7890 }
7891 ufshcd_disable_clocks(hba, false);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007892 ufshcd_setup_hba_vreg(hba, false);
7893 hba->is_powered = false;
7894 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03007895}
7896
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007897static int
7898ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307899{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007900 unsigned char cmd[6] = {REQUEST_SENSE,
7901 0,
7902 0,
7903 0,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007904 UFSHCD_REQ_SENSE_SIZE,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007905 0};
7906 char *buffer;
7907 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307908
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007909 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007910 if (!buffer) {
7911 ret = -ENOMEM;
7912 goto out;
7913 }
7914
7915 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007916 UFSHCD_REQ_SENSE_SIZE, NULL,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007917 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
7918 if (ret)
7919 pr_err("%s: failed with err %d\n", __func__, ret);
7920
7921 kfree(buffer);
7922out:
7923 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307924}
7925
7926/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007927 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7928 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307929 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007930 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307931 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007932 * Returns 0 if requested power mode is set successfully
7933 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307934 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007935static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7936 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307937{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007938 unsigned char cmd[6] = { START_STOP };
7939 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007940 struct scsi_device *sdp;
7941 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007942 int ret;
7943
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007944 spin_lock_irqsave(hba->host->host_lock, flags);
7945 sdp = hba->sdev_ufs_device;
7946 if (sdp) {
7947 ret = scsi_device_get(sdp);
7948 if (!ret && !scsi_device_online(sdp)) {
7949 ret = -ENODEV;
7950 scsi_device_put(sdp);
7951 }
7952 } else {
7953 ret = -ENODEV;
7954 }
7955 spin_unlock_irqrestore(hba->host->host_lock, flags);
7956
7957 if (ret)
7958 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007959
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307960 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007961 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7962 * handling, which would wait for host to be resumed. Since we know
7963 * we are functional while we are here, skip host resume in error
7964 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307965 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007966 hba->host->eh_noresume = 1;
Subhash Jadavani23df2312016-12-16 12:54:30 -08007967 if (!hba->dev_info.is_ufs_dev_wlun_ua_cleared) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007968 ret = ufshcd_send_request_sense(hba, sdp);
7969 if (ret)
7970 goto out;
7971 /* Unit attention condition is cleared now */
Subhash Jadavani23df2312016-12-16 12:54:30 -08007972 hba->dev_info.is_ufs_dev_wlun_ua_cleared = 1;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007973 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307974
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007975 cmd[4] = pwr_mode << 4;
7976
7977 /*
7978	 * This function is generally called from the power management
7979	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
7980	 * already suspended children.
7981 */
7982 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
7983 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
7984 if (ret) {
7985 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02007986 "START_STOP failed for power mode: %d, result %x\n",
7987 pwr_mode, ret);
Hannes Reinecke21045512015-01-08 07:43:46 +01007988 if (driver_byte(ret) & DRIVER_SENSE)
7989 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007990 }
7991
7992 if (!ret)
7993 hba->curr_dev_pwr_mode = pwr_mode;
7994out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007995 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007996 hba->host->eh_noresume = 0;
7997 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307998}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05307999
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008000static int ufshcd_link_state_transition(struct ufs_hba *hba,
8001 enum uic_link_state req_link_state,
8002 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308003{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008004 int ret = 0;
8005
8006 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308007 return 0;
8008
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008009 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8010 ret = ufshcd_uic_hibern8_enter(hba);
8011 if (!ret)
8012 ufshcd_set_link_hibern8(hba);
8013 else
8014 goto out;
8015 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308016 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008017 * If autobkops is enabled, link can't be turned off because
8018 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308019 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008020 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8021 (!check_for_bkops || (check_for_bkops &&
8022 !hba->auto_bkops_enabled))) {
8023 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02008024 * Let's make sure that link is in low power mode, we are doing
8025		 * this currently by putting the link in Hibern8. Another way to
8026		 * put the link in low power mode is to send the DME end point
8027		 * reset to the device and then send the DME reset command to the
8028		 * local UniPro. But putting the link in Hibern8 is much faster.
8029 */
8030 ret = ufshcd_uic_hibern8_enter(hba);
8031 if (ret)
8032 goto out;
8033 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008034 * Change controller state to "reset state" which
8035 * should also put the link in off/reset state
8036 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02008037 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008038 /*
8039 * TODO: Check if we need any delay to make sure that
8040 * controller is reset
8041 */
8042 ufshcd_set_link_off(hba);
8043 }
8044
8045out:
8046 return ret;
8047}
8048
8049static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8050{
8051 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02008052 * It seems some UFS devices may keep drawing more than sleep current
8053	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
8054 * To avoid this situation, add 2ms delay before putting these UFS
8055 * rails in LPM mode.
8056 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008057 if (!ufshcd_is_link_active(hba))
Yaniv Gardib799fdf2016-03-10 17:37:18 +02008058 usleep_range(2000, 2100);
8059
8060 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008061	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
8062	 * save some power.
8063 *
8064 * If UFS device and link is in OFF state, all power supplies (VCC,
8065 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8066 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8067 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8068 *
8069 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8070 * in low power state which would save some power.
8071 */
8072 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8073 !hba->dev_info.is_lu_power_on_wp) {
8074 ufshcd_setup_vreg(hba, false);
8075 } else if (!ufshcd_is_ufs_dev_active(hba)) {
8076 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8077 if (!ufshcd_is_link_active(hba)) {
8078 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8079 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8080 }
8081 }
8082}
8083
8084static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8085{
8086 int ret = 0;
8087
8088 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8089 !hba->dev_info.is_lu_power_on_wp) {
8090 ret = ufshcd_setup_vreg(hba, true);
8091 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008092 if (!ret && !ufshcd_is_link_active(hba)) {
8093 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8094 if (ret)
8095 goto vcc_disable;
8096 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8097 if (ret)
8098 goto vccq_lpm;
8099 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008100 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008101 }
8102 goto out;
8103
8104vccq_lpm:
8105 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8106vcc_disable:
8107 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8108out:
8109 return ret;
8110}
8111
8112static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8113{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008114 if (ufshcd_is_link_off(hba) ||
8115 (ufshcd_is_link_hibern8(hba)
8116 && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008117 ufshcd_setup_hba_vreg(hba, false);
8118}
8119
8120static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8121{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008122 if (ufshcd_is_link_off(hba) ||
8123 (ufshcd_is_link_hibern8(hba)
8124 && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008125 ufshcd_setup_hba_vreg(hba, true);
8126}
8127
8128/**
8129 * ufshcd_suspend - helper function for suspend operations
8130 * @hba: per adapter instance
8131 * @pm_op: desired low power operation type
8132 *
8133 * This function will try to put the UFS device and link into low power
8134 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8135 * (System PM level).
8136 *
8137 * If this function is called during shutdown, it will make sure that
8138 * both UFS device and UFS link are powered off.
8139 *
8140 * NOTE: UFS device & link must be active before we enter in this function.
8141 *
8142 * Returns 0 for success and non-zero for failure
8143 */
8144static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8145{
8146 int ret = 0;
8147 enum ufs_pm_level pm_lvl;
8148 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8149 enum uic_link_state req_link_state;
8150
8151 hba->pm_op_in_progress = 1;
8152 if (!ufshcd_is_shutdown_pm(pm_op)) {
8153 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8154 hba->rpm_lvl : hba->spm_lvl;
8155 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8156 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8157 } else {
8158 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8159 req_link_state = UIC_LINK_OFF_STATE;
8160 }
8161
8162 /*
8163 * If we can't transition into any of the low power modes
8164 * just gate the clocks.
8165 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008166 WARN_ON(hba->hibern8_on_idle.is_enabled &&
8167 hba->hibern8_on_idle.active_reqs);
8168 ufshcd_hold_all(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008169 hba->clk_gating.is_suspended = true;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008170 hba->hibern8_on_idle.is_suspended = true;
8171
8172 if (hba->clk_scaling.is_allowed) {
8173 cancel_work_sync(&hba->clk_scaling.suspend_work);
8174 cancel_work_sync(&hba->clk_scaling.resume_work);
8175 ufshcd_suspend_clkscaling(hba);
8176 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008177
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008178 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8179 req_link_state == UIC_LINK_ACTIVE_STATE) {
8180 goto disable_clks;
8181 }
8182
8183 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8184 (req_link_state == hba->uic_link_state))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008185 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008186
8187 /* UFS device & link must be active before we enter in this function */
8188 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8189 ret = -EINVAL;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008190 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008191 }
8192
8193 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03008194 if (ufshcd_can_autobkops_during_suspend(hba)) {
8195 /*
8196 * The device is idle with no requests in the queue,
8197 * allow background operations if bkops status shows
8198 * that performance might be impacted.
8199 */
8200 ret = ufshcd_urgent_bkops(hba);
8201 if (ret)
8202 goto enable_gating;
8203 } else {
8204 /* make sure that auto bkops is disabled */
8205 ufshcd_disable_auto_bkops(hba);
8206 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008207 }
8208
8209 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
8210 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8211 !ufshcd_is_runtime_pm(pm_op))) {
8212 /* ensure that bkops is disabled */
8213 ufshcd_disable_auto_bkops(hba);
8214 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8215 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008216 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008217 }
8218
8219 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8220 if (ret)
8221 goto set_dev_active;
8222
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008223 if (ufshcd_is_link_hibern8(hba) &&
8224 ufshcd_is_hibern8_on_idle_allowed(hba))
8225 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
8226
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008227 ufshcd_vreg_set_lpm(hba);
8228
8229disable_clks:
8230 /*
8231 * Call vendor specific suspend callback. As these callbacks may access
8232 * vendor specific host controller register space call them before the
8233 * host clocks are ON.
8234 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008235 ret = ufshcd_vops_suspend(hba, pm_op);
8236 if (ret)
8237 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008238
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008239 if (!ufshcd_is_link_active(hba))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008240 ret = ufshcd_disable_clocks(hba, false);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008241 else
8242 /* If link is active, device ref_clk can't be switched off */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008243 ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
8244 if (ret)
8245 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008246
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008247 if (ufshcd_is_clkgating_allowed(hba)) {
8248 hba->clk_gating.state = CLKS_OFF;
8249 trace_ufshcd_clk_gating(dev_name(hba->dev),
8250 hba->clk_gating.state);
8251 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008252 /*
8253	 * Disable the host irq as there won't be any
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008254	 * host controller transactions expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008255 */
8256 ufshcd_disable_irq(hba);
8257 /* Put the host controller in low power mode if possible */
8258 ufshcd_hba_vreg_set_lpm(hba);
8259 goto out;
8260
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008261set_link_active:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008262 if (hba->clk_scaling.is_allowed)
8263 ufshcd_resume_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008264 ufshcd_vreg_set_hpm(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008265 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008266 ufshcd_set_link_active(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008267 } else if (ufshcd_is_link_off(hba)) {
8268 ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008269 ufshcd_host_reset_and_restore(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008270 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008271set_dev_active:
8272 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8273 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008274enable_gating:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008275 if (hba->clk_scaling.is_allowed)
8276 ufshcd_resume_clkscaling(hba);
8277 hba->hibern8_on_idle.is_suspended = false;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008278 hba->clk_gating.is_suspended = false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008279 ufshcd_release_all(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008280out:
8281 hba->pm_op_in_progress = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008282
8283 if (ret)
8284 ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
8285
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008286 return ret;
8287}
8288
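For reference, a minimal sketch (not part of the driver) showing how an rpm_lvl/spm_lvl value resolves into the device and link power states that ufshcd_suspend() above targets, using the same helpers it calls; the function name below is hypothetical.

/*
 * Illustrative only: dump the target states for a given PM level using the
 * helpers called by ufshcd_suspend() above.
 */
static void example_show_pm_lvl_targets(struct ufs_hba *hba,
					enum ufs_pm_level lvl)
{
	dev_dbg(hba->dev, "pm lvl %d -> dev_pwr_mode %d, link_state %d\n",
		lvl,
		ufs_get_pm_lvl_to_dev_pwr_mode(lvl),
		ufs_get_pm_lvl_to_link_pwr_state(lvl));
}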
8289/**
8290 * ufshcd_resume - helper function for resume operations
8291 * @hba: per adapter instance
8292 * @pm_op: runtime PM or system PM
8293 *
8294 * This function basically brings the UFS device, UniPro link and controller
8295 * to active state.
8296 *
8297 * Returns 0 for success and non-zero for failure
8298 */
8299static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8300{
8301 int ret;
8302 enum uic_link_state old_link_state;
8303
8304 hba->pm_op_in_progress = 1;
8305 old_link_state = hba->uic_link_state;
8306
8307 ufshcd_hba_vreg_set_hpm(hba);
8308 /* Make sure clocks are enabled before accessing controller */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008309 ret = ufshcd_enable_clocks(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008310 if (ret)
8311 goto out;
8312
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008313 /* enable the host irq as host controller would be active soon */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008314 ufshcd_enable_irq(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008315
8316 ret = ufshcd_vreg_set_hpm(hba);
8317 if (ret)
8318 goto disable_irq_and_vops_clks;
8319
8320 /*
8321 * Call vendor specific resume callback. As these callbacks may access
8322 * vendor specific host controller register space call them when the
8323 * host clocks are ON.
8324 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008325 ret = ufshcd_vops_resume(hba, pm_op);
8326 if (ret)
8327 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008328
8329 if (ufshcd_is_link_hibern8(hba)) {
8330 ret = ufshcd_uic_hibern8_exit(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008331 if (!ret) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008332 ufshcd_set_link_active(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008333 if (ufshcd_is_hibern8_on_idle_allowed(hba))
8334 hba->hibern8_on_idle.state = HIBERN8_EXITED;
8335 } else {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008336 goto vendor_suspend;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008337 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008338 } else if (ufshcd_is_link_off(hba)) {
8339 ret = ufshcd_host_reset_and_restore(hba);
8340 /*
8341 * ufshcd_host_reset_and_restore() should have already
8342 * set the link state as active
8343 */
8344 if (ret || !ufshcd_is_link_active(hba))
8345 goto vendor_suspend;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008346 /* mark link state as hibern8 exited */
8347 if (ufshcd_is_hibern8_on_idle_allowed(hba))
8348 hba->hibern8_on_idle.state = HIBERN8_EXITED;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008349 }
8350
8351 if (!ufshcd_is_ufs_dev_active(hba)) {
8352 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8353 if (ret)
8354 goto set_old_link_state;
8355 }
8356
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008357 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8358 ufshcd_enable_auto_bkops(hba);
8359 else
8360 /*
8361 * If BKOPs operations are urgently needed at this moment then
8362 * keep auto-bkops enabled or else disable it.
8363 */
8364 ufshcd_urgent_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008365
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008366 hba->clk_gating.is_suspended = false;
8367 hba->hibern8_on_idle.is_suspended = false;
8368
8369 if (hba->clk_scaling.is_allowed)
8370 ufshcd_resume_clkscaling(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03008371
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008372 /* Schedule clock gating in case of no access to UFS device yet */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008373 ufshcd_release_all(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008374 goto out;
8375
8376set_old_link_state:
8377 ufshcd_link_state_transition(hba, old_link_state, 0);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008378 if (ufshcd_is_link_hibern8(hba) &&
8379 ufshcd_is_hibern8_on_idle_allowed(hba))
8380 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008381vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008382 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008383disable_vreg:
8384 ufshcd_vreg_set_lpm(hba);
8385disable_irq_and_vops_clks:
8386 ufshcd_disable_irq(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008387 if (hba->clk_scaling.is_allowed)
8388 ufshcd_suspend_clkscaling(hba);
8389 ufshcd_disable_clocks(hba, false);
8390 if (ufshcd_is_clkgating_allowed(hba))
8391 hba->clk_gating.state = CLKS_OFF;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008392out:
8393 hba->pm_op_in_progress = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008394
8395 if (ret)
8396 ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
8397
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008398 return ret;
8399}
8400
8401/**
8402 * ufshcd_system_suspend - system suspend routine
8403 * @hba: per adapter instance
8405 *
8406 * Check the description of ufshcd_suspend() function for more details.
8407 *
8408 * Returns 0 for success and non-zero for failure
8409 */
8410int ufshcd_system_suspend(struct ufs_hba *hba)
8411{
8412 int ret = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008413 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008414
8415 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03008416 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008417
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008418 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8419 hba->curr_dev_pwr_mode) &&
8420 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8421 hba->uic_link_state))
8422 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008423
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008424 if (pm_runtime_suspended(hba->dev)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008425 /*
8426		 * UFS device and/or UFS link low power states during runtime
8427		 * suspend may differ from what is expected during system
8428		 * suspend. Hence runtime resume the device & link and let the
8429		 * system suspend low power states take effect.
8430		 * TODO: If resume takes a long time, we might optimize it in
8431		 * future by not resuming everything if possible.
8432 */
8433 ret = ufshcd_runtime_resume(hba);
8434 if (ret)
8435 goto out;
8436 }
8437
8438 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8439out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008440 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8441 ktime_to_us(ktime_sub(ktime_get(), start)),
8442 hba->curr_dev_pwr_mode, hba->uic_link_state);
Dolev Ravive7850602014-09-25 15:32:36 +03008443 if (!ret)
8444 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008445 return ret;
8446}
8447EXPORT_SYMBOL(ufshcd_system_suspend);
8448
8449/**
8450 * ufshcd_system_resume - system resume routine
8451 * @hba: per adapter instance
8452 *
8453 * Returns 0 for success and non-zero for failure
8454 */
8456int ufshcd_system_resume(struct ufs_hba *hba)
8457{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008458 int ret = 0;
8459 ktime_t start = ktime_get();
8460
8461 if (!hba)
8462 return -EINVAL;
8463
8464 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008465 /*
8466 * Let the runtime resume take care of resuming
8467 * if runtime suspended.
8468 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008469 goto out;
8470 else
8471 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8472out:
8473 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8474 ktime_to_us(ktime_sub(ktime_get(), start)),
8475 hba->curr_dev_pwr_mode, hba->uic_link_state);
8476 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008477}
8478EXPORT_SYMBOL(ufshcd_system_resume);
8479
8480/**
8481 * ufshcd_runtime_suspend - runtime suspend routine
8482 * @hba: per adapter instance
8483 *
8484 * Check the description of ufshcd_suspend() function for more details.
8485 *
8486 * Returns 0 for success and non-zero for failure
8487 */
8488int ufshcd_runtime_suspend(struct ufs_hba *hba)
8489{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008490 int ret = 0;
8491 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008492
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008493 if (!hba)
8494 return -EINVAL;
8495
8496 if (!hba->is_powered)
8497 goto out;
8498 else
8499 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8500out:
8501 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8502 ktime_to_us(ktime_sub(ktime_get(), start)),
8503 hba->curr_dev_pwr_mode,
8504 hba->uic_link_state);
8505 return ret;
8506
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308507}
8508EXPORT_SYMBOL(ufshcd_runtime_suspend);
8509
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008510/**
8511 * ufshcd_runtime_resume - runtime resume routine
8512 * @hba: per adapter instance
8513 *
8514 * This function basically brings the UFS device, UniPro link and controller
8515 * to active state. Following operations are done in this function:
8516 *
8517 * 1. Turn on all the controller related clocks
8518 * 2. Bring the UniPro link out of Hibernate state
8519 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8520 * to active state.
8521 * 4. If auto-bkops is enabled on the device, disable it.
8522 *
8523 * So following would be the possible power state after this function returns
8524 * successfully:
8525 * S1: UFS device in Active state with VCC rail ON
8526 * UniPro link in Active state
8527 * All the UFS/UniPro controller clocks are ON
8528 *
8529 * Returns 0 for success and non-zero for failure
8530 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308531int ufshcd_runtime_resume(struct ufs_hba *hba)
8532{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008533 int ret = 0;
8534 ktime_t start = ktime_get();
8535
8536 if (!hba)
8537 return -EINVAL;
8538
8539 if (!hba->is_powered)
8540 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008541 else
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008542 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8543out:
8544 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8545 ktime_to_us(ktime_sub(ktime_get(), start)),
8546 hba->curr_dev_pwr_mode,
8547 hba->uic_link_state);
8548 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05308549}
8550EXPORT_SYMBOL(ufshcd_runtime_resume);
8551
8552int ufshcd_runtime_idle(struct ufs_hba *hba)
8553{
8554 return 0;
8555}
8556EXPORT_SYMBOL(ufshcd_runtime_idle);
8557
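/*
 * Illustrative sketch, not part of this driver: the suspend/resume helpers
 * exported above are normally wired into a bus glue driver's dev_pm_ops
 * rather than called directly, assuming the glue layer stored the hba
 * pointer with dev_set_drvdata() at probe time (as the pltfrm/pci glue
 * drivers do). The "ufs_example_*" names are hypothetical; the resume side
 * is written the same way using ufshcd_system_resume() and
 * ufshcd_runtime_resume():
 *
 *	static int ufs_example_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int ufs_example_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops ufs_example_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ufs_example_suspend, ufs_example_resume)
 *		SET_RUNTIME_PM_OPS(ufs_example_runtime_suspend,
 *				   ufs_example_runtime_resume,
 *				   ufs_example_runtime_idle)
 *	};
 */
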
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008558static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
8559 struct device_attribute *attr,
8560 const char *buf, size_t count,
8561 bool rpm)
8562{
8563 struct ufs_hba *hba = dev_get_drvdata(dev);
8564 unsigned long flags, value;
8565
8566 if (kstrtoul(buf, 0, &value))
8567 return -EINVAL;
8568
8569 if (value >= UFS_PM_LVL_MAX)
8570 return -EINVAL;
8571
8572 spin_lock_irqsave(hba->host->host_lock, flags);
8573 if (rpm)
8574 hba->rpm_lvl = value;
8575 else
8576 hba->spm_lvl = value;
8577 ufshcd_apply_pm_quirks(hba);
8578 spin_unlock_irqrestore(hba->host->host_lock, flags);
8579 return count;
8580}
8581
8582static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
8583 struct device_attribute *attr, char *buf)
8584{
8585 struct ufs_hba *hba = dev_get_drvdata(dev);
8586 int curr_len;
8587 u8 lvl;
8588
8589 curr_len = snprintf(buf, PAGE_SIZE,
8590 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
8591 hba->rpm_lvl,
8592 ufschd_ufs_dev_pwr_mode_to_string(
8593 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
8594 ufschd_uic_link_state_to_string(
8595 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
8596
8597 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8598 "\nAll available Runtime PM levels info:\n");
8599 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
8600 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8601 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
8602 lvl,
8603 ufschd_ufs_dev_pwr_mode_to_string(
8604 ufs_pm_lvl_states[lvl].dev_state),
8605 ufschd_uic_link_state_to_string(
8606 ufs_pm_lvl_states[lvl].link_state));
8607
8608 return curr_len;
8609}
8610
8611static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
8612 struct device_attribute *attr, const char *buf, size_t count)
8613{
8614 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
8615}
8616
8617static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
8618{
8619 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
8620 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
8621 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
8622 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
8623 hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
8624 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
8625 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
8626}
8627
8628static ssize_t ufshcd_spm_lvl_show(struct device *dev,
8629 struct device_attribute *attr, char *buf)
8630{
8631 struct ufs_hba *hba = dev_get_drvdata(dev);
8632 int curr_len;
8633 u8 lvl;
8634
8635 curr_len = snprintf(buf, PAGE_SIZE,
8636 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
8637 hba->spm_lvl,
8638 ufschd_ufs_dev_pwr_mode_to_string(
8639 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
8640 ufschd_uic_link_state_to_string(
8641 ufs_pm_lvl_states[hba->spm_lvl].link_state));
8642
8643 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8644 "\nAll available System PM levels info:\n");
8645 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
8646 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
8647 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
8648 lvl,
8649 ufschd_ufs_dev_pwr_mode_to_string(
8650 ufs_pm_lvl_states[lvl].dev_state),
8651 ufschd_uic_link_state_to_string(
8652 ufs_pm_lvl_states[lvl].link_state));
8653
8654 return curr_len;
8655}
8656
8657static ssize_t ufshcd_spm_lvl_store(struct device *dev,
8658 struct device_attribute *attr, const char *buf, size_t count)
8659{
8660 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
8661}
8662
8663static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
8664{
8665 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
8666 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
8667 sysfs_attr_init(&hba->spm_lvl_attr.attr);
8668 hba->spm_lvl_attr.attr.name = "spm_lvl";
8669 hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
8670 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
8671 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
8672}
8673
8674static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
8675{
8676 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
8677 ufshcd_add_spm_lvl_sysfs_nodes(hba);
8678}
8679
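/*
 * Usage note, illustrative: the attributes registered above show up as
 * "rpm_lvl" and "spm_lvl" files under the controller's device directory in
 * sysfs (the exact path depends on how the glue driver is probed). Reading
 * a file prints the currently selected level together with the
 * dev_state/link_state mapping of every supported level; writing an integer
 * in the range [0, UFS_PM_LVL_MAX) selects a new runtime or system suspend
 * level, e.g. "echo 3 > rpm_lvl" from a shell.
 */
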
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308680/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008681 * ufshcd_shutdown - shutdown routine
8682 * @hba: per adapter instance
8683 *
8684 * This function would power off both UFS device and UFS link.
8685 *
8686 * Returns 0 always to allow force shutdown even in case of errors.
8687 */
8688int ufshcd_shutdown(struct ufs_hba *hba)
8689{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008690 /*
8691 * TODO: This function should send the power down notification to
8692 * UFS device and then power off the UFS link. But we need to be sure
8693 * that there will not be any new UFS requests issued after this.
8694 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008695 return 0;
8696}
8697EXPORT_SYMBOL(ufshcd_shutdown);
8698
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07008699/*
8700 * Values permitted 0, 1, 2.
8701 * 0 -> Disable IO latency histograms (default)
8702 * 1 -> Enable IO latency histograms
8703 * 2 -> Zero out IO latency histograms
8704 */
8705static ssize_t
8706latency_hist_store(struct device *dev, struct device_attribute *attr,
8707 const char *buf, size_t count)
8708{
8709 struct ufs_hba *hba = dev_get_drvdata(dev);
8710 long value;
8711
8712 if (kstrtol(buf, 0, &value))
8713 return -EINVAL;
8714 if (value == BLK_IO_LAT_HIST_ZERO)
8715 blk_zero_latency_hist(&hba->io_lat_s);
8716 else if (value == BLK_IO_LAT_HIST_ENABLE ||
8717 value == BLK_IO_LAT_HIST_DISABLE)
8718 hba->latency_hist_enabled = value;
8719 return count;
8720}
8721
8722ssize_t
8723latency_hist_show(struct device *dev, struct device_attribute *attr,
8724 char *buf)
8725{
8726 struct ufs_hba *hba = dev_get_drvdata(dev);
8727
8728 return blk_latency_hist_show(&hba->io_lat_s, buf);
8729}
8730
8731static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
8732 latency_hist_show, latency_hist_store);
8733
8734static void
8735ufshcd_init_latency_hist(struct ufs_hba *hba)
8736{
8737 if (device_create_file(hba->dev, &dev_attr_latency_hist))
8738 dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
8739}
8740
8741static void
8742ufshcd_exit_latency_hist(struct ufs_hba *hba)
8743{
8744 device_create_file(hba->dev, &dev_attr_latency_hist);
8745}
8746
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008747/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308748 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308749 * data structure memory
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308750 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308751 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308752void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308753{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05308754 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308755 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05308756 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02008757 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308758
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008759 ufshcd_exit_clk_gating(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008760 ufshcd_exit_hibern8_on_idle(hba);
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07008761 ufshcd_exit_latency_hist(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008762 if (ufshcd_is_clkscaling_supported(hba)) {
8763 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Kyle Yan65be4a52016-10-31 15:05:00 -07008764 if (hba->devfreq)
8765 devfreq_remove_device(hba->devfreq);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008766 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008767 ufshcd_hba_exit(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008768 ufsdbg_remove_debugfs(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308769}
8770EXPORT_SYMBOL_GPL(ufshcd_remove);
8771
8772/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02008773 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8774 * @hba: pointer to Host Bus Adapter (HBA)
8775 */
8776void ufshcd_dealloc_host(struct ufs_hba *hba)
8777{
8778 scsi_host_put(hba->host);
8779}
8780EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8781
8782/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09008783 * ufshcd_set_dma_mask - Set dma mask based on the controller
8784 * addressing capability
8785 * @hba: per adapter instance
8786 *
8787 * Returns 0 for success, non-zero for failure
8788 */
8789static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8790{
8791 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8792 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8793 return 0;
8794 }
8795 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8796}
8797
8798/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008799 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308800 * @dev: pointer to device handle
8801 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308802 * Returns 0 on success, non-zero value on failure
8803 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008804int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308805{
8806 struct Scsi_Host *host;
8807 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008808 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308809
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308810 if (!dev) {
8811 dev_err(dev,
8812 "Invalid memory reference for dev is NULL\n");
8813 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308814 goto out_error;
8815 }
8816
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308817 host = scsi_host_alloc(&ufshcd_driver_template,
8818 sizeof(struct ufs_hba));
8819 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308820 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308821 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308822 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308823 }
8824 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308825 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308826 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008827 *hba_handle = hba;
8828
8829out_error:
8830 return err;
8831}
8832EXPORT_SYMBOL(ufshcd_alloc_host);
8833
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008834/**
8835 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
8836 * @hba: per adapter instance
8837 * @scale_up: True if scaling up and false if scaling down
8838 *
8839 * Returns true if scaling is required, false otherwise.
8840 */
8841static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
8842 bool scale_up)
Sahitya Tummala856b3482014-09-25 15:32:34 +03008843{
Sahitya Tummala856b3482014-09-25 15:32:34 +03008844 struct ufs_clk_info *clki;
8845 struct list_head *head = &hba->clk_list_head;
8846
8847 if (!head || list_empty(head))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008848 return false;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02008849
Sahitya Tummala856b3482014-09-25 15:32:34 +03008850 list_for_each_entry(clki, head, list) {
8851 if (!IS_ERR_OR_NULL(clki->clk)) {
8852 if (scale_up && clki->max_freq) {
8853 if (clki->curr_freq == clki->max_freq)
8854 continue;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008855 return true;
Sahitya Tummala856b3482014-09-25 15:32:34 +03008856 } else if (!scale_up && clki->min_freq) {
8857 if (clki->curr_freq == clki->min_freq)
8858 continue;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008859 return true;
Sahitya Tummala856b3482014-09-25 15:32:34 +03008860 }
8861 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03008862 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +02008863
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008864 return false;
8865}
Yaniv Gardif06fcc72015-10-28 13:15:51 +02008866
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008867/**
8868 * ufshcd_scale_gear - scale up/down UFS gear
8869 * @hba: per adapter instance
8870 * @scale_up: True for scaling up gear and false for scaling down
8871 *
8872 * Returns 0 for success,
8873 * Returns -EBUSY if scaling can't happen at this time
8874 * Returns non-zero for any other errors
8875 */
8876static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
8877{
8878 int ret = 0;
8879 struct ufs_pa_layer_attr new_pwr_info;
8880 u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
8881
8882 BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
8883
8884 if (scale_up) {
8885 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
8886 sizeof(struct ufs_pa_layer_attr));
8887 } else {
8888 memcpy(&new_pwr_info, &hba->pwr_info,
8889 sizeof(struct ufs_pa_layer_attr));
8890
8891 if (hba->pwr_info.gear_tx > scale_down_gear
8892 || hba->pwr_info.gear_rx > scale_down_gear) {
8893 /* save the current power mode */
8894 memcpy(&hba->clk_scaling.saved_pwr_info.info,
8895 &hba->pwr_info,
8896 sizeof(struct ufs_pa_layer_attr));
8897
8898 /* scale down gear */
8899 new_pwr_info.gear_tx = scale_down_gear;
8900 new_pwr_info.gear_rx = scale_down_gear;
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08008901 if (!(hba->dev_info.quirks & UFS_DEVICE_NO_FASTAUTO)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008902 new_pwr_info.pwr_tx = FASTAUTO_MODE;
8903 new_pwr_info.pwr_rx = FASTAUTO_MODE;
8904 }
8905 }
8906 }
8907
8908 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
8909
8910 if (ret)
8911 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
8912 __func__, ret,
8913 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
8914 new_pwr_info.gear_tx, new_pwr_info.gear_rx,
8915 scale_up);
8916
Sahitya Tummala856b3482014-09-25 15:32:34 +03008917 return ret;
8918}
8919
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008920static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
8921{
8922 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
8923 int ret = 0;
8924 /*
8925 * make sure that there are no outstanding requests when
8926 * clock scaling is in progress
8927 */
8928 ufshcd_scsi_block_requests(hba);
8929 down_write(&hba->clk_scaling_lock);
8930 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
8931 ret = -EBUSY;
8932 up_write(&hba->clk_scaling_lock);
8933 ufshcd_scsi_unblock_requests(hba);
8934 }
8935
8936 return ret;
8937}
8938
8939static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
8940{
8941 up_write(&hba->clk_scaling_lock);
8942 ufshcd_scsi_unblock_requests(hba);
8943}
8944
8945/**
8946 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
8947 * @hba: per adapter instance
8948 * @scale_up: True for scaling up and false for scaling down
8949 *
8950 * Returns 0 for success,
8951 * Returns -EBUSY if scaling can't happen at this time
8952 * Returns non-zero for any other errors
8953 */
8954static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
8955{
8956 int ret = 0;
8957
8958 /* let's not get into low power until clock scaling is completed */
8959 ufshcd_hold_all(hba);
8960
8961 ret = ufshcd_clock_scaling_prepare(hba);
8962 if (ret)
8963 goto out;
8964
8965 /* scale down the gear before scaling down clocks */
8966 if (!scale_up) {
8967 ret = ufshcd_scale_gear(hba, false);
8968 if (ret)
8969 goto clk_scaling_unprepare;
8970 }
8971
8972 ret = ufshcd_scale_clks(hba, scale_up);
8973 if (ret)
8974 goto scale_up_gear;
8975
8976 /* scale up the gear after scaling up clocks */
8977 if (scale_up) {
8978 ret = ufshcd_scale_gear(hba, true);
8979 if (ret) {
8980 ufshcd_scale_clks(hba, false);
8981 goto clk_scaling_unprepare;
8982 }
8983 }
8984
8985 if (!ret) {
8986 hba->clk_scaling.is_scaled_up = scale_up;
8987 if (scale_up)
8988 hba->clk_gating.delay_ms =
8989 hba->clk_gating.delay_ms_perf;
8990 else
8991 hba->clk_gating.delay_ms =
8992 hba->clk_gating.delay_ms_pwr_save;
8993 }
8994
8995 goto clk_scaling_unprepare;
8996
8997scale_up_gear:
8998 if (!scale_up)
8999 ufshcd_scale_gear(hba, true);
9000clk_scaling_unprepare:
9001 ufshcd_clock_scaling_unprepare(hba);
9002out:
9003 ufshcd_release_all(hba);
9004 return ret;
9005}
9006
9007static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9008{
9009 unsigned long flags;
9010
9011 devfreq_suspend_device(hba->devfreq);
9012 spin_lock_irqsave(hba->host->host_lock, flags);
9013 hba->clk_scaling.window_start_t = 0;
9014 spin_unlock_irqrestore(hba->host->host_lock, flags);
9015}
9016
9017static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
9018{
9019 unsigned long flags;
9020 bool suspend = false;
9021
9022 if (!ufshcd_is_clkscaling_supported(hba))
9023 return;
9024
9025 spin_lock_irqsave(hba->host->host_lock, flags);
9026 if (!hba->clk_scaling.is_suspended) {
9027 suspend = true;
9028 hba->clk_scaling.is_suspended = true;
9029 }
9030 spin_unlock_irqrestore(hba->host->host_lock, flags);
9031
9032 if (suspend)
9033 __ufshcd_suspend_clkscaling(hba);
9034}
9035
9036static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
9037{
9038 unsigned long flags;
9039 bool resume = false;
9040
9041 if (!ufshcd_is_clkscaling_supported(hba))
9042 return;
9043
9044 spin_lock_irqsave(hba->host->host_lock, flags);
9045 if (hba->clk_scaling.is_suspended) {
9046 resume = true;
9047 hba->clk_scaling.is_suspended = false;
9048 }
9049 spin_unlock_irqrestore(hba->host->host_lock, flags);
9050
9051 if (resume)
9052 devfreq_resume_device(hba->devfreq);
9053}
9054
9055static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
9056 struct device_attribute *attr, char *buf)
9057{
9058 struct ufs_hba *hba = dev_get_drvdata(dev);
9059
9060 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
9061}
9062
9063static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
9064 struct device_attribute *attr, const char *buf, size_t count)
9065{
9066 struct ufs_hba *hba = dev_get_drvdata(dev);
9067 u32 value;
9068 int err;
9069
9070 if (kstrtou32(buf, 0, &value))
9071 return -EINVAL;
9072
9073 value = !!value;
9074 if (value == hba->clk_scaling.is_allowed)
9075 goto out;
9076
9077 pm_runtime_get_sync(hba->dev);
9078 ufshcd_hold(hba, false);
9079
9080 cancel_work_sync(&hba->clk_scaling.suspend_work);
9081 cancel_work_sync(&hba->clk_scaling.resume_work);
9082
9083 hba->clk_scaling.is_allowed = value;
9084
9085 if (value) {
9086 ufshcd_resume_clkscaling(hba);
9087 } else {
9088 ufshcd_suspend_clkscaling(hba);
9089 err = ufshcd_devfreq_scale(hba, true);
9090 if (err)
9091 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
9092 __func__, err);
9093 }
9094
9095 ufshcd_release(hba, false);
9096 pm_runtime_put_sync(hba->dev);
9097out:
9098 return count;
9099}
9100
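/*
 * Usage note, illustrative: "clkscale_enable" (registered by
 * ufshcd_clkscaling_init_sysfs() below) reads back
 * hba->clk_scaling.is_allowed and accepts 0 or 1. Writing 0 suspends
 * devfreq and forces a final scale-up so the link keeps running at full
 * speed; writing 1 re-enables automatic scaling. For example:
 * "echo 0 > clkscale_enable" pins maximum performance.
 */
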
9101static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
9102{
9103 struct ufs_hba *hba = container_of(work, struct ufs_hba,
9104 clk_scaling.suspend_work);
9105 unsigned long irq_flags;
9106
9107 spin_lock_irqsave(hba->host->host_lock, irq_flags);
9108 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
9109 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9110 return;
9111 }
9112 hba->clk_scaling.is_suspended = true;
9113 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9114
9115 __ufshcd_suspend_clkscaling(hba);
9116}
9117
9118static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
9119{
9120 struct ufs_hba *hba = container_of(work, struct ufs_hba,
9121 clk_scaling.resume_work);
9122 unsigned long irq_flags;
9123
9124 spin_lock_irqsave(hba->host->host_lock, irq_flags);
9125 if (!hba->clk_scaling.is_suspended) {
9126 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9127 return;
9128 }
9129 hba->clk_scaling.is_suspended = false;
9130 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9131
9132 devfreq_resume_device(hba->devfreq);
9133}
9134
Sahitya Tummala856b3482014-09-25 15:32:34 +03009135static int ufshcd_devfreq_target(struct device *dev,
9136 unsigned long *freq, u32 flags)
9137{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009138 int ret = 0;
Sahitya Tummala856b3482014-09-25 15:32:34 +03009139 struct ufs_hba *hba = dev_get_drvdata(dev);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009140 unsigned long irq_flags;
9141 ktime_t start;
9142 bool scale_up, sched_clk_scaling_suspend_work = false;
Sahitya Tummala856b3482014-09-25 15:32:34 +03009143
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009144 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03009145 return -EINVAL;
9146
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009147 if ((*freq > 0) && (*freq < UINT_MAX)) {
9148 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
9149 return -EINVAL;
9150 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03009151
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009152 spin_lock_irqsave(hba->host->host_lock, irq_flags);
9153 if (ufshcd_eh_in_progress(hba)) {
9154 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9155 return 0;
9156 }
9157
9158 if (!hba->clk_scaling.active_reqs)
9159 sched_clk_scaling_suspend_work = true;
9160
9161 scale_up = (*freq == UINT_MAX) ? true : false;
9162 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
9163 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9164 ret = 0;
9165 goto out; /* no state change required */
9166 }
9167 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
9168
9169 start = ktime_get();
9170 ret = ufshcd_devfreq_scale(hba, scale_up);
9171 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
9172 (scale_up ? "up" : "down"),
9173 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9174
9175out:
9176 if (sched_clk_scaling_suspend_work)
9177 queue_work(hba->clk_scaling.workq,
9178 &hba->clk_scaling.suspend_work);
9179
9180 return ret;
Sahitya Tummala856b3482014-09-25 15:32:34 +03009181}
9182
9183static int ufshcd_devfreq_get_dev_status(struct device *dev,
9184 struct devfreq_dev_status *stat)
9185{
9186 struct ufs_hba *hba = dev_get_drvdata(dev);
9187 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
9188 unsigned long flags;
9189
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009190 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03009191 return -EINVAL;
9192
9193 memset(stat, 0, sizeof(*stat));
9194
9195 spin_lock_irqsave(hba->host->host_lock, flags);
9196 if (!scaling->window_start_t)
9197 goto start_window;
9198
9199 if (scaling->is_busy_started)
9200 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
9201 scaling->busy_start_t));
9202
9203 stat->total_time = jiffies_to_usecs((long)jiffies -
9204 (long)scaling->window_start_t);
9205 stat->busy_time = scaling->tot_busy_t;
9206start_window:
9207 scaling->window_start_t = jiffies;
9208 scaling->tot_busy_t = 0;
9209
9210 if (hba->outstanding_reqs) {
9211 scaling->busy_start_t = ktime_get();
9212 scaling->is_busy_started = true;
9213 } else {
9214 scaling->busy_start_t = ktime_set(0, 0);
9215 scaling->is_busy_started = false;
9216 }
9217 spin_unlock_irqrestore(hba->host->host_lock, flags);
9218 return 0;
9219}
9220
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009221#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
9222static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
9223 .upthreshold = 35,
9224 .downdifferential = 30,
9225 .simple_scaling = 1,
9226};
9227
9228static void *gov_data = &ufshcd_ondemand_data;
9229#else
9230static void *gov_data;
9231#endif
9232
Sahitya Tummala856b3482014-09-25 15:32:34 +03009233static struct devfreq_dev_profile ufs_devfreq_profile = {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009234 .polling_ms = 40,
Sahitya Tummala856b3482014-09-25 15:32:34 +03009235 .target = ufshcd_devfreq_target,
9236 .get_dev_status = ufshcd_devfreq_get_dev_status,
9237};
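
/*
 * With this profile devfreq polls ufshcd_devfreq_get_dev_status() every
 * 40 ms and asks ufshcd_devfreq_target() for a new frequency. The target
 * callback only understands the two extremes: 0 requests a scale-down and
 * UINT_MAX a scale-up; any intermediate value is rejected as invalid. When
 * the simple_ondemand governor is available, the ufshcd_ondemand_data
 * thresholds above steer when it scales up or down based on the
 * busy/total time ratio reported by get_dev_status.
 */
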
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009238static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
9239{
9240 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
9241 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
9242 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
9243 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
9244 hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
9245 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
9246 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
9247}
Sahitya Tummala856b3482014-09-25 15:32:34 +03009248
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009249static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
9250{
9251 struct device *dev = hba->dev;
9252 int ret;
9253
9254 ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
9255 &hba->lanes_per_direction);
9256 if (ret) {
9257 dev_dbg(hba->dev,
9258 "%s: failed to read lanes-per-direction, ret=%d\n",
9259 __func__, ret);
9260 hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
9261 }
9262}
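
/*
 * Illustrative device tree fragment (the node name and compatible string
 * below are hypothetical, only the "lanes-per-direction" property name is
 * the one parsed above):
 *
 *	ufshc@0 {
 *		compatible = "vendor,ufshc-example";
 *		lanes-per-direction = <1>;
 *	};
 *
 * If the property is absent the driver falls back to
 * UFSHCD_DEFAULT_LANES_PER_DIRECTION.
 */
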
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009263/**
9264 * ufshcd_init - Driver initialization routine
9265 * @hba: per-adapter instance
9266 * @mmio_base: base register address
9267 * @irq: Interrupt line of device
9268 * Returns 0 on success, non-zero value on failure
9269 */
9270int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9271{
9272 int err;
9273 struct Scsi_Host *host = hba->host;
9274 struct device *dev = hba->dev;
9275
9276 if (!mmio_base) {
9277 dev_err(hba->dev,
9278 "Invalid memory reference for mmio_base is NULL\n");
9279 err = -ENODEV;
9280 goto out_error;
9281 }
9282
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309283 hba->mmio_base = mmio_base;
9284 hba->irq = irq;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309285
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009286 ufshcd_init_lanes_per_dir(hba);
9287
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03009288 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009289 if (err)
9290 goto out_error;
9291
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309292 /* Read capabilities registers */
9293 ufshcd_hba_capabilities(hba);
9294
9295 /* Get UFS version supported by the controller */
9296 hba->ufs_version = ufshcd_get_ufs_version(hba);
9297
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009298 /* print error message if ufs_version is not valid */
9299 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
9300 (hba->ufs_version != UFSHCI_VERSION_11) &&
9301 (hba->ufs_version != UFSHCI_VERSION_20) &&
9302 (hba->ufs_version != UFSHCI_VERSION_21))
9303 dev_err(hba->dev, "invalid UFS version 0x%x\n",
9304 hba->ufs_version);
9305
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05309306 /* Get Interrupt bit mask per version */
9307 hba->intr_mask = ufshcd_get_intr_mask(hba);
9308
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009309 /* Enable debug prints */
9310 hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
9311
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09009312 err = ufshcd_set_dma_mask(hba);
9313 if (err) {
9314 dev_err(hba->dev, "set dma mask failed\n");
9315 goto out_disable;
9316 }
9317
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309318 /* Allocate memory for host memory space */
9319 err = ufshcd_memory_alloc(hba);
9320 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309321 dev_err(hba->dev, "Memory allocation failed\n");
9322 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309323 }
9324
9325 /* Configure LRB */
9326 ufshcd_host_memory_configure(hba);
9327
9328 host->can_queue = hba->nutrs;
9329 host->cmd_per_lun = hba->nutrs;
9330 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03009331 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309332 host->max_channel = UFSHCD_MAX_CHANNEL;
9333 host->unique_id = host->host_no;
9334 host->max_cmd_len = MAX_CDB_SIZE;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009335 host->set_dbd_for_caching = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309336
Dolev Raviv7eb584d2014-09-25 15:32:31 +03009337 hba->max_pwr_info.is_valid = false;
9338
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309339	/* Initialize wait queue for task management */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05309340 init_waitqueue_head(&hba->tm_wq);
9341 init_waitqueue_head(&hba->tm_tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309342
9343 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05309344 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309345 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309346
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309347 /* Initialize UIC command mutex */
9348 mutex_init(&hba->uic_cmd_mutex);
9349
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05309350 /* Initialize mutex for device management commands */
9351 mutex_init(&hba->dev_cmd.lock);
9352
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009353 init_rwsem(&hba->clk_scaling_lock);
9354
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05309355 /* Initialize device management tag acquire wait queue */
9356 init_waitqueue_head(&hba->dev_cmd.tag_wq);
9357
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009358 ufshcd_init_clk_gating(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009359 ufshcd_init_hibern8_on_idle(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +02009360
9361 /*
9362 * In order to avoid any spurious interrupt immediately after
9363 * registering UFS controller interrupt handler, clear any pending UFS
9364 * interrupt status and disable all the UFS interrupts.
9365 */
9366 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9367 REG_INTERRUPT_STATUS);
9368 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9369 /*
9370 * Make sure that UFS interrupts are disabled and any pending interrupt
9371 * status is cleared before registering UFS interrupt handler.
9372 */
9373 mb();
9374
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309375 /* IRQ registration */
Seungwon Jeon2953f852013-06-27 13:31:54 +09009376 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309377 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309378 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009379 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009380 } else {
9381 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309382 }
9383
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309384 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309385 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309386 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009387 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309388 }
9389
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309390 /* Host controller enable */
9391 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309392 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309393 dev_err(hba->dev, "Host controller enable failed\n");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009394 ufshcd_print_host_regs(hba);
9395 ufshcd_print_host_state(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309396 goto out_remove_scsi_host;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309397 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309398
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009399 if (ufshcd_is_clkscaling_supported(hba)) {
9400 char wq_name[sizeof("ufs_clkscaling_00")];
9401
Sahitya Tummala856b3482014-09-25 15:32:34 +03009402 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009403 "simple_ondemand", gov_data);
Sahitya Tummala856b3482014-09-25 15:32:34 +03009404 if (IS_ERR(hba->devfreq)) {
9405 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
9406 PTR_ERR(hba->devfreq));
Wei Yongjun73811c92016-09-28 14:49:42 +00009407 err = PTR_ERR(hba->devfreq);
Sahitya Tummala856b3482014-09-25 15:32:34 +03009408 goto out_remove_scsi_host;
9409 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009410 hba->clk_scaling.is_suspended = false;
9411
9412 INIT_WORK(&hba->clk_scaling.suspend_work,
9413 ufshcd_clk_scaling_suspend_work);
9414 INIT_WORK(&hba->clk_scaling.resume_work,
9415 ufshcd_clk_scaling_resume_work);
9416
9417 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
9418 host->host_no);
9419 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
9420
Sahitya Tummala856b3482014-09-25 15:32:34 +03009421 /* Suspend devfreq until the UFS device is detected */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009422 ufshcd_suspend_clkscaling(hba);
9423 ufshcd_clkscaling_init_sysfs(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03009424 }
9425
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009426 /*
9427	 * If rpm_lvl and spm_lvl are not already set to valid levels,
9428 * set the default power management level for UFS runtime and system
9429 * suspend. Default power saving mode selected is keeping UFS link in
9430 * Hibern8 state and UFS device in sleep.
9431 */
9432 if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
9433 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9434 UFS_SLEEP_PWR_MODE,
9435 UIC_LINK_HIBERN8_STATE);
9436 if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
9437 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9438 UFS_SLEEP_PWR_MODE,
9439 UIC_LINK_HIBERN8_STATE);
9440
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05309441 /* Hold auto suspend until async scan completes */
9442 pm_runtime_get_sync(dev);
9443
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07009444 ufshcd_init_latency_hist(hba);
9445
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009446 /*
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009447	 * We are assuming that the device wasn't put in sleep/power-down
9448	 * state during the boot stage, before the kernel took over.
9449 * This assumption helps avoid doing link startup twice during
9450 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009451 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009452 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009453
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05309454 async_schedule(ufshcd_async_scan, hba);
9455
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009456 ufsdbg_add_debugfs(hba);
9457
9458 ufshcd_add_sysfs_nodes(hba);
9459
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309460 return 0;
9461
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309462out_remove_scsi_host:
9463 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009464exit_gating:
9465 ufshcd_exit_clk_gating(hba);
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07009466 ufshcd_exit_latency_hist(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309467out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009468 hba->is_irq_enabled = false;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03009469 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309470out_error:
9471 return err;
9472}
9473EXPORT_SYMBOL_GPL(ufshcd_init);
9474
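/*
 * Illustrative probe flow for a hypothetical bus glue driver: pair
 * ufshcd_alloc_host() with ufshcd_init(), and undo with ufshcd_remove()
 * plus ufshcd_dealloc_host() on removal. Roughly:
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(dev, &hba);
 *	if (err)
 *		return err;
 *	err = ufshcd_init(hba, mmio_base, irq);
 *	if (err) {
 *		ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 *	platform_set_drvdata(pdev, hba);
 *	return 0;
 */
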
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309475MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
9476MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +05309477MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309478MODULE_LICENSE("GPL");
9479MODULE_VERSION(UFSHCD_DRIVER_VERSION);