Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301/*
Vinayak Holikattie0eca632013-02-25 21:44:33 +05302 * Universal Flash Storage Host controller driver Core
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305 * Copyright (C) 2011-2013 Samsung India Software Operations
Stephen Boyd9bc70c32017-03-01 16:58:38 -08006 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307 *
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053011 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053016 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053018 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053024 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
34 * this program.
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +030035 *
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053038 */
39
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053040#include <linux/async.h>
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070041#include <scsi/ufs/ioctl.h>
Sahitya Tummala856b3482014-09-25 15:32:34 +030042#include <linux/devfreq.h>
Yaniv Gardib573d482016-03-10 17:37:09 +020043#include <linux/nls.h>
Yaniv Gardi54b879b2016-03-10 17:37:05 +020044#include <linux/of.h>
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -070045#include <linux/blkdev.h>
Vinayak Holikattie0eca632013-02-25 21:44:33 +053046#include "ufshcd.h"
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070047#include "ufshci.h"
Yaniv Gardic58ab7a2016-03-10 17:37:10 +020048#include "ufs_quirks.h"
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070049#include "ufs-debugfs.h"
Subhash Jadavani9c807702017-04-01 00:35:51 -070050#include "ufs-qcom.h"
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070051
52#define CREATE_TRACE_POINTS
53#include <trace/events/ufs.h>
54
55#ifdef CONFIG_DEBUG_FS
56
57static int ufshcd_tag_req_type(struct request *rq)
58{
59 int rq_type = TS_WRITE;
60
61 if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
62 rq_type = TS_NOT_SUPPORTED;
63 else if (rq->cmd_flags & REQ_PREFLUSH)
64 rq_type = TS_FLUSH;
65 else if (rq_data_dir(rq) == READ)
66 rq_type = (rq->cmd_flags & REQ_URGENT) ?
67 TS_URGENT_READ : TS_READ;
68 else if (rq->cmd_flags & REQ_URGENT)
69 rq_type = TS_URGENT_WRITE;
70
71 return rq_type;
72}
73
74static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
75{
76 ufsdbg_set_err_state(hba);
77 if (type < UFS_ERR_MAX)
78 hba->ufs_stats.err_stats[type]++;
79}
80
81static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
82{
83 struct request *rq =
84 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
85 u64 **tag_stats = hba->ufs_stats.tag_stats;
86 int rq_type;
87
88 if (!hba->ufs_stats.enabled)
89 return;
90
91 tag_stats[tag][TS_TAG]++;
92 if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
93 return;
94
95 WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
96 rq_type = ufshcd_tag_req_type(rq);
97 if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
98 tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
99}
100
101static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
102 struct scsi_cmnd *cmd)
103{
104 struct request *rq = cmd ? cmd->request : NULL;
105
106 if (rq && rq->cmd_type & REQ_TYPE_FS)
107 hba->ufs_stats.q_depth--;
108}
109
110static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
111{
112 int rq_type;
113 struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
114 s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
115 lrbp->issue_time_stamp);
116
117 /* update general request statistics */
118 if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
119 hba->ufs_stats.req_stats[TS_TAG].min = delta;
120 hba->ufs_stats.req_stats[TS_TAG].count++;
121 hba->ufs_stats.req_stats[TS_TAG].sum += delta;
122 if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
123 hba->ufs_stats.req_stats[TS_TAG].max = delta;
124 if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
125 hba->ufs_stats.req_stats[TS_TAG].min = delta;
126
127 rq_type = ufshcd_tag_req_type(rq);
128 if (rq_type == TS_NOT_SUPPORTED)
129 return;
130
131 /* update request type specific statistics */
132 if (hba->ufs_stats.req_stats[rq_type].count == 0)
133 hba->ufs_stats.req_stats[rq_type].min = delta;
134 hba->ufs_stats.req_stats[rq_type].count++;
135 hba->ufs_stats.req_stats[rq_type].sum += delta;
136 if (delta > hba->ufs_stats.req_stats[rq_type].max)
137 hba->ufs_stats.req_stats[rq_type].max = delta;
138 if (delta < hba->ufs_stats.req_stats[rq_type].min)
139 hba->ufs_stats.req_stats[rq_type].min = delta;
140}
141
142static void
143ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
144{
145 if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
146 hba->ufs_stats.query_stats_arr[opcode][idn]++;
147}
148
149#else
150static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
151{
152}
153
154static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
155 struct scsi_cmnd *cmd)
156{
157}
158
159static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
160{
161}
162
163static inline
164void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
165{
166}
167
168static inline
169void ufshcd_update_query_stats(struct ufs_hba *hba,
170 enum query_opcode opcode, u8 idn)
171{
172}
173#endif
174
Asutosh Das3923c232017-09-15 16:14:26 +0530175#define PWR_INFO_MASK 0xF
176#define PWR_RX_OFFSET 4
177
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700178#define UFSHCD_REQ_SENSE_SIZE 18
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530179
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530180#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
181 UTP_TASK_REQ_COMPL |\
182 UFSHCD_ERROR_MASK)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +0530183/* UIC command timeout, unit: ms */
184#define UIC_CMD_TIMEOUT 500
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530185
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530186/* NOP OUT retries waiting for NOP IN response */
187#define NOP_OUT_RETRIES 10
188/* Timeout after 30 msecs if NOP OUT hangs without response */
189#define NOP_OUT_TIMEOUT 30 /* msecs */
190
Dolev Raviv68078d52013-07-30 00:35:58 +0530191/* Query request retries */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700192#define QUERY_REQ_RETRIES 3
Dolev Raviv68078d52013-07-30 00:35:58 +0530193/* Query request timeout */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700194#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
Dolev Raviv68078d52013-07-30 00:35:58 +0530195
Sujit Reddy Thummae2933132014-05-26 10:59:12 +0530196/* Task management command timeout */
197#define TM_CMD_TIMEOUT 100 /* msecs */
198
Yaniv Gardi64238fb2016-02-01 15:02:43 +0200199/* maximum number of retries for a general UIC command */
200#define UFS_UIC_COMMAND_RETRIES 3
201
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +0300202/* maximum number of link-startup retries */
203#define DME_LINKSTARTUP_RETRIES 3
204
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +0200205/* Maximum retries for Hibern8 enter */
206#define UIC_HIBERN8_ENTER_RETRIES 3
207
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +0300208/* maximum number of reset retries before giving up */
209#define MAX_HOST_RESET_RETRIES 5
210
Dolev Raviv68078d52013-07-30 00:35:58 +0530211/* Expose the flag value from utp_upiu_query.value */
212#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
213
Seungwon Jeon7d568652013-08-31 21:40:20 +0530214/* Interrupt aggregation default timeout, unit: 40us */
215#define INT_AGGR_DEF_TO 0x02
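/* i.e. with the 40us granularity noted above, a value of 0x02 gives an 80us timeout */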
216
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700217/* default value of auto suspend is 3 seconds */
218#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
219
220#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE 10
221#define UFSHCD_CLK_GATING_DELAY_MS_PERF 50
222
223/* IOCTL opcode for command - ufs set device read only */
224#define UFS_IOCTL_BLKROSET BLKROSET
225
226#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
227
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +0300228#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
229 ({ \
230 int _ret; \
231 if (_on) \
232 _ret = ufshcd_enable_vreg(_dev, _vreg); \
233 else \
234 _ret = ufshcd_disable_vreg(_dev, _vreg); \
235 _ret; \
236 })
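/*
 * Illustrative use of the macro above (not a call site in this driver's
 * flow): "ret = ufshcd_toggle_vreg(dev, vreg, true);" expands to a call to
 * ufshcd_enable_vreg(dev, vreg) and evaluates to its return value, while
 * passing false routes to ufshcd_disable_vreg() instead.
 */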
237
Subhash Jadavani4386e022016-12-19 13:01:56 -0800238static void ufshcd_hex_dump(struct ufs_hba *hba, const char * const str,
239 const void *buf, size_t len)
240
241{
242 /*
243	 * The device name is expected to take up ~20 characters and "str" passed
244	 * to this function is expected to be ~10 characters, so a ~30 character
245	 * string is needed to hold the concatenation of these two strings.
246 */
247 #define MAX_PREFIX_STR_SIZE 50
248 char prefix_str[MAX_PREFIX_STR_SIZE] = {0};
249
250 /* concatenate the device name and "str" */
251 snprintf(prefix_str, MAX_PREFIX_STR_SIZE, "%s %s: ",
252 dev_name(hba->dev), str);
253 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET,
254 16, 4, buf, len, false);
255}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700256
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530257enum {
258 UFSHCD_MAX_CHANNEL = 0,
259 UFSHCD_MAX_ID = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530260 UFSHCD_CMD_PER_LUN = 32,
261 UFSHCD_CAN_QUEUE = 32,
262};
263
264/* UFSHCD states */
265enum {
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530266 UFSHCD_STATE_RESET,
267 UFSHCD_STATE_ERROR,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530268 UFSHCD_STATE_OPERATIONAL,
Zang Leiganga17bddc2017-04-04 19:32:20 +0000269 UFSHCD_STATE_EH_SCHEDULED,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530270};
271
272/* UFSHCD error handling flags */
273enum {
274 UFSHCD_EH_IN_PROGRESS = (1 << 0),
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530275};
276
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530277/* UFSHCD UIC layer error flags */
278enum {
279 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +0200280 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
281 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
282 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
283 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
284 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530285};
286
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530287/* Interrupt configuration options */
288enum {
289 UFSHCD_INT_DISABLE,
290 UFSHCD_INT_ENABLE,
291 UFSHCD_INT_CLEAR,
292};
293
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700294#define DEFAULT_UFSHCD_DBG_PRINT_EN UFSHCD_DBG_PRINT_ALL
295
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530296#define ufshcd_set_eh_in_progress(h) \
297 (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
298#define ufshcd_eh_in_progress(h) \
299 (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
300#define ufshcd_clear_eh_in_progress(h) \
301 (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
302
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300303#define ufshcd_set_ufs_dev_active(h) \
304 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
305#define ufshcd_set_ufs_dev_sleep(h) \
306 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
307#define ufshcd_set_ufs_dev_poweroff(h) \
308 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
309#define ufshcd_is_ufs_dev_active(h) \
310 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
311#define ufshcd_is_ufs_dev_sleep(h) \
312 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
313#define ufshcd_is_ufs_dev_poweroff(h) \
314 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
315
316static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
317 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
318 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
319 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
320 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
321 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
322 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
323};
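/*
 * Reading the table above: assuming the ufs_pm_level enum values are
 * sequential, UFS_PM_LVL_3 (the fourth entry) puts the device in SLEEP
 * power mode while keeping the link in HIBERN8.
 */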
324
325static inline enum ufs_dev_pwr_mode
326ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
327{
328 return ufs_pm_lvl_states[lvl].dev_state;
329}
330
331static inline enum uic_link_state
332ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
333{
334 return ufs_pm_lvl_states[lvl].link_state;
335}
336
Subhash Jadavanief542222017-08-02 16:23:55 -0700337static inline void ufshcd_set_card_online(struct ufs_hba *hba)
338{
339 atomic_set(&hba->card_state, UFS_CARD_STATE_ONLINE);
340}
341
342static inline void ufshcd_set_card_offline(struct ufs_hba *hba)
343{
344 atomic_set(&hba->card_state, UFS_CARD_STATE_OFFLINE);
345}
346
347static inline bool ufshcd_is_card_online(struct ufs_hba *hba)
348{
349 return (atomic_read(&hba->card_state) == UFS_CARD_STATE_ONLINE);
350}
351
352static inline bool ufshcd_is_card_offline(struct ufs_hba *hba)
353{
354 return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
355}
356
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700357static inline enum ufs_pm_level
358ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
359 enum uic_link_state link_state)
360{
361 enum ufs_pm_level lvl;
362
363 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
364 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
365 (ufs_pm_lvl_states[lvl].link_state == link_state))
366 return lvl;
367 }
368
369	/* if no match is found, return level 0 */
370 return UFS_PM_LVL_0;
371}
372
373static inline bool ufshcd_is_valid_pm_lvl(int lvl)
374{
375 if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
376 return true;
377 else
378 return false;
379}
380
381static irqreturn_t ufshcd_intr(int irq, void *__hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -0700382static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530383static void ufshcd_async_scan(void *data, async_cookie_t cookie);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530384static int ufshcd_reset_and_restore(struct ufs_hba *hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700385static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +0530386static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +0300387static void ufshcd_hba_exit(struct ufs_hba *hba);
388static int ufshcd_probe_hba(struct ufs_hba *hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700389static int ufshcd_enable_clocks(struct ufs_hba *hba);
390static int ufshcd_disable_clocks(struct ufs_hba *hba,
391 bool is_gating_context);
392static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
393 bool is_gating_context);
Subhash Jadavanid13daec2017-05-15 18:17:57 -0700394static void ufshcd_hold_all(struct ufs_hba *hba);
395static void ufshcd_release_all(struct ufs_hba *hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +0200396static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
Yaniv Gardicad2e032015-03-31 17:37:14 +0300397static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700398static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300399static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700400static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
401static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
402static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
403static void ufshcd_release_all(struct ufs_hba *hba);
404static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
405static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -0700406static int ufshcd_devfreq_target(struct device *dev,
407 unsigned long *freq, u32 flags);
408static int ufshcd_devfreq_get_dev_status(struct device *dev,
409 struct devfreq_dev_status *stat);
Subhash Jadavanief542222017-08-02 16:23:55 -0700410static void __ufshcd_shutdown_clkscaling(struct ufs_hba *hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -0700411
412#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
413static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
Subhash Jadavani99c76de2017-10-12 14:32:35 -0700414 .upthreshold = 70,
415 .downdifferential = 65,
Subhash Jadavani9c807702017-04-01 00:35:51 -0700416 .simple_scaling = 1,
417};
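/*
 * Assuming standard simple_ondemand governor semantics, the tunables above
 * ask for a scale-up once the reported load exceeds ~70% and a scale-down
 * once it falls below upthreshold - downdifferential, i.e. ~5%.
 */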
418
419static void *gov_data = &ufshcd_ondemand_data;
420#else
421static void *gov_data;
422#endif
423
424static struct devfreq_dev_profile ufs_devfreq_profile = {
Subhash Jadavani99c76de2017-10-12 14:32:35 -0700425 .polling_ms = 60,
Subhash Jadavani9c807702017-04-01 00:35:51 -0700426 .target = ufshcd_devfreq_target,
427 .get_dev_status = ufshcd_devfreq_get_dev_status,
428};
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700429
Yaniv Gardi14497322016-02-01 15:02:39 +0200430static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
431{
432 return tag >= 0 && tag < hba->nutrs;
433}
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300434
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700435static inline void ufshcd_enable_irq(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300436{
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300437 if (!hba->is_irq_enabled) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700438 enable_irq(hba->irq);
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300439 hba->is_irq_enabled = true;
440 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300441}
442
443static inline void ufshcd_disable_irq(struct ufs_hba *hba)
444{
445 if (hba->is_irq_enabled) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700446 disable_irq(hba->irq);
Subhash Jadavani57d104c2014-09-25 15:32:30 +0300447 hba->is_irq_enabled = false;
448 }
449}
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +0530450
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700451void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
452{
453 unsigned long flags;
454 bool unblock = false;
455
456 spin_lock_irqsave(hba->host->host_lock, flags);
457 hba->scsi_block_reqs_cnt--;
458 unblock = !hba->scsi_block_reqs_cnt;
459 spin_unlock_irqrestore(hba->host->host_lock, flags);
460 if (unblock)
461 scsi_unblock_requests(hba->host);
462}
463EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
464
465static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
466{
467 if (!hba->scsi_block_reqs_cnt++)
468 scsi_block_requests(hba->host);
469}
470
471void ufshcd_scsi_block_requests(struct ufs_hba *hba)
472{
473 unsigned long flags;
474
475 spin_lock_irqsave(hba->host->host_lock, flags);
476 __ufshcd_scsi_block_requests(hba);
477 spin_unlock_irqrestore(hba->host->host_lock, flags);
478}
479EXPORT_SYMBOL(ufshcd_scsi_block_requests);
480
Subhash Jadavani9c807702017-04-01 00:35:51 -0700481static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
482{
483 int ret = 0;
484
485 if (!hba->pctrl)
486 return 0;
487
488 /* Assert reset if ctrl == true */
489 if (ctrl)
490 ret = pinctrl_select_state(hba->pctrl,
491 pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
492 else
493 ret = pinctrl_select_state(hba->pctrl,
494 pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
495
496 if (ret < 0)
497 dev_err(hba->dev, "%s: %s failed with err %d\n",
498 __func__, ctrl ? "Assert" : "Deassert", ret);
499
500 return ret;
501}
502
503static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
504{
505 return ufshcd_device_reset_ctrl(hba, true);
506}
507
508static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
509{
510 return ufshcd_device_reset_ctrl(hba, false);
511}
512
513static int ufshcd_reset_device(struct ufs_hba *hba)
514{
515 int ret;
516
517 /* reset the connected UFS device */
518 ret = ufshcd_assert_device_reset(hba);
519 if (ret)
520 goto out;
521 /*
522 * The reset signal is active low.
523 * The UFS device shall detect more than or equal to 1us of positive
524 * or negative RST_n pulse width.
525 * To be on safe side, keep the reset low for atleast 10us.
526 */
527 usleep_range(10, 15);
528
529 ret = ufshcd_deassert_device_reset(hba);
530 if (ret)
531 goto out;
532	/* same as assert, wait for at least 10us after deassert */
533 usleep_range(10, 15);
534out:
535 return ret;
536}
537
Yaniv Gardib573d482016-03-10 17:37:09 +0200538/* replace non-printable or non-ASCII characters with spaces */
539static inline void ufshcd_remove_non_printable(char *val)
540{
Subhash Jadavanibe096032017-03-23 12:55:25 -0700541 if (!val || !*val)
Yaniv Gardib573d482016-03-10 17:37:09 +0200542 return;
543
544 if (*val < 0x20 || *val > 0x7e)
545 *val = ' ';
546}
547
Can Guof6411eb2017-06-09 15:17:22 +0800548#define UFSHCD_MAX_CMD_LOGGING 200
Can Guob7147732017-04-18 16:22:56 +0800549
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700550#ifdef CONFIG_TRACEPOINTS
Can Guob7147732017-04-18 16:22:56 +0800551static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
Subhash Jadavani114437e2017-11-08 16:22:16 -0800552 struct ufshcd_cmd_log_entry *entry)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700553{
Can Guob7147732017-04-18 16:22:56 +0800554 if (trace_ufshcd_command_enabled()) {
555 u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
556
557 trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
558 entry->doorbell, entry->transfer_len, intr,
Subhash Jadavani114437e2017-11-08 16:22:16 -0800559 entry->lba, entry->cmd_id);
Can Guob7147732017-04-18 16:22:56 +0800560 }
561}
562#else
563static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
Subhash Jadavani114437e2017-11-08 16:22:16 -0800564 struct ufshcd_cmd_log_entry *entry)
Can Guob7147732017-04-18 16:22:56 +0800565{
566}
567#endif
568
569#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
570static void ufshcd_cmd_log_init(struct ufs_hba *hba)
571{
572 /* Allocate log entries */
573 if (!hba->cmd_log.entries) {
574 hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
575 sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
576 if (!hba->cmd_log.entries)
577 return;
578 dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
579 __func__);
580 }
581}
582
583static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
584 unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
Subhash Jadavani114437e2017-11-08 16:22:16 -0800585 sector_t lba, int transfer_len)
Can Guob7147732017-04-18 16:22:56 +0800586{
587 struct ufshcd_cmd_log_entry *entry;
588
589 if (!hba->cmd_log.entries)
590 return;
591
592 entry = &hba->cmd_log.entries[hba->cmd_log.pos];
593 entry->lun = lun;
594 entry->str = str;
595 entry->cmd_type = cmd_type;
596 entry->cmd_id = cmd_id;
597 entry->lba = lba;
598 entry->transfer_len = transfer_len;
599 entry->idn = idn;
600 entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
601 entry->tag = tag;
602 entry->tstamp = ktime_get();
603 entry->outstanding_reqs = hba->outstanding_reqs;
604 entry->seq_num = hba->cmd_log.seq_num;
605 hba->cmd_log.seq_num++;
606 hba->cmd_log.pos =
607 (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
608
Subhash Jadavani114437e2017-11-08 16:22:16 -0800609 ufshcd_add_command_trace(hba, entry);
Can Guob7147732017-04-18 16:22:56 +0800610}
611
612static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
613 unsigned int tag, u8 cmd_id, u8 idn)
614{
Subhash Jadavani114437e2017-11-08 16:22:16 -0800615 __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn, 0, 0, 0);
Can Guob7147732017-04-18 16:22:56 +0800616}
617
618static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
619{
Subhash Jadavani114437e2017-11-08 16:22:16 -0800620 ufshcd_cmd_log(hba, str, "dme", 0, cmd_id, 0);
Can Guob7147732017-04-18 16:22:56 +0800621}
622
Can Guof6411eb2017-06-09 15:17:22 +0800623static void ufshcd_print_cmd_log(struct ufs_hba *hba)
Can Guob7147732017-04-18 16:22:56 +0800624{
625 int i;
626 int pos;
627 struct ufshcd_cmd_log_entry *p;
628
629 if (!hba->cmd_log.entries)
630 return;
631
632 pos = hba->cmd_log.pos;
633 for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
634 p = &hba->cmd_log.entries[pos];
635 pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
636
637 if (ktime_to_us(p->tstamp)) {
638 pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
639 p->cmd_type, p->str, p->seq_num,
640 p->lun, p->cmd_id, (unsigned long long)p->lba,
641 p->transfer_len, p->tag, p->doorbell,
642 p->outstanding_reqs, p->idn,
643 ktime_to_us(p->tstamp));
644 usleep_range(1000, 1100);
645 }
646 }
647}
648#else
649static void ufshcd_cmd_log_init(struct ufs_hba *hba)
650{
651}
652
653static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
654 unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
Subhash Jadavani114437e2017-11-08 16:22:16 -0800655 sector_t lba, int transfer_len)
Can Guob7147732017-04-18 16:22:56 +0800656{
657 struct ufshcd_cmd_log_entry entry;
658
659 entry.str = str;
660 entry.lba = lba;
Sayali Lokhandeba15e032017-12-18 12:12:56 +0530661	entry.cmd_id = cmd_id;
Can Guob7147732017-04-18 16:22:56 +0800662 entry.transfer_len = transfer_len;
663 entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
664 entry.tag = tag;
665
Subhash Jadavani114437e2017-11-08 16:22:16 -0800666 ufshcd_add_command_trace(hba, &entry);
Can Guob7147732017-04-18 16:22:56 +0800667}
668
669static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
670{
671}
672
Can Guof6411eb2017-06-09 15:17:22 +0800673static void ufshcd_print_cmd_log(struct ufs_hba *hba)
Can Guob7147732017-04-18 16:22:56 +0800674{
675}
676#endif
677
678#ifdef CONFIG_TRACEPOINTS
679static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
680 unsigned int tag, const char *str)
681{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700682 struct ufshcd_lrb *lrbp;
Can Guob7147732017-04-18 16:22:56 +0800683 char *cmd_type = NULL;
684 u8 opcode = 0;
685 u8 cmd_id = 0, idn = 0;
Subhash Jadavani114437e2017-11-08 16:22:16 -0800686 sector_t lba = 0;
687 int transfer_len = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700688
689 lrbp = &hba->lrb[tag];
690
691 if (lrbp->cmd) { /* data phase exists */
692 opcode = (u8)(*lrbp->cmd->cmnd);
693 if ((opcode == READ_10) || (opcode == WRITE_10)) {
694 /*
695 * Currently we only fully trace read(10) and write(10)
696 * commands
697 */
698 if (lrbp->cmd->request && lrbp->cmd->request->bio)
699 lba =
Can Guob7147732017-04-18 16:22:56 +0800700 lrbp->cmd->request->bio->bi_iter.bi_sector;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700701 transfer_len = be32_to_cpu(
702 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
703 }
704 }
705
Can Guob7147732017-04-18 16:22:56 +0800706 if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
707 cmd_type = "scsi";
708 cmd_id = (u8)(*lrbp->cmd->cmnd);
709 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
710 if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
711 cmd_type = "nop";
712 cmd_id = 0;
713 } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
714 cmd_type = "query";
715 cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
716 idn = hba->dev_cmd.query.request.upiu_req.idn;
717 }
718 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700719
Can Guob7147732017-04-18 16:22:56 +0800720 __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
Subhash Jadavani114437e2017-11-08 16:22:16 -0800721 lrbp->lun, lba, transfer_len);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700722}
723#else
724static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
725 unsigned int tag, const char *str)
726{
727}
728#endif
729
730static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
731{
732 struct ufs_clk_info *clki;
733 struct list_head *head = &hba->clk_list_head;
734
735 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
736 return;
737
738 if (!head || list_empty(head))
739 return;
740
741 list_for_each_entry(clki, head, list) {
742 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
743 clki->max_freq)
744 dev_err(hba->dev, "clk: %s, rate: %u\n",
745 clki->name, clki->curr_freq);
746 }
747}
748
749static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
750 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
751{
752 int i;
753
754 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
755 return;
756
757 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
758 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
759
760 if (err_hist->reg[p] == 0)
761 continue;
762 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
763 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
764 }
765}
766
Subhash Jadavani9c807702017-04-01 00:35:51 -0700767static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700768{
769 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
770 return;
771
772 /*
773 * hex_dump reads its data without the readl macro. This might
774	 * cause inconsistency issues on some platforms, as the printed
775	 * values may come from the cache rather than the most recent values.
776	 * To know whether you are looking at an un-cached version, verify
777	 * that the IORESOURCE_MEM flag is set when xxx_get_resource() is invoked
778	 * during the platform/pci probe function.
779 */
Subhash Jadavani4386e022016-12-19 13:01:56 -0800780 ufshcd_hex_dump(hba, "host regs", hba->mmio_base,
781 UFSHCI_REG_SPACE_SIZE);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700782 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
783 hba->ufs_version, hba->capabilities);
784 dev_err(hba->dev,
785 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
786 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
787 dev_err(hba->dev,
788 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
789 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
790 hba->ufs_stats.hibern8_exit_cnt);
791
792 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
793 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
794 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
795 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
796 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
797
798 ufshcd_print_clk_freqs(hba);
799
Subhash Jadavani9c807702017-04-01 00:35:51 -0700800 ufshcd_vops_dbg_register_dump(hba, no_sleep);
801}
802
803static void ufshcd_print_host_regs(struct ufs_hba *hba)
804{
805 __ufshcd_print_host_regs(hba, false);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700806}
807
808static
809void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
810{
811 struct ufshcd_lrb *lrbp;
812 int prdt_length;
813 int tag;
814
815 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
816 return;
817
818 for_each_set_bit(tag, &bitmap, hba->nutrs) {
819 lrbp = &hba->lrb[tag];
820
821 dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
822 tag, ktime_to_us(lrbp->issue_time_stamp));
823 dev_err(hba->dev,
824 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
825 tag, (u64)lrbp->utrd_dma_addr);
Subhash Jadavani4386e022016-12-19 13:01:56 -0800826 ufshcd_hex_dump(hba, "UPIU TRD", lrbp->utr_descriptor_ptr,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700827 sizeof(struct utp_transfer_req_desc));
828 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
829 (u64)lrbp->ucd_req_dma_addr);
Subhash Jadavani4386e022016-12-19 13:01:56 -0800830 ufshcd_hex_dump(hba, "UPIU REQ", lrbp->ucd_req_ptr,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700831 sizeof(struct utp_upiu_req));
832 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
833 (u64)lrbp->ucd_rsp_dma_addr);
Subhash Jadavani4386e022016-12-19 13:01:56 -0800834 ufshcd_hex_dump(hba, "UPIU RSP", lrbp->ucd_rsp_ptr,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700835 sizeof(struct utp_upiu_rsp));
836 prdt_length =
837 le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
838 dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries phys@0x%llx",
839 tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
840 if (pr_prdt)
Subhash Jadavani4386e022016-12-19 13:01:56 -0800841 ufshcd_hex_dump(hba, "UPIU PRDT", lrbp->ucd_prdt_ptr,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700842 sizeof(struct ufshcd_sg_entry) * prdt_length);
843 }
844}
845
846static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
847{
848 struct utp_task_req_desc *tmrdp;
849 int tag;
850
851 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
852 return;
853
854 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
855 tmrdp = &hba->utmrdl_base_addr[tag];
856 dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
Subhash Jadavani4386e022016-12-19 13:01:56 -0800857 ufshcd_hex_dump(hba, "TM TRD", &tmrdp->header,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700858 sizeof(struct request_desc_header));
859 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
860 tag);
Subhash Jadavani4386e022016-12-19 13:01:56 -0800861 ufshcd_hex_dump(hba, "TM REQ", tmrdp->task_req_upiu,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700862 sizeof(struct utp_upiu_req));
863 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
864 tag);
Subhash Jadavani4386e022016-12-19 13:01:56 -0800865 ufshcd_hex_dump(hba, "TM RSP", tmrdp->task_rsp_upiu,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700866 sizeof(struct utp_task_req_desc));
867 }
868}
869
Sayali Lokhande26c4bb52017-09-12 14:44:46 +0530870static void ufshcd_print_fsm_state(struct ufs_hba *hba)
871{
872 int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
873
874 err = ufshcd_dme_get(hba,
875 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
876 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
877 &tx_fsm_val);
878 dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
879 tx_fsm_val, err);
880 err = ufshcd_dme_get(hba,
881 UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
882 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
883 &rx_fsm_val);
884 dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
885 rx_fsm_val, err);
886}
887
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700888static void ufshcd_print_host_state(struct ufs_hba *hba)
889{
890 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
891 return;
892
893 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
894 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
895 hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
896 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
897 hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
898 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
899 hba->curr_dev_pwr_mode, hba->uic_link_state);
900 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
901 hba->pm_op_in_progress, hba->is_sys_suspended);
902 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
903 hba->auto_bkops_enabled, hba->host->host_self_blocked);
904 dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
905 hba->clk_gating.state, hba->hibern8_on_idle.state);
906 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
907 hba->eh_flags, hba->req_abort_count);
908 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
909 hba->capabilities, hba->caps);
910 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -0800911 hba->dev_info.quirks);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700912}
913
914/**
915 * ufshcd_print_pwr_info - print power params as saved in hba
916 * power info
917 * @hba: per-adapter instance
918 */
919static void ufshcd_print_pwr_info(struct ufs_hba *hba)
920{
921 char *names[] = {
922 "INVALID MODE",
923 "FAST MODE",
924 "SLOW_MODE",
925 "INVALID MODE",
926 "FASTAUTO_MODE",
927 "SLOWAUTO_MODE",
928 "INVALID MODE",
929 };
930
931 if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
932 return;
933
934 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
935 __func__,
936 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
937 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
938 names[hba->pwr_info.pwr_rx],
939 names[hba->pwr_info.pwr_tx],
940 hba->pwr_info.hs_rate);
941}
942
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530943/*
944 * ufshcd_wait_for_register - wait for register value to change
945 * @hba - per-adapter interface
946 * @reg - mmio register offset
947 * @mask - mask to apply to read register value
948 * @val - wait condition
949 * @interval_us - polling interval in microsecs
950 * @timeout_ms - timeout in millisecs
Yaniv Gardi596585a2016-03-10 17:37:08 +0200951 * @can_sleep - perform sleep or just spin
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530952 * Returns -ETIMEDOUT on error, zero on success
953 */
Yaniv Gardi596585a2016-03-10 17:37:08 +0200954int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
955 u32 val, unsigned long interval_us,
956 unsigned long timeout_ms, bool can_sleep)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530957{
958 int err = 0;
959 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
960
961 /* ignore bits that we don't intend to wait on */
962 val = val & mask;
963
964 while ((ufshcd_readl(hba, reg) & mask) != val) {
Yaniv Gardi596585a2016-03-10 17:37:08 +0200965 if (can_sleep)
966 usleep_range(interval_us, interval_us + 50);
967 else
968 udelay(interval_us);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +0530969 if (time_after(jiffies, timeout)) {
970 if ((ufshcd_readl(hba, reg) & mask) != val)
971 err = -ETIMEDOUT;
972 break;
973 }
974 }
975
976 return err;
977}
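/*
 * Illustrative call (interval/timeout values are hypothetical): poll the
 * doorbell register until a given tag bit clears, sleeping between reads:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *			1 << tag, 0, 100, 1000, true);
 */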
978
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +0530979/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +0530980 * ufshcd_get_intr_mask - Get the interrupt bit mask
981 * @hba - Pointer to adapter instance
982 *
983 * Returns interrupt bit mask per version
984 */
985static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
986{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -0700987 u32 intr_mask = 0;
988
989 switch (hba->ufs_version) {
990 case UFSHCI_VERSION_10:
991 intr_mask = INTERRUPT_MASK_ALL_VER_10;
992 break;
993 /* allow fall through */
994 case UFSHCI_VERSION_11:
995 case UFSHCI_VERSION_20:
996 intr_mask = INTERRUPT_MASK_ALL_VER_11;
997 break;
998 /* allow fall through */
999 case UFSHCI_VERSION_21:
1000 default:
1001 intr_mask = INTERRUPT_MASK_ALL_VER_21;
1002 }
1003
1004 if (!ufshcd_is_crypto_supported(hba))
1005 intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
1006
1007 return intr_mask;
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05301008}
1009
1010/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301011 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
1012 * @hba - Pointer to adapter instance
1013 *
1014 * Returns UFSHCI version supported by the controller
1015 */
1016static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
1017{
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02001018 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
1019 return ufshcd_vops_get_ufs_hci_version(hba);
Yaniv Gardi9949e702015-05-17 18:55:05 +03001020
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301021 return ufshcd_readl(hba, REG_UFS_VERSION);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301022}
1023
1024/**
1025 * ufshcd_is_device_present - Check if any device connected to
1026 * the host controller
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03001027 * @hba: pointer to adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301028 *
Venkatraman S73ec5132012-07-10 19:39:23 +05301029 * Returns 1 if device present, 0 if no device detected
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301030 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03001031static inline int ufshcd_is_device_present(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301032{
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03001033 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
1034 DEVICE_PRESENT) ? 1 : 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301035}
1036
1037/**
1038 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
1039 * @lrb: pointer to local command reference block
1040 *
1041 * This function is used to get the OCS field from UTRD
1042 * Returns the OCS field in the UTRD
1043 */
1044static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
1045{
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301046 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301047}
1048
1049/**
1050 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
1051 * @task_req_descp: pointer to utp_task_req_desc structure
1052 *
1053 * This function is used to get the OCS field from UTMRD
1054 * Returns the OCS field in the UTMRD
1055 */
1056static inline int
1057ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
1058{
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05301059 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301060}
1061
1062/**
1063 * ufshcd_get_tm_free_slot - get a free slot for task management request
1064 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05301065 * @free_slot: pointer to variable with available slot value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301066 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05301067 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
1068	 * Returns false if a free slot is not available, else returns true with the
1069	 * tag value in @free_slot.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301070 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05301071static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301072{
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05301073 int tag;
1074 bool ret = false;
1075
1076 if (!free_slot)
1077 goto out;
1078
1079 do {
1080 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
1081 if (tag >= hba->nutmrs)
1082 goto out;
1083 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
1084
1085 *free_slot = tag;
1086 ret = true;
1087out:
1088 return ret;
1089}
1090
1091static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
1092{
1093 clear_bit_unlock(slot, &hba->tm_slots_in_use);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301094}
1095
1096/**
1097 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
1098 * @hba: per adapter instance
1099 * @pos: position of the bit to be cleared
1100 */
1101static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
1102{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301103 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301104}
1105
1106/**
Yaniv Gardia48353f2016-02-01 15:02:40 +02001107 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
1108 * @hba: per adapter instance
1109 * @tag: position of the bit to be cleared
1110 */
1111static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
1112{
1113 __clear_bit(tag, &hba->outstanding_reqs);
1114}
1115
1116/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301117 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
1118 * @reg: Register value of host controller status
1119 *
1120 * Returns integer, 0 on Success and positive value if failed
1121 */
1122static inline int ufshcd_get_lists_status(u32 reg)
1123{
1124 /*
1125 * The mask 0xFF is for the following HCS register bits
1126 * Bit Description
1127 * 0 Device Present
1128 * 1 UTRLRDY
1129 * 2 UTMRLRDY
1130 * 3 UCRDY
Yaniv Gardi897efe62016-02-01 15:02:48 +02001131 * 4-7 reserved
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301132 */
Yaniv Gardi897efe62016-02-01 15:02:48 +02001133 return ((reg & 0xFF) >> 1) ^ 0x07;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301134}
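/*
 * Worked example: if HCS reads 0x0F (device present, UTRLRDY, UTMRLRDY and
 * UCRDY all set), ((0x0F & 0xFF) >> 1) ^ 0x07 = 0x07 ^ 0x07 = 0, i.e. ready.
 */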
1135
1136/**
1137 * ufshcd_get_uic_cmd_result - Get the UIC command result
1138 * @hba: Pointer to adapter instance
1139 *
1140 * This function gets the result of UIC command completion
1141 * Returns 0 on success, non zero value on error
1142 */
1143static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
1144{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301145 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301146 MASK_UIC_COMMAND_RESULT;
1147}
1148
1149/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05301150 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
1151 * @hba: Pointer to adapter instance
1152 *
1153 * This function gets UIC command argument3
1154 * Returns 0 on success, non zero value on error
1155 */
1156static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
1157{
1158 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
1159}
1160
1161/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301162 * ufshcd_get_req_rsp - returns the TR response transaction type
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301163 * @ucd_rsp_ptr: pointer to response UPIU
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301164 */
1165static inline int
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301166ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301167{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05301168 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301169}
1170
1171/**
1172 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
1173 * @ucd_rsp_ptr: pointer to response UPIU
1174 *
1175 * This function gets the response status and scsi_status from response UPIU
1176 * Returns the response result code.
1177 */
1178static inline int
1179ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
1180{
1181 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
1182}
1183
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05301184/*
1185 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
1186 * from response UPIU
1187 * @ucd_rsp_ptr: pointer to response UPIU
1188 *
1189 * Return the data segment length.
1190 */
1191static inline unsigned int
1192ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
1193{
1194 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
1195 MASK_RSP_UPIU_DATA_SEG_LEN;
1196}
1197
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301198/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05301199 * ufshcd_is_exception_event - Check if the device raised an exception event
1200 * @ucd_rsp_ptr: pointer to response UPIU
1201 *
1202 * The function checks if the device raised an exception event indicated in
1203 * the Device Information field of response UPIU.
1204 *
1205 * Returns true if exception is raised, false otherwise.
1206 */
1207static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
1208{
1209 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
1210 MASK_RSP_EXCEPTION_EVENT ? true : false;
1211}
1212
1213/**
Seungwon Jeon7d568652013-08-31 21:40:20 +05301214 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301215 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301216 */
1217static inline void
Seungwon Jeon7d568652013-08-31 21:40:20 +05301218ufshcd_reset_intr_aggr(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301219{
Seungwon Jeon7d568652013-08-31 21:40:20 +05301220 ufshcd_writel(hba, INT_AGGR_ENABLE |
1221 INT_AGGR_COUNTER_AND_TIMER_RESET,
1222 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1223}
1224
1225/**
1226 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
1227 * @hba: per adapter instance
1228 * @cnt: Interrupt aggregation counter threshold
1229 * @tmout: Interrupt aggregation timeout value
1230 */
1231static inline void
1232ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
1233{
1234 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
1235 INT_AGGR_COUNTER_THLD_VAL(cnt) |
1236 INT_AGGR_TIMEOUT_VAL(tmout),
1237 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301238}
1239
1240/**
Yaniv Gardib8521902015-05-17 18:54:57 +03001241 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
1242 * @hba: per adapter instance
1243 */
1244static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
1245{
1246 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
1247}
1248
1249/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301250 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
1251 * When run-stop registers are set to 1, it indicates the
1252 * host controller that it can process the requests
1253 * @hba: per adapter instance
1254 */
1255static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
1256{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301257 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
1258 REG_UTP_TASK_REQ_LIST_RUN_STOP);
1259 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
1260 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301261}
1262
1263/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301264 * ufshcd_hba_start - Start controller initialization sequence
1265 * @hba: per adapter instance
1266 */
1267static inline void ufshcd_hba_start(struct ufs_hba *hba)
1268{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001269 u32 val = CONTROLLER_ENABLE;
1270
1271 if (ufshcd_is_crypto_supported(hba))
1272 val |= CRYPTO_GENERAL_ENABLE;
1273 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301274}
1275
1276/**
1277 * ufshcd_is_hba_active - Get controller state
1278 * @hba: per adapter instance
1279 *
1280 * Returns zero if controller is active, 1 otherwise
1281 */
1282static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
1283{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05301284 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05301285}
1286
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001287static const char *ufschd_uic_link_state_to_string(
1288 enum uic_link_state state)
1289{
1290 switch (state) {
1291 case UIC_LINK_OFF_STATE: return "OFF";
1292 case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
1293 case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
1294 default: return "UNKNOWN";
1295 }
1296}
1297
1298static const char *ufschd_ufs_dev_pwr_mode_to_string(
1299 enum ufs_dev_pwr_mode state)
1300{
1301 switch (state) {
1302 case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
1303 case UFS_SLEEP_PWR_MODE: return "SLEEP";
1304 case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
1305 default: return "UNKNOWN";
1306 }
1307}
1308
Yaniv Gardi37113102016-03-10 17:37:16 +02001309u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
1310{
1311 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
1312 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
1313 (hba->ufs_version == UFSHCI_VERSION_11))
1314 return UFS_UNIPRO_VER_1_41;
1315 else
1316 return UFS_UNIPRO_VER_1_6;
1317}
1318EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1319
1320static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1321{
1322 /*
1323 * If both host and device support UniPro ver1.6 or later, PA layer
1324 * parameters tuning happens during link startup itself.
1325 *
1326 * We can manually tune PA layer parameters if either host or device
1327 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1328 * logic simple, we will only do manual tuning if local unipro version
1329 * doesn't support ver1.6 or later.
1330 */
1331 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
1332 return true;
1333 else
1334 return false;
1335}
1336
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001337/**
1338 * ufshcd_set_clk_freq - set UFS controller clock frequencies
1339 * @hba: per adapter instance
1340	 * @scale_up: If true, set max possible frequency, otherwise set low frequency
1341 *
1342 * Returns 0 if successful
1343 * Returns < 0 for any other errors
1344 */
1345static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1346{
1347 int ret = 0;
1348 struct ufs_clk_info *clki;
1349 struct list_head *head = &hba->clk_list_head;
1350
1351 if (!head || list_empty(head))
1352 goto out;
1353
1354 list_for_each_entry(clki, head, list) {
1355 if (!IS_ERR_OR_NULL(clki->clk)) {
1356 if (scale_up && clki->max_freq) {
1357 if (clki->curr_freq == clki->max_freq)
1358 continue;
1359
1360 ret = clk_set_rate(clki->clk, clki->max_freq);
1361 if (ret) {
1362 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1363 __func__, clki->name,
1364 clki->max_freq, ret);
1365 break;
1366 }
1367 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1368 "scaled up", clki->name,
1369 clki->curr_freq,
1370 clki->max_freq);
1371 clki->curr_freq = clki->max_freq;
1372
1373 } else if (!scale_up && clki->min_freq) {
1374 if (clki->curr_freq == clki->min_freq)
1375 continue;
1376
1377 ret = clk_set_rate(clki->clk, clki->min_freq);
1378 if (ret) {
1379 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1380 __func__, clki->name,
1381 clki->min_freq, ret);
1382 break;
1383 }
1384 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1385 "scaled down", clki->name,
1386 clki->curr_freq,
1387 clki->min_freq);
1388 clki->curr_freq = clki->min_freq;
1389 }
1390 }
1391 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1392 clki->name, clk_get_rate(clki->clk));
1393 }
1394
1395out:
1396 return ret;
1397}
1398
1399/**
1400 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1401 * @hba: per adapter instance
1402 * @scale_up: True if scaling up and false if scaling down
1403 *
1404 * Returns 0 if successful
1405 * Returns < 0 for any other errors
1406 */
1407static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1408{
1409 int ret = 0;
1410
1411 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1412 if (ret)
1413 return ret;
1414
1415 ret = ufshcd_set_clk_freq(hba, scale_up);
1416 if (ret)
1417 return ret;
1418
1419 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1420 if (ret) {
1421 ufshcd_set_clk_freq(hba, !scale_up);
1422 return ret;
1423 }
1424
1425 return ret;
1426}
1427
Subhash Jadavani9c807702017-04-01 00:35:51 -07001428static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
1429{
1430 hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
1431 cancel_work_sync(&hba->clk_gating.gate_work);
1432}
1433
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001434static void ufshcd_ungate_work(struct work_struct *work)
1435{
1436 int ret;
1437 unsigned long flags;
1438 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1439 clk_gating.ungate_work);
1440
Subhash Jadavani9c807702017-04-01 00:35:51 -07001441 ufshcd_cancel_gate_work(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001442
1443 spin_lock_irqsave(hba->host->host_lock, flags);
1444 if (hba->clk_gating.state == CLKS_ON) {
1445 spin_unlock_irqrestore(hba->host->host_lock, flags);
1446 goto unblock_reqs;
1447 }
1448
1449 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001450 ufshcd_hba_vreg_set_hpm(hba);
1451 ufshcd_enable_clocks(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001452
1453 /* Exit from hibern8 */
1454 if (ufshcd_can_hibern8_during_gating(hba)) {
1455 /* Prevent gating in this path */
1456 hba->clk_gating.is_suspended = true;
1457 if (ufshcd_is_link_hibern8(hba)) {
1458 ret = ufshcd_uic_hibern8_exit(hba);
1459 if (ret)
1460 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1461 __func__, ret);
1462 else
1463 ufshcd_set_link_active(hba);
1464 }
1465 hba->clk_gating.is_suspended = false;
1466 }
1467unblock_reqs:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001468 ufshcd_scsi_unblock_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001469}
1470
1471/**
1472 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1473 * Also, exit from hibern8 mode and set the link as active.
1474 * @hba: per adapter instance
1475 * @async: This indicates whether caller should ungate clocks asynchronously.
1476 */
1477int ufshcd_hold(struct ufs_hba *hba, bool async)
1478{
1479 int rc = 0;
1480 unsigned long flags;
1481
1482 if (!ufshcd_is_clkgating_allowed(hba))
1483 goto out;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001484 spin_lock_irqsave(hba->host->host_lock, flags);
1485 hba->clk_gating.active_reqs++;
1486
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001487 if (ufshcd_eh_in_progress(hba)) {
1488 spin_unlock_irqrestore(hba->host->host_lock, flags);
1489 return 0;
1490 }
1491
Sahitya Tummala856b3482014-09-25 15:32:34 +03001492start:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001493 switch (hba->clk_gating.state) {
1494 case CLKS_ON:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001495 /*
1496 * Wait for the ungate work to complete if in progress.
1497 * Though the clocks may be in ON state, the link could
 1498			 * still be in hibern8 state if hibern8 is allowed
 1499			 * during clock gating.
 1500			 * Make sure we also exit hibern8, in addition to the
 1501			 * clocks being ON.
1502 */
1503 if (ufshcd_can_hibern8_during_gating(hba) &&
1504 ufshcd_is_link_hibern8(hba)) {
1505 spin_unlock_irqrestore(hba->host->host_lock, flags);
1506 flush_work(&hba->clk_gating.ungate_work);
1507 spin_lock_irqsave(hba->host->host_lock, flags);
1508 goto start;
1509 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001510 break;
1511 case REQ_CLKS_OFF:
Subhash Jadavani9c807702017-04-01 00:35:51 -07001512 /*
 1513		 * If the timer was active but the callback was not running,
1514 * we have nothing to do, just change state and return.
1515 */
1516 if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001517 hba->clk_gating.state = CLKS_ON;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001518 trace_ufshcd_clk_gating(dev_name(hba->dev),
1519 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001520 break;
1521 }
1522 /*
Subhash Jadavani9c807702017-04-01 00:35:51 -07001523 * If we are here, it means gating work is either done or
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001524 * currently running. Hence, fall through to cancel gating
1525 * work and to enable clocks.
1526 */
1527 case CLKS_OFF:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001528 __ufshcd_scsi_block_requests(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001529 hba->clk_gating.state = REQ_CLKS_ON;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001530 trace_ufshcd_clk_gating(dev_name(hba->dev),
1531 hba->clk_gating.state);
Sayali Lokhandeab6db442017-07-06 12:03:10 +05301532 queue_work(hba->clk_gating.clk_gating_workq,
Subhash Jadavani9c807702017-04-01 00:35:51 -07001533 &hba->clk_gating.ungate_work);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001534 /*
1535 * fall through to check if we should wait for this
1536 * work to be done or not.
1537 */
1538 case REQ_CLKS_ON:
1539 if (async) {
1540 rc = -EAGAIN;
1541 hba->clk_gating.active_reqs--;
1542 break;
1543 }
1544
1545 spin_unlock_irqrestore(hba->host->host_lock, flags);
1546 flush_work(&hba->clk_gating.ungate_work);
1547 /* Make sure state is CLKS_ON before returning */
Sahitya Tummala856b3482014-09-25 15:32:34 +03001548 spin_lock_irqsave(hba->host->host_lock, flags);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001549 goto start;
1550 default:
1551 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1552 __func__, hba->clk_gating.state);
1553 break;
1554 }
1555 spin_unlock_irqrestore(hba->host->host_lock, flags);
1556out:
Asutosh Das3da913a2017-03-24 10:32:16 +05301557 hba->ufs_stats.clk_hold.ts = ktime_get();
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001558 return rc;
1559}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001560EXPORT_SYMBOL_GPL(ufshcd_hold);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001561
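/*
 * ufshcd_gate_work - gate (turn off) the UFS clocks after the idle delay
 * @work: pointer to the clk_gating.gate_work work_struct
 *
 * Runs from the clock gating workqueue once the gate hrtimer expires.
 * Bails out if gating got suspended, an ungate request raced in, or the
 * host is still busy. Otherwise it puts the link into hibern8 (if
 * allowed), disables the clocks while keeping the device ref clock when
 * the link is still active, puts the host vregs into low power mode and
 * marks the state as CLKS_OFF.
 */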
1562static void ufshcd_gate_work(struct work_struct *work)
1563{
1564 struct ufs_hba *hba = container_of(work, struct ufs_hba,
Subhash Jadavani9c807702017-04-01 00:35:51 -07001565 clk_gating.gate_work);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001566 unsigned long flags;
1567
1568 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani9c807702017-04-01 00:35:51 -07001569 /*
1570 * In case you are here to cancel this work the gating state
1571 * would be marked as REQ_CLKS_ON. In this case save time by
 1572	 * skipping the gating work and exiting after changing the clock
1573 * state to CLKS_ON.
1574 */
1575 if (hba->clk_gating.is_suspended ||
Asutosh Dasdd96ffa2017-03-23 15:01:56 +05301576 (hba->clk_gating.state != REQ_CLKS_OFF)) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001577 hba->clk_gating.state = CLKS_ON;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001578 trace_ufshcd_clk_gating(dev_name(hba->dev),
1579 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001580 goto rel_lock;
1581 }
1582
1583 if (hba->clk_gating.active_reqs
1584 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1585 || hba->lrb_in_use || hba->outstanding_tasks
1586 || hba->active_uic_cmd || hba->uic_async_done)
1587 goto rel_lock;
1588
1589 spin_unlock_irqrestore(hba->host->host_lock, flags);
1590
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001591 if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
1592 hba->hibern8_on_idle.is_enabled)
1593 /*
1594 * Hibern8 enter work (on Idle) needs clocks to be ON hence
1595 * make sure that it is flushed before turning off the clocks.
1596 */
1597 flush_delayed_work(&hba->hibern8_on_idle.enter_work);
1598
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001599 /* put the link into hibern8 mode before turning off clocks */
1600 if (ufshcd_can_hibern8_during_gating(hba)) {
1601 if (ufshcd_uic_hibern8_enter(hba)) {
1602 hba->clk_gating.state = CLKS_ON;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001603 trace_ufshcd_clk_gating(dev_name(hba->dev),
1604 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001605 goto out;
1606 }
1607 ufshcd_set_link_hibern8(hba);
1608 }
1609
Subhash Jadavani9c807702017-04-01 00:35:51 -07001610 /*
1611 * If auto hibern8 is supported then the link will already
1612 * be in hibern8 state and the ref clock can be gated.
1613 */
1614 if ((ufshcd_is_auto_hibern8_supported(hba) ||
1615 !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001616 ufshcd_disable_clocks(hba, true);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001617 else
1618 /* If link is active, device ref_clk can't be switched off */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001619 ufshcd_disable_clocks_skip_ref_clk(hba, true);
1620
1621 /* Put the host controller in low power mode if possible */
1622 ufshcd_hba_vreg_set_lpm(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001623
1624 /*
1625 * In case you are here to cancel this work the gating state
1626 * would be marked as REQ_CLKS_ON. In this case keep the state
1627 * as REQ_CLKS_ON which would anyway imply that clocks are off
 1628	 * and a request to turn them on is pending. This way, we keep
 1629	 * the state machine intact and ultimately prevent the cancel
 1630	 * work from running multiple times when new requests arrive
 1631	 * before the current cancel work is done.
1632 */
1633 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001634 if (hba->clk_gating.state == REQ_CLKS_OFF) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001635 hba->clk_gating.state = CLKS_OFF;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001636 trace_ufshcd_clk_gating(dev_name(hba->dev),
1637 hba->clk_gating.state);
1638 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001639rel_lock:
1640 spin_unlock_irqrestore(hba->host->host_lock, flags);
1641out:
1642 return;
1643}
1644
1645/* host lock must be held before calling this variant */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001646static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001647{
1648 if (!ufshcd_is_clkgating_allowed(hba))
1649 return;
1650
1651 hba->clk_gating.active_reqs--;
1652
1653 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1654 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1655 || hba->lrb_in_use || hba->outstanding_tasks
Yaniv Gardi53c12d02016-02-01 15:02:45 +02001656 || hba->active_uic_cmd || hba->uic_async_done
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001657 || ufshcd_eh_in_progress(hba) || no_sched)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001658 return;
1659
1660 hba->clk_gating.state = REQ_CLKS_OFF;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001661 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
Asutosh Das3da913a2017-03-24 10:32:16 +05301662 hba->ufs_stats.clk_rel.ts = ktime_get();
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001663
Subhash Jadavani9c807702017-04-01 00:35:51 -07001664 hrtimer_start(&hba->clk_gating.gate_hrtimer,
1665 ms_to_ktime(hba->clk_gating.delay_ms),
1666 HRTIMER_MODE_REL);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001667}
1668
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001669void ufshcd_release(struct ufs_hba *hba, bool no_sched)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001670{
1671 unsigned long flags;
1672
1673 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001674 __ufshcd_release(hba, no_sched);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001675 spin_unlock_irqrestore(hba->host->host_lock, flags);
1676}
Yaniv Gardi6e3fd442015-10-28 13:15:50 +02001677EXPORT_SYMBOL_GPL(ufshcd_release);
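/*
 * A minimal usage sketch (illustrative, not from a specific caller):
 * code that needs the host clocks on brackets its controller access
 * with ufshcd_hold()/ufshcd_release():
 *
 *	if (!ufshcd_hold(hba, false)) {
 *		... access host controller registers ...
 *		ufshcd_release(hba, false);
 *	}
 *
 * With async == false, ufshcd_hold() only returns after the clocks are
 * ON; with async == true it may return -EAGAIN instead of waiting.
 */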
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001678
1679static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1680 struct device_attribute *attr, char *buf)
1681{
1682 struct ufs_hba *hba = dev_get_drvdata(dev);
1683
1684 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1685}
1686
1687static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1688 struct device_attribute *attr, const char *buf, size_t count)
1689{
1690 struct ufs_hba *hba = dev_get_drvdata(dev);
1691 unsigned long flags, value;
1692
1693 if (kstrtoul(buf, 0, &value))
1694 return -EINVAL;
1695
1696 spin_lock_irqsave(hba->host->host_lock, flags);
1697 hba->clk_gating.delay_ms = value;
1698 spin_unlock_irqrestore(hba->host->host_lock, flags);
1699 return count;
1700}
1701
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001702static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
1703 struct device_attribute *attr, char *buf)
1704{
1705 struct ufs_hba *hba = dev_get_drvdata(dev);
1706
1707 return snprintf(buf, PAGE_SIZE, "%lu\n",
1708 hba->clk_gating.delay_ms_pwr_save);
1709}
1710
1711static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
1712 struct device_attribute *attr, const char *buf, size_t count)
1713{
1714 struct ufs_hba *hba = dev_get_drvdata(dev);
1715 unsigned long flags, value;
1716
1717 if (kstrtoul(buf, 0, &value))
1718 return -EINVAL;
1719
1720 spin_lock_irqsave(hba->host->host_lock, flags);
1721
1722 hba->clk_gating.delay_ms_pwr_save = value;
1723 if (ufshcd_is_clkscaling_supported(hba) &&
1724 !hba->clk_scaling.is_scaled_up)
1725 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
1726
1727 spin_unlock_irqrestore(hba->host->host_lock, flags);
1728 return count;
1729}
1730
1731static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
1732 struct device_attribute *attr, char *buf)
1733{
1734 struct ufs_hba *hba = dev_get_drvdata(dev);
1735
1736 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
1737}
1738
1739static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
1740 struct device_attribute *attr, const char *buf, size_t count)
1741{
1742 struct ufs_hba *hba = dev_get_drvdata(dev);
1743 unsigned long flags, value;
1744
1745 if (kstrtoul(buf, 0, &value))
1746 return -EINVAL;
1747
1748 spin_lock_irqsave(hba->host->host_lock, flags);
1749
1750 hba->clk_gating.delay_ms_perf = value;
1751 if (ufshcd_is_clkscaling_supported(hba) &&
1752 hba->clk_scaling.is_scaled_up)
1753 hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
1754
1755 spin_unlock_irqrestore(hba->host->host_lock, flags);
1756 return count;
1757}
1758
1759static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1760 struct device_attribute *attr, char *buf)
1761{
1762 struct ufs_hba *hba = dev_get_drvdata(dev);
1763
1764 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1765}
1766
1767static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1768 struct device_attribute *attr, const char *buf, size_t count)
1769{
1770 struct ufs_hba *hba = dev_get_drvdata(dev);
1771 unsigned long flags;
1772 u32 value;
1773
1774 if (kstrtou32(buf, 0, &value))
1775 return -EINVAL;
1776
1777 value = !!value;
1778 if (value == hba->clk_gating.is_enabled)
1779 goto out;
1780
1781 if (value) {
1782 ufshcd_release(hba, false);
1783 } else {
1784 spin_lock_irqsave(hba->host->host_lock, flags);
1785 hba->clk_gating.active_reqs++;
1786 spin_unlock_irqrestore(hba->host->host_lock, flags);
1787 }
1788
1789 hba->clk_gating.is_enabled = value;
1790out:
1791 return count;
1792}
1793
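/*
 * ufshcd_clkgate_hrtimer_handler - clock gating delay timer callback
 *
 * Fires once the gating delay has expired and queues the gate work on
 * the dedicated clock gating workqueue. The timer is one-shot, hence
 * HRTIMER_NORESTART.
 */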
Subhash Jadavani9c807702017-04-01 00:35:51 -07001794static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
1795 struct hrtimer *timer)
1796{
1797 struct ufs_hba *hba = container_of(timer, struct ufs_hba,
1798 clk_gating.gate_hrtimer);
1799
Sayali Lokhandeab6db442017-07-06 12:03:10 +05301800 queue_work(hba->clk_gating.clk_gating_workq,
1801 &hba->clk_gating.gate_work);
Subhash Jadavani9c807702017-04-01 00:35:51 -07001802
1803 return HRTIMER_NORESTART;
1804}
1805
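/*
 * ufshcd_init_clk_gating - set up the clock gating state machine
 * @hba: per adapter instance
 *
 * Initializes the gate/ungate works, the gate hrtimer and a dedicated
 * single threaded workqueue, picks the default gating delays and exposes
 * the gating controls through sysfs. Hibern8 with clock gating is
 * disabled when the controller supports auto hibern8.
 */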
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001806static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1807{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001808 struct ufs_clk_gating *gating = &hba->clk_gating;
Sayali Lokhandeab6db442017-07-06 12:03:10 +05301809 char wq_name[sizeof("ufs_clk_gating_00")];
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001810
1811 hba->clk_gating.state = CLKS_ON;
1812
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001813 if (!ufshcd_is_clkgating_allowed(hba))
1814 return;
1815
Subhash Jadavani9c807702017-04-01 00:35:51 -07001816 /*
1817 * Disable hibern8 during clk gating if
1818 * auto hibern8 is supported
1819 */
1820 if (ufshcd_is_auto_hibern8_supported(hba))
1821 hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1822
1823 INIT_WORK(&gating->gate_work, ufshcd_gate_work);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001824 INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
Subhash Jadavani9c807702017-04-01 00:35:51 -07001825 /*
1826 * Clock gating work must be executed only after auto hibern8
1827 * timeout has expired in the hardware or after aggressive
1828 * hibern8 on idle software timeout. Using jiffy based low
1829 * resolution delayed work is not reliable to guarantee this,
 1830	 * hence use a high resolution timer to make sure the gate work
 1831	 * is scheduled strictly after the hibern8 timeout.
1832 *
1833 * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
1834 */
1835 hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1836 gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
1837
Sayali Lokhandeab6db442017-07-06 12:03:10 +05301838 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
Subhash Jadavani9c807702017-04-01 00:35:51 -07001839 hba->host->host_no);
Sayali Lokhandeab6db442017-07-06 12:03:10 +05301840 hba->clk_gating.clk_gating_workq =
1841 create_singlethread_workqueue(wq_name);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001842
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001843 gating->is_enabled = true;
1844
Subhash Jadavani9c807702017-04-01 00:35:51 -07001845 gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
1846 gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001847
1848 /* start with performance mode */
1849 gating->delay_ms = gating->delay_ms_perf;
1850
1851 if (!ufshcd_is_clkscaling_supported(hba))
1852 goto scaling_not_supported;
1853
1854 gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
1855 gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
1856 sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
1857 gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
1858 gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
1859 if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
1860 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
1861
1862 gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
1863 gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
1864 sysfs_attr_init(&gating->delay_perf_attr.attr);
1865 gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
1866 gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
1867 if (device_create_file(hba->dev, &gating->delay_perf_attr))
1868 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
1869
1870 goto add_clkgate_enable;
1871
1872scaling_not_supported:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001873 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1874 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1875 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1876 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1877 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
1878 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1879 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001880
1881add_clkgate_enable:
1882 gating->enable_attr.show = ufshcd_clkgate_enable_show;
1883 gating->enable_attr.store = ufshcd_clkgate_enable_store;
1884 sysfs_attr_init(&gating->enable_attr.attr);
1885 gating->enable_attr.attr.name = "clkgate_enable";
1886 gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
1887 if (device_create_file(hba->dev, &gating->enable_attr))
1888 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001889}
1890
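/*
 * ufshcd_exit_clk_gating - tear down what ufshcd_init_clk_gating() set up
 * @hba: per adapter instance
 *
 * Removes the sysfs attributes, cancels any pending gate/ungate work and
 * destroys the clock gating workqueue.
 */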
1891static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1892{
1893 if (!ufshcd_is_clkgating_allowed(hba))
1894 return;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001895 if (ufshcd_is_clkscaling_supported(hba)) {
1896 device_remove_file(hba->dev,
1897 &hba->clk_gating.delay_pwr_save_attr);
1898 device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
1899 } else {
1900 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1901 }
1902 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
Subhash Jadavani9c807702017-04-01 00:35:51 -07001903 ufshcd_cancel_gate_work(hba);
Akinobu Mita97cd6802014-11-24 14:24:18 +09001904 cancel_work_sync(&hba->clk_gating.ungate_work);
Sayali Lokhandeab6db442017-07-06 12:03:10 +05301905 destroy_workqueue(hba->clk_gating.clk_gating_workq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03001906}
1907
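/*
 * ufshcd_set_auto_hibern8_timer - program the auto hibern8 idle timer
 * @hba: per adapter instance
 * @delay: idle timeout value, in units of the 1 ms timer scale
 *
 * Updates the scale and timeout fields of the auto hibern8 idle timer
 * register and issues a barrier so the new value takes effect before
 * any further register accesses.
 */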
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07001908static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
1909{
1910 ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
1911 AUTO_HIBERN8_IDLE_TIMER_MASK,
1912 AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
1913 REG_AUTO_HIBERN8_IDLE_TIMER);
1914 /* Make sure the timer gets applied before further operations */
1915 mb();
1916}
1917
1918/**
1919 * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
1920 *
1921 * @hba: per adapter instance
1922 * @async: This indicates whether caller wants to exit hibern8 asynchronously.
1923 *
1924 * Exit from hibern8 mode and set the link as active.
1925 *
1926 * Return 0 on success, non-zero on failure.
1927 */
1928static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
1929{
1930 int rc = 0;
1931 unsigned long flags;
1932
1933 if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1934 goto out;
1935
1936 spin_lock_irqsave(hba->host->host_lock, flags);
1937 hba->hibern8_on_idle.active_reqs++;
1938
1939 if (ufshcd_eh_in_progress(hba)) {
1940 spin_unlock_irqrestore(hba->host->host_lock, flags);
1941 return 0;
1942 }
1943
1944start:
1945 switch (hba->hibern8_on_idle.state) {
1946 case HIBERN8_EXITED:
1947 break;
1948 case REQ_HIBERN8_ENTER:
1949 if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
1950 hba->hibern8_on_idle.state = HIBERN8_EXITED;
1951 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1952 hba->hibern8_on_idle.state);
1953 break;
1954 }
1955 /*
 1956		 * If we are here, it means Hibern8 enter work is either done or
1957 * currently running. Hence, fall through to cancel hibern8
1958 * work and exit hibern8.
1959 */
1960 case HIBERN8_ENTERED:
1961 __ufshcd_scsi_block_requests(hba);
1962 hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
1963 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
1964 hba->hibern8_on_idle.state);
1965 schedule_work(&hba->hibern8_on_idle.exit_work);
1966 /*
1967 * fall through to check if we should wait for this
1968 * work to be done or not.
1969 */
1970 case REQ_HIBERN8_EXIT:
1971 if (async) {
1972 rc = -EAGAIN;
1973 hba->hibern8_on_idle.active_reqs--;
1974 break;
1975 } else {
1976 spin_unlock_irqrestore(hba->host->host_lock, flags);
1977 flush_work(&hba->hibern8_on_idle.exit_work);
1978 /* Make sure state is HIBERN8_EXITED before returning */
1979 spin_lock_irqsave(hba->host->host_lock, flags);
1980 goto start;
1981 }
1982 default:
1983 dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
1984 __func__, hba->hibern8_on_idle.state);
1985 break;
1986 }
1987 spin_unlock_irqrestore(hba->host->host_lock, flags);
1988out:
1989 return rc;
1990}
1991
1992/* host lock must be held before calling this variant */
1993static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
1994{
1995 unsigned long delay_in_jiffies;
1996
1997 if (!ufshcd_is_hibern8_on_idle_allowed(hba))
1998 return;
1999
2000 hba->hibern8_on_idle.active_reqs--;
2001 BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
2002
2003 if (hba->hibern8_on_idle.active_reqs
2004 || hba->hibern8_on_idle.is_suspended
2005 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
2006 || hba->lrb_in_use || hba->outstanding_tasks
2007 || hba->active_uic_cmd || hba->uic_async_done
2008 || ufshcd_eh_in_progress(hba) || no_sched)
2009 return;
2010
2011 hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
2012 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2013 hba->hibern8_on_idle.state);
2014 /*
 2015	 * Scheduling the delayed work after 1 jiffy would let it run any
 2016	 * time from 0 ms to 1000/HZ ms, which is not desirable for the
 2017	 * hibern8 enter work as it may impact performance if it gets
 2018	 * scheduled almost immediately. Hence make sure that the hibern8
 2019	 * enter work gets scheduled at least 2 jiffies out (any time
 2020	 * between 1000/HZ ms and 2000/HZ ms).
2021 */
2022 delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
2023 if (delay_in_jiffies == 1)
2024 delay_in_jiffies++;
2025
2026 schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
2027 delay_in_jiffies);
2028}
2029
2030static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
2031{
2032 unsigned long flags;
2033
2034 spin_lock_irqsave(hba->host->host_lock, flags);
2035 __ufshcd_hibern8_release(hba, no_sched);
2036 spin_unlock_irqrestore(hba->host->host_lock, flags);
2037}
2038
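/*
 * ufshcd_hibern8_enter_work - delayed work to put the link into hibern8
 * @work: pointer to the hibern8_on_idle.enter_work delayed work
 *
 * Bails out if hibern8 on idle is suspended or the host is still busy.
 * Otherwise the link is put into hibern8 and the state moves to
 * HIBERN8_ENTERED, unless an exit request raced in meanwhile.
 */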
2039static void ufshcd_hibern8_enter_work(struct work_struct *work)
2040{
2041 struct ufs_hba *hba = container_of(work, struct ufs_hba,
2042 hibern8_on_idle.enter_work.work);
2043 unsigned long flags;
2044
2045 spin_lock_irqsave(hba->host->host_lock, flags);
2046 if (hba->hibern8_on_idle.is_suspended) {
2047 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2048 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2049 hba->hibern8_on_idle.state);
2050 goto rel_lock;
2051 }
2052
2053 if (hba->hibern8_on_idle.active_reqs
2054 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
2055 || hba->lrb_in_use || hba->outstanding_tasks
2056 || hba->active_uic_cmd || hba->uic_async_done)
2057 goto rel_lock;
2058
2059 spin_unlock_irqrestore(hba->host->host_lock, flags);
2060
2061 if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
2062 /* Enter failed */
2063 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2064 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2065 hba->hibern8_on_idle.state);
2066 goto out;
2067 }
2068 ufshcd_set_link_hibern8(hba);
2069
2070 /*
2071 * In case you are here to cancel this work the hibern8_on_idle.state
2072 * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
2073 * as REQ_HIBERN8_EXIT which would anyway imply that we are in hibern8
 2074	 * and a request to exit from it is pending. This way, we keep
 2075	 * the state machine intact and ultimately prevent the cancel
 2076	 * work from running multiple times when new requests arrive
 2077	 * before the current cancel work is done.
2078 */
2079 spin_lock_irqsave(hba->host->host_lock, flags);
2080 if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
2081 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
2082 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2083 hba->hibern8_on_idle.state);
2084 }
2085rel_lock:
2086 spin_unlock_irqrestore(hba->host->host_lock, flags);
2087out:
2088 return;
2089}
2090
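/*
 * __ufshcd_set_auto_hibern8_timer - quiesce the host and reprogram the
 * auto hibern8 idle timer
 * @hba: per adapter instance
 * @delay_ms: new idle timeout in milliseconds
 *
 * Takes a runtime PM reference, holds clocks/hibern8, blocks SCSI
 * requests and waits for the doorbells to clear before writing the new
 * timer value, then releases everything in reverse order.
 */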
Subhash Jadavanid13daec2017-05-15 18:17:57 -07002091static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
2092 unsigned long delay_ms)
2093{
2094 pm_runtime_get_sync(hba->dev);
2095 ufshcd_hold_all(hba);
2096 ufshcd_scsi_block_requests(hba);
2097 down_write(&hba->lock);
2098 /* wait for all the outstanding requests to finish */
2099 ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
2100 ufshcd_set_auto_hibern8_timer(hba, delay_ms);
2101 up_write(&hba->lock);
2102 ufshcd_scsi_unblock_requests(hba);
2103 ufshcd_release_all(hba);
2104 pm_runtime_put_sync(hba->dev);
2105}
2106
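/*
 * ufshcd_hibern8_exit_work - work to bring the link out of hibern8
 * @work: pointer to the hibern8_on_idle.exit_work work_struct
 *
 * Cancels any pending hibern8 enter work first. If the link is already
 * active nothing needs to be done; otherwise the clocks are held, the
 * link is brought out of hibern8 and marked active, and blocked SCSI
 * requests are resumed.
 */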
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002107static void ufshcd_hibern8_exit_work(struct work_struct *work)
2108{
2109 int ret;
2110 unsigned long flags;
2111 struct ufs_hba *hba = container_of(work, struct ufs_hba,
2112 hibern8_on_idle.exit_work);
2113
2114 cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
2115
2116 spin_lock_irqsave(hba->host->host_lock, flags);
2117 if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
2118 || ufshcd_is_link_active(hba)) {
2119 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2120 spin_unlock_irqrestore(hba->host->host_lock, flags);
2121 goto unblock_reqs;
2122 }
2123 spin_unlock_irqrestore(hba->host->host_lock, flags);
2124
2125 /* Exit from hibern8 */
2126 if (ufshcd_is_link_hibern8(hba)) {
Asutosh Das3da913a2017-03-24 10:32:16 +05302127 hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002128 ufshcd_hold(hba, false);
2129 ret = ufshcd_uic_hibern8_exit(hba);
Asutosh Das3da913a2017-03-24 10:32:16 +05302130 hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002131 ufshcd_release(hba, false);
2132 if (!ret) {
2133 spin_lock_irqsave(hba->host->host_lock, flags);
2134 ufshcd_set_link_active(hba);
2135 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2136 trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
2137 hba->hibern8_on_idle.state);
2138 spin_unlock_irqrestore(hba->host->host_lock, flags);
2139 }
2140 }
2141unblock_reqs:
2142 ufshcd_scsi_unblock_requests(hba);
2143}
2144
2145static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
2146 struct device_attribute *attr, char *buf)
2147{
2148 struct ufs_hba *hba = dev_get_drvdata(dev);
2149
2150 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
2151}
2152
2153static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
2154 struct device_attribute *attr, const char *buf, size_t count)
2155{
2156 struct ufs_hba *hba = dev_get_drvdata(dev);
2157 unsigned long flags, value;
Subhash Jadavanid13daec2017-05-15 18:17:57 -07002158 bool change = true;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002159
2160 if (kstrtoul(buf, 0, &value))
2161 return -EINVAL;
2162
2163 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanid13daec2017-05-15 18:17:57 -07002164 if (hba->hibern8_on_idle.delay_ms == value)
2165 change = false;
2166
2167 if (value >= hba->clk_gating.delay_ms_pwr_save ||
2168 value >= hba->clk_gating.delay_ms_perf) {
2169 dev_err(hba->dev, "hibern8_on_idle_delay (%lu) can not be >= to clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
2170 value, hba->clk_gating.delay_ms_pwr_save,
2171 hba->clk_gating.delay_ms_perf);
2172 spin_unlock_irqrestore(hba->host->host_lock, flags);
2173 return -EINVAL;
2174 }
2175
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002176 hba->hibern8_on_idle.delay_ms = value;
2177 spin_unlock_irqrestore(hba->host->host_lock, flags);
2178
2179 /* Update auto hibern8 timer value if supported */
Subhash Jadavanid13daec2017-05-15 18:17:57 -07002180 if (change && ufshcd_is_auto_hibern8_supported(hba) &&
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002181 hba->hibern8_on_idle.is_enabled)
Subhash Jadavanid13daec2017-05-15 18:17:57 -07002182 __ufshcd_set_auto_hibern8_timer(hba,
2183 hba->hibern8_on_idle.delay_ms);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002184
2185 return count;
2186}
2187
2188static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
2189 struct device_attribute *attr, char *buf)
2190{
2191 struct ufs_hba *hba = dev_get_drvdata(dev);
2192
2193 return snprintf(buf, PAGE_SIZE, "%d\n",
2194 hba->hibern8_on_idle.is_enabled);
2195}
2196
2197static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
2198 struct device_attribute *attr, const char *buf, size_t count)
2199{
2200 struct ufs_hba *hba = dev_get_drvdata(dev);
2201 unsigned long flags;
2202 u32 value;
2203
2204 if (kstrtou32(buf, 0, &value))
2205 return -EINVAL;
2206
2207 value = !!value;
2208 if (value == hba->hibern8_on_idle.is_enabled)
2209 goto out;
2210
2211 /* Update auto hibern8 timer value if supported */
2212 if (ufshcd_is_auto_hibern8_supported(hba)) {
Subhash Jadavanid13daec2017-05-15 18:17:57 -07002213 __ufshcd_set_auto_hibern8_timer(hba,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002214 value ? hba->hibern8_on_idle.delay_ms : value);
2215 goto update;
2216 }
2217
2218 if (value) {
2219 /*
2220 * As clock gating work would wait for the hibern8 enter work
2221 * to finish, clocks would remain on during hibern8 enter work.
2222 */
2223 ufshcd_hold(hba, false);
2224 ufshcd_release_all(hba);
2225 } else {
2226 spin_lock_irqsave(hba->host->host_lock, flags);
2227 hba->hibern8_on_idle.active_reqs++;
2228 spin_unlock_irqrestore(hba->host->host_lock, flags);
2229 }
2230
2231update:
2232 hba->hibern8_on_idle.is_enabled = value;
2233out:
2234 return count;
2235}
2236
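/*
 * ufshcd_init_hibern8_on_idle - set up hibern8-on-idle handling
 * @hba: per adapter instance
 *
 * For controllers with auto hibern8 support the hardware idle timer is
 * used and software hibern8 on idle is disabled; otherwise the
 * enter/exit works are initialized with a default software delay. The
 * delay and enable knobs are exposed through sysfs in both cases.
 */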
2237static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
2238{
2239 /* initialize the state variable here */
2240 hba->hibern8_on_idle.state = HIBERN8_EXITED;
2241
2242 if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2243 !ufshcd_is_auto_hibern8_supported(hba))
2244 return;
2245
2246 if (ufshcd_is_auto_hibern8_supported(hba)) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07002247 hba->hibern8_on_idle.delay_ms = 1;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002248 hba->hibern8_on_idle.state = AUTO_HIBERN8;
2249 /*
2250 * Disable SW hibern8 enter on idle in case
2251 * auto hibern8 is supported
2252 */
2253 hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
2254 } else {
Subhash Jadavani9c807702017-04-01 00:35:51 -07002255 hba->hibern8_on_idle.delay_ms = 10;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002256 INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
2257 ufshcd_hibern8_enter_work);
2258 INIT_WORK(&hba->hibern8_on_idle.exit_work,
2259 ufshcd_hibern8_exit_work);
2260 }
2261
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002262 hba->hibern8_on_idle.is_enabled = true;
2263
2264 hba->hibern8_on_idle.delay_attr.show =
2265 ufshcd_hibern8_on_idle_delay_show;
2266 hba->hibern8_on_idle.delay_attr.store =
2267 ufshcd_hibern8_on_idle_delay_store;
2268 sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
2269 hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
2270 hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
2271 if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
2272 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
2273
2274 hba->hibern8_on_idle.enable_attr.show =
2275 ufshcd_hibern8_on_idle_enable_show;
2276 hba->hibern8_on_idle.enable_attr.store =
2277 ufshcd_hibern8_on_idle_enable_store;
2278 sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
2279 hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
2280 hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
2281 if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
2282 dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
2283}
2284
2285static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
2286{
2287 if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
2288 !ufshcd_is_auto_hibern8_supported(hba))
2289 return;
2290 device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
2291 device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
2292}
2293
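/*
 * ufshcd_hold_all - grab both the clock gating and hibern8 references
 * @hba: per adapter instance
 */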
2294static void ufshcd_hold_all(struct ufs_hba *hba)
2295{
2296 ufshcd_hold(hba, false);
2297 ufshcd_hibern8_hold(hba, false);
2298}
2299
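/*
 * ufshcd_release_all - drop the references taken by ufshcd_hold_all()
 * @hba: per adapter instance
 */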
2300static void ufshcd_release_all(struct ufs_hba *hba)
2301{
2302 ufshcd_hibern8_release(hba, false);
2303 ufshcd_release(hba, false);
2304}
2305
Sahitya Tummala856b3482014-09-25 15:32:34 +03002306/* Must be called with host lock acquired */
2307static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2308{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002309 bool queue_resume_work = false;
2310
2311 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03002312 return;
2313
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002314 if (!hba->clk_scaling.active_reqs++)
2315 queue_resume_work = true;
2316
2317 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
2318 return;
2319
2320 if (queue_resume_work)
2321 queue_work(hba->clk_scaling.workq,
2322 &hba->clk_scaling.resume_work);
2323
2324 if (!hba->clk_scaling.window_start_t) {
2325 hba->clk_scaling.window_start_t = jiffies;
2326 hba->clk_scaling.tot_busy_t = 0;
2327 hba->clk_scaling.is_busy_started = false;
2328 }
2329
Sahitya Tummala856b3482014-09-25 15:32:34 +03002330 if (!hba->clk_scaling.is_busy_started) {
2331 hba->clk_scaling.busy_start_t = ktime_get();
2332 hba->clk_scaling.is_busy_started = true;
2333 }
2334}
2335
2336static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2337{
2338 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2339
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002340 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +03002341 return;
2342
2343 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2344 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2345 scaling->busy_start_t));
2346 scaling->busy_start_t = ktime_set(0, 0);
2347 scaling->is_busy_started = false;
2348 }
2349}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002350
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302351/**
2352 * ufshcd_send_command - Send SCSI or device management commands
2353 * @hba: per adapter instance
2354 * @task_tag: Task tag of the command
2355 */
2356static inline
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002357int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302358{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002359 int ret = 0;
2360
2361 hba->lrb[task_tag].issue_time_stamp = ktime_get();
2362 hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
Sahitya Tummala856b3482014-09-25 15:32:34 +03002363 ufshcd_clk_scaling_start_busy(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302364 __set_bit(task_tag, &hba->outstanding_reqs);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302365 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002366 /* Make sure that doorbell is committed immediately */
2367 wmb();
Subhash Jadavani114437e2017-11-08 16:22:16 -08002368 ufshcd_cond_add_cmd_trace(hba, task_tag,
2369 hba->lrb[task_tag].cmd ? "scsi_send" : "dev_cmd_send");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002370 ufshcd_update_tag_stats(hba, task_tag);
2371 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302372}
2373
2374/**
2375 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2376 * @lrb - pointer to local reference block
2377 */
2378static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2379{
2380 int len;
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05302381 if (lrbp->sense_buffer &&
2382 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002383 int len_to_copy;
2384
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302385 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002386 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2387
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302388 memcpy(lrbp->sense_buffer,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302389 lrbp->ucd_rsp_ptr->sr.sense_data,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002390 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302391 }
2392}
2393
2394/**
Dolev Raviv68078d52013-07-30 00:35:58 +05302395 * ufshcd_copy_query_response() - Copy the Query Response and the data
2396 * descriptor
2397 * @hba: per adapter instance
2398 * @lrb - pointer to local reference block
2399 */
2400static
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002401int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Dolev Raviv68078d52013-07-30 00:35:58 +05302402{
2403 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2404
Dolev Raviv68078d52013-07-30 00:35:58 +05302405 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302406
Dolev Raviv68078d52013-07-30 00:35:58 +05302407 /* Get the descriptor */
2408 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03002409 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
Dolev Raviv68078d52013-07-30 00:35:58 +05302410 GENERAL_UPIU_REQUEST_SIZE;
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002411 u16 resp_len;
2412 u16 buf_len;
Dolev Raviv68078d52013-07-30 00:35:58 +05302413
2414 /* data segment length */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002415 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
Dolev Raviv68078d52013-07-30 00:35:58 +05302416 MASK_QUERY_DATA_SEG_LEN;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03002417 buf_len = be16_to_cpu(
2418 hba->dev_cmd.query.request.upiu_req.length);
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002419 if (likely(buf_len >= resp_len)) {
2420 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2421 } else {
2422 dev_warn(hba->dev,
2423 "%s: Response size is bigger than buffer",
2424 __func__);
2425 return -EINVAL;
2426 }
Dolev Raviv68078d52013-07-30 00:35:58 +05302427 }
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002428
2429 return 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302430}
2431
2432/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302433 * ufshcd_hba_capabilities - Read controller capabilities
2434 * @hba: per adapter instance
2435 */
2436static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2437{
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302438 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302439
2440 /* nutrs and nutmrs are 0 based values */
2441 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2442 hba->nutmrs =
2443 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2444}
2445
2446/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302447 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2448 * to accept UIC commands
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302449 * @hba: per adapter instance
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302450 * Return true on success, else false
2451 */
2452static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2453{
2454 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2455 return true;
2456 else
2457 return false;
2458}
2459
2460/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05302461 * ufshcd_get_upmcrs - Get the power mode change request status
2462 * @hba: Pointer to adapter instance
2463 *
2464 * This function gets the UPMCRS field of HCS register
2465 * Returns value of UPMCRS field
2466 */
2467static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2468{
2469 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2470}
2471
2472/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302473 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2474 * @hba: per adapter instance
2475 * @uic_cmd: UIC command
2476 *
2477 * Mutex must be held.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302478 */
2479static inline void
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302480ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302481{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302482 WARN_ON(hba->active_uic_cmd);
2483
2484 hba->active_uic_cmd = uic_cmd;
2485
Subhash Jadavani114437e2017-11-08 16:22:16 -08002486 ufshcd_dme_cmd_log(hba, "dme_send", hba->active_uic_cmd->command);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302487 /* Write Args */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302488 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2489 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2490 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302491
2492 /* Write UIC Cmd */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302493 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
Seungwon Jeonb873a2752013-06-26 22:39:26 +05302494 REG_UIC_COMMAND);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302495}
2496
2497/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302498 * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
2499 * @hba: per adapter instance
2500 * @uic_command: UIC command
2501 *
2502 * Must be called with mutex held.
2503 * Returns 0 only if success.
2504 */
2505static int
2506ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2507{
2508 int ret;
2509 unsigned long flags;
2510
2511 if (wait_for_completion_timeout(&uic_cmd->done,
2512 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2513 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2514 else
2515 ret = -ETIMEDOUT;
2516
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002517 if (ret)
2518 ufsdbg_set_err_state(hba);
2519
Subhash Jadavani114437e2017-11-08 16:22:16 -08002520 ufshcd_dme_cmd_log(hba, "dme_cmpl_1", hba->active_uic_cmd->command);
Can Guob7147732017-04-18 16:22:56 +08002521
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302522 spin_lock_irqsave(hba->host->host_lock, flags);
2523 hba->active_uic_cmd = NULL;
2524 spin_unlock_irqrestore(hba->host->host_lock, flags);
2525
2526 return ret;
2527}
2528
2529/**
2530 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2531 * @hba: per adapter instance
2532 * @uic_cmd: UIC command
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002533 * @completion: initialize the completion only if this is set to true
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302534 *
 2535 * Identical to ufshcd_send_uic_cmd() except the mutex handling. Must be called
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002536 * with mutex held and host_lock locked.
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302537 * Returns 0 only if success.
2538 */
2539static int
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002540__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2541 bool completion)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302542{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302543 if (!ufshcd_ready_for_uic_cmd(hba)) {
2544 dev_err(hba->dev,
2545 "Controller not ready to accept UIC commands\n");
2546 return -EIO;
2547 }
2548
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002549 if (completion)
2550 init_completion(&uic_cmd->done);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302551
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302552 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302553
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002554 return 0;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302555}
2556
2557/**
2558 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2559 * @hba: per adapter instance
2560 * @uic_cmd: UIC command
2561 *
2562 * Returns 0 only if success.
2563 */
2564static int
2565ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2566{
2567 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002568 unsigned long flags;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302569
Asutosh Das3da913a2017-03-24 10:32:16 +05302570 hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002571 ufshcd_hold_all(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302572 mutex_lock(&hba->uic_cmd_mutex);
Yaniv Gardicad2e032015-03-31 17:37:14 +03002573 ufshcd_add_delay_before_dme_cmd(hba);
2574
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002575 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02002576 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03002577 spin_unlock_irqrestore(hba->host->host_lock, flags);
2578 if (!ret)
2579 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2580
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002581 ufshcd_save_tstamp_of_last_dme_cmd(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302582 mutex_unlock(&hba->uic_cmd_mutex);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002583 ufshcd_release_all(hba);
Asutosh Das3da913a2017-03-24 10:32:16 +05302584 hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302585
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002586 ufsdbg_error_inject_dispatcher(hba,
2587 ERR_INJECT_UIC, 0, &ret);
2588
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05302589 return ret;
2590}
2591
2592/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302593 * ufshcd_map_sg - Map scatter-gather list to prdt
2594 * @lrbp - pointer to local reference block
2595 *
2596 * Returns 0 in case of success, non-zero value in case of failure
2597 */
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00002598static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302599{
2600 struct ufshcd_sg_entry *prd_table;
2601 struct scatterlist *sg;
2602 struct scsi_cmnd *cmd;
2603 int sg_segments;
2604 int i;
2605
2606 cmd = lrbp->cmd;
2607 sg_segments = scsi_dma_map(cmd);
2608 if (sg_segments < 0)
2609 return sg_segments;
2610
2611 if (sg_segments) {
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00002612 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2613 lrbp->utr_descriptor_ptr->prd_table_length =
2614 cpu_to_le16((u16)(sg_segments *
2615 sizeof(struct ufshcd_sg_entry)));
2616 else
2617 lrbp->utr_descriptor_ptr->prd_table_length =
2618 cpu_to_le16((u16) (sg_segments));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302619
2620 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2621
2622 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2623 prd_table[i].size =
2624 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2625 prd_table[i].base_addr =
2626 cpu_to_le32(lower_32_bits(sg->dma_address));
2627 prd_table[i].upper_addr =
2628 cpu_to_le32(upper_32_bits(sg->dma_address));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002629 prd_table[i].reserved = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302630 }
2631 } else {
2632 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2633 }
2634
2635 return 0;
2636}
2637
2638/**
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302639 * ufshcd_enable_intr - enable interrupts
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302640 * @hba: per adapter instance
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302641 * @intrs: interrupt bits
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302642 */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302643static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302644{
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302645 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2646
2647 if (hba->ufs_version == UFSHCI_VERSION_10) {
2648 u32 rw;
2649 rw = set & INTERRUPT_MASK_RW_VER_10;
2650 set = rw | ((set ^ intrs) & intrs);
2651 } else {
2652 set |= intrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302653 }
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05302654
2655 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2656}
2657
2658/**
2659 * ufshcd_disable_intr - disable interrupts
2660 * @hba: per adapter instance
2661 * @intrs: interrupt bits
2662 */
2663static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2664{
2665 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2666
2667 if (hba->ufs_version == UFSHCI_VERSION_10) {
2668 u32 rw;
2669 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2670 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2671 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2672
2673 } else {
2674 set &= ~intrs;
2675 }
2676
2677 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302678}
2679
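/*
 * ufshcd_prepare_crypto_utrd - fill the crypto fields of the UTRD
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Asks the vendor ops for the crypto parameters (enable flag, crypto
 * config index and DUN) of this request and, if encryption is enabled,
 * programs them into the transfer request descriptor header.
 *
 * Returns 0 on success or if crypto is not enabled for this request,
 * otherwise the error returned by the vendor hook (including -EAGAIN).
 */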
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002680static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
2681 struct ufshcd_lrb *lrbp)
2682{
2683 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2684 u8 cc_index = 0;
2685 bool enable = false;
2686 u64 dun = 0;
2687 int ret;
2688
2689 /*
2690 * Call vendor specific code to get crypto info for this request:
2691 * enable, crypto config. index, DUN.
2692 * If bypass is set, don't bother setting the other fields.
2693 */
2694 ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
2695 if (ret) {
2696 if (ret != -EAGAIN) {
2697 dev_err(hba->dev,
2698 "%s: failed to setup crypto request (%d)\n",
2699 __func__, ret);
2700 }
2701
2702 return ret;
2703 }
2704
2705 if (!enable)
2706 goto out;
2707
2708 req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002709 req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
2710 req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
2711out:
2712 return 0;
2713}
2714
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302715/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302716 * ufshcd_prepare_req_desc_hdr() - Fills the request descriptor
 2717 * header according to the request
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002718 * @hba: per adapter instance
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302719 * @lrbp: pointer to local reference block
2720 * @upiu_flags: flags required in the header
2721 * @cmd_dir: requests data direction
2722 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002723static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
2724 struct ufshcd_lrb *lrbp, u32 *upiu_flags,
2725 enum dma_data_direction cmd_dir)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302726{
2727 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2728 u32 data_direction;
2729 u32 dword_0;
2730
2731 if (cmd_dir == DMA_FROM_DEVICE) {
2732 data_direction = UTP_DEVICE_TO_HOST;
2733 *upiu_flags = UPIU_CMD_FLAGS_READ;
2734 } else if (cmd_dir == DMA_TO_DEVICE) {
2735 data_direction = UTP_HOST_TO_DEVICE;
2736 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2737 } else {
2738 data_direction = UTP_NO_DATA_TRANSFER;
2739 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2740 }
2741
2742 dword_0 = data_direction | (lrbp->command_type
2743 << UPIU_COMMAND_TYPE_OFFSET);
2744 if (lrbp->intr_cmd)
2745 dword_0 |= UTP_REQ_DESC_INT_CMD;
2746
2747 /* Transfer request descriptor header fields */
2748 req_desc->header.dword_0 = cpu_to_le32(dword_0);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002749 /* dword_1 is reserved, hence it is set to 0 */
2750 req_desc->header.dword_1 = 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302751 /*
2752 * assigning invalid value for command status. Controller
2753 * updates OCS on command completion, with the command
2754 * status
2755 */
2756 req_desc->header.dword_2 =
2757 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002758 /* dword_3 is reserved, hence it is set to 0 */
2759 req_desc->header.dword_3 = 0;
Yaniv Gardi51047262016-02-01 15:02:38 +02002760
2761 req_desc->prd_table_length = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002762
2763 if (ufshcd_is_crypto_supported(hba))
2764 return ufshcd_prepare_crypto_utrd(hba, lrbp);
2765
2766 return 0;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302767}
2768
2769/**
2770 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2771 * for scsi commands
2772 * @lrbp - local reference block pointer
2773 * @upiu_flags - flags
2774 */
2775static
2776void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2777{
2778 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002779 unsigned short cdb_len;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302780
2781 /* command descriptor fields */
2782 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2783 UPIU_TRANSACTION_COMMAND, upiu_flags,
2784 lrbp->lun, lrbp->task_tag);
2785 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2786 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2787
2788 /* Total EHS length and Data segment length will be zero */
2789 ucd_req_ptr->header.dword_2 = 0;
2790
2791 ucd_req_ptr->sc.exp_data_transfer_len =
2792 cpu_to_be32(lrbp->cmd->sdb.length);
2793
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002794 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002795 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002796 if (cdb_len < MAX_CDB_SIZE)
2797 memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
2798 (MAX_CDB_SIZE - cdb_len));
Yaniv Gardi52ac95f2016-02-01 15:02:37 +02002799 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302800}
2801
Dolev Raviv68078d52013-07-30 00:35:58 +05302802/**
2803 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 2804 * for query requests
2805 * @hba: UFS hba
2806 * @lrbp: local reference block pointer
2807 * @upiu_flags: flags
2808 */
2809static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2810 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2811{
2812 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2813 struct ufs_query *query = &hba->dev_cmd.query;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05302814 u16 len = be16_to_cpu(query->request.upiu_req.length);
Dolev Raviv68078d52013-07-30 00:35:58 +05302815 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2816
2817 /* Query request header */
2818 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2819 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2820 lrbp->lun, lrbp->task_tag);
2821 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2822 0, query->request.query_func, 0, 0);
2823
Zang Leigang68612852016-08-25 17:39:19 +08002824 /* Data segment length only need for WRITE_DESC */
2825 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2826 ucd_req_ptr->header.dword_2 =
2827 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2828 else
2829 ucd_req_ptr->header.dword_2 = 0;
Dolev Raviv68078d52013-07-30 00:35:58 +05302830
2831 /* Copy the Query Request buffer as is */
2832 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2833 QUERY_OSF_SIZE);
Dolev Raviv68078d52013-07-30 00:35:58 +05302834
2835 /* Copy the Descriptor */
Dolev Ravivc6d4a832014-06-29 09:40:18 +03002836 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2837 memcpy(descp, query->descriptor, len);
2838
Yaniv Gardi51047262016-02-01 15:02:38 +02002839 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Dolev Raviv68078d52013-07-30 00:35:58 +05302840}
2841
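/**
 * ufshcd_prepare_utp_nop_upiu() - fill the UPIU for a NOP OUT device
 * management command
 * @lrbp: pointer to local reference block
 *
 * Zeroes the request and response UPIUs and sets only the NOP OUT
 * transaction code and task tag in the basic header.
 */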
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302842static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2843{
2844 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2845
2846 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2847
2848 /* command descriptor fields */
2849 ucd_req_ptr->header.dword_0 =
2850 UPIU_HEADER_DWORD(
2851 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
Yaniv Gardi51047262016-02-01 15:02:38 +02002852 /* clear rest of the fields of basic header */
2853 ucd_req_ptr->header.dword_1 = 0;
2854 ucd_req_ptr->header.dword_2 = 0;
2855
2856 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302857}
2858
2859/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002860 * ufshcd_compose_upiu - form UFS Protocol Information Unit (UPIU)
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302861 * @hba - per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302862 * @lrbp - pointer to local reference block
2863 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002864static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302865{
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302866 u32 upiu_flags;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302867 int ret = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302868
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07002869 switch (lrbp->command_type) {
2870 case UTP_CMD_TYPE_SCSI:
2871 if (likely(lrbp->cmd)) {
2872 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
2873 &upiu_flags, lrbp->cmd->sc_data_direction);
2874 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2875 } else {
2876 ret = -EINVAL;
2877 }
2878 break;
2879 case UTP_CMD_TYPE_DEV_MANAGE:
2880 ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
2881 DMA_NONE);
2882 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2883 ufshcd_prepare_utp_query_req_upiu(
2884 hba, lrbp, upiu_flags);
2885 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2886 ufshcd_prepare_utp_nop_upiu(lrbp);
2887 else
2888 ret = -EINVAL;
2889 break;
2890 case UTP_CMD_TYPE_UFS:
2891 /* For UFS native command implementation */
2892 ret = -ENOTSUPP;
2893 dev_err(hba->dev, "%s: UFS native commands are not supported\n",
2894 __func__);
2895 break;
2896 default:
2897 ret = -ENOTSUPP;
2898 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
2899 __func__, lrbp->command_type);
2900 break;
2901 } /* end of switch */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05302902
2903 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302904}
2905
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03002906/**
2907 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2908 * @scsi_lun: scsi LUN id
2909 *
2910 * Returns UPIU LUN id
2911 */
2912static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
2913{
2914 if (scsi_is_wlun(scsi_lun))
2915 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2916 | UFS_UPIU_WLUN_ID;
2917 else
2918 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2919}
2920
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302921/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03002922 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2923 * @scsi_lun: UPIU W-LUN id
2924 *
2925 * Returns SCSI W-LUN id
2926 */
2927static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2928{
2929 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2930}
2931
2932/**
Subhash Jadavani9c807702017-04-01 00:35:51 -07002933 * ufshcd_get_write_lock - synchronize between shutdown, scaling &
2934 * arrival of requests
2935 * @hba: ufs host
2936 *
2937 * The lock is predominantly held by the shutdown context, thus ensuring
2938 * that no requests from any other context may sneak through.
2939 */
2940static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
2941{
2942 down_write(&hba->lock);
2943}
2944
2945/**
2946 * ufshcd_get_read_lock - synchronize between shutdown, scaling &
2947 * arrival of requests
2948 * @hba: ufs host
2949 *
2950 * Returns 1 if the lock was acquired, 0 if allowed without it, < 0 on contention
2951 *
2952 * After shutdown is initiated, only requests directed to the
2953 * well known device LUN are allowed. The sync between scaling & issue is
2954 * maintained as is, and this restructuring syncs shutdown with these too.
2955 */
2956static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
2957{
2958 int err = 0;
2959
2960 err = down_read_trylock(&hba->lock);
2961 if (err > 0)
2962 goto out;
2963 /* let requests for the well known device lun go through */
2964 if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
2965 return 0;
2966 else if (!ufshcd_is_shutdown_ongoing(hba))
2967 return -EAGAIN;
2968 else
2969 return -EPERM;
2970
2971out:
2972 return err;
2973}
2974
2975/**
2976 * ufshcd_put_read_lock - synchronize between shutdown, scaling &
2977 * arrival of requests
2978 * @hba: ufs host
2979 *
2980 * Returns none
2981 */
2982static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
2983{
2984 up_read(&hba->lock);
2985}
2986
2987/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05302988 * ufshcd_queuecommand - main entry point for SCSI requests
2989 * @cmd: command from SCSI Midlayer
2990 * @done: call back function
2991 *
2992 * Returns 0 for success, non-zero in case of failure
2993 */
2994static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2995{
2996 struct ufshcd_lrb *lrbp;
2997 struct ufs_hba *hba;
2998 unsigned long flags;
2999 int tag;
3000 int err = 0;
Subhash Jadavani9c807702017-04-01 00:35:51 -07003001 bool has_read_lock = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303002
3003 hba = shost_priv(host);
3004
Subhash Jadavani9c807702017-04-01 00:35:51 -07003005 if (!cmd || !cmd->request || !hba)
3006 return -EINVAL;
3007
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303008 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02003009 if (!ufshcd_valid_tag(hba, tag)) {
3010 dev_err(hba->dev,
3011 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
3012 __func__, tag, cmd, cmd->request);
3013 BUG();
3014 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303015
Subhash Jadavani9c807702017-04-01 00:35:51 -07003016 err = ufshcd_get_read_lock(hba, cmd->device->lun);
3017 if (unlikely(err < 0)) {
3018 if (err == -EPERM) {
3019 set_host_byte(cmd, DID_ERROR);
3020 cmd->scsi_done(cmd);
3021 return 0;
3022 }
3023 if (err == -EAGAIN)
3024 return SCSI_MLQUEUE_HOST_BUSY;
3025 } else if (err == 1) {
3026 has_read_lock = true;
3027 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003028
Subhash Jadavanief542222017-08-02 16:23:55 -07003029 /*
3030 * err might be non-zero here but logic later in this function
3031 * assumes that err is set to 0.
3032 */
3033 err = 0;
3034
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303035 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani9c807702017-04-01 00:35:51 -07003036
3037 /* if error handling is in progress, return host busy */
3038 if (ufshcd_eh_in_progress(hba)) {
3039 err = SCSI_MLQUEUE_HOST_BUSY;
3040 goto out_unlock;
3041 }
3042
Subhash Jadavanief542222017-08-02 16:23:55 -07003043 if (hba->extcon && ufshcd_is_card_offline(hba)) {
3044 set_host_byte(cmd, DID_BAD_TARGET);
3045 cmd->scsi_done(cmd);
3046 goto out_unlock;
3047 }
3048
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303049 switch (hba->ufshcd_state) {
3050 case UFSHCD_STATE_OPERATIONAL:
3051 break;
Zang Leiganga17bddc2017-04-04 19:32:20 +00003052 case UFSHCD_STATE_EH_SCHEDULED:
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303053 case UFSHCD_STATE_RESET:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303054 err = SCSI_MLQUEUE_HOST_BUSY;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303055 goto out_unlock;
3056 case UFSHCD_STATE_ERROR:
3057 set_host_byte(cmd, DID_ERROR);
3058 cmd->scsi_done(cmd);
3059 goto out_unlock;
3060 default:
3061 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
3062 __func__, hba->ufshcd_state);
3063 set_host_byte(cmd, DID_BAD_TARGET);
3064 cmd->scsi_done(cmd);
3065 goto out_unlock;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303066 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303067 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303068
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003069 hba->req_abort_count = 0;
3070
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303071 /* acquire the tag to make sure device cmds don't use it */
3072 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
3073 /*
3074 * Dev manage command in progress, requeue the command.
3075 * Requeuing the command helps in cases where the request *may*
3076 * find a different tag instead of waiting for dev manage command
3077 * completion.
3078 */
3079 err = SCSI_MLQUEUE_HOST_BUSY;
3080 goto out;
3081 }
3082
Asutosh Das3da913a2017-03-24 10:32:16 +05303083 hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003084 err = ufshcd_hold(hba, true);
3085 if (err) {
3086 err = SCSI_MLQUEUE_HOST_BUSY;
3087 clear_bit_unlock(tag, &hba->lrb_in_use);
3088 goto out;
3089 }
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07003090
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003091 if (ufshcd_is_clkgating_allowed(hba))
3092 WARN_ON(hba->clk_gating.state != CLKS_ON);
3093
3094 err = ufshcd_hibern8_hold(hba, true);
3095 if (err) {
3096 clear_bit_unlock(tag, &hba->lrb_in_use);
3097 err = SCSI_MLQUEUE_HOST_BUSY;
Asutosh Das3da913a2017-03-24 10:32:16 +05303098 hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003099 ufshcd_release(hba, true);
3100 goto out;
3101 }
3102 if (ufshcd_is_hibern8_on_idle_allowed(hba))
3103 WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
3104
3105 /* Vote PM QoS for the request */
3106 ufshcd_vops_pm_qos_req_start(hba, cmd->request);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03003107
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07003108 /* IO svc time latency histogram */
Subhash Jadavani9c807702017-04-01 00:35:51 -07003109 if (hba->latency_hist_enabled &&
3110 (cmd->request->cmd_type == REQ_TYPE_FS)) {
3111 cmd->request->lat_hist_io_start = ktime_get();
3112 cmd->request->lat_hist_enabled = 1;
3113 } else {
3114 cmd->request->lat_hist_enabled = 0;
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07003115 }
3116
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303117 WARN_ON(hba->clk_gating.state != CLKS_ON);
3118
3119 lrbp = &hba->lrb[tag];
3120
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303121 WARN_ON(lrbp->cmd);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303122 lrbp->cmd = cmd;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003123 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303124 lrbp->sense_buffer = cmd->sense_buffer;
3125 lrbp->task_tag = tag;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03003126 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
Yaniv Gardib8521902015-05-17 18:54:57 +03003127 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003128 lrbp->command_type = UTP_CMD_TYPE_SCSI;
3129 lrbp->req_abort_skip = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303130
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003131 /* form UPIU before issuing the command */
3132 err = ufshcd_compose_upiu(hba, lrbp);
3133 if (err) {
3134 if (err != -EAGAIN)
3135 dev_err(hba->dev,
3136 "%s: failed to compose upiu %d\n",
3137 __func__, err);
Stephen Boyd9bc70c32017-03-01 16:58:38 -08003138 lrbp->cmd = NULL;
3139 clear_bit_unlock(tag, &hba->lrb_in_use);
3140 ufshcd_release_all(hba);
3141 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3142 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003143 }
Joao Pinto300bb132016-05-11 12:21:27 +01003144
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00003145 err = ufshcd_map_sg(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303146 if (err) {
3147 lrbp->cmd = NULL;
3148 clear_bit_unlock(tag, &hba->lrb_in_use);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003149 ufshcd_release_all(hba);
3150 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303151 goto out;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303152 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303153
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003154 err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
3155 if (err) {
3156 if (err != -EAGAIN)
3157 dev_err(hba->dev,
3158 "%s: failed to configure crypto engine %d\n",
3159 __func__, err);
3160
3161 scsi_dma_unmap(lrbp->cmd);
3162 lrbp->cmd = NULL;
3163 clear_bit_unlock(tag, &hba->lrb_in_use);
3164 ufshcd_release_all(hba);
3165 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3166
3167 goto out;
3168 }
3169
3170 /* Make sure descriptors are ready before ringing the doorbell */
3171 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303172 /* issue command to the controller */
3173 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003174
3175 err = ufshcd_send_command(hba, tag);
3176 if (err) {
3177 spin_unlock_irqrestore(hba->host->host_lock, flags);
3178 scsi_dma_unmap(lrbp->cmd);
3179 lrbp->cmd = NULL;
3180 clear_bit_unlock(tag, &hba->lrb_in_use);
3181 ufshcd_release_all(hba);
3182 ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
3183 ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
3184 dev_err(hba->dev, "%s: failed sending command, %d\n",
3185 __func__, err);
3186 err = DID_ERROR;
3187 goto out;
3188 }
3189
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05303190out_unlock:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303191 spin_unlock_irqrestore(hba->host->host_lock, flags);
3192out:
Subhash Jadavani9c807702017-04-01 00:35:51 -07003193 if (has_read_lock)
3194 ufshcd_put_read_lock(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303195 return err;
3196}
3197
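/**
 * ufshcd_compose_dev_cmd - compose the LRB and UPIU for a device
 * management command
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @cmd_type: device management command type (NOP, Query...)
 * @tag: tag of the slot reserved for this command
 *
 * Device management commands carry no SCSI command or sense buffer and
 * are not specific to any LUN.
 */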
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303198static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3199 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3200{
3201 lrbp->cmd = NULL;
3202 lrbp->sense_bufflen = 0;
3203 lrbp->sense_buffer = NULL;
3204 lrbp->task_tag = tag;
3205 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003206 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303207 lrbp->intr_cmd = true; /* No interrupt aggregation */
3208 hba->dev_cmd.type = cmd_type;
3209
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003210 return ufshcd_compose_upiu(hba, lrbp);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303211}
3212
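/**
 * ufshcd_clear_cmd - clear the UTP transfer request corresponding to a tag
 * @hba: per adapter instance
 * @tag: tag of the outstanding transfer request to be cleared
 *
 * Returns 0 if the controller clears the doorbell bit within 1 second,
 * error code otherwise.
 */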
3213static int
3214ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
3215{
3216 int err = 0;
3217 unsigned long flags;
3218 u32 mask = 1 << tag;
3219
3220 /* clear outstanding transaction before retry */
3221 spin_lock_irqsave(hba->host->host_lock, flags);
3222 ufshcd_utrl_clear(hba, tag);
3223 spin_unlock_irqrestore(hba->host->host_lock, flags);
3224
3225 /*
3226 * wait for h/w to clear the corresponding bit in the doorbell.
3227 * max. wait is 1 sec.
3228 */
3229 err = ufshcd_wait_for_register(hba,
3230 REG_UTP_TRANSFER_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02003231 mask, ~mask, 1000, 1000, true);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303232
3233 return err;
3234}
3235
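/**
 * ufshcd_check_query_response - extract the query response result code
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns the query response code taken from the response UPIU (0 on
 * success).
 */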
Dolev Ravivc6d4a832014-06-29 09:40:18 +03003236static int
3237ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3238{
3239 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
3240
3241 /* Get the UPIU response */
3242 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
3243 UPIU_RSP_CODE_OFFSET;
3244 return query_res->response;
3245}
3246
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303247/**
3248 * ufshcd_dev_cmd_completion() - handles device management command responses
3249 * @hba: per adapter instance
3250 * @lrbp: pointer to local reference block
3251 */
3252static int
3253ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3254{
3255 int resp;
3256 int err = 0;
3257
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003258 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303259 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3260
3261 switch (resp) {
3262 case UPIU_TRANSACTION_NOP_IN:
3263 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3264 err = -EINVAL;
3265 dev_err(hba->dev, "%s: unexpected response %x\n",
3266 __func__, resp);
3267 }
3268 break;
Dolev Raviv68078d52013-07-30 00:35:58 +05303269 case UPIU_TRANSACTION_QUERY_RSP:
Dolev Ravivc6d4a832014-06-29 09:40:18 +03003270 err = ufshcd_check_query_response(hba, lrbp);
3271 if (!err)
3272 err = ufshcd_copy_query_response(hba, lrbp);
Dolev Raviv68078d52013-07-30 00:35:58 +05303273 break;
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303274 case UPIU_TRANSACTION_REJECT_UPIU:
3275 /* TODO: handle Reject UPIU Response */
3276 err = -EPERM;
3277 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3278 __func__);
3279 break;
3280 default:
3281 err = -EINVAL;
3282 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3283 __func__, resp);
3284 break;
3285 }
3286
3287 return err;
3288}
3289
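/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command completion
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @max_timeout: maximum timeout in milliseconds
 *
 * On timeout the command is cleared from the doorbell and -ETIMEDOUT is
 * returned, or -EAGAIN if the command was cleared successfully and may be
 * retried.
 */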
3290static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3291 struct ufshcd_lrb *lrbp, int max_timeout)
3292{
3293 int err = 0;
3294 unsigned long time_left;
3295 unsigned long flags;
3296
3297 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3298 msecs_to_jiffies(max_timeout));
3299
3300 spin_lock_irqsave(hba->host->host_lock, flags);
3301 hba->dev_cmd.complete = NULL;
3302 if (likely(time_left)) {
3303 err = ufshcd_get_tr_ocs(lrbp);
3304 if (!err)
3305 err = ufshcd_dev_cmd_completion(hba, lrbp);
3306 }
3307 spin_unlock_irqrestore(hba->host->host_lock, flags);
3308
3309 if (!time_left) {
3310 err = -ETIMEDOUT;
Yaniv Gardia48353f2016-02-01 15:02:40 +02003311 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
3312 __func__, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303313 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
Yaniv Gardia48353f2016-02-01 15:02:40 +02003314 /* successfully cleared the command, retry if needed */
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303315 err = -EAGAIN;
Yaniv Gardia48353f2016-02-01 15:02:40 +02003316 /*
3317 * in case of an error, after clearing the doorbell,
3318 * we also need to clear the outstanding_request
3319 * field in hba
3320 */
3321 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303322 }
3323
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003324 if (err)
3325 ufsdbg_set_err_state(hba);
3326
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303327 return err;
3328}
3329
3330/**
3331 * ufshcd_get_dev_cmd_tag - Get device management command tag
3332 * @hba: per-adapter instance
3333 * @tag_out: pointer to variable with available slot value
3334 *
3335 * Get a free slot and lock it until device management command
3336 * completes.
3337 *
3338 * Returns false if free slot is unavailable for locking, else
3339 * return true with tag value in @tag_out.
3340 */
3341static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
3342{
3343 int tag;
3344 bool ret = false;
3345 unsigned long tmp;
3346
3347 if (!tag_out)
3348 goto out;
3349
3350 do {
3351 tmp = ~hba->lrb_in_use;
3352 tag = find_last_bit(&tmp, hba->nutrs);
3353 if (tag >= hba->nutrs)
3354 goto out;
3355 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
3356
3357 *tag_out = tag;
3358 ret = true;
3359out:
3360 return ret;
3361}
3362
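/* Release a tag acquired via ufshcd_get_dev_cmd_tag() */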
3363static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
3364{
3365 clear_bit_unlock(tag, &hba->lrb_in_use);
3366}
3367
3368/**
3369 * ufshcd_exec_dev_cmd - API for sending device management requests
3370 * @hba - UFS hba
3371 * @cmd_type - specifies the type (NOP, Query...)
3372 * @timeout - timeout in milliseconds
3373 *
Dolev Raviv68078d52013-07-30 00:35:58 +05303374 * NOTE: Since there is only one available tag for device management commands,
3375 * it is expected you hold the hba->dev_cmd.lock mutex.
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303376 */
3377static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3378 enum dev_cmd_type cmd_type, int timeout)
3379{
3380 struct ufshcd_lrb *lrbp;
3381 int err;
3382 int tag;
3383 struct completion wait;
3384 unsigned long flags;
3385
Subhash Jadavani9c807702017-04-01 00:35:51 -07003386 /*
3387 * May get invoked from shutdown and IOCTL contexts.
3388 * In shutdown context, it comes in with lock acquired.
Bao D. Nguyen80d4ffb2017-06-05 17:31:53 -07003389 * In error recovery context, it may come with lock acquired.
Subhash Jadavani9c807702017-04-01 00:35:51 -07003390 */
Bao D. Nguyen80d4ffb2017-06-05 17:31:53 -07003391
3392 if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
Subhash Jadavani9c807702017-04-01 00:35:51 -07003393 down_read(&hba->lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003394
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303395 /*
3396 * Get free slot, sleep if slots are unavailable.
3397 * Even though we use wait_event() which sleeps indefinitely,
3398 * the maximum wait time is bounded by SCSI request timeout.
3399 */
3400 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
3401
3402 init_completion(&wait);
3403 lrbp = &hba->lrb[tag];
3404 WARN_ON(lrbp->cmd);
3405 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3406 if (unlikely(err))
3407 goto out_put_tag;
3408
3409 hba->dev_cmd.complete = &wait;
3410
Yaniv Gardie3dfdc52016-02-01 15:02:49 +02003411 /* Make sure descriptors are ready before ringing the doorbell */
3412 wmb();
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303413 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003414 err = ufshcd_send_command(hba, tag);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303415 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003416 if (err) {
3417 dev_err(hba->dev, "%s: failed sending command, %d\n",
3418 __func__, err);
3419 goto out_put_tag;
3420 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303421 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3422
3423out_put_tag:
3424 ufshcd_put_dev_cmd_tag(hba, tag);
3425 wake_up(&hba->dev_cmd.tag_wq);
Bao D. Nguyen80d4ffb2017-06-05 17:31:53 -07003426 if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
Subhash Jadavani9c807702017-04-01 00:35:51 -07003427 up_read(&hba->lock);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05303428 return err;
3429}
3430
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05303431/**
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003432 * ufshcd_init_query() - init the query response and request parameters
3433 * @hba: per-adapter instance
3434 * @request: address of the request pointer to be initialized
3435 * @response: address of the response pointer to be initialized
3436 * @opcode: operation to perform
3437 * @idn: idn of the flag/attribute/descriptor to access
3438 * @index: LU number to access
3439 * @selector: query/flag/descriptor further identification
3440 */
3441static inline void ufshcd_init_query(struct ufs_hba *hba,
3442 struct ufs_query_req **request, struct ufs_query_res **response,
3443 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3444{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003445 int idn_t = (int)idn;
3446
3447 ufsdbg_error_inject_dispatcher(hba,
3448 ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
3449 idn = idn_t;
3450
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003451 *request = &hba->dev_cmd.query.request;
3452 *response = &hba->dev_cmd.query.response;
3453 memset(*request, 0, sizeof(struct ufs_query_req));
3454 memset(*response, 0, sizeof(struct ufs_query_res));
3455 (*request)->upiu_req.opcode = opcode;
3456 (*request)->upiu_req.idn = idn;
3457 (*request)->upiu_req.index = index;
3458 (*request)->upiu_req.selector = selector;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003459
3460 ufshcd_update_query_stats(hba, opcode, idn);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003461}
3462
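/**
 * ufshcd_query_flag_retry - query flag with retries upon failure
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Wrapper around ufshcd_query_flag() that retries the request up to
 * QUERY_REQ_RETRIES times before giving up.
 */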
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003463static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3464 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
3465{
3466 int ret;
3467 int retries;
3468
3469 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3470 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
3471 if (ret)
3472 dev_dbg(hba->dev,
3473 "%s: failed with error %d, retries %d\n",
3474 __func__, ret, retries);
3475 else
3476 break;
3477 }
3478
3479 if (ret)
3480 dev_err(hba->dev,
3481 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
3482 __func__, opcode, idn, ret, retries);
3483 return ret;
3484}
3485
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003486/**
Dolev Raviv68078d52013-07-30 00:35:58 +05303487 * ufshcd_query_flag() - API function for sending flag query requests
3488 * @hba: per-adapter instance
3489 * @opcode: flag query to perform
3490 * @idn: flag idn to access
3491 * @flag_res: the flag value after the query request completes
3492 *
3493 * Returns 0 for success, non-zero in case of failure
3494 */
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02003495int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
Dolev Raviv68078d52013-07-30 00:35:58 +05303496 enum flag_idn idn, bool *flag_res)
3497{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003498 struct ufs_query_req *request = NULL;
3499 struct ufs_query_res *response = NULL;
3500 int err, index = 0, selector = 0;
Yaniv Gardie5ad4062016-02-01 15:02:41 +02003501 int timeout = QUERY_REQ_TIMEOUT;
Dolev Raviv68078d52013-07-30 00:35:58 +05303502
3503 BUG_ON(!hba);
3504
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003505 ufshcd_hold_all(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05303506 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003507 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3508 selector);
Dolev Raviv68078d52013-07-30 00:35:58 +05303509
3510 switch (opcode) {
3511 case UPIU_QUERY_OPCODE_SET_FLAG:
3512 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3513 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3514 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3515 break;
3516 case UPIU_QUERY_OPCODE_READ_FLAG:
3517 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3518 if (!flag_res) {
3519 /* No dummy reads */
3520 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3521 __func__);
3522 err = -EINVAL;
3523 goto out_unlock;
3524 }
3525 break;
3526 default:
3527 dev_err(hba->dev,
3528 "%s: Expected query flag opcode but got = %d\n",
3529 __func__, opcode);
3530 err = -EINVAL;
3531 goto out_unlock;
3532 }
Dolev Raviv68078d52013-07-30 00:35:58 +05303533
Yaniv Gardie5ad4062016-02-01 15:02:41 +02003534 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
Dolev Raviv68078d52013-07-30 00:35:58 +05303535
3536 if (err) {
3537 dev_err(hba->dev,
3538 "%s: Sending flag query for idn %d failed, err = %d\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003539 __func__, request->upiu_req.idn, err);
Dolev Raviv68078d52013-07-30 00:35:58 +05303540 goto out_unlock;
3541 }
3542
3543 if (flag_res)
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303544 *flag_res = (be32_to_cpu(response->upiu_res.value) &
Dolev Raviv68078d52013-07-30 00:35:58 +05303545 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3546
3547out_unlock:
3548 mutex_unlock(&hba->dev_cmd.lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003549 ufshcd_release_all(hba);
Dolev Raviv68078d52013-07-30 00:35:58 +05303550 return err;
3551}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003552EXPORT_SYMBOL(ufshcd_query_flag);
Dolev Raviv68078d52013-07-30 00:35:58 +05303553
3554/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303555 * ufshcd_query_attr - API function for sending attribute requests
3556 * @hba: per-adapter instance
3557 * @opcode: attribute opcode
3558 * @idn: attribute idn to access
3559 * @index: index field
3560 * @selector: selector field
3561 * @attr_val: the attribute value after the query request completes
3562 *
3563 * Returns 0 for success, non-zero in case of failure
3564*/
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003565int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303566 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3567{
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003568 struct ufs_query_req *request = NULL;
3569 struct ufs_query_res *response = NULL;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303570 int err;
3571
3572 BUG_ON(!hba);
3573
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003574 ufshcd_hold_all(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303575 if (!attr_val) {
3576 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3577 __func__, opcode);
3578 err = -EINVAL;
3579 goto out;
3580 }
3581
3582 mutex_lock(&hba->dev_cmd.lock);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003583 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3584 selector);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303585
3586 switch (opcode) {
3587 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3588 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303589 request->upiu_req.value = cpu_to_be32(*attr_val);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303590 break;
3591 case UPIU_QUERY_OPCODE_READ_ATTR:
3592 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3593 break;
3594 default:
3595 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3596 __func__, opcode);
3597 err = -EINVAL;
3598 goto out_unlock;
3599 }
3600
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003601 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303602
3603 if (err) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003604 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3605 __func__, opcode,
3606 request->upiu_req.idn, index, err);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303607 goto out_unlock;
3608 }
3609
Sujit Reddy Thummae8c8e822014-05-26 10:59:10 +05303610 *attr_val = be32_to_cpu(response->upiu_res.value);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303611
3612out_unlock:
3613 mutex_unlock(&hba->dev_cmd.lock);
3614out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003615 ufshcd_release_all(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303616 return err;
3617}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003618EXPORT_SYMBOL(ufshcd_query_attr);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05303619
3620/**
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003621 * ufshcd_query_attr_retry() - API function for sending query
3622 * attribute with retries
3623 * @hba: per-adapter instance
3624 * @opcode: attribute opcode
3625 * @idn: attribute idn to access
3626 * @index: index field
3627 * @selector: selector field
3628 * @attr_val: the attribute value after the query request
3629 * completes
3630 *
3631 * Returns 0 for success, non-zero in case of failure
3632*/
3633static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3634 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3635 u32 *attr_val)
3636{
3637 int ret = 0;
3638 u32 retries;
3639
3640 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3641 ret = ufshcd_query_attr(hba, opcode, idn, index,
3642 selector, attr_val);
3643 if (ret)
3644 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3645 __func__, ret, retries);
3646 else
3647 break;
3648 }
3649
3650 if (ret)
3651 dev_err(hba->dev,
3652 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003653 __func__, idn, ret, retries);
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02003654 return ret;
3655}
3656
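/**
 * __ufshcd_query_descriptor - send a single descriptor query request
 * @hba: per-adapter instance
 * @opcode: descriptor read/write opcode
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: buffer holding the descriptor to write or receiving the read
 * @buf_len: in/out descriptor length parameter
 *
 * Single attempt helper; retries are handled by ufshcd_query_descriptor().
 */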
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003657static int __ufshcd_query_descriptor(struct ufs_hba *hba,
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003658 enum query_opcode opcode, enum desc_idn idn, u8 index,
3659 u8 selector, u8 *desc_buf, int *buf_len)
3660{
3661 struct ufs_query_req *request = NULL;
3662 struct ufs_query_res *response = NULL;
3663 int err;
3664
3665 BUG_ON(!hba);
3666
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003667 ufshcd_hold_all(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003668 if (!desc_buf) {
3669 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3670 __func__, opcode);
3671 err = -EINVAL;
3672 goto out;
3673 }
3674
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303675 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003676 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3677 __func__, *buf_len);
3678 err = -EINVAL;
3679 goto out;
3680 }
3681
3682 mutex_lock(&hba->dev_cmd.lock);
3683 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3684 selector);
3685 hba->dev_cmd.query.descriptor = desc_buf;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003686 request->upiu_req.length = cpu_to_be16(*buf_len);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003687
3688 switch (opcode) {
3689 case UPIU_QUERY_OPCODE_WRITE_DESC:
3690 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3691 break;
3692 case UPIU_QUERY_OPCODE_READ_DESC:
3693 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3694 break;
3695 default:
3696 dev_err(hba->dev,
3697 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3698 __func__, opcode);
3699 err = -EINVAL;
3700 goto out_unlock;
3701 }
3702
3703 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3704
3705 if (err) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003706 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3707 __func__, opcode,
3708 request->upiu_req.idn, index, err);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003709 goto out_unlock;
3710 }
3711
3712 hba->dev_cmd.query.descriptor = NULL;
Sujit Reddy Thummaea2aab22014-07-23 09:31:12 +03003713 *buf_len = be16_to_cpu(response->upiu_res.length);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003714
3715out_unlock:
3716 mutex_unlock(&hba->dev_cmd.lock);
3717out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003718 ufshcd_release_all(hba);
Dolev Ravivd44a5f92014-06-29 09:40:17 +03003719 return err;
3720}
3721
3722/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003723 * ufshcd_query_descriptor - API function for sending descriptor requests
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003724 * @hba: per-adapter instance
3725 * @opcode: descriptor opcode
3726 * @idn: descriptor idn to access
3727 * @index: index field
3728 * @selector: selector field
3729 * @desc_buf: the buffer that contains the descriptor
3730 * @buf_len: length parameter passed to the device
3731 *
3732 * Returns 0 for success, non-zero in case of failure.
3733 * The buf_len parameter will contain, on return, the length parameter
3734 * received on the response.
3735 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003736int ufshcd_query_descriptor(struct ufs_hba *hba,
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003737 enum query_opcode opcode, enum desc_idn idn, u8 index,
3738 u8 selector, u8 *desc_buf, int *buf_len)
3739{
3740 int err;
3741 int retries;
3742
3743 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3744 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3745 selector, desc_buf, buf_len);
3746 if (!err || err == -EINVAL)
3747 break;
3748 }
3749
3750 return err;
3751}
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003752EXPORT_SYMBOL(ufshcd_query_descriptor);
Yaniv Gardia70e91b2016-03-10 17:37:14 +02003753
3754/**
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303755 * ufshcd_read_desc_length - read the specified descriptor length from header
3756 * @hba: Pointer to adapter instance
3757 * @desc_id: descriptor idn value
3758 * @desc_index: descriptor index
3759 * @desc_length: pointer to variable to read the length of descriptor
3760 *
3761 * Return 0 in case of success, non-zero otherwise
3762 */
3763static int ufshcd_read_desc_length(struct ufs_hba *hba,
3764 enum desc_idn desc_id,
3765 int desc_index,
3766 int *desc_length)
3767{
3768 int ret;
3769 u8 header[QUERY_DESC_HDR_SIZE];
3770 int header_len = QUERY_DESC_HDR_SIZE;
3771
3772 if (desc_id >= QUERY_DESC_IDN_MAX)
3773 return -EINVAL;
3774
3775 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
3776 desc_id, desc_index, 0, header,
3777 &header_len);
3778
3779 if (ret) {
3780 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3781 __func__, desc_id);
3782 return ret;
3783 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3784 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3785 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3786 desc_id);
3787 ret = -EINVAL;
3788 }
3789
3790 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3791 return ret;
3792
3793}
3794
3795/**
3796 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3797 * @hba: Pointer to adapter instance
3798 * @desc_id: descriptor idn value
3799 * @desc_len: mapped desc length (out)
3800 *
3801 * Return 0 in case of success, non-zero otherwise
3802 */
3803int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3804 enum desc_idn desc_id, int *desc_len)
3805{
3806 switch (desc_id) {
3807 case QUERY_DESC_IDN_DEVICE:
3808 *desc_len = hba->desc_size.dev_desc;
3809 break;
3810 case QUERY_DESC_IDN_POWER:
3811 *desc_len = hba->desc_size.pwr_desc;
3812 break;
3813 case QUERY_DESC_IDN_GEOMETRY:
3814 *desc_len = hba->desc_size.geom_desc;
3815 break;
3816 case QUERY_DESC_IDN_CONFIGURATION:
3817 *desc_len = hba->desc_size.conf_desc;
3818 break;
3819 case QUERY_DESC_IDN_UNIT:
3820 *desc_len = hba->desc_size.unit_desc;
3821 break;
3822 case QUERY_DESC_IDN_INTERCONNECT:
3823 *desc_len = hba->desc_size.interc_desc;
3824 break;
3825 case QUERY_DESC_IDN_STRING:
3826 *desc_len = QUERY_DESC_MAX_SIZE;
3827 break;
3828 case QUERY_DESC_IDN_RFU_0:
3829 case QUERY_DESC_IDN_RFU_1:
3830 *desc_len = 0;
3831 break;
3832 default:
3833 *desc_len = 0;
3834 return -EINVAL;
3835 }
3836 return 0;
3837}
3838EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3839
3840/**
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003841 * ufshcd_read_desc_param - read the specified descriptor parameter
3842 * @hba: Pointer to adapter instance
3843 * @desc_id: descriptor idn value
3844 * @desc_index: descriptor index
3845 * @param_offset: offset of the parameter to read
3846 * @param_read_buf: pointer to buffer where parameter would be read
3847 * @param_size: sizeof(param_read_buf)
3848 *
3849 * Return 0 in case of success, non-zero otherwise
3850 */
3851static int ufshcd_read_desc_param(struct ufs_hba *hba,
3852 enum desc_idn desc_id,
3853 int desc_index,
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303854 u8 param_offset,
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003855 u8 *param_read_buf,
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303856 u8 param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003857{
3858 int ret;
3859 u8 *desc_buf;
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303860 int buff_len;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003861 bool is_kmalloc = true;
3862
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303863 /* Safety check */
3864 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003865 return -EINVAL;
3866
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303867 /* Get the max length of descriptor from structure filled up at probe
3868 * time.
3869 */
3870 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003871
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303872 /* Sanity checks */
3873 if (ret || !buff_len) {
3874 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3875 __func__);
3876 return ret;
3877 }
3878
3879 /* Check whether we need temp memory */
3880 if (param_offset != 0 || param_size < buff_len) {
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003881 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3882 if (!desc_buf)
3883 return -ENOMEM;
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303884 } else {
3885 desc_buf = param_read_buf;
3886 is_kmalloc = false;
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003887 }
3888
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303889 /* Request for full descriptor */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003890 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303891 desc_id, desc_index, 0,
3892 desc_buf, &buff_len);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003893
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003894 if (ret) {
3895 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3896 __func__, desc_id, desc_index, param_offset, ret);
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003897
3898 goto out;
3899 }
3900
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003901 /* Sanity check */
3902 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3903 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3904 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3905 ret = -EINVAL;
3906 goto out;
3907 }
3908
Michal' Potomski833ea2a2017-05-31 15:25:11 +05303909 /* Make sure we do not copy more data than is available */
3910 if (is_kmalloc && param_size > buff_len)
3911 param_size = buff_len;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003912
Subhash Jadavanida461ce2014-09-25 15:32:25 +03003913 if (is_kmalloc)
3914 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3915out:
3916 if (is_kmalloc)
3917 kfree(desc_buf);
3918 return ret;
3919}
3920
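/*
 * Convenience wrappers around ufshcd_read_desc_param() for reading a
 * complete descriptor starting at offset 0.
 */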
3921static inline int ufshcd_read_desc(struct ufs_hba *hba,
3922 enum desc_idn desc_id,
3923 int desc_index,
3924 u8 *buf,
3925 u32 size)
3926{
3927 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3928}
3929
3930static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3931 u8 *buf,
3932 u32 size)
3933{
3934 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3935}
3936
Yaniv Gardib573d482016-03-10 17:37:09 +02003937int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3938{
3939 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3940}
Yaniv Gardib573d482016-03-10 17:37:09 +02003941
3942/**
3943 * ufshcd_read_string_desc - read string descriptor
3944 * @hba: pointer to adapter instance
3945 * @desc_index: descriptor index
3946 * @buf: pointer to buffer where descriptor would be read
3947 * @size: size of buf
3948 * @ascii: if true convert from unicode to ascii characters
3949 *
3950 * Return 0 in case of success, non-zero otherwise
3951 */
3952int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
3953 u32 size, bool ascii)
3954{
3955 int err = 0;
3956
3957 err = ufshcd_read_desc(hba,
3958 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3959
3960 if (err) {
3961 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3962 __func__, QUERY_REQ_RETRIES, err);
3963 goto out;
3964 }
3965
3966 if (ascii) {
3967 int desc_len;
3968 int ascii_len;
3969 int i;
3970 char *buff_ascii;
3971
3972 desc_len = buf[0];
3973 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3974 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3975 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3976 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3977 __func__);
3978 err = -ENOMEM;
3979 goto out;
3980 }
3981
Subhash Jadavanibe096032017-03-23 12:55:25 -07003982 buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
Yaniv Gardib573d482016-03-10 17:37:09 +02003983 if (!buff_ascii) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003984 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
3985 __func__, ascii_len);
Yaniv Gardib573d482016-03-10 17:37:09 +02003986 err = -ENOMEM;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07003987 goto out_free_buff;
Yaniv Gardib573d482016-03-10 17:37:09 +02003988 }
3989
3990 /*
3991 * the descriptor contains string in UTF16 format
3992 * we need to convert to utf-8 so it can be displayed
3993 */
3994 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3995 desc_len - QUERY_DESC_HDR_SIZE,
3996 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3997
3998 /* replace non-printable or non-ASCII characters with spaces */
3999 for (i = 0; i < ascii_len; i++)
4000 ufshcd_remove_non_printable(&buff_ascii[i]);
4001
4002 memset(buf + QUERY_DESC_HDR_SIZE, 0,
4003 size - QUERY_DESC_HDR_SIZE);
4004 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
4005 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004006out_free_buff:
Yaniv Gardib573d482016-03-10 17:37:09 +02004007 kfree(buff_ascii);
4008 }
4009out:
4010 return err;
4011}
Yaniv Gardib573d482016-03-10 17:37:09 +02004012
Subhash Jadavanida461ce2014-09-25 15:32:25 +03004013/**
4014 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
4015 * @hba: Pointer to adapter instance
4016 * @lun: lun id
4017 * @param_offset: offset of the parameter to read
4018 * @param_read_buf: pointer to buffer where parameter would be read
4019 * @param_size: sizeof(param_read_buf)
4020 *
4021 * Return 0 in case of success, non-zero otherwise
4022 */
4023static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
4024 int lun,
4025 enum unit_desc_param param_offset,
4026 u8 *param_read_buf,
4027 u32 param_size)
4028{
4029 /*
4030 * Unit descriptors are only available for general purpose LUs (LUN id
4031 * from 0 to 7) and RPMB Well known LU.
4032 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004033 if (!ufs_is_valid_unit_desc_lun(lun))
Subhash Jadavanida461ce2014-09-25 15:32:25 +03004034 return -EOPNOTSUPP;
4035
4036 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
4037 param_offset, param_read_buf, param_size);
4038}
4039
4040/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304041 * ufshcd_memory_alloc - allocate memory for host memory space data structures
4042 * @hba: per adapter instance
4043 *
4044 * 1. Allocate DMA memory for Command Descriptor array
4045 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
4046 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
4047 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
4048 * (UTMRDL)
4049 * 4. Allocate memory for local reference block(lrb).
4050 *
4051 * Returns 0 for success, non-zero in case of failure
4052 */
4053static int ufshcd_memory_alloc(struct ufs_hba *hba)
4054{
4055 size_t utmrdl_size, utrdl_size, ucdl_size;
4056
4057 /* Allocate memory for UTP command descriptors */
4058 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09004059 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
4060 ucdl_size,
4061 &hba->ucdl_dma_addr,
4062 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304063
4064 /*
4065 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
4066 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
4067 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
4068 * be aligned to 128 bytes as well.
4069 */
4070 if (!hba->ucdl_base_addr ||
4071 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304072 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304073 "Command Descriptor Memory allocation failed\n");
4074 goto out;
4075 }
4076
4077 /*
4078 * Allocate memory for UTP Transfer descriptors
4079 * UFSHCI requires 1024 byte alignment of UTRD
4080 */
4081 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
Seungwon Jeon2953f852013-06-27 13:31:54 +09004082 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
4083 utrdl_size,
4084 &hba->utrdl_dma_addr,
4085 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304086 if (!hba->utrdl_base_addr ||
4087 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304088 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304089 "Transfer Descriptor Memory allocation failed\n");
4090 goto out;
4091 }
4092
4093 /*
4094 * Allocate memory for UTP Task Management descriptors
4095 * UFSHCI requires 1024 byte alignment of UTMRD
4096 */
4097 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
Seungwon Jeon2953f852013-06-27 13:31:54 +09004098 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
4099 utmrdl_size,
4100 &hba->utmrdl_dma_addr,
4101 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304102 if (!hba->utmrdl_base_addr ||
4103 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304104 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304105 "Task Management Descriptor Memory allocation failed\n");
4106 goto out;
4107 }
4108
4109 /* Allocate memory for local reference block */
Seungwon Jeon2953f852013-06-27 13:31:54 +09004110 hba->lrb = devm_kzalloc(hba->dev,
4111 hba->nutrs * sizeof(struct ufshcd_lrb),
4112 GFP_KERNEL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304113 if (!hba->lrb) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05304114 dev_err(hba->dev, "LRB Memory allocation failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304115 goto out;
4116 }
4117 return 0;
4118out:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304119 return -ENOMEM;
4120}
4121
4122/**
4123 * ufshcd_host_memory_configure - configure local reference block with
4124 * memory offsets
4125 * @hba: per adapter instance
4126 *
4127 * Configure Host memory space
4128 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
4129 * address.
4130 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
4131 * and PRDT offset.
4132 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
4133 * into local reference block.
4134 */
4135static void ufshcd_host_memory_configure(struct ufs_hba *hba)
4136{
4137 struct utp_transfer_cmd_desc *cmd_descp;
4138 struct utp_transfer_req_desc *utrdlp;
4139 dma_addr_t cmd_desc_dma_addr;
4140 dma_addr_t cmd_desc_element_addr;
4141 u16 response_offset;
4142 u16 prdt_offset;
4143 int cmd_desc_size;
4144 int i;
4145
4146 utrdlp = hba->utrdl_base_addr;
4147 cmd_descp = hba->ucdl_base_addr;
4148
4149 response_offset =
4150 offsetof(struct utp_transfer_cmd_desc, response_upiu);
4151 prdt_offset =
4152 offsetof(struct utp_transfer_cmd_desc, prd_table);
4153
4154 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
4155 cmd_desc_dma_addr = hba->ucdl_dma_addr;
4156
4157 for (i = 0; i < hba->nutrs; i++) {
4158 /* Configure UTRD with command descriptor base address */
4159 cmd_desc_element_addr =
4160 (cmd_desc_dma_addr + (cmd_desc_size * i));
4161 utrdlp[i].command_desc_base_addr_lo =
4162 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
4163 utrdlp[i].command_desc_base_addr_hi =
4164 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
4165
4166 /* Response upiu and prdt offset should be in double words */
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00004167 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
4168 utrdlp[i].response_upiu_offset =
4169 cpu_to_le16(response_offset);
4170 utrdlp[i].prd_table_offset =
4171 cpu_to_le16(prdt_offset);
4172 utrdlp[i].response_upiu_length =
4173 cpu_to_le16(ALIGNED_UPIU_SIZE);
4174 } else {
4175 utrdlp[i].response_upiu_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304176 cpu_to_le16((response_offset >> 2));
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00004177 utrdlp[i].prd_table_offset =
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304178 cpu_to_le16((prdt_offset >> 2));
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00004179 utrdlp[i].response_upiu_length =
Sujit Reddy Thumma3ca316c2013-06-26 22:39:30 +05304180 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
Kiwoong Kim9b41ed72017-04-04 19:32:05 +00004181 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304182
4183 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004184 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
4185 (i * sizeof(struct utp_transfer_req_desc));
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05304186 hba->lrb[i].ucd_req_ptr =
4187 (struct utp_upiu_req *)(cmd_descp + i);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004188 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304189 hba->lrb[i].ucd_rsp_ptr =
4190 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004191 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
4192 response_offset;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304193 hba->lrb[i].ucd_prdt_ptr =
4194 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004195 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
4196 prdt_offset;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304197 }
4198}
4199
4200/**
4201 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
4202 * @hba: per adapter instance
4203 *
4204 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
4205 * in order to initialize the Unipro link startup procedure.
4206 * Once the Unipro links are up, the device connected to the controller
4207 * is detected.
4208 *
4209 * Returns 0 on success, non-zero value on failure
4210 */
4211static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4212{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304213 struct uic_command uic_cmd = {0};
4214 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304215
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304216 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
4217
4218 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4219 if (ret)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004220 dev_dbg(hba->dev,
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304221 "dme-link-startup: error code %d\n", ret);
4222 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304223}
4224
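/**
 * ufshcd_add_delay_before_dme_cmd - enforce a minimum gap between DME commands
 * @hba: per adapter instance
 *
 * For hosts with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS, sleep for whatever is
 * left of the 1ms minimum interval since the last DME command.
 */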
Yaniv Gardicad2e032015-03-31 17:37:14 +03004225static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4226{
4227 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
4228 unsigned long min_sleep_time_us;
4229
4230 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4231 return;
4232
4233 /*
4234 * last_dme_cmd_tstamp will be 0 only for 1st call to
4235 * this function
4236 */
4237 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4238 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4239 } else {
4240 unsigned long delta =
4241 (unsigned long) ktime_to_us(
4242 ktime_sub(ktime_get(),
4243 hba->last_dme_cmd_tstamp));
4244
4245 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4246 min_sleep_time_us =
4247 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4248 else
4249 return; /* no more delay required */
4250 }
4251
4252 /* allow sleep for extra 50us if needed */
4253 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4254}
4255
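/* Record the timestamp of the last DME command for the delay quirk above */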
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004256static inline void ufshcd_save_tstamp_of_last_dme_cmd(
4257 struct ufs_hba *hba)
4258{
4259 if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
4260 hba->last_dme_cmd_tstamp = ktime_get();
4261}
4262
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304263/**
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304264 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4265 * @hba: per adapter instance
4266 * @attr_sel: uic command argument1
4267 * @attr_set: attribute set type as uic command argument2
4268 * @mib_val: setting value as uic command argument3
4269 * @peer: indicate whether peer or local
4270 *
4271 * Returns 0 on success, non-zero value on failure
4272 */
4273int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4274 u8 attr_set, u32 mib_val, u8 peer)
4275{
4276 struct uic_command uic_cmd = {0};
4277 static const char *const action[] = {
4278 "dme-set",
4279 "dme-peer-set"
4280 };
4281 const char *set = action[!!peer];
4282 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004283 int retries = UFS_UIC_COMMAND_RETRIES;
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304284
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004285 ufsdbg_error_inject_dispatcher(hba,
4286 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4287
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304288 uic_cmd.command = peer ?
4289 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4290 uic_cmd.argument1 = attr_sel;
4291 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4292 uic_cmd.argument3 = mib_val;
4293
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004294 do {
4295 /* for peer attributes we retry upon failure */
4296 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4297 if (ret)
4298 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4299 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4300 } while (ret && peer && --retries);
4301
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004302 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004303 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004304 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4305 UFS_UIC_COMMAND_RETRIES - retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304306
4307 return ret;
4308}
4309EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
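/*
 * Callers normally reach ufshcd_dme_set_attr() through the thin
 * ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers declared in ufshcd.h.
 * Illustrative sketch only (PA_TXTERMINATION is just an example attribute):
 *
 *	int err;
 *
 *	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
 *	if (!err)
 *		err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TXTERMINATION),
 *					  TRUE);
 *	if (err)
 *		dev_err(hba->dev, "setting PA_TXTERMINATION failed: %d\n", err);
 */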
4310
4311/**
4312 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4313 * @hba: per adapter instance
4314 * @attr_sel: uic command argument1
4315 * @mib_val: the value of the attribute as returned by the UIC command
4316 * @peer: indicate whether peer or local
4317 *
4318 * Returns 0 on success, non-zero value on failure
4319 */
4320int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4321 u32 *mib_val, u8 peer)
4322{
4323 struct uic_command uic_cmd = {0};
4324 static const char *const action[] = {
4325 "dme-get",
4326 "dme-peer-get"
4327 };
4328 const char *get = action[!!peer];
4329 int ret;
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004330 int retries = UFS_UIC_COMMAND_RETRIES;
Yaniv Gardi874237f2015-05-17 18:55:03 +03004331 struct ufs_pa_layer_attr orig_pwr_info;
4332 struct ufs_pa_layer_attr temp_pwr_info;
4333 bool pwr_mode_change = false;
4334
4335 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4336 orig_pwr_info = hba->pwr_info;
4337 temp_pwr_info = orig_pwr_info;
4338
4339 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4340 orig_pwr_info.pwr_rx == FAST_MODE) {
4341 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4342 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4343 pwr_mode_change = true;
4344 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4345 orig_pwr_info.pwr_rx == SLOW_MODE) {
4346 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4347 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4348 pwr_mode_change = true;
4349 }
4350 if (pwr_mode_change) {
4351 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4352 if (ret)
4353 goto out;
4354 }
4355 }
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304356
4357 uic_cmd.command = peer ?
4358 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004359
4360 ufsdbg_error_inject_dispatcher(hba,
4361 ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
4362
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304363 uic_cmd.argument1 = attr_sel;
4364
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004365 do {
4366 /* for peer attributes we retry upon failure */
4367 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4368 if (ret)
4369 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4370 get, UIC_GET_ATTR_ID(attr_sel), ret);
4371 } while (ret && peer && --retries);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304372
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004373 if (ret)
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004374 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004375 get, UIC_GET_ATTR_ID(attr_sel),
4376 UFS_UIC_COMMAND_RETRIES - retries);
Yaniv Gardi64238fb2016-02-01 15:02:43 +02004377
4378 if (mib_val && !ret)
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304379 *mib_val = uic_cmd.argument3;
Yaniv Gardi874237f2015-05-17 18:55:03 +03004380
4381 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4382 && pwr_mode_change)
4383 ufshcd_change_power_mode(hba, &orig_pwr_info);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05304384out:
4385 return ret;
4386}
4387EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
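/*
 * Reads follow the same pattern via the ufshcd_dme_get()/ufshcd_dme_peer_get()
 * wrappers. Sketch only, mirroring how the maximum gears are queried in
 * ufshcd_get_max_pwr_mode() below:
 *
 *	u32 gear_rx = 0;
 *
 *	if (!ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear_rx))
 *		dev_dbg(hba->dev, "local PA_MAXRXHSGEAR = %u\n", gear_rx);
 */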
4388
4389/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004390 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4391 * state) and waits for it to take effect.
4392 *
4393 * @hba: per adapter instance
4394 * @cmd: UIC command to execute
4395 *
4396 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4397 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
4398 * and device UniPro link and hence it's final completion would be indicated by
4399 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
4400 * addition to normal UIC command completion Status (UCCS). This function only
4401 * returns after the relevant status bits indicate the completion.
4402 *
4403 * Returns 0 on success, non-zero value on failure
4404 */
4405static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4406{
4407 struct completion uic_async_done;
4408 unsigned long flags;
4409 u8 status;
4410 int ret;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004411 bool reenable_intr = false;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004412
4413 mutex_lock(&hba->uic_cmd_mutex);
4414 init_completion(&uic_async_done);
Yaniv Gardicad2e032015-03-31 17:37:14 +03004415 ufshcd_add_delay_before_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004416
4417 spin_lock_irqsave(hba->host->host_lock, flags);
4418 hba->uic_async_done = &uic_async_done;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004419 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4420 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4421 /*
4422 * Make sure UIC command completion interrupt is disabled before
4423 * issuing UIC command.
4424 */
4425 wmb();
4426 reenable_intr = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004427 }
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004428 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4429 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004430 if (ret) {
4431 dev_err(hba->dev,
4432 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4433 cmd->command, cmd->argument3, ret);
4434 goto out;
4435 }
4436
4437 if (!wait_for_completion_timeout(hba->uic_async_done,
4438 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4439 dev_err(hba->dev,
4440 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4441 cmd->command, cmd->argument3);
4442 ret = -ETIMEDOUT;
4443 goto out;
4444 }
4445
4446 status = ufshcd_get_upmcrs(hba);
4447 if (status != PWR_LOCAL) {
4448 dev_err(hba->dev,
Kiwoong Kim73615422016-09-08 16:50:02 +09004449 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004450 cmd->command, status);
4451 ret = (status != PWR_OK) ? status : -1;
4452 }
Subhash Jadavani114437e2017-11-08 16:22:16 -08004453 ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
Can Guob7147732017-04-18 16:22:56 +08004454
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004455out:
Subhash Jadavani9c807702017-04-01 00:35:51 -07004456 if (ret) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004457 ufsdbg_set_err_state(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07004458 ufshcd_print_host_state(hba);
4459 ufshcd_print_pwr_info(hba);
4460 ufshcd_print_host_regs(hba);
Can Guof6411eb2017-06-09 15:17:22 +08004461 ufshcd_print_cmd_log(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07004462 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004463
4464 ufshcd_save_tstamp_of_last_dme_cmd(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004465 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004466 hba->active_uic_cmd = NULL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004467 hba->uic_async_done = NULL;
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02004468 if (reenable_intr)
4469 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004470 spin_unlock_irqrestore(hba->host->host_lock, flags);
4471 mutex_unlock(&hba->uic_cmd_mutex);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004472 return ret;
4473}
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004474
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004475int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
4476{
4477 unsigned long flags;
4478 int ret = 0;
4479 u32 tm_doorbell;
4480 u32 tr_doorbell;
4481 bool timeout = false, do_last_check = false;
4482 ktime_t start;
4483
4484 ufshcd_hold_all(hba);
4485 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004486 /*
4487 * Wait for all the outstanding tasks/transfer requests.
4488 * Verify by checking the doorbell registers are clear.
4489 */
4490 start = ktime_get();
4491 do {
Subhash Jadavani9c807702017-04-01 00:35:51 -07004492 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
4493 ret = -EBUSY;
4494 goto out;
4495 }
4496
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004497 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
4498 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4499 if (!tm_doorbell && !tr_doorbell) {
4500 timeout = false;
4501 break;
4502 } else if (do_last_check) {
4503 break;
4504 }
4505
4506 spin_unlock_irqrestore(hba->host->host_lock, flags);
4507 schedule();
4508 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
4509 wait_timeout_us) {
4510 timeout = true;
4511 /*
4512 * We might have scheduled out for long time so make
4513 * sure to check if doorbells are cleared by this time
4514 * or not.
4515 */
4516 do_last_check = true;
4517 }
4518 spin_lock_irqsave(hba->host->host_lock, flags);
4519 } while (tm_doorbell || tr_doorbell);
4520
4521 if (timeout) {
4522 dev_err(hba->dev,
4523 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
4524 __func__, tm_doorbell, tr_doorbell);
4525 ret = -EBUSY;
4526 }
4527out:
4528 spin_unlock_irqrestore(hba->host->host_lock, flags);
4529 ufshcd_release_all(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004530 return ret;
4531}
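/*
 * Illustrative usage sketch: callers such as the clock scaling path wait for
 * the controller to go idle before changing frequencies. The timeout constant
 * below is an assumed example value, not necessarily what this driver uses:
 *
 *	#define EXAMPLE_DOORBELL_CLR_TOUT_US	(1000 * 1000)	// 1 second
 *
 *	if (ufshcd_wait_for_doorbell_clr(hba, EXAMPLE_DOORBELL_CLR_TOUT_US))
 *		dev_err(hba->dev, "still busy, aborting frequency change\n");
 */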
4532
4533/**
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304534 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4535 * using DME_SET primitives.
4536 * @hba: per adapter instance
4537 * @mode: power mode value
4538 *
4539 * Returns 0 on success, non-zero value on failure
4540 */
Sujit Reddy Thummabdbe5d22014-05-26 10:59:11 +05304541static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304542{
4543 struct uic_command uic_cmd = {0};
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004544 int ret;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304545
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03004546 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4547 ret = ufshcd_dme_set(hba,
4548 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4549 if (ret) {
4550 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4551 __func__, ret);
4552 goto out;
4553 }
4554 }
4555
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304556 uic_cmd.command = UIC_CMD_DME_SET;
4557 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4558 uic_cmd.argument3 = mode;
Asutosh Das3da913a2017-03-24 10:32:16 +05304559 hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004560 ufshcd_hold_all(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004561 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Asutosh Das3da913a2017-03-24 10:32:16 +05304562 hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004563 ufshcd_release_all(hba);
Yaniv Gardic3a2f9e2015-05-17 18:55:01 +03004564out:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03004565 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004566}
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304567
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004568static int ufshcd_link_recovery(struct ufs_hba *hba)
4569{
Subhash Jadavani9c807702017-04-01 00:35:51 -07004570 int ret = 0;
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004571 unsigned long flags;
4572
Subhash Jadavani9c807702017-04-01 00:35:51 -07004573 /*
4574 * Check if there is any race with fatal error handling.
4575 * If so, wait for it to complete. Even though fatal error
4576 * handling does reset and restore in some cases, don't assume
4577 * anything out of it. We are just avoiding race here.
4578 */
4579 do {
4580 spin_lock_irqsave(hba->host->host_lock, flags);
4581 if (!(work_pending(&hba->eh_work) ||
4582 hba->ufshcd_state == UFSHCD_STATE_RESET))
4583 break;
4584 spin_unlock_irqrestore(hba->host->host_lock, flags);
4585 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4586 flush_work(&hba->eh_work);
4587 } while (1);
4588
4589
4590 /*
4591 * we don't know if previous reset had really reset the host controller
4592 * or not. So let's force reset here to be sure.
4593 */
4594 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4595 hba->force_host_reset = true;
4596 schedule_work(&hba->eh_work);
4597
4598 /* wait for the reset work to finish */
4599 do {
4600 if (!(work_pending(&hba->eh_work) ||
4601 hba->ufshcd_state == UFSHCD_STATE_RESET))
4602 break;
4603 spin_unlock_irqrestore(hba->host->host_lock, flags);
4604 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
4605 flush_work(&hba->eh_work);
4606 spin_lock_irqsave(hba->host->host_lock, flags);
4607 } while (1);
4608
4609 if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
4610 ufshcd_is_link_active(hba)))
4611 ret = -ENOLINK;
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004612 spin_unlock_irqrestore(hba->host->host_lock, flags);
4613
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004614 return ret;
4615}
4616
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004617static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004618{
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004619 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004620 struct uic_command uic_cmd = {0};
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004621 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004622
4623 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004624 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004625 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4626 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004627
Subhash Jadavani9c807702017-04-01 00:35:51 -07004628 /*
4629 * Do full reinit if enter failed or if LINERESET was detected during
4630 * Hibern8 operation. After LINERESET, link moves to default PWM-G1
4631 * mode hence full reinit is required to move link to HS speeds.
4632 */
4633 if (ret || hba->full_init_linereset) {
Subhash Jadavani68e11712017-03-24 14:44:01 -07004634 int err;
4635
Subhash Jadavani9c807702017-04-01 00:35:51 -07004636 hba->full_init_linereset = false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004637 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
4638 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004639 __func__, ret);
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004640 /*
Subhash Jadavani68e11712017-03-24 14:44:01 -07004641 * If link recovery fails then return the error code (-ENOLINK)
4642 * returned by ufshcd_link_recovery().
4643 * If link recovery succeeds then return -EAGAIN so that the
4644 * hibern8 enter can be retried.
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004645 */
Subhash Jadavani68e11712017-03-24 14:44:01 -07004646 err = ufshcd_link_recovery(hba);
4647 if (err) {
4648 dev_err(hba->dev, "%s: link recovery failed", __func__);
4649 ret = err;
4650 } else {
4651 ret = -EAGAIN;
4652 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004653 } else {
4654 dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
4655 ktime_to_us(ktime_get()));
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004656 }
4657
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004658 return ret;
4659}
4660
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004661int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004662{
4663 int ret = 0, retries;
4664
4665 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
4666 ret = __ufshcd_uic_hibern8_enter(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004667 if (!ret)
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004668 goto out;
Subhash Jadavani68e11712017-03-24 14:44:01 -07004669 else if (ret != -EAGAIN)
4670 /* Unable to recover the link, so no point proceeding */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004671 BUG();
Yaniv Gardi87d0b4a2016-02-01 15:02:44 +02004672 }
4673out:
4674 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004675}
4676
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004677int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004678{
4679 struct uic_command uic_cmd = {0};
4680 int ret;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004681 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03004682
4683 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4684 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004685 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4686 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4687
Subhash Jadavani9c807702017-04-01 00:35:51 -07004688 /* Do full reinit if exit failed */
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304689 if (ret) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004690 ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
4691 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
Yaniv Gardi53c12d02016-02-01 15:02:45 +02004692 __func__, ret);
4693 ret = ufshcd_link_recovery(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004694 /* Unable to recover the link, so no point proceeding */
4695 if (ret)
4696 BUG();
4697 } else {
4698 dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
4699 ktime_to_us(ktime_get()));
4700 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4701 hba->ufs_stats.hibern8_exit_cnt++;
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304702 }
4703
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304704 return ret;
4705}
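/*
 * Hibern8 entry/exit sketch (illustrative only): the suspend/resume paths
 * bracket the link low power state roughly like this, relying on the retry
 * and link recovery handling implemented above:
 *
 *	ret = ufshcd_uic_hibern8_enter(hba);
 *	if (ret)
 *		return ret;
 *	// ... link stays in hibern8 while the host is suspended ...
 *	ret = ufshcd_uic_hibern8_exit(hba);
 */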
4706
Yaniv Gardi50646362014-10-23 13:25:13 +03004707/**
4708 * ufshcd_init_pwr_info - setting the POR (power on reset)
4709 * values in hba power info
4710 * @hba: per-adapter instance
4711 */
4712static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4713{
4714 hba->pwr_info.gear_rx = UFS_PWM_G1;
4715 hba->pwr_info.gear_tx = UFS_PWM_G1;
4716 hba->pwr_info.lane_rx = 1;
4717 hba->pwr_info.lane_tx = 1;
4718 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4719 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4720 hba->pwr_info.hs_rate = 0;
4721}
4722
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05304723/**
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004724 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4725 * @hba: per-adapter instance
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304726 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004727static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304728{
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004729 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4730
4731 if (hba->max_pwr_info.is_valid)
4732 return 0;
4733
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004734 pwr_info->pwr_tx = FAST_MODE;
4735 pwr_info->pwr_rx = FAST_MODE;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004736 pwr_info->hs_rate = PA_HS_MODE_B;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304737
4738 /* Get the connected lane count */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004739 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4740 &pwr_info->lane_rx);
4741 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4742 &pwr_info->lane_tx);
4743
4744 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4745 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4746 __func__,
4747 pwr_info->lane_rx,
4748 pwr_info->lane_tx);
4749 return -EINVAL;
4750 }
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304751
4752 /*
4753 * First, get the maximum gears of HS speed.
4754 * If a zero value, it means there is no HSGEAR capability.
4755 * Then, get the maximum gears of PWM speed.
4756 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004757 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4758 if (!pwr_info->gear_rx) {
4759 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4760 &pwr_info->gear_rx);
4761 if (!pwr_info->gear_rx) {
4762 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4763 __func__, pwr_info->gear_rx);
4764 return -EINVAL;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004765 } else {
4766 if (hba->limit_rx_pwm_gear > 0 &&
4767 (hba->limit_rx_pwm_gear < pwr_info->gear_rx))
4768 pwr_info->gear_rx = hba->limit_rx_pwm_gear;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004769 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004770 pwr_info->pwr_rx = SLOW_MODE;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004771 } else {
4772 if (hba->limit_rx_hs_gear > 0 &&
4773 (hba->limit_rx_hs_gear < pwr_info->gear_rx))
4774 pwr_info->gear_rx = hba->limit_rx_hs_gear;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304775 }
4776
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004777 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4778 &pwr_info->gear_tx);
4779 if (!pwr_info->gear_tx) {
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304780 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004781 &pwr_info->gear_tx);
4782 if (!pwr_info->gear_tx) {
4783 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4784 __func__, pwr_info->gear_tx);
4785 return -EINVAL;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004786 } else {
4787 if (hba->limit_tx_pwm_gear > 0 &&
4788 (hba->limit_tx_pwm_gear < pwr_info->gear_tx))
4789 pwr_info->gear_tx = hba->limit_tx_pwm_gear;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004790 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004791 pwr_info->pwr_tx = SLOW_MODE;
Subhash Jadavani5e45e702016-08-09 18:43:10 -07004792 } else {
4793 if (hba->limit_tx_hs_gear > 0 &&
4794 (hba->limit_tx_hs_gear < pwr_info->gear_tx))
4795 pwr_info->gear_tx = hba->limit_tx_hs_gear;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004796 }
4797
4798 hba->max_pwr_info.is_valid = true;
4799 return 0;
4800}
4801
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004802int ufshcd_change_power_mode(struct ufs_hba *hba,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004803 struct ufs_pa_layer_attr *pwr_mode)
4804{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004805 int ret = 0;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004806
4807 /* if already configured to the requested pwr_mode */
Asutosh Das3923c232017-09-15 16:14:26 +05304808 if (!hba->restore_needed &&
4809 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4810 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004811 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4812 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4813 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4814 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4815 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4816 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4817 return 0;
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304818 }
4819
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004820 ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
4821 if (ret)
4822 return ret;
4823
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304824 /*
4825 * Configure attributes for power mode change with below.
4826 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4827 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4828 * - PA_HSSERIES
4829 */
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004830 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4831 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4832 pwr_mode->lane_rx);
4833 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4834 pwr_mode->pwr_rx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304835 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004836 else
4837 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304838
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004839 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4840 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4841 pwr_mode->lane_tx);
4842 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4843 pwr_mode->pwr_tx == FAST_MODE)
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304844 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004845 else
4846 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304847
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004848 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4849 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4850 pwr_mode->pwr_rx == FAST_MODE ||
4851 pwr_mode->pwr_tx == FAST_MODE)
4852 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4853 pwr_mode->hs_rate);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304854
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004855 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4856 DL_FC0ProtectionTimeOutVal_Default);
4857 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4858 DL_TC0ReplayTimeOutVal_Default);
4859 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4860 DL_AFC0ReqTimeOutVal_Default);
4861
4862 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4863 DL_FC0ProtectionTimeOutVal_Default);
4864 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4865 DL_TC0ReplayTimeOutVal_Default);
4866 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4867 DL_AFC0ReqTimeOutVal_Default);
4868
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004869 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4870 | pwr_mode->pwr_tx);
4871
4872 if (ret) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004873 ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304874 dev_err(hba->dev,
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004875 "%s: power mode change failed %d\n", __func__, ret);
4876 } else {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004877 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4878 pwr_mode);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004879
4880 memcpy(&hba->pwr_info, pwr_mode,
4881 sizeof(struct ufs_pa_layer_attr));
Sayali Lokhandebb03f312017-09-20 19:39:18 +05304882 hba->ufs_stats.power_mode_change_cnt++;
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004883 }
4884
4885 return ret;
4886}
4887
4888/**
4889 * ufshcd_config_pwr_mode - configure a new power mode
4890 * @hba: per-adapter instance
4891 * @desired_pwr_mode: desired power configuration
4892 */
4893static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4894 struct ufs_pa_layer_attr *desired_pwr_mode)
4895{
4896 struct ufs_pa_layer_attr final_params = { 0 };
4897 int ret;
4898
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02004899 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4900 desired_pwr_mode, &final_params);
4901
4902 if (ret)
Dolev Raviv7eb584d2014-09-25 15:32:31 +03004903 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4904
4905 ret = ufshcd_change_power_mode(hba, &final_params);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07004906 if (!ret)
4907 ufshcd_print_pwr_info(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05304908
4909 return ret;
4910}
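/*
 * Sketch of the typical probe-time sequence (illustrative, assuming the
 * negotiated capabilities cached in hba->max_pwr_info are used as the target):
 *
 *	if (!ufshcd_get_max_pwr_mode(hba)) {
 *		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 *		if (ret)
 *			dev_err(hba->dev, "%s: failed setting power mode %d\n",
 *				__func__, ret);
 *	}
 */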
4911
4912/**
Dolev Raviv68078d52013-07-30 00:35:58 +05304913 * ufshcd_complete_dev_init() - checks device readiness
4914 * @hba: per-adapter instance
4915 *
4916 * Set fDeviceInit flag and poll until device toggles it.
4917 */
4918static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4919{
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004920 int i;
4921 int err;
Dolev Raviv68078d52013-07-30 00:35:58 +05304922 bool flag_res = 1;
4923
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004924 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4925 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
Dolev Raviv68078d52013-07-30 00:35:58 +05304926 if (err) {
4927 dev_err(hba->dev,
4928 "%s setting fDeviceInit flag failed with error %d\n",
4929 __func__, err);
4930 goto out;
4931 }
4932
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02004933 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4934 for (i = 0; i < 1000 && !err && flag_res; i++)
4935 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4936 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4937
Dolev Raviv68078d52013-07-30 00:35:58 +05304938 if (err)
4939 dev_err(hba->dev,
4940 "%s reading fDeviceInit flag failed with error %d\n",
4941 __func__, err);
4942 else if (flag_res)
4943 dev_err(hba->dev,
4944 "%s fDeviceInit was not cleared by the device\n",
4945 __func__);
4946
4947out:
4948 return err;
4949}
4950
4951/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304952 * ufshcd_make_hba_operational - Make UFS controller operational
4953 * @hba: per adapter instance
4954 *
4955 * To bring UFS host controller to operational state,
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004956 * 1. Enable required interrupts
4957 * 2. Configure interrupt aggregation
Yaniv Gardi897efe62016-02-01 15:02:48 +02004958 * 3. Program UTRL and UTMRL base address
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004959 * 4. Configure run-stop-registers
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304960 *
4961 * Returns 0 on success, non-zero value on failure
4962 */
4963static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4964{
4965 int err = 0;
4966 u32 reg;
4967
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304968 /* Enable required interrupts */
4969 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4970
4971 /* Configure interrupt aggregation */
Yaniv Gardib8521902015-05-17 18:54:57 +03004972 if (ufshcd_is_intr_aggr_allowed(hba))
4973 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4974 else
4975 ufshcd_disable_intr_aggr(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05304976
4977 /* Configure UTRL and UTMRL base address registers */
4978 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4979 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4980 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4981 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4982 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4983 REG_UTP_TASK_REQ_LIST_BASE_L);
4984 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4985 REG_UTP_TASK_REQ_LIST_BASE_H);
4986
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304987 /*
Yaniv Gardi897efe62016-02-01 15:02:48 +02004988 * Make sure base address and interrupt setup are updated before
4989 * enabling the run/stop registers below.
4990 */
4991 wmb();
4992
4993 /*
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304994 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304995 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03004996 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05304997 if (!(ufshcd_get_lists_status(reg))) {
4998 ufshcd_enable_run_stop_reg(hba);
4999 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305000 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305001 "Host controller not ready to process requests");
5002 err = -EIO;
5003 goto out;
5004 }
5005
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305006out:
5007 return err;
5008}
5009
5010/**
Yaniv Gardi596585a2016-03-10 17:37:08 +02005011 * ufshcd_hba_stop - Send controller to reset state
5012 * @hba: per adapter instance
5013 * @can_sleep: perform sleep or just spin
5014 */
5015static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
5016{
5017 int err;
5018
5019 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
5020 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
5021 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
5022 10, 1, can_sleep);
5023 if (err)
5024 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
5025}
5026
5027/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305028 * ufshcd_hba_enable - initialize the controller
5029 * @hba: per adapter instance
5030 *
5031 * The controller resets itself and controller firmware initialization
5032 * sequence kicks off. When controller is ready it will set
5033 * the Host Controller Enable bit to 1.
5034 *
5035 * Returns 0 on success, non-zero value on failure
5036 */
5037static int ufshcd_hba_enable(struct ufs_hba *hba)
5038{
5039 int retry;
5040
5041 /*
5042 * msleep of 1 and 5 used in this function might result in msleep(20),
5043 * but it was necessary to send the UFS FPGA to reset mode during
5044 * development and testing of this driver. msleep can be changed to
5045 * mdelay and retry count can be reduced based on the controller.
5046 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02005047 if (!ufshcd_is_hba_active(hba))
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305048 /* change controller state to "reset state" */
Yaniv Gardi596585a2016-03-10 17:37:08 +02005049 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305050
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005051 /* UniPro link is disabled at this point */
5052 ufshcd_set_link_off(hba);
5053
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005054 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005055
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305056 /* start controller initialization sequence */
5057 ufshcd_hba_start(hba);
5058
5059 /*
5060 * To initialize a UFS host controller HCE bit must be set to 1.
5061 * During initialization the HCE bit value changes from 1->0->1.
5062 * When the host controller completes initialization sequence
5063 * it sets the value of HCE bit to 1. The same HCE bit is read back
5064 * to check if the controller has completed initialization sequence.
5065 * So without this delay, the HCE = 1 value set in the previous
5066 * instruction might be read back.
5067 * This delay can be changed based on the controller.
5068 */
5069 msleep(1);
5070
5071 /* wait for the host controller to complete initialization */
5072 retry = 10;
5073 while (ufshcd_is_hba_active(hba)) {
5074 if (retry) {
5075 retry--;
5076 } else {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305077 dev_err(hba->dev,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305078 "Controller enable failed\n");
5079 return -EIO;
5080 }
5081 msleep(5);
5082 }
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005083
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005084 /* enable UIC related interrupts */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005085 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005086
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005087 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005088
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305089 return 0;
5090}
5091
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03005092static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
5093{
5094 int tx_lanes, i, err = 0;
5095
5096 if (!peer)
5097 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
5098 &tx_lanes);
5099 else
5100 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
5101 &tx_lanes);
5102 for (i = 0; i < tx_lanes; i++) {
5103 if (!peer)
5104 err = ufshcd_dme_set(hba,
5105 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
5106 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
5107 0);
5108 else
5109 err = ufshcd_dme_peer_set(hba,
5110 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
5111 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
5112 0);
5113 if (err) {
5114 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
5115 __func__, peer, i, err);
5116 break;
5117 }
5118 }
5119
5120 return err;
5121}
5122
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005123static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
5124{
5125 return ufshcd_disable_tx_lcc(hba, false);
5126}
5127
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03005128static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
5129{
5130 return ufshcd_disable_tx_lcc(hba, true);
5131}
5132
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305133/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305134 * ufshcd_link_startup - Initialize unipro link startup
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305135 * @hba: per adapter instance
5136 *
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305137 * Returns 0 for success, non-zero in case of failure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305138 */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305139static int ufshcd_link_startup(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305140{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305141 int ret;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005142 int retries = DME_LINKSTARTUP_RETRIES;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005143 bool link_startup_again = false;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305144
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005145 /*
5146 * If the UFS device isn't active then we will have to issue link startup
5147 * twice to make sure the device state moves to active.
5148 */
5149 if (!ufshcd_is_ufs_dev_active(hba))
5150 link_startup_again = true;
5151
5152link_startup:
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005153 do {
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005154 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305155
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005156 ret = ufshcd_dme_link_startup(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005157 if (ret)
5158 ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005159
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005160 /* check if device is detected by inter-connect layer */
5161 if (!ret && !ufshcd_is_device_present(hba)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005162 ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005163 dev_err(hba->dev, "%s: Device not present\n", __func__);
5164 ret = -ENXIO;
5165 goto out;
5166 }
5167
5168 /*
5169 * DME link lost indication is only received when link is up,
5170 * but we can't be sure if the link is up until link startup
5171 * succeeds. So reset the local UniPro and try again.
5172 */
5173 if (ret && ufshcd_hba_enable(hba))
5174 goto out;
5175 } while (ret && retries--);
5176
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305177 if (ret)
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03005178 /* failed to get the link up... retire */
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305179 goto out;
5180
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005181 if (link_startup_again) {
5182 link_startup_again = false;
5183 retries = DME_LINKSTARTUP_RETRIES;
5184 goto link_startup;
5185 }
5186
5187 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5188 ufshcd_init_pwr_info(hba);
5189 ufshcd_print_pwr_info(hba);
5190
Yaniv Gardi7ca38cf2015-05-17 18:54:59 +03005191 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5192 ret = ufshcd_disable_device_tx_lcc(hba);
5193 if (ret)
5194 goto out;
5195 }
5196
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08005197 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005198 ret = ufshcd_disable_host_tx_lcc(hba);
5199 if (ret)
5200 goto out;
5201 }
5202
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005203 /* Include any host controller configuration via UIC commands */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02005204 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5205 if (ret)
5206 goto out;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03005207
5208 ret = ufshcd_make_hba_operational(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305209out:
Subhash Jadavani6808e992017-04-05 15:32:09 -07005210 if (ret)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305211 dev_err(hba->dev, "link startup failed %d\n", ret);
Subhash Jadavanief542222017-08-02 16:23:55 -07005212 /*
5213 * For some external cards, link startup succeeds only after a few link
5214 * startup attempts and err_state may get set in this case.
5215 * But as the link startup has finally succeeded, we are clearing the
5216 * error state.
5217 */
5218 else if (hba->extcon)
5219 ufsdbg_clr_err_state(hba);
Subhash Jadavani6808e992017-04-05 15:32:09 -07005220
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305221 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305222}
5223
5224/**
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305225 * ufshcd_verify_dev_init() - Verify device initialization
5226 * @hba: per-adapter instance
5227 *
5228 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5229 * device Transport Protocol (UTP) layer is ready after a reset.
5230 * If the UTP layer at the device side is not initialized, it may
5231 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5232 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5233 */
5234static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5235{
5236 int err = 0;
5237 int retries;
5238
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005239 ufshcd_hold_all(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305240 mutex_lock(&hba->dev_cmd.lock);
5241 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5242 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5243 NOP_OUT_TIMEOUT);
5244
5245 if (!err || err == -ETIMEDOUT)
5246 break;
5247
5248 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5249 }
5250 mutex_unlock(&hba->dev_cmd.lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005251 ufshcd_release_all(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305252
5253 if (err)
5254 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5255 return err;
5256}
5257
5258/**
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005259 * ufshcd_set_queue_depth - set lun queue depth
5260 * @sdev: pointer to SCSI device
5261 *
5262 * Read bLUQueueDepth value and activate scsi tagged command
5263 * queueing. For WLUN, queue depth is set to 1. For best-effort
5264 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
5265 * value that host can queue.
5266 */
5267static void ufshcd_set_queue_depth(struct scsi_device *sdev)
5268{
5269 int ret = 0;
5270 u8 lun_qdepth;
5271 struct ufs_hba *hba;
5272
5273 hba = shost_priv(sdev->host);
5274
5275 lun_qdepth = hba->nutrs;
5276 ret = ufshcd_read_unit_desc_param(hba,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005277 ufshcd_scsi_to_upiu_lun(sdev->lun),
5278 UNIT_DESC_PARAM_LU_Q_DEPTH,
5279 &lun_qdepth,
5280 sizeof(lun_qdepth));
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005281
5282 /* Some WLUN doesn't support unit descriptor */
5283 if (ret == -EOPNOTSUPP)
5284 lun_qdepth = 1;
5285 else if (!lun_qdepth)
5286 /* eventually, we can figure out the real queue depth */
5287 lun_qdepth = hba->nutrs;
5288 else
5289 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
5290
5291 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
5292 __func__, lun_qdepth);
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01005293 scsi_change_queue_depth(sdev, lun_qdepth);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005294}
5295
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005296/*
5297 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
5298 * @hba: per-adapter instance
5299 * @lun: UFS device lun id
5300 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
5301 *
5302 * Returns 0 in case of success and the write protect status is returned in the
5303 * @b_lu_write_protect parameter.
5304 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
5305 * Returns -EINVAL in case of invalid parameters passed to this function.
5306 */
5307static int ufshcd_get_lu_wp(struct ufs_hba *hba,
5308 u8 lun,
5309 u8 *b_lu_write_protect)
5310{
5311 int ret;
5312
5313 if (!b_lu_write_protect)
5314 ret = -EINVAL;
5315 /*
5316 * According to UFS device spec, RPMB LU can't be write
5317 * protected so skip reading bLUWriteProtect parameter for
5318 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
5319 */
5320 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
5321 ret = -ENOTSUPP;
5322 else
5323 ret = ufshcd_read_unit_desc_param(hba,
5324 lun,
5325 UNIT_DESC_PARAM_LU_WR_PROTECT,
5326 b_lu_write_protect,
5327 sizeof(*b_lu_write_protect));
5328 return ret;
5329}
5330
5331/**
5332 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
5333 * status
5334 * @hba: per-adapter instance
5335 * @sdev: pointer to SCSI device
5336 *
5337 */
5338static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
5339 struct scsi_device *sdev)
5340{
5341 if (hba->dev_info.f_power_on_wp_en &&
5342 !hba->dev_info.is_lu_power_on_wp) {
5343 u8 b_lu_write_protect;
5344
5345 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
5346 &b_lu_write_protect) &&
5347 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
5348 hba->dev_info.is_lu_power_on_wp = true;
5349 }
5350}
5351
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005352/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305353 * ufshcd_slave_alloc - handle initial SCSI device configurations
5354 * @sdev: pointer to SCSI device
5355 *
5356 * Returns success
5357 */
5358static int ufshcd_slave_alloc(struct scsi_device *sdev)
5359{
5360 struct ufs_hba *hba;
5361
5362 hba = shost_priv(sdev->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305363
5364 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5365 sdev->use_10_for_ms = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305366
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305367 /* allow SCSI layer to restart the device in case of errors */
5368 sdev->allow_restart = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005369
Sujit Reddy Thummab2a6c522014-07-01 12:22:38 +03005370 /* REPORT SUPPORTED OPERATION CODES is not supported */
5371 sdev->no_report_opcodes = 1;
5372
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005373 /* WRITE_SAME command is not supported*/
5374 sdev->no_write_same = 1;
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005375
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005376 ufshcd_set_queue_depth(sdev);
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005377
Subhash Jadavani57d104c2014-09-25 15:32:30 +03005378 ufshcd_get_lu_power_on_wp_status(hba, sdev);
5379
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005380 return 0;
5381}
5382
5383/**
5384 * ufshcd_change_queue_depth - change queue depth
5385 * @sdev: pointer to SCSI device
5386 * @depth: required depth to set
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005387 *
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01005388 * Change queue depth and make sure the max. limits are not crossed.
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005389 */
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01005390static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03005391{
5392 struct ufs_hba *hba = shost_priv(sdev->host);
5393
5394 if (depth > hba->nutrs)
5395 depth = hba->nutrs;
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01005396 return scsi_change_queue_depth(sdev, depth);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305397}
5398
5399/**
Akinobu Mitaeeda4742014-07-01 23:00:32 +09005400 * ufshcd_slave_configure - adjust SCSI device configurations
5401 * @sdev: pointer to SCSI device
5402 */
5403static int ufshcd_slave_configure(struct scsi_device *sdev)
5404{
5405 struct request_queue *q = sdev->request_queue;
Subhash Jadavani5ea586f2016-08-17 19:08:09 -07005406 struct ufs_hba *hba = shost_priv(sdev->host);
Akinobu Mitaeeda4742014-07-01 23:00:32 +09005407
5408 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5409 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
5410
Subhash Jadavani5ea586f2016-08-17 19:08:09 -07005411 if (hba->scsi_cmd_timeout) {
5412 blk_queue_rq_timeout(q, hba->scsi_cmd_timeout * HZ);
5413 scsi_set_cmd_timeout_override(sdev, hba->scsi_cmd_timeout * HZ);
5414 }
5415
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005416 sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
5417 sdev->use_rpm_auto = 1;
5418
Akinobu Mitaeeda4742014-07-01 23:00:32 +09005419 return 0;
5420}
5421
5422/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305423 * ufshcd_slave_destroy - remove SCSI device configurations
5424 * @sdev: pointer to SCSI device
5425 */
5426static void ufshcd_slave_destroy(struct scsi_device *sdev)
5427{
5428 struct ufs_hba *hba;
5429
5430 hba = shost_priv(sdev->host);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005431 /* Drop the reference as it won't be needed anymore */
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005432 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5433 unsigned long flags;
5434
5435 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03005436 hba->sdev_ufs_device = NULL;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03005437 spin_unlock_irqrestore(hba->host->host_lock, flags);
5438 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305439}
5440
5441/**
5442 * ufshcd_task_req_compl - handle task management request completion
5443 * @hba: per adapter instance
5444 * @index: index of the completed request
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305445 * @resp: task management service response
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305446 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305447 * Returns non-zero value on error, zero on success
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305448 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305449static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305450{
5451 struct utp_task_req_desc *task_req_descp;
5452 struct utp_upiu_task_rsp *task_rsp_upiup;
5453 unsigned long flags;
5454 int ocs_value;
5455 int task_result;
5456
5457 spin_lock_irqsave(hba->host->host_lock, flags);
5458
5459 /* Clear completed tasks from outstanding_tasks */
5460 __clear_bit(index, &hba->outstanding_tasks);
5461
5462 task_req_descp = hba->utmrdl_base_addr;
5463 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
5464
5465 if (ocs_value == OCS_SUCCESS) {
5466 task_rsp_upiup = (struct utp_upiu_task_rsp *)
5467 task_req_descp[index].task_rsp_upiu;
Kiwoong Kim8794ee02016-09-09 08:22:22 +09005468 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
5469 task_result = task_result & MASK_TM_SERVICE_RESP;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305470 if (resp)
5471 *resp = (u8)task_result;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305472 } else {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305473 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5474 __func__, ocs_value);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305475 }
5476 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05305477
5478 return ocs_value;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305479}
5480
5481/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305482 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5483 * @lrbp: pointer to local reference block of completed command
5484 * @scsi_status: SCSI command status
5485 *
5486 * Returns value base on SCSI command status
5487 */
5488static inline int
5489ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5490{
5491 int result = 0;
5492
5493 switch (scsi_status) {
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305494 case SAM_STAT_CHECK_CONDITION:
5495 ufshcd_copy_sense_data(lrbp);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305496 case SAM_STAT_GOOD:
5497 result |= DID_OK << 16 |
5498 COMMAND_COMPLETE << 8 |
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305499 scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305500 break;
5501 case SAM_STAT_TASK_SET_FULL:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305502 case SAM_STAT_BUSY:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305503 case SAM_STAT_TASK_ABORTED:
Seungwon Jeon1c2623c2013-08-31 21:40:19 +05305504 ufshcd_copy_sense_data(lrbp);
5505 result |= scsi_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305506 break;
5507 default:
5508 result |= DID_ERROR << 16;
5509 break;
5510 } /* end of switch */
5511
5512 return result;
5513}
5514
5515/**
5516 * ufshcd_transfer_rsp_status - Get overall status of the response
5517 * @hba: per adapter instance
5518 * @lrbp: pointer to local reference block of completed command
5519 *
5520 * Returns result of the command to notify SCSI midlayer
5521 */
5522static inline int
5523ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5524{
5525 int result = 0;
5526 int scsi_status;
5527 int ocs;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005528 bool print_prdt;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305529
5530 /* overall command status of utrd */
5531 ocs = ufshcd_get_tr_ocs(lrbp);
5532
5533 switch (ocs) {
5534 case OCS_SUCCESS:
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305535 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005536 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305537 switch (result) {
5538 case UPIU_TRANSACTION_RESPONSE:
5539 /*
5540 * get the response UPIU result to extract
5541 * the SCSI command status
5542 */
5543 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5544
5545 /*
5546 * get the result based on SCSI status response
5547 * to notify the SCSI midlayer of the command status
5548 */
5549 scsi_status = result & MASK_SCSI_STATUS;
5550 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305551
Yaniv Gardif05ac2e2016-02-01 15:02:42 +02005552 /*
5553 * Currently we are only supporting BKOPs exception
5554 * events hence we can ignore BKOPs exception event
5555 * during power management callbacks. BKOPs exception
5556 * event is not expected to be raised in runtime suspend
5557 * callback as it allows the urgent bkops.
5558 * During system suspend, we are anyway forcefully
5559 * disabling the bkops and if urgent bkops is needed
5560 * it will be enabled on system resume. Long term
5561 * solution could be to abort the system suspend if
5562 * UFS device needs urgent BKOPs.
5563 */
5564 if (!hba->pm_op_in_progress &&
Sayali Lokhande52de3d32017-12-08 15:35:23 +05305565 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
5566 /*
5567 * Prevent suspend once eeh_work is scheduled
5568 * to avoid deadlock between ufshcd_suspend
5569 * and exception event handler.
5570 */
5571 if (schedule_work(&hba->eeh_work))
5572 pm_runtime_get_noresume(hba->dev);
5573 }
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305574 break;
5575 case UPIU_TRANSACTION_REJECT_UPIU:
5576 /* TODO: handle Reject UPIU Response */
5577 result = DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305578 dev_err(hba->dev,
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305579 "Reject UPIU not fully implemented\n");
5580 break;
5581 default:
5582 result = DID_ERROR << 16;
5583 dev_err(hba->dev,
5584 "Unexpected request response code = %x\n",
5585 result);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305586 break;
5587 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305588 break;
5589 case OCS_ABORTED:
5590 result |= DID_ABORT << 16;
5591 break;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05305592 case OCS_INVALID_COMMAND_STATUS:
5593 result |= DID_REQUEUE << 16;
5594 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305595 case OCS_INVALID_CMD_TABLE_ATTR:
5596 case OCS_INVALID_PRDT_ATTR:
5597 case OCS_MISMATCH_DATA_BUF_SIZE:
5598 case OCS_MISMATCH_RESP_UPIU_SIZE:
5599 case OCS_PEER_COMM_FAILURE:
5600 case OCS_FATAL_ERROR:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005601 case OCS_DEVICE_FATAL_ERROR:
5602 case OCS_INVALID_CRYPTO_CONFIG:
5603 case OCS_GENERAL_CRYPTO_ERROR:
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305604 default:
5605 result |= DID_ERROR << 16;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05305606 dev_err(hba->dev,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005607 "OCS error from controller = %x for tag %d\n",
5608 ocs, lrbp->task_tag);
Subhash Jadavani9c807702017-04-01 00:35:51 -07005609 /*
5610 * This is called in interrupt context, hence avoid sleep
5611 * while printing debug registers. Also print only the minimum
5612 * debug registers needed to debug OCS failure.
5613 */
5614 __ufshcd_print_host_regs(hba, true);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005615 ufshcd_print_host_state(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305616 break;
5617 } /* end of switch */
5618
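	/*
	 * For failed commands, dump the transfer request descriptor (and the
	 * PRDT when the OCS value points at a PRDT or data-size mismatch),
	 * unless error logs are being silenced.
	 */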
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005619 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
5620 print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
5621 ocs == OCS_MISMATCH_DATA_BUF_SIZE);
5622 ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
5623 }
5624
5625 if ((host_byte(result) == DID_ERROR) ||
5626 (host_byte(result) == DID_ABORT))
5627 ufsdbg_set_err_state(hba);
5628
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305629 return result;
5630}
5631
5632/**
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305633 * ufshcd_uic_cmd_compl - handle completion of uic command
5634 * @hba: per adapter instance
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305635 * @intr_status: interrupt status generated by the controller
Subhash Jadavani9c807702017-04-01 00:35:51 -07005636 *
5637 * Returns
5638 * IRQ_HANDLED - If interrupt is valid
5639 * IRQ_NONE - If invalid interrupt
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305640 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07005641static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305642{
Subhash Jadavani9c807702017-04-01 00:35:51 -07005643 irqreturn_t retval = IRQ_NONE;
5644
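	/*
	 * On UIC command completion, latch the command result and DME
	 * attribute value from the controller and wake up the waiter.
	 */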
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305645 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305646 hba->active_uic_cmd->argument2 |=
5647 ufshcd_get_uic_cmd_result(hba);
Seungwon Jeon12b4fdb2013-08-31 21:40:21 +05305648 hba->active_uic_cmd->argument3 =
5649 ufshcd_get_dme_attr_val(hba);
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305650 complete(&hba->active_uic_cmd->done);
Subhash Jadavani9c807702017-04-01 00:35:51 -07005651 retval = IRQ_HANDLED;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305652 }
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05305653
Subhash Jadavani9c807702017-04-01 00:35:51 -07005654 if (intr_status & UFSHCD_UIC_PWR_MASK) {
5655 if (hba->uic_async_done) {
5656 complete(hba->uic_async_done);
5657 retval = IRQ_HANDLED;
5658 } else if (ufshcd_is_auto_hibern8_supported(hba)) {
5659 /*
5660 * If uic_async_done flag is not set then this
5661 * is an Auto hibern8 err interrupt.
5662 * Perform a host reset followed by a full
5663 * link recovery.
5664 */
5665 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5666 hba->force_host_reset = true;
5667 dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5668 __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
5669 "Enter" : "Exit",
5670 intr_status, ufshcd_get_upmcrs(hba));
Subhash Jadavanief542222017-08-02 16:23:55 -07005671 /*
5672 * It is possible to see auto-h8 errors during card
5673 * removal, so set this flag and let the error handler
5674 * decide whether this error was seen while the card was
5675 * present or was due to card removal.
5676 * If the error is seen during card removal, we don't want
5677 * to print out the debug messages.
5678 */
5679 hba->auto_h8_err = true;
Subhash Jadavani9c807702017-04-01 00:35:51 -07005680 schedule_work(&hba->eh_work);
5681 retval = IRQ_HANDLED;
5682 }
5683 }
5684 return retval;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05305685}
5686
5687/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005688 * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
5689 * @hba: per adapter instance
5690 * @result: error result to inform scsi layer about
5691 */
5692void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
5693{
5694 u8 index;
5695 struct ufshcd_lrb *lrbp;
5696 struct scsi_cmnd *cmd;
5697
5698 if (!hba->outstanding_reqs)
5699 return;
5700
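	/*
	 * Walk every outstanding transfer request, force-clear it in the
	 * controller and complete it back to the SCSI layer (or wake the
	 * device management command waiter) with the supplied result.
	 */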
5701 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5702 lrbp = &hba->lrb[index];
5703 cmd = lrbp->cmd;
5704 if (cmd) {
Subhash Jadavani114437e2017-11-08 16:22:16 -08005705 ufshcd_cond_add_cmd_trace(hba, index, "scsi_failed");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005706 ufshcd_update_error_stats(hba,
5707 UFS_ERR_INT_FATAL_ERRORS);
5708 scsi_dma_unmap(cmd);
5709 cmd->result = result;
5710 /* Clear pending transfer requests */
5711 ufshcd_clear_cmd(hba, index);
5712 ufshcd_outstanding_req_clear(hba, index);
5713 clear_bit_unlock(index, &hba->lrb_in_use);
5714 lrbp->complete_time_stamp = ktime_get();
5715 update_req_stats(hba, lrbp);
5716 /* Mark completed command as NULL in LRB */
5717 lrbp->cmd = NULL;
5718 ufshcd_release_all(hba);
5719 if (cmd->request) {
5720 /*
5721 * As we are accessing the "request" structure,
5722 * this must be called before calling
5723 * ->scsi_done() callback.
5724 */
5725 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5726 true);
5727 ufshcd_vops_crypto_engine_cfg_end(hba,
5728 lrbp, cmd->request);
5729 }
5730 /* Do not touch lrbp after scsi done */
5731 cmd->scsi_done(cmd);
5732 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5733 if (hba->dev_cmd.complete) {
5734 ufshcd_cond_add_cmd_trace(hba, index,
Subhash Jadavani114437e2017-11-08 16:22:16 -08005735 "dev_cmd_failed");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005736 ufshcd_outstanding_req_clear(hba, index);
5737 complete(hba->dev_cmd.complete);
5738 }
5739 }
5740 if (ufshcd_is_clkscaling_supported(hba))
5741 hba->clk_scaling.active_reqs--;
5742 }
5743}
5744
5745/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005746 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305747 * @hba: per adapter instance
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005748 * @completed_reqs: requests to complete
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305749 */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005750static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5751 unsigned long completed_reqs)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305752{
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305753 struct ufshcd_lrb *lrbp;
5754 struct scsi_cmnd *cmd;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305755 int result;
5756 int index;
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07005757 struct request *req;
Dolev Ravive9d501b2014-07-01 12:22:37 +03005758
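	/*
	 * For each completed request: SCSI commands are unmapped and returned
	 * to the midlayer via ->scsi_done(), while device management commands
	 * simply wake up their waiter.
	 */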
Dolev Ravive9d501b2014-07-01 12:22:37 +03005759 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5760 lrbp = &hba->lrb[index];
5761 cmd = lrbp->cmd;
5762 if (cmd) {
Subhash Jadavani114437e2017-11-08 16:22:16 -08005763 ufshcd_cond_add_cmd_trace(hba, index, "scsi_cmpl");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005764 ufshcd_update_tag_stats_completion(hba, cmd);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005765 result = ufshcd_transfer_rsp_status(hba, lrbp);
5766 scsi_dma_unmap(cmd);
5767 cmd->result = result;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005768 clear_bit_unlock(index, &hba->lrb_in_use);
5769 lrbp->complete_time_stamp = ktime_get();
5770 update_req_stats(hba, lrbp);
Dolev Ravive9d501b2014-07-01 12:22:37 +03005771 /* Mark completed command as NULL in LRB */
5772 lrbp->cmd = NULL;
Asutosh Das3da913a2017-03-24 10:32:16 +05305773 hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005774 __ufshcd_release(hba, false);
5775 __ufshcd_hibern8_release(hba, false);
5776 if (cmd->request) {
5777 /*
5778 * As we are accessing the "request" structure,
5779 * this must be called before calling
5780 * ->scsi_done() callback.
5781 */
5782 ufshcd_vops_pm_qos_req_end(hba, cmd->request,
5783 false);
5784 ufshcd_vops_crypto_engine_cfg_end(hba,
5785 lrbp, cmd->request);
5786 }
5787
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07005788 req = cmd->request;
5789 if (req) {
5790 /* Update IO svc time latency histogram */
5791 if (req->lat_hist_enabled) {
5792 ktime_t completion;
5793 u_int64_t delta_us;
5794
5795 completion = ktime_get();
5796 delta_us = ktime_us_delta(completion,
5797 req->lat_hist_io_start);
5798 /* rq_data_dir() => true if WRITE */
5799 blk_update_latency_hist(&hba->io_lat_s,
5800 (rq_data_dir(req) == READ),
5801 delta_us);
5802 }
5803 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03005804 /* Do not touch lrbp after scsi done */
5805 cmd->scsi_done(cmd);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005806 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
5807 if (hba->dev_cmd.complete) {
5808 ufshcd_cond_add_cmd_trace(hba, index,
Subhash Jadavani114437e2017-11-08 16:22:16 -08005809 "dev_cmd_cmpl");
Dolev Ravive9d501b2014-07-01 12:22:37 +03005810 complete(hba->dev_cmd.complete);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005811 }
Dolev Ravive9d501b2014-07-01 12:22:37 +03005812 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005813 if (ufshcd_is_clkscaling_supported(hba))
5814 hba->clk_scaling.active_reqs--;
Dolev Ravive9d501b2014-07-01 12:22:37 +03005815 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305816
5817 /* clear corresponding bits of completed commands */
5818 hba->outstanding_reqs ^= completed_reqs;
5819
Sahitya Tummala856b3482014-09-25 15:32:34 +03005820 ufshcd_clk_scaling_update_busy(hba);
5821
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05305822 /* we might have free'd some tags above */
5823 wake_up(&hba->dev_cmd.tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05305824}
5825
5826/**
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005827 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5828 * @hba: per adapter instance
Subhash Jadavani9c807702017-04-01 00:35:51 -07005829 *
5830 * Returns
5831 * IRQ_HANDLED - If interrupt is valid
5832 * IRQ_NONE - If invalid interrupt
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005833 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07005834static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005835{
5836 unsigned long completed_reqs;
5837 u32 tr_doorbell;
5838
5839 /* Resetting interrupt aggregation counters first and reading the
5840 * DOOR_BELL afterward allows us to handle all the completed requests.
5841 * To prevent starvation of other interrupts, the DB is read once
5842 * after reset. The downside of this solution is the possibility of a
5843 * false interrupt if the device completes another request after
5844 * resetting aggregation and before reading the DB.
5845 */
5846 if (ufshcd_is_intr_aggr_allowed(hba))
5847 ufshcd_reset_intr_aggr(hba);
5848
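	/*
	 * Requests whose bits are set in outstanding_reqs but cleared in the
	 * doorbell register have been completed by the controller.
	 */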
5849 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5850 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5851
Subhash Jadavani9c807702017-04-01 00:35:51 -07005852 if (completed_reqs) {
5853 __ufshcd_transfer_req_compl(hba, completed_reqs);
5854 return IRQ_HANDLED;
5855 } else {
5856 return IRQ_NONE;
5857 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02005858}
5859
5860/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305861 * ufshcd_disable_ee - disable exception event
5862 * @hba: per-adapter instance
5863 * @mask: exception event to disable
5864 *
5865 * Disables exception event in the device so that the EVENT_ALERT
5866 * bit is not set.
5867 *
5868 * Returns zero on success, non-zero error value on failure.
5869 */
5870static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5871{
5872 int err = 0;
5873 u32 val;
5874
5875 if (!(hba->ee_ctrl_mask & mask))
5876 goto out;
5877
5878 val = hba->ee_ctrl_mask & ~mask;
5879 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005880 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305881 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5882 if (!err)
5883 hba->ee_ctrl_mask &= ~mask;
5884out:
5885 return err;
5886}
5887
5888/**
5889 * ufshcd_enable_ee - enable exception event
5890 * @hba: per-adapter instance
5891 * @mask: exception event to enable
5892 *
5893 * Enable corresponding exception event in the device to allow
5894 * device to alert host in critical scenarios.
5895 *
5896 * Returns zero on success, non-zero error value on failure.
5897 */
5898static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5899{
5900 int err = 0;
5901 u32 val;
5902
5903 if (hba->ee_ctrl_mask & mask)
5904 goto out;
5905
5906 val = hba->ee_ctrl_mask | mask;
5907 val &= 0xFFFF; /* 2 bytes */
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02005908 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305909 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5910 if (!err)
5911 hba->ee_ctrl_mask |= mask;
5912out:
5913 return err;
5914}
5915
5916/**
5917 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5918 * @hba: per-adapter instance
5919 *
5920 * Allow device to manage background operations on its own. Enabling
5921 * this might lead to inconsistent latencies during normal data transfers
5922 * as the device is allowed to manage its own way of handling background
5923 * operations.
5924 *
5925 * Returns zero on success, non-zero on failure.
5926 */
5927static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5928{
5929 int err = 0;
5930
5931 if (hba->auto_bkops_enabled)
5932 goto out;
5933
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005934 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305935 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5936 if (err) {
5937 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5938 __func__, err);
5939 goto out;
5940 }
5941
5942 hba->auto_bkops_enabled = true;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005943 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305944
5945 /* No need of URGENT_BKOPS exception from the device */
5946 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5947 if (err)
5948 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5949 __func__, err);
5950out:
5951 return err;
5952}
5953
5954/**
5955 * ufshcd_disable_auto_bkops - block device in doing background operations
5956 * @hba: per-adapter instance
5957 *
5958 * Disabling background operations improves command response latency but
5959 * has drawback of device moving into critical state where the device is
5960 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5961 * host is idle so that BKOPS are managed effectively without any negative
5962 * impacts.
5963 *
5964 * Returns zero on success, non-zero on failure.
5965 */
5966static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5967{
5968 int err = 0;
5969
5970 if (!hba->auto_bkops_enabled)
5971 goto out;
5972
5973 /*
5974 * If host assisted BKOPs is to be enabled, make sure
5975 * urgent bkops exception is allowed.
5976 */
5977 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5978 if (err) {
5979 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5980 __func__, err);
5981 goto out;
5982 }
5983
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02005984 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305985 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5986 if (err) {
5987 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5988 __func__, err);
5989 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5990 goto out;
5991 }
5992
5993 hba->auto_bkops_enabled = false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07005994 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05305995out:
5996 return err;
5997}
5998
5999/**
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006000 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306001 * @hba: per adapter instance
6002 *
6003 * After a device reset the device may toggle the BKOPS_EN flag
6004 * to its default value. The s/w tracking variables should be updated
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006005 * as well. This function changes the auto-bkops state based on
6006 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306007 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006008static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306009{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006010 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
6011 hba->auto_bkops_enabled = false;
6012 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
6013 ufshcd_enable_auto_bkops(hba);
6014 } else {
6015 hba->auto_bkops_enabled = true;
6016 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
6017 ufshcd_disable_auto_bkops(hba);
6018 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306019}
6020
6021static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
6022{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02006023 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306024 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
6025}
6026
6027/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006028 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
6029 * @hba: per-adapter instance
6030 * @status: bkops_status value
6031 *
6032 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
6033 * flag in the device to permit background operations if the device
6034 * bkops_status is greater than or equal to the "status" argument passed to
6035 * this function; disable it otherwise.
6036 *
6037 * Returns 0 for success, non-zero in case of failure.
6038 *
6039 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
6040 * to know whether auto bkops is enabled or disabled after this function
6041 * returns control to it.
6042 */
6043static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
6044 enum bkops_status status)
6045{
6046 int err;
6047 u32 curr_status = 0;
6048
6049 err = ufshcd_get_bkops_status(hba, &curr_status);
6050 if (err) {
6051 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
6052 __func__, err);
6053 goto out;
6054 } else if (curr_status > BKOPS_STATUS_MAX) {
6055 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
6056 __func__, curr_status);
6057 err = -EINVAL;
6058 goto out;
6059 }
6060
6061 if (curr_status >= status)
6062 err = ufshcd_enable_auto_bkops(hba);
6063 else
6064 err = ufshcd_disable_auto_bkops(hba);
6065out:
6066 return err;
6067}
6068
6069/**
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306070 * ufshcd_urgent_bkops - handle urgent bkops exception event
6071 * @hba: per-adapter instance
6072 *
6073 * Enable fBackgroundOpsEn flag in the device to permit background
6074 * operations.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03006075 *
6076 * If BKOPS is enabled, this function returns 0; it returns 1 if bkops is
6077 * not enabled, and a negative error value for any other failure.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306078 */
6079static int ufshcd_urgent_bkops(struct ufs_hba *hba)
6080{
Yaniv Gardiafdfff52016-03-10 17:37:15 +02006081 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306082}
6083
6084static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
6085{
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02006086 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306087 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
6088}
6089
Yaniv Gardiafdfff52016-03-10 17:37:15 +02006090static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
6091{
6092 int err;
6093 u32 curr_status = 0;
6094
6095 if (hba->is_urgent_bkops_lvl_checked)
6096 goto enable_auto_bkops;
6097
6098 err = ufshcd_get_bkops_status(hba, &curr_status);
6099 if (err) {
6100 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
6101 __func__, err);
6102 goto out;
6103 }
6104
6105 /*
6106 * Some devices raise the urgent bkops exception event even when the
6107 * BKOPS status doesn't indicate a performance-impacted or critical
6108 * level. Handle such devices by determining their urgent bkops
6109 * status at runtime.
6110 */
6111 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
6112 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
6113 __func__, curr_status);
6114 /* update the current status as the urgent bkops level */
6115 hba->urgent_bkops_lvl = curr_status;
6116 hba->is_urgent_bkops_lvl_checked = true;
6117 }
6118
6119enable_auto_bkops:
6120 err = ufshcd_enable_auto_bkops(hba);
6121out:
6122 if (err < 0)
6123 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
6124 __func__, err);
6125}
6126
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306127/**
6128 * ufshcd_exception_event_handler - handle exceptions raised by device
6129 * @work: pointer to work data
6130 *
6131 * Read bExceptionEventStatus attribute from the device and handle the
6132 * exception event accordingly.
6133 */
6134static void ufshcd_exception_event_handler(struct work_struct *work)
6135{
6136 struct ufs_hba *hba;
6137 int err;
6138 u32 status = 0;
6139 hba = container_of(work, struct ufs_hba, eeh_work);
6140
Sujit Reddy Thumma62694732013-07-30 00:36:00 +05306141 pm_runtime_get_sync(hba->dev);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006142 ufshcd_scsi_block_requests(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306143 err = ufshcd_get_ee_status(hba, &status);
6144 if (err) {
6145 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6146 __func__, err);
6147 goto out;
6148 }
6149
6150 status &= hba->ee_ctrl_mask;
Yaniv Gardiafdfff52016-03-10 17:37:15 +02006151
6152 if (status & MASK_EE_URGENT_BKOPS)
6153 ufshcd_bkops_exception_event_handler(hba);
6154
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306155out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006156 ufshcd_scsi_unblock_requests(hba);
Sayali Lokhande52de3d32017-12-08 15:35:23 +05306157 /*
6158 * pm_runtime_get_noresume is called while scheduling
6159 * eeh_work to avoid suspend racing with exception work.
6160 * Hence decrement usage counter using pm_runtime_put_noidle
6161 * to allow suspend on completion of exception event handler.
6162 */
6163 pm_runtime_put_noidle(hba->dev);
Asutosh Das43545b72017-11-14 17:01:40 +05306164 pm_runtime_put(hba->dev);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306165 return;
6166}
6167
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006168/* Complete requests that have door-bell cleared */
6169static void ufshcd_complete_requests(struct ufs_hba *hba)
6170{
6171 ufshcd_transfer_req_compl(hba);
6172 ufshcd_tmc_handler(hba);
6173}
6174
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05306175/**
Yaniv Gardi583fa622016-03-10 17:37:13 +02006176 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6177 * to recover from DL NAC errors.
6178 * @hba: per-adapter instance
6179 *
6180 * Returns true if error handling is required, false otherwise
6181 */
6182static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6183{
6184 unsigned long flags;
6185 bool err_handling = true;
6186
6187 spin_lock_irqsave(hba->host->host_lock, flags);
6188 /*
6189 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
6190 * device fatal errors and/or DL NAC & REPLAY timeout errors.
6191 */
6192 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6193 goto out;
6194
6195 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6196 ((hba->saved_err & UIC_ERROR) &&
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006197 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
6198 /*
6199 * We have to do error recovery, but at least silence the error
6200 * logs.
6201 */
6202 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006203 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006204 }
Yaniv Gardi583fa622016-03-10 17:37:13 +02006205
6206 if ((hba->saved_err & UIC_ERROR) &&
6207 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6208 int err;
6209 /*
6210 * Wait for 50 ms to see whether any other errors show up.
6211 */
6212 spin_unlock_irqrestore(hba->host->host_lock, flags);
6213 msleep(50);
6214 spin_lock_irqsave(hba->host->host_lock, flags);
6215
6216 /*
6217 * Now check whether we have received any severe errors other than
6218 * the DL NAC error.
6219 */
6220 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6221 ((hba->saved_err & UIC_ERROR) &&
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006222 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
6223 if (((hba->saved_err & INT_FATAL_ERRORS) ==
6224 DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
6225 ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
6226 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006227 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006228 }
Yaniv Gardi583fa622016-03-10 17:37:13 +02006229
6230 /*
6231 * As DL NAC is the only error received so far, send out a NOP
6232 * command to confirm whether the link is still active.
6233 * - If we don't get any response, do error recovery.
6234 * - If we get a response, clear the DL NAC error bit.
6235 */
6236
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006237 /* silence the error logs from NOP command */
6238 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006239 spin_unlock_irqrestore(hba->host->host_lock, flags);
6240 err = ufshcd_verify_dev_init(hba);
6241 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006242 hba->silence_err_logs = false;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006243
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006244 if (err) {
6245 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006246 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006247 }
Yaniv Gardi583fa622016-03-10 17:37:13 +02006248
6249 /* Link seems to be alive hence ignore the DL NAC errors */
6250 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6251 hba->saved_err &= ~UIC_ERROR;
6252 /* clear NAC error */
6253 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6254 if (!hba->saved_uic_err) {
6255 err_handling = false;
6256 goto out;
6257 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006258 /*
6259 * There seem to be errors other than NAC, so do error
6260 * recovery.
6261 */
6262 hba->silence_err_logs = true;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006263 }
6264out:
6265 spin_unlock_irqrestore(hba->host->host_lock, flags);
6266 return err_handling;
6267}
6268
6269/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306270 * ufshcd_err_handler - handle UFS errors that require s/w attention
6271 * @work: pointer to work structure
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306272 */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306273static void ufshcd_err_handler(struct work_struct *work)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306274{
6275 struct ufs_hba *hba;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306276 unsigned long flags;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006277 bool err_xfer = false, err_tm = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306278 int err = 0;
6279 int tag;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006280 bool needs_reset = false;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006281 bool clks_enabled = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306282
6283 hba = container_of(work, struct ufs_hba, eh_work);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306284
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306285 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanief542222017-08-02 16:23:55 -07006286 if (hba->extcon) {
6287 if (ufshcd_is_card_online(hba)) {
6288 spin_unlock_irqrestore(hba->host->host_lock, flags);
6289 /*
6290 * TODO: need better way to ensure that this delay is
6291 * more than extcon's debounce-ms
6292 */
6293 msleep(300);
6294 spin_lock_irqsave(hba->host->host_lock, flags);
6295 }
6296
6297 /*
6298 * Ignore the error if the card was online and is now offline/removed,
6299 * or if the card was already offline.
6300 */
6301 if (ufshcd_is_card_offline(hba)) {
6302 hba->saved_err = 0;
6303 hba->saved_uic_err = 0;
6304 hba->saved_ce_err = 0;
6305 hba->auto_h8_err = false;
6306 hba->force_host_reset = false;
6307 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6308 goto out;
6309 }
6310 }
6311
Subhash Jadavani9c807702017-04-01 00:35:51 -07006312 ufsdbg_set_err_state(hba);
6313
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006314 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306315 goto out;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306316
Subhash Jadavani9c807702017-04-01 00:35:51 -07006317 /*
6318 * Make sure the clocks are ON before we proceed with err
6319 * handling. For the majority of cases err handler would be
6320 * run with clocks ON. There is a possibility that the err
6321 * handler was scheduled due to auto hibern8 error interrupt,
6322 * in which case the clocks could be gated or be in the
6323 * process of gating when the err handler runs.
6324 */
6325 if (unlikely((hba->clk_gating.state != CLKS_ON) &&
6326 ufshcd_is_auto_hibern8_supported(hba))) {
6327 spin_unlock_irqrestore(hba->host->host_lock, flags);
Asutosh Das3da913a2017-03-24 10:32:16 +05306328 hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006329 ufshcd_hold(hba, false);
6330 spin_lock_irqsave(hba->host->host_lock, flags);
6331 clks_enabled = true;
6332 }
6333
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306334 hba->ufshcd_state = UFSHCD_STATE_RESET;
6335 ufshcd_set_eh_in_progress(hba);
6336
6337 /* Complete requests that have door-bell cleared by h/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006338 ufshcd_complete_requests(hba);
Yaniv Gardi583fa622016-03-10 17:37:13 +02006339
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08006340 if (hba->dev_info.quirks &
6341 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
Yaniv Gardi583fa622016-03-10 17:37:13 +02006342 bool ret;
6343
6344 spin_unlock_irqrestore(hba->host->host_lock, flags);
6345 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6346 ret = ufshcd_quirk_dl_nac_errors(hba);
6347 spin_lock_irqsave(hba->host->host_lock, flags);
6348 if (!ret)
6349 goto skip_err_handling;
6350 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006351
6352 /*
6353 * Dump the controller state before resetting. The transfer request
6354 * state will be dumped as part of request completion.
6355 */
Subhash Jadavanief542222017-08-02 16:23:55 -07006356 if ((hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) ||
6357 hba->auto_h8_err) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006358 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
6359 __func__, hba->saved_err, hba->saved_uic_err);
6360 if (!hba->silence_err_logs) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07006361 /* release lock as print host regs sleeps */
6362 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006363 ufshcd_print_host_regs(hba);
6364 ufshcd_print_host_state(hba);
6365 ufshcd_print_pwr_info(hba);
6366 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
Can Guof6411eb2017-06-09 15:17:22 +08006367 ufshcd_print_cmd_log(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006368 spin_lock_irqsave(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006369 }
Subhash Jadavanief542222017-08-02 16:23:55 -07006370 hba->auto_h8_err = false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006371 }
6372
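	/*
	 * Fatal controller errors, crypto engine errors, an explicit host
	 * reset request or unrecoverable UIC data link errors all require a
	 * full reset and restore.
	 */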
Subhash Jadavani9c807702017-04-01 00:35:51 -07006373 if ((hba->saved_err & INT_FATAL_ERRORS)
6374 || hba->saved_ce_err || hba->force_host_reset ||
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006375 ((hba->saved_err & UIC_ERROR) &&
6376 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
6377 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6378 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
6379 needs_reset = true;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306380
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006381 /*
6382 * if host reset is required then skip clearing the pending
6383 * transfers forcefully because they will automatically get
6384 * cleared after link startup.
6385 */
6386 if (needs_reset)
6387 goto skip_pending_xfer_clear;
6388
6389 /* release lock as clear command might sleep */
6390 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306391 /* Clear pending transfer requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006392 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6393 if (ufshcd_clear_cmd(hba, tag)) {
6394 err_xfer = true;
6395 goto lock_skip_pending_xfer_clear;
6396 }
6397 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306398
6399 /* Clear pending task management requests */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006400 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6401 if (ufshcd_clear_tm_cmd(hba, tag)) {
6402 err_tm = true;
6403 goto lock_skip_pending_xfer_clear;
6404 }
6405 }
6406
6407lock_skip_pending_xfer_clear:
6408 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306409
6410 /* Complete the requests that are cleared by s/w */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006411 ufshcd_complete_requests(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306412
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006413 if (err_xfer || err_tm)
6414 needs_reset = true;
6415
6416skip_pending_xfer_clear:
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306417 /* Fatal errors need reset */
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006418 if (needs_reset) {
6419 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
6420
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006421 if (hba->saved_err & INT_FATAL_ERRORS)
6422 ufshcd_update_error_stats(hba,
6423 UFS_ERR_INT_FATAL_ERRORS);
6424 if (hba->saved_ce_err)
6425 ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
6426
6427 if (hba->saved_err & UIC_ERROR)
6428 ufshcd_update_error_stats(hba,
6429 UFS_ERR_INT_UIC_ERROR);
6430
6431 if (err_xfer || err_tm)
6432 ufshcd_update_error_stats(hba,
6433 UFS_ERR_CLEAR_PEND_XFER_TM);
6434
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006435 /*
6436 * ufshcd_reset_and_restore() does the link reinitialization,
6437 * which needs at least one empty doorbell slot to send the
6438 * device management commands (NOP and query commands).
6439 * If no slot is empty at this moment, forcefully free up the
6440 * last slot.
6441 */
6442 if (hba->outstanding_reqs == max_doorbells)
6443 __ufshcd_transfer_req_compl(hba,
6444 (1UL << (hba->nutrs - 1)));
6445
6446 spin_unlock_irqrestore(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306447 err = ufshcd_reset_and_restore(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006448 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306449 if (err) {
6450 dev_err(hba->dev, "%s: reset and restore failed\n",
6451 __func__);
6452 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6453 }
6454 /*
6455 * Inform scsi mid-layer that we did reset and allow to handle
6456 * Unit Attention properly.
6457 */
6458 scsi_report_bus_reset(hba->host, 0);
6459 hba->saved_err = 0;
6460 hba->saved_uic_err = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006461 hba->saved_ce_err = 0;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006462 hba->force_host_reset = false;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306463 }
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006464
Yaniv Gardi583fa622016-03-10 17:37:13 +02006465skip_err_handling:
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006466 if (!needs_reset) {
6467 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6468 if (hba->saved_err || hba->saved_uic_err)
6469 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6470 __func__, hba->saved_err, hba->saved_uic_err);
6471 }
6472
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006473 hba->silence_err_logs = false;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006474
Asutosh Das3da913a2017-03-24 10:32:16 +05306475 if (clks_enabled) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07006476 __ufshcd_release(hba, false);
Asutosh Das3da913a2017-03-24 10:32:16 +05306477 hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
6478 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306479out:
Subhash Jadavani9c807702017-04-01 00:35:51 -07006480 ufshcd_clear_eh_in_progress(hba);
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006481 spin_unlock_irqrestore(hba->host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306482}
6483
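/*
 * ufshcd_update_uic_reg_hist - record a UIC error register value along with
 * a timestamp in the circular error history used for debugging.
 */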
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006484static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
6485 u32 reg)
6486{
6487 reg_hist->reg[reg_hist->pos] = reg;
6488 reg_hist->tstamp[reg_hist->pos] = ktime_get();
6489 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
6490}
6491
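/*
 * ufshcd_rls_handler - work handler scheduled when a PHY LINERESET is
 * indicated. It waits for the doorbell to clear, reads back the current
 * power mode and gear settings and, if they no longer match the cached
 * pwr_info, reconfigures the power mode to restore them.
 */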
Asutosh Das3923c232017-09-15 16:14:26 +05306492static void ufshcd_rls_handler(struct work_struct *work)
6493{
6494 struct ufs_hba *hba;
6495 int ret = 0;
6496 u32 mode;
6497
6498 hba = container_of(work, struct ufs_hba, rls_work);
6499 ufshcd_scsi_block_requests(hba);
6500 pm_runtime_get_sync(hba->dev);
6501 ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
6502 if (ret) {
6503 dev_err(hba->dev,
6504 "Timed out (%d) waiting for DB to clear\n",
6505 ret);
6506 goto out;
6507 }
6508
6509 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6510 if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
6511 hba->restore_needed = true;
6512
6513 if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
6514 hba->restore_needed = true;
6515
6516 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
6517 if (hba->pwr_info.gear_rx != mode)
6518 hba->restore_needed = true;
6519
6520 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
6521 if (hba->pwr_info.gear_tx != mode)
6522 hba->restore_needed = true;
6523
6524 if (hba->restore_needed)
6525 ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6526
6527 if (ret)
6528 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6529 __func__, ret);
6530 else
6531 hba->restore_needed = false;
6532
6533out:
6534 ufshcd_scsi_unblock_requests(hba);
6535 pm_runtime_put_sync(hba->dev);
6536}
6537
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306538/**
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306539 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6540 * @hba: per-adapter instance
Subhash Jadavani9c807702017-04-01 00:35:51 -07006541 *
6542 * Returns
6543 * IRQ_HANDLED - If interrupt is valid
6544 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306545 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07006546static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306547{
6548 u32 reg;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006549 irqreturn_t retval = IRQ_NONE;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306550
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006551 /* PHY layer lane error */
6552 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006553 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
Subhash Jadavani9c807702017-04-01 00:35:51 -07006554 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006555 /*
6556 * To know whether this error is fatal or not, DB timeout
6557 * must be checked but this error is handled separately.
6558 */
6559 dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
6560 __func__, reg);
6561 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006562
6563 /*
6564 * Don't ignore LINERESET indication during hibern8
6565 * enter operation.
6566 */
6567 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6568 struct uic_command *cmd = hba->active_uic_cmd;
6569
6570 if (cmd) {
6571 if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
6572 dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
6573 __func__, reg);
6574 hba->full_init_linereset = true;
6575 }
6576 }
Asutosh Das3923c232017-09-15 16:14:26 +05306577 if (!hba->full_init_linereset)
6578 schedule_work(&hba->rls_work);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006579 }
6580 retval |= IRQ_HANDLED;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006581 }
6582
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306583 /* PA_INIT_ERROR is fatal and needs UIC reset */
6584 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006585 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6586 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006587 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
6588
Subhash Jadavani9c807702017-04-01 00:35:51 -07006589 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
6590 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6591 } else if (hba->dev_info.quirks &
6592 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6593 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6594 hba->uic_error |=
6595 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6596 else if (reg &
6597 UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6598 hba->uic_error |=
6599 UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6600 }
6601 retval |= IRQ_HANDLED;
Yaniv Gardi583fa622016-03-10 17:37:13 +02006602 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306603
6604 /* UIC NL/TL/DME errors needs software retry */
6605 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006606 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6607 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006608 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306609 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006610 retval |= IRQ_HANDLED;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006611 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306612
6613 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006614 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6615 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006616 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306617 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006618 retval |= IRQ_HANDLED;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006619 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306620
6621 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006622 if ((reg & UIC_DME_ERROR) &&
6623 (reg & UIC_DME_ERROR_CODE_MASK)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006624 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306625 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006626 retval |= IRQ_HANDLED;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006627 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306628
6629 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6630 __func__, hba->uic_error);
Subhash Jadavani9c807702017-04-01 00:35:51 -07006631 return retval;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306632}
6633
6634/**
6635 * ufshcd_check_errors - Check for errors that need s/w attention
6636 * @hba: per-adapter instance
Subhash Jadavani9c807702017-04-01 00:35:51 -07006637 *
6638 * Returns
6639 * IRQ_HANDLED - If interrupt is valid
6640 * IRQ_NONE - If invalid interrupt
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306641 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07006642static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306643{
6644 bool queue_eh_work = false;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006645 irqreturn_t retval = IRQ_NONE;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306646
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006647 if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306648 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306649
6650 if (hba->errors & UIC_ERROR) {
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306651 hba->uic_error = 0;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006652 retval = ufshcd_update_uic_error(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306653 if (hba->uic_error)
6654 queue_eh_work = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306655 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306656
Subhash Jadavanief542222017-08-02 16:23:55 -07006657 if (hba->extcon && ufshcd_is_card_offline(hba)) {
6658 /* ignore UIC errors if card is offline */
6659 retval |= IRQ_HANDLED;
6660 } else if (queue_eh_work) {
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006661 /*
6662 * Update the transfer error masks to sticky bits; do this
6663 * irrespective of the current ufshcd_state.
6664 */
6665 hba->saved_err |= hba->errors;
6666 hba->saved_uic_err |= hba->uic_error;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006667 hba->saved_ce_err |= hba->ce_error;
Yaniv Gardi9a47ec72016-03-10 17:37:12 +02006668
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306669 /* handle fatal errors only when link is functional */
6670 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07006671 /*
6672 * Set error handling in progress flag early so that we
6673 * don't issue new requests any more.
6674 */
6675 ufshcd_set_eh_in_progress(hba);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306676
Zang Leiganga17bddc2017-04-04 19:32:20 +00006677 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306678 schedule_work(&hba->eh_work);
6679 }
Subhash Jadavani9c807702017-04-01 00:35:51 -07006680 retval |= IRQ_HANDLED;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306681 }
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05306682 /*
6683 * if (!queue_eh_work) -
6684 * Other errors are either non-fatal where host recovers
6685 * itself without s/w intervention or errors that will be
6686 * handled by the SCSI core layer.
6687 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07006688 return retval;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306689}
6690
6691/**
6692 * ufshcd_tmc_handler - handle task management function completion
6693 * @hba: per adapter instance
Subhash Jadavani9c807702017-04-01 00:35:51 -07006694 *
6695 * Returns
6696 * IRQ_HANDLED - If interrupt is valid
6697 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306698 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07006699static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306700{
6701 u32 tm_doorbell;
6702
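	/*
	 * Task management requests that are still marked outstanding but whose
	 * doorbell bits have cleared are complete; wake up their waiters.
	 */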
Seungwon Jeonb873a2752013-06-26 22:39:26 +05306703 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306704 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006705 if (hba->tm_condition) {
6706 wake_up(&hba->tm_wq);
6707 return IRQ_HANDLED;
6708 } else {
6709 return IRQ_NONE;
6710 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306711}
6712
6713/**
6714 * ufshcd_sl_intr - Interrupt service routine
6715 * @hba: per adapter instance
6716 * @intr_status: contains interrupts generated by the controller
Subhash Jadavani9c807702017-04-01 00:35:51 -07006717 *
6718 * Returns
6719 * IRQ_HANDLED - If interrupt is valid
6720 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306721 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07006722static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306723{
Subhash Jadavani9c807702017-04-01 00:35:51 -07006724 irqreturn_t retval = IRQ_NONE;
6725
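	/*
	 * Give the debugfs error injection hook a chance to alter the
	 * interrupt status before it is dispatched.
	 */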
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006726 ufsdbg_error_inject_dispatcher(hba,
6727 ERR_INJECT_INTR, intr_status, &intr_status);
6728
6729 ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
6730
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306731 hba->errors = UFSHCD_ERROR_MASK & intr_status;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006732 if (hba->errors || hba->ce_error)
Subhash Jadavani9c807702017-04-01 00:35:51 -07006733 retval |= ufshcd_check_errors(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306734
Seungwon Jeon53b3d9c2013-08-31 21:40:22 +05306735 if (intr_status & UFSHCD_UIC_MASK)
Subhash Jadavani9c807702017-04-01 00:35:51 -07006736 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306737
6738 if (intr_status & UTP_TASK_REQ_COMPL)
Subhash Jadavani9c807702017-04-01 00:35:51 -07006739 retval |= ufshcd_tmc_handler(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306740
6741 if (intr_status & UTP_TRANSFER_REQ_COMPL)
Subhash Jadavani9c807702017-04-01 00:35:51 -07006742 retval |= ufshcd_transfer_req_compl(hba);
6743
6744 return retval;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306745}
6746
6747/**
6748 * ufshcd_intr - Main interrupt service routine
6749 * @irq: irq number
6750 * @__hba: pointer to adapter instance
6751 *
Subhash Jadavani9c807702017-04-01 00:35:51 -07006752 * Returns
6753 * IRQ_HANDLED - If interrupt is valid
6754 * IRQ_NONE - If invalid interrupt
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306755 */
6756static irqreturn_t ufshcd_intr(int irq, void *__hba)
6757{
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02006758 u32 intr_status, enabled_intr_status;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306759 irqreturn_t retval = IRQ_NONE;
6760 struct ufs_hba *hba = __hba;
Subhash Jadavani9c807702017-04-01 00:35:51 -07006761 int retries = hba->nutrs;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306762
6763 spin_lock(hba->host->host_lock);
Seungwon Jeonb873a2752013-06-26 22:39:26 +05306764 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
Asutosh Das3da913a2017-03-24 10:32:16 +05306765 hba->ufs_stats.last_intr_status = intr_status;
6766 hba->ufs_stats.last_intr_ts = ktime_get();
Subhash Jadavani9c807702017-04-01 00:35:51 -07006767 /*
6768 * There could be max of hba->nutrs reqs in flight and in worst case
6769 * if the reqs get finished 1 by 1 after the interrupt status is
6770 * read, make sure we handle them by checking the interrupt status
6771 * again in a loop until we process all of the reqs before returning.
6772 */
6773 do {
6774 enabled_intr_status =
6775 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6776 if (intr_status)
6777 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6778 if (enabled_intr_status)
6779 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
Yaniv Gardid75f7fe2016-02-01 15:02:47 +02006780
Subhash Jadavani9c807702017-04-01 00:35:51 -07006781 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6782 } while (intr_status && --retries);
6783
6784 if (retval == IRQ_NONE) {
6785 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6786 __func__, intr_status);
6787 ufshcd_hex_dump(hba, "host regs: ", hba->mmio_base,
6788 UFSHCI_REG_SPACE_SIZE);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306789 }
Subhash Jadavani9c807702017-04-01 00:35:51 -07006790
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306791 spin_unlock(hba->host->host_lock);
6792 return retval;
6793}
6794
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306795static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6796{
6797 int err = 0;
6798 u32 mask = 1 << tag;
6799 unsigned long flags;
6800
6801 if (!test_bit(tag, &hba->outstanding_tasks))
6802 goto out;
6803
6804 spin_lock_irqsave(hba->host->host_lock, flags);
6805 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
6806 spin_unlock_irqrestore(hba->host->host_lock, flags);
6807
6808 /* poll for max. 1 sec to clear door bell register by h/w */
6809 err = ufshcd_wait_for_register(hba,
6810 REG_UTP_TASK_REQ_DOOR_BELL,
Yaniv Gardi596585a2016-03-10 17:37:08 +02006811 mask, 0, 1000, 1000, true);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306812out:
6813 return err;
6814}
6815
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306816/**
6817 * ufshcd_issue_tm_cmd - issues task management commands to controller
6818 * @hba: per adapter instance
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306819 * @lun_id: LUN ID to which TM command is sent
6820 * @task_id: task ID to which the TM command is applicable
6821 * @tm_function: task management function opcode
6822 * @tm_response: task management service response return value
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306823 *
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306824 * Returns non-zero value on error, zero on success.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306825 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306826static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6827 u8 tm_function, u8 *tm_response)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306828{
6829 struct utp_task_req_desc *task_req_descp;
6830 struct utp_upiu_task_req *task_req_upiup;
6831 struct Scsi_Host *host;
6832 unsigned long flags;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306833 int free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306834 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306835 int task_tag;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306836
6837 host = hba->host;
6838
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306839 /*
6840 * Get free slot, sleep if slots are unavailable.
6841 * Even though we use wait_event() which sleeps indefinitely,
6842 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6843 */
6844 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
Asutosh Das3da913a2017-03-24 10:32:16 +05306845 hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006846 ufshcd_hold_all(hba);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306847
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306848 spin_lock_irqsave(host->host_lock, flags);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306849 task_req_descp = hba->utmrdl_base_addr;
6850 task_req_descp += free_slot;
6851
6852 /* Configure task request descriptor */
6853 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6854 task_req_descp->header.dword_2 =
6855 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6856
6857 /* Configure task request UPIU */
6858 task_req_upiup =
6859 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306860 task_tag = hba->nutrs + free_slot;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306861 task_req_upiup->header.dword_0 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306862 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306863 lun_id, task_tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306864 task_req_upiup->header.dword_1 =
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05306865 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
Subhash Jadavani0ce147d2014-09-25 15:32:29 +03006866 /*
6867 * The host shall provide the same value for LUN field in the basic
6868 * header and for Input Parameter.
6869 */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306870 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
6871 task_req_upiup->input_param2 = cpu_to_be32(task_id);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306872
6873 /* send command to the controller */
6874 __set_bit(free_slot, &hba->outstanding_tasks);
Yaniv Gardi897efe62016-02-01 15:02:48 +02006875
6876 /* Make sure descriptors are ready before ringing the task doorbell */
6877 wmb();
6878
Seungwon Jeonb873a2752013-06-26 22:39:26 +05306879 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006880 /* Make sure that doorbell is committed immediately */
6881 wmb();
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306882
6883 spin_unlock_irqrestore(host->host_lock, flags);
6884
6885 /* wait until the task management command is completed */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306886 err = wait_event_timeout(hba->tm_wq,
6887 test_bit(free_slot, &hba->tm_condition),
6888 msecs_to_jiffies(TM_CMD_TIMEOUT));
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306889 if (!err) {
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306890 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6891 __func__, tm_function);
6892 if (ufshcd_clear_tm_cmd(hba, free_slot))
6893 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6894 __func__, free_slot);
6895 err = -ETIMEDOUT;
6896 } else {
6897 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306898 }
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306899
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306900 clear_bit(free_slot, &hba->tm_condition);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306901 ufshcd_put_tm_slot(hba, free_slot);
6902 wake_up(&hba->tm_tag_wq);
Asutosh Das3da913a2017-03-24 10:32:16 +05306903 hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306904
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006905 ufshcd_release_all(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306906 return err;
6907}
6908
6909/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306910 * ufshcd_eh_device_reset_handler - device reset handler registered to
6911 * scsi layer.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306912 * @cmd: SCSI command pointer
6913 *
6914 * Returns SUCCESS/FAILED
6915 */
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306916static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306917{
6918 struct Scsi_Host *host;
6919 struct ufs_hba *hba;
6920 unsigned int tag;
6921 u32 pos;
6922 int err;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306923 u8 resp = 0xF;
6924 struct ufshcd_lrb *lrbp;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306925 unsigned long flags;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306926
6927 host = cmd->device->host;
6928 hba = shost_priv(host);
6929 tag = cmd->request->tag;
6930
Can Guof6411eb2017-06-09 15:17:22 +08006931 ufshcd_print_cmd_log(hba);
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306932 lrbp = &hba->lrb[tag];
6933 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6934 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306935 if (!err)
6936 err = resp;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306937 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306938 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306939
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306940 /* clear the commands that were pending for corresponding LUN */
6941 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6942 if (hba->lrb[pos].lun == lrbp->lun) {
6943 err = ufshcd_clear_cmd(hba, pos);
6944 if (err)
6945 break;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306946 }
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306947 }
6948 spin_lock_irqsave(host->host_lock, flags);
6949 ufshcd_transfer_req_compl(hba);
6950 spin_unlock_irqrestore(host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006951
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306952out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006953 hba->req_abort_count = 0;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05306954 if (!err) {
6955 err = SUCCESS;
6956 } else {
6957 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6958 err = FAILED;
6959 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306960 return err;
6961}
6962
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07006963static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6964{
6965 struct ufshcd_lrb *lrbp;
6966 int tag;
6967
6968 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6969 lrbp = &hba->lrb[tag];
6970 lrbp->req_abort_skip = true;
6971 }
6972}
6973
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306974/**
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306975 * ufshcd_abort - abort a specific command
6976 * @cmd: SCSI command pointer
6977 *
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306978 * Abort the pending command in the device by sending a UFS_ABORT_TASK task
6979 * management command, and in the host controller by clearing the door-bell
6980 * register. There can be a race between the controller sending the command
6981 * to the device and the abort being issued. To avoid that, first issue
6982 * UFS_QUERY_TASK to check if the command was really dispatched, then abort it.
6983 *
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306984 * Returns SUCCESS/FAILED
6985 */
6986static int ufshcd_abort(struct scsi_cmnd *cmd)
6987{
6988 struct Scsi_Host *host;
6989 struct ufs_hba *hba;
6990 unsigned long flags;
6991 unsigned int tag;
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05306992 int err = 0;
6993 int poll_cnt;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05306994 u8 resp = 0xF;
6995 struct ufshcd_lrb *lrbp;
Dolev Ravive9d501b2014-07-01 12:22:37 +03006996 u32 reg;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05306997
6998 host = cmd->device->host;
6999 hba = shost_priv(host);
7000 tag = cmd->request->tag;
Yaniv Gardi14497322016-02-01 15:02:39 +02007001 if (!ufshcd_valid_tag(hba, tag)) {
7002 dev_err(hba->dev,
7003 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
7004 __func__, tag, cmd, cmd->request);
7005 BUG();
7006 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307007
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007008 lrbp = &hba->lrb[tag];
7009
7010 ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
7011
7012 /*
7013 * A task abort sent to the device W-LUN is illegal. Such a command
7014 * would fail due to the spec violation, and the next scsi error
7015 * handling step would be an LU reset which, again, is a spec violation.
7016 * To avoid these unnecessary/illegal steps we skip straight to the last
7017 * error handling stage: reset and restore.
7018 */
7019 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
7020 return ufshcd_eh_host_reset_handler(cmd);
7021
7022 ufshcd_hold_all(hba);
Dolev Ravive9d501b2014-07-01 12:22:37 +03007023 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
Yaniv Gardi14497322016-02-01 15:02:39 +02007024 /* If command is already aborted/completed, return SUCCESS */
7025 if (!(test_bit(tag, &hba->outstanding_reqs))) {
7026 dev_err(hba->dev,
7027 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7028 __func__, tag, hba->outstanding_reqs, reg);
7029 goto out;
7030 }
7031
Dolev Ravive9d501b2014-07-01 12:22:37 +03007032 if (!(reg & (1 << tag))) {
7033 dev_err(hba->dev,
7034 "%s: cmd was completed, but without a notifying intr, tag = %d",
7035 __func__, tag);
7036 }
7037
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007038 /* Print Transfer Request of aborted task */
7039 dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
7040
7041 /*
7042 * Print detailed info about aborted request.
7043 * As more than one request might get aborted at the same time,
7044 * print full information only for the first aborted request in order
7045 * to reduce repeated printouts. For other aborted requests only print
7046 * basic details.
7047 */
7048 scsi_print_command(cmd);
7049 if (!hba->req_abort_count) {
Sayali Lokhande501c5bb2017-11-15 15:54:33 +05307050 ufshcd_print_fsm_state(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007051 ufshcd_print_host_regs(hba);
7052 ufshcd_print_host_state(hba);
7053 ufshcd_print_pwr_info(hba);
7054 ufshcd_print_trs(hba, 1 << tag, true);
7055 } else {
7056 ufshcd_print_trs(hba, 1 << tag, false);
7057 }
7058 hba->req_abort_count++;
7059
7061 /* Skip task abort in case previous aborts failed and report failure */
7062 if (lrbp->req_abort_skip) {
7063 err = -EIO;
7064 goto out;
7065 }
7066
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307067 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7068 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7069 UFS_QUERY_TASK, &resp);
7070 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7071 /* cmd pending in the device */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007072 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
7073 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307074 break;
7075 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307076 /*
7077 * cmd not pending in the device, check if it is
7078 * in transition.
7079 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007080 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
7081 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307082 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7083 if (reg & (1 << tag)) {
7084 /* sleep for max. 200us to stabilize */
7085 usleep_range(100, 200);
7086 continue;
7087 }
7088 /* command completed already */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007089 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
7090 __func__, tag);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307091 goto out;
7092 } else {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007093 dev_err(hba->dev,
7094 "%s: no response from device. tag = %d, err %d",
7095 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307096 if (!err)
7097 err = resp; /* service response error */
7098 goto out;
7099 }
7100 }
7101
7102 if (!poll_cnt) {
7103 err = -EBUSY;
7104 goto out;
7105 }
7106
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05307107 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7108 UFS_ABORT_TASK, &resp);
7109 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007110 if (!err) {
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307111 err = resp; /* service response error */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007112 dev_err(hba->dev, "%s: issued. tag = %d, err %d",
7113 __func__, tag, err);
7114 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307115 goto out;
Sujit Reddy Thummae2933132014-05-26 10:59:12 +05307116 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307117
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307118 err = ufshcd_clear_cmd(hba, tag);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007119 if (err) {
7120 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
7121 __func__, tag, err);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307122 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007123 }
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307124
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307125 scsi_dma_unmap(cmd);
7126
7127 spin_lock_irqsave(host->host_lock, flags);
Yaniv Gardia48353f2016-02-01 15:02:40 +02007128 ufshcd_outstanding_req_clear(hba, tag);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307129 hba->lrb[tag].cmd = NULL;
7130 spin_unlock_irqrestore(host->host_lock, flags);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307131
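	/* the aborted tag is free again; wake up waiters for a device command tag */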
7132 clear_bit_unlock(tag, &hba->lrb_in_use);
7133 wake_up(&hba->dev_cmd.tag_wq);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007134
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307135out:
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307136 if (!err) {
7137 err = SUCCESS;
7138 } else {
7139 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007140 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
Sujit Reddy Thummaf20810d2014-05-26 10:59:13 +05307141 err = FAILED;
7142 }
7143
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007144 /*
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007145 * This ufshcd_release_all() corresponds to the original scsi cmd that
7146 * got aborted here (as we won't get any IRQ for it).
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03007147 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007148 ufshcd_release_all(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05307149 return err;
7150}
7151
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307152/**
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307153 * ufshcd_host_reset_and_restore - reset and restore host controller
7154 * @hba: per-adapter instance
7155 *
7156 * Note that host controller reset may issue DME_RESET to
7157 * local and remote (device) Uni-Pro stack and the attributes
7158 * are reset to default state.
7159 *
7160 * Returns zero on success, non-zero on failure
7161 */
7162static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7163{
7164 int err;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307165 unsigned long flags;
7166
7167 /* Reset the host controller */
7168 spin_lock_irqsave(hba->host->host_lock, flags);
Yaniv Gardi596585a2016-03-10 17:37:08 +02007169 ufshcd_hba_stop(hba, false);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307170 spin_unlock_irqrestore(hba->host->host_lock, flags);
7171
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007172 /* scale up clocks to max frequency before full reinitialization */
7173 ufshcd_set_clk_freq(hba, true);
7174
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307175 err = ufshcd_hba_enable(hba);
7176 if (err)
7177 goto out;
7178
7179 /* Establish the link again and restore the device */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007180 err = ufshcd_probe_hba(hba);
7181
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007182 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307183 err = -EIO;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007184 goto out;
7185 }
7186
7187 if (!err) {
7188 err = ufshcd_vops_crypto_engine_reset(hba);
7189 if (err) {
7190 dev_err(hba->dev,
7191 "%s: failed to reset crypto engine %d\n",
7192 __func__, err);
7193 goto out;
7194 }
7195 }
7196
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307197out:
7198 if (err)
7199 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7200
7201 return err;
7202}
7203
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07007204static int ufshcd_detect_device(struct ufs_hba *hba)
7205{
7206 int err = 0;
7207
7208 err = ufshcd_vops_full_reset(hba);
7209 if (err)
7210 dev_warn(hba->dev, "%s: full reset returned %d\n",
7211 __func__, err);
7212
7213 err = ufshcd_reset_device(hba);
7214 if (err)
7215 dev_warn(hba->dev, "%s: device reset failed. err %d\n",
7216 __func__, err);
7217
7218 return ufshcd_host_reset_and_restore(hba);
7219}
7220
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307221/**
7222 * ufshcd_reset_and_restore - reset and re-initialize host/device
7223 * @hba: per-adapter instance
7224 *
7225 * Reset and recover device, host and re-establish link. This
7226 * is helpful to recover the communication in fatal error conditions.
7227 *
7228 * Returns zero on success, non-zero on failure
7229 */
7230static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7231{
7232 int err = 0;
7233 unsigned long flags;
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007234 int retries = MAX_HOST_RESET_RETRIES;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307235
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007236 do {
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07007237 err = ufshcd_detect_device(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007238 } while (err && --retries);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307239
7240 /*
Sayali Lokhande8d7652782017-09-15 12:08:17 +05307241 * There is no point in proceeding if recovery has failed
7242 * even after multiple retries.
7243 */
7244 if (err && ufshcd_is_embedded_dev(hba))
7245 BUG();
7246 /*
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307247 * After reset the door-bell might be cleared, complete
7248 * outstanding requests in s/w here.
7249 */
7250 spin_lock_irqsave(hba->host->host_lock, flags);
7251 ufshcd_transfer_req_compl(hba);
7252 ufshcd_tmc_handler(hba);
7253 spin_unlock_irqrestore(hba->host->host_lock, flags);
7254
7255 return err;
7256}
7257
7258/**
7259 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7260 * @cmd - SCSI command pointer
7261 *
7262 * Returns SUCCESS/FAILED
7263 */
7264static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7265{
Subhash Jadavani9c807702017-04-01 00:35:51 -07007266 int err = SUCCESS;
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307267 unsigned long flags;
7268 struct ufs_hba *hba;
7269
7270 hba = shost_priv(cmd->device->host);
7271
7272 /*
7273 * Check if there is any race with fatal error handling.
7274 * If so, wait for it to complete. Even though fatal error
7275 * handling does reset and restore in some cases, don't assume
7276 * anything out of it. We are just avoiding race here.
7277 */
7278 do {
7279 spin_lock_irqsave(hba->host->host_lock, flags);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05307280 if (!(work_pending(&hba->eh_work) ||
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307281 hba->ufshcd_state == UFSHCD_STATE_RESET))
7282 break;
7283 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavani9c807702017-04-01 00:35:51 -07007284 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +05307285 flush_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307286 } while (1);
7287
Subhash Jadavani9c807702017-04-01 00:35:51 -07007288 /*
7289 * we don't know if the previous reset had really reset the host
7290 * controller or not. So let's force a reset here to be sure.
7291 */
7292 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7293 hba->force_host_reset = true;
7294 schedule_work(&hba->eh_work);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307295
Subhash Jadavani9c807702017-04-01 00:35:51 -07007296 /* wait for the reset work to finish */
7297 do {
7298 if (!(work_pending(&hba->eh_work) ||
7299 hba->ufshcd_state == UFSHCD_STATE_RESET))
7300 break;
7301 spin_unlock_irqrestore(hba->host->host_lock, flags);
7302 dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
7303 flush_work(&hba->eh_work);
7304 spin_lock_irqsave(hba->host->host_lock, flags);
7305 } while (1);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307306
Subhash Jadavani9c807702017-04-01 00:35:51 -07007307 if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
7308 ufshcd_is_link_active(hba))) {
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307309 err = FAILED;
7310 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7311 }
Subhash Jadavani9c807702017-04-01 00:35:51 -07007312
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307313 spin_unlock_irqrestore(hba->host->host_lock, flags);
7314
7315 return err;
7316}
7317
7318/**
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007319 * ufshcd_get_max_icc_level - calculate the ICC level
7320 * @sup_curr_uA: max. current supported by the regulator
7321 * @start_scan: row at the desc table to start scan from
7322 * @buff: power descriptor buffer
7323 *
7324 * Returns calculated max ICC level for specific regulator
7325 */
7326static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7327{
7328 int i;
7329 int curr_uA;
7330 u16 data;
7331 u16 unit;
7332
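	/*
	 * Each ICC level entry in the power descriptor is a big-endian 16-bit
	 * {unit, value} pair. Scan from the highest level downwards and pick
	 * the first level whose current, normalized to uA, fits within the
	 * regulator limit.
	 */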
7333 for (i = start_scan; i >= 0; i--) {
7334 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
7335 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7336 ATTR_ICC_LVL_UNIT_OFFSET;
7337 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7338 switch (unit) {
7339 case UFSHCD_NANO_AMP:
7340 curr_uA = curr_uA / 1000;
7341 break;
7342 case UFSHCD_MILI_AMP:
7343 curr_uA = curr_uA * 1000;
7344 break;
7345 case UFSHCD_AMP:
7346 curr_uA = curr_uA * 1000 * 1000;
7347 break;
7348 case UFSHCD_MICRO_AMP:
7349 default:
7350 break;
7351 }
7352 if (sup_curr_uA >= curr_uA)
7353 break;
7354 }
7355 if (i < 0) {
7356 i = 0;
7357 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7358 }
7359
7360 return (u32)i;
7361}
7362
7363/**
Subhash Jadavania8d1ba32016-12-12 18:19:21 -08007364 * ufshcd_find_max_sup_active_icc_level - find the max ICC level
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007365 * In case regulators are not initialized we'll return 0
7366 * @hba: per-adapter instance
7367 * @desc_buf: power descriptor buffer to extract ICC levels from.
7368 * @len: length of desc_buff
7369 *
Subhash Jadavania8d1ba32016-12-12 18:19:21 -08007370 * Returns calculated max ICC level
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007371 */
7372static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7373 u8 *desc_buf, int len)
7374{
7375 u32 icc_level = 0;
7376
Subhash Jadavania8d1ba32016-12-12 18:19:21 -08007377 /*
7378 * VCCQ rail is optional for removable UFS card and also most of the
7379 * vendors don't use this rail for embedded UFS devices as well. So
7380 * it is normal that VCCQ rail may not be provided for given platform.
7381 */
7382 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq2) {
7383 dev_err(hba->dev, "%s: Regulator capability was not set, bActiveICCLevel=%d\n",
7384 __func__, icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007385 goto out;
7386 }
7387
7388 if (hba->vreg_info.vcc)
7389 icc_level = ufshcd_get_max_icc_level(
7390 hba->vreg_info.vcc->max_uA,
7391 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7392 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7393
7394 if (hba->vreg_info.vccq)
7395 icc_level = ufshcd_get_max_icc_level(
7396 hba->vreg_info.vccq->max_uA,
7397 icc_level,
7398 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7399
7400 if (hba->vreg_info.vccq2)
7401 icc_level = ufshcd_get_max_icc_level(
7402 hba->vreg_info.vccq2->max_uA,
7403 icc_level,
7404 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7405out:
7406 return icc_level;
7407}
7408
Subhash Jadavani8a93dbd2016-12-12 17:59:44 -08007409static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007410{
7411 int ret;
Michal' Potomski833ea2a2017-05-31 15:25:11 +05307412 int buff_len = hba->desc_size.pwr_desc;
7413 u8 *desc_buf = NULL;
Subhash Jadavani35732e52016-12-09 16:09:42 -08007414 u32 icc_level;
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007415
Michal' Potomski833ea2a2017-05-31 15:25:11 +05307416 if (buff_len) {
7417 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7418 if (!desc_buf) {
7419 dev_err(hba->dev,
7420 "%s: Failed to allocate desc_buf\n", __func__);
7421 return;
7422 }
7423 }
7424
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007425 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
7426 if (ret) {
7427 dev_err(hba->dev,
7428 "%s: Failed reading power descriptor.len = %d ret = %d",
7429 __func__, buff_len, ret);
7430 return;
7431 }
7432
Subhash Jadavani35732e52016-12-09 16:09:42 -08007433 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7434 buff_len);
7435 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007436
Yaniv Gardi5e86ae42016-02-01 15:02:50 +02007437 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
Subhash Jadavani35732e52016-12-09 16:09:42 -08007438 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007439
7440 if (ret)
7441 dev_err(hba->dev,
7442 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
Subhash Jadavani35732e52016-12-09 16:09:42 -08007443 __func__, icc_level, ret);
out_free:
	kfree(desc_buf);
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007444}
7445
7446/**
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007447 * ufshcd_scsi_add_wlus - Adds required W-LUs
7448 * @hba: per-adapter instance
7449 *
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007450 * UFS devices can support up to 4 well known logical units:
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007451 * "REPORT_LUNS" (address: 01h)
7452 * "UFS Device" (address: 50h)
7453 * "RPMB" (address: 44h)
7454 * "BOOT" (address: 30h)
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007455 *
7456 * "REPORT_LUNS" & "UFS Device" are mandatory for all device classes (see
7457 * "bDeviceSubClass" parameter of device descriptor) while "BOOT" is supported
7458 * only for bootable devices. "RPMB" is only supported with embedded devices.
7459 *
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007460 * UFS device's power management needs to be controlled by "POWER CONDITION"
7461 * field of SSU (START STOP UNIT) command. But this "power condition" field
7462 * will take effect only when it's sent to the "UFS device" well known logical unit
7463 * hence we require the scsi_device instance to represent this logical unit in
7464 * order for the UFS host driver to send the SSU command for power management.
7465 *
7466 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7467 * Block) LU so user space process can control this LU. User space may also
7468 * want to have access to BOOT LU.
7469 *
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007470 * This function tries to add scsi device instances for all the well known
7471 * LUs (except the "REPORT LUNS" LU), depending on the device class.
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007472 *
7473 * Returns zero on success (all required W-LUs are added successfully),
7474 * non-zero error value on failure (if failed to add any of the required W-LU).
7475 */
7476static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7477{
7478 int ret = 0;
Channagoud Kadabi075db3b2017-03-16 14:26:17 -07007479 struct scsi_device *sdev_rpmb = NULL;
7480 struct scsi_device *sdev_boot = NULL;
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007481 bool is_bootable_dev = false;
7482 bool is_embedded_dev = false;
7483
7484 if ((hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_BOOTABLE) ||
7485 (hba->dev_info.b_device_sub_class == UFS_DEV_REMOVABLE_BOOTABLE))
7486 is_bootable_dev = true;
7487
7488 if ((hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_BOOTABLE) ||
7489 (hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_NON_BOOTABLE))
7490 is_embedded_dev = true;
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007491
7492 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7493 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7494 if (IS_ERR(hba->sdev_ufs_device)) {
7495 ret = PTR_ERR(hba->sdev_ufs_device);
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007496 dev_err(hba->dev, "%s: failed adding DEVICE_WLUN. ret %d\n",
7497 __func__, ret);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007498 hba->sdev_ufs_device = NULL;
7499 goto out;
7500 }
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03007501 scsi_device_put(hba->sdev_ufs_device);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007502
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007503 if (is_bootable_dev) {
7504 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7505 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN),
7506 NULL);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007507
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007508 if (IS_ERR(sdev_boot)) {
7509 dev_err(hba->dev, "%s: failed adding BOOT_WLUN. ret %d\n",
7510 __func__, ret);
7511 ret = PTR_ERR(sdev_boot);
7512 goto remove_sdev_ufs_device;
7513 }
7514 scsi_device_put(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007515 }
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007516
7517 if (is_embedded_dev) {
7518 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7519 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN),
7520 NULL);
7521 if (IS_ERR(sdev_rpmb)) {
7522 dev_err(hba->dev, "%s: failed adding RPMB_WLUN. ret %d\n",
7523 __func__, ret);
7524 ret = PTR_ERR(sdev_rpmb);
7525 goto remove_sdev_boot;
7526 }
7527 scsi_device_put(sdev_rpmb);
7528 }
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007529 goto out;
7530
7531remove_sdev_boot:
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007532 if (is_bootable_dev)
7533 scsi_remove_device(sdev_boot);
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007534remove_sdev_ufs_device:
7535 scsi_remove_device(hba->sdev_ufs_device);
7536out:
7537 return ret;
7538}
7539
7540/**
Yaniv Gardi37113102016-03-10 17:37:16 +02007541 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7542 * @hba: per-adapter instance
7543 *
7544 * PA_TActivate parameter can be tuned manually if UniPro version is less than
7545 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7546 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7547 * the hibern8 exit latency.
7548 *
7549 * Returns zero on success, non-zero error value on failure.
7550 */
7551static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7552{
7553 int ret = 0;
7554 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7555
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007556 if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
7557 return 0;
7558
Yaniv Gardi37113102016-03-10 17:37:16 +02007559 ret = ufshcd_dme_peer_get(hba,
7560 UIC_ARG_MIB_SEL(
7561 RX_MIN_ACTIVATETIME_CAPABILITY,
7562 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7563 &peer_rx_min_activatetime);
7564 if (ret)
7565 goto out;
7566
7567 /* make sure proper unit conversion is applied */
7568 tuned_pa_tactivate =
7569 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7570 / PA_TACTIVATE_TIME_UNIT_US);
7571 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7572 tuned_pa_tactivate);
7573
7574out:
7575 return ret;
7576}
7577
7578/**
7579 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7580 * @hba: per-adapter instance
7581 *
7582 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7583 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7584 * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7585 * This optimal value can help reduce the hibern8 exit latency.
7586 *
7587 * Returns zero on success, non-zero error value on failure.
7588 */
7589static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7590{
7591 int ret = 0;
7592 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7593 u32 max_hibern8_time, tuned_pa_hibern8time;
7594
7595 ret = ufshcd_dme_get(hba,
7596 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7597 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7598 &local_tx_hibern8_time_cap);
7599 if (ret)
7600 goto out;
7601
7602 ret = ufshcd_dme_peer_get(hba,
7603 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7604 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7605 &peer_rx_hibern8_time_cap);
7606 if (ret)
7607 goto out;
7608
7609 max_hibern8_time = max(local_tx_hibern8_time_cap,
7610 peer_rx_hibern8_time_cap);
7611 /* make sure proper unit conversion is applied */
7612 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7613 / PA_HIBERN8_TIME_UNIT_US);
7614 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7615 tuned_pa_hibern8time);
7616out:
7617 return ret;
7618}
7619
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007620/**
7621 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7622 * less than device PA_TACTIVATE time.
7623 * @hba: per-adapter instance
7624 *
7625 * Some UFS devices require host PA_TACTIVATE to be lower than device
7626 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7627 * for such devices.
7628 *
7629 * Returns zero on success, non-zero error value on failure.
7630 */
7631static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7632{
7633 int ret = 0;
7634 u32 granularity, peer_granularity;
7635 u32 pa_tactivate, peer_pa_tactivate;
7636 u32 pa_tactivate_us, peer_pa_tactivate_us;
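	/* microseconds per PA_TACTIVATE step for PA_GRANULARITY values 1..6 */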
7637 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7638
7639 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7640 &granularity);
7641 if (ret)
7642 goto out;
7643
7644 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7645 &peer_granularity);
7646 if (ret)
7647 goto out;
7648
7649 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7650 (granularity > PA_GRANULARITY_MAX_VAL)) {
7651 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7652 __func__, granularity);
7653 return -EINVAL;
7654 }
7655
7656 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7657 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7658 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7659 __func__, peer_granularity);
7660 return -EINVAL;
7661 }
7662
7663 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7664 if (ret)
7665 goto out;
7666
7667 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7668 &peer_pa_tactivate);
7669 if (ret)
7670 goto out;
7671
7672 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7673 peer_pa_tactivate_us = peer_pa_tactivate *
7674 gran_to_us_table[peer_granularity - 1];
7675
7676 if (pa_tactivate_us > peer_pa_tactivate_us) {
7677 u32 new_peer_pa_tactivate;
7678
7679 new_peer_pa_tactivate = pa_tactivate_us /
7680 gran_to_us_table[peer_granularity - 1];
7681 new_peer_pa_tactivate++;
7682 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7683 new_peer_pa_tactivate);
7684 }
7685
7686out:
7687 return ret;
7688}
7689
Yaniv Gardi37113102016-03-10 17:37:16 +02007690static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7691{
7692 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7693 ufshcd_tune_pa_tactivate(hba);
7694 ufshcd_tune_pa_hibern8time(hba);
7695 }
7696
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08007697 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
Yaniv Gardi37113102016-03-10 17:37:16 +02007698 /* set 1ms timeout for PA_TACTIVATE */
7699 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007700
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08007701 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007702 ufshcd_quirk_tune_host_pa_tactivate(hba);
7703
7704 ufshcd_vops_apply_dev_quirks(hba);
7705}
7706
7707static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7708{
7709 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
7710
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007711 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
7712 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
7713 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
7714 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
7715 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
7716
7717 hba->req_abort_count = 0;
7718}
7719
7720static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
7721{
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08007722 if (hba->dev_info.quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007723 if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
7724 UIC_LINK_OFF_STATE) {
7725 hba->rpm_lvl =
7726 ufs_get_desired_pm_lvl_for_dev_link_state(
7727 UFS_SLEEP_PWR_MODE,
7728 UIC_LINK_HIBERN8_STATE);
7729 dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
7730 hba->rpm_lvl);
7731 }
7732 if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7733 UIC_LINK_OFF_STATE) {
7734 hba->spm_lvl =
7735 ufs_get_desired_pm_lvl_for_dev_link_state(
7736 UFS_SLEEP_PWR_MODE,
7737 UIC_LINK_HIBERN8_STATE);
7738 dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
7739 hba->spm_lvl);
7740 }
7741 }
Yaniv Gardi37113102016-03-10 17:37:16 +02007742}
7743
7744/**
Subhash Jadavani88f99992016-12-13 15:52:21 -08007745 * ufshcd_set_dev_ref_clk - set the device bRefClkFreq
7746 * @hba: per-adapter instance
7747 *
7748 * Read the current value of the bRefClkFreq attribute from the device and
7749 * update it if the host is supplying a different reference clock frequency
7750 * than the one mentioned in the bRefClkFreq attribute.
7751 *
7752 * Returns zero on success, non-zero error value on failure.
7753 */
7754static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7755{
7756 int err = 0;
7757 int ref_clk = -1;
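	/* indexed by the bRefClkFreq attribute value (0..REF_CLK_FREQ_52_MHZ) */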
7758 static const char * const ref_clk_freqs[] = {"19.2 MHz", "26 MHz",
7759 "38.4 MHz", "52 MHz"};
7760
7761 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7762 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7763
7764 if (err) {
7765 dev_err(hba->dev, "%s: failed reading bRefClkFreq. err = %d\n",
7766 __func__, err);
7767 goto out;
7768 }
7769
7770 if ((ref_clk < 0) || (ref_clk > REF_CLK_FREQ_52_MHZ)) {
7771 dev_err(hba->dev, "%s: invalid ref_clk setting = %d\n",
7772 __func__, ref_clk);
7773 err = -EINVAL;
7774 goto out;
7775 }
7776
7777 if (ref_clk == hba->dev_ref_clk_freq)
7778 goto out; /* nothing to update */
7779
7780 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7781 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0,
7782 &hba->dev_ref_clk_freq);
7783
7784 if (err)
7785 dev_err(hba->dev, "%s: bRefClkFreq setting to %s failed\n",
7786 __func__, ref_clk_freqs[hba->dev_ref_clk_freq]);
7787 else
7788 /*
7789 * It is good to print this out here to debug any later failures
7790 * related to gear switch.
7791 */
7792 dev_info(hba->dev, "%s: bRefClkFreq setting to %s succeeded\n",
7793 __func__, ref_clk_freqs[hba->dev_ref_clk_freq]);
7794
7795out:
7796 return err;
7797}
7798
Subhash Jadavani344c16c2016-12-15 17:09:35 -08007799static int ufs_read_device_desc_data(struct ufs_hba *hba)
7800{
7801 int err;
Michal' Potomski833ea2a2017-05-31 15:25:11 +05307802 u8 *desc_buf = NULL;
Subhash Jadavani344c16c2016-12-15 17:09:35 -08007803
Michal' Potomski833ea2a2017-05-31 15:25:11 +05307804 if (hba->desc_size.dev_desc) {
7805 desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL);
7806 if (!desc_buf) {
7807 err = -ENOMEM;
7808 dev_err(hba->dev,
7809 "%s: Failed to allocate desc_buf\n", __func__);
7810 return err;
7811 }
7812 }
7813 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
Subhash Jadavani344c16c2016-12-15 17:09:35 -08007814 if (err)
7815 goto out;
7816
7817 /*
7818 * cache the manufacturer ID (stored in big endian format), device
7819 * sub-class and product name index from the device descriptor
7820 */
7821 hba->dev_info.w_manufacturer_id =
7822 desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7823 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7824 hba->dev_info.b_device_sub_class =
7825 desc_buf[DEVICE_DESC_PARAM_DEVICE_SUB_CLASS];
7826 hba->dev_info.i_product_name = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7827
7828out:
	kfree(desc_buf);
	return err;
7829}
7830
Michal' Potomski833ea2a2017-05-31 15:25:11 +05307831static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
7832{
7833 int err;
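	/* query each descriptor length; fall back to the spec default size on failure */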
7834
7835 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
7836 &hba->desc_size.dev_desc);
7837 if (err)
7838 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7839
7840 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
7841 &hba->desc_size.pwr_desc);
7842 if (err)
7843 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7844
7845 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
7846 &hba->desc_size.interc_desc);
7847 if (err)
7848 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7849
7850 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
7851 &hba->desc_size.conf_desc);
7852 if (err)
7853 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7854
7855 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
7856 &hba->desc_size.unit_desc);
7857 if (err)
7858 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
7859
7860 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7861 &hba->desc_size.geom_desc);
7862 if (err)
7863 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
7864}
7865
7866static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
7867{
7868 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7869 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7870 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7871 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7872 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
7873 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
7874}
7875
Subhash Jadavani88f99992016-12-13 15:52:21 -08007876/**
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007877 * ufshcd_probe_hba - probe hba to detect device and initialize
7878 * @hba: per-adapter instance
7879 *
7880 * Execute link-startup and verify device initialization
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307881 */
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03007882static int ufshcd_probe_hba(struct ufs_hba *hba)
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307883{
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307884 int ret;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007885 ktime_t start = ktime_get();
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05307886
7887 ret = ufshcd_link_startup(hba);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307888 if (ret)
7889 goto out;
7890
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007891 /* Debug counters initialization */
7892 ufshcd_clear_dbg_ufs_stats(hba);
Yaniv Gardiafdfff52016-03-10 17:37:15 +02007893 /* set the default level for urgent bkops */
7894 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
7895 hba->is_urgent_bkops_lvl_checked = false;
7896
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007897 /* UniPro link is active now */
7898 ufshcd_set_link_active(hba);
Seungwon Jeond3e89ba2013-08-31 21:40:24 +05307899
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05307900 ret = ufshcd_verify_dev_init(hba);
7901 if (ret)
7902 goto out;
7903
Dolev Raviv68078d52013-07-30 00:35:58 +05307904 ret = ufshcd_complete_dev_init(hba);
7905 if (ret)
7906 goto out;
7907
Subhash Jadavani2df121a2016-12-15 18:27:31 -08007908 /* clear any previous UFS device information */
7909 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
7910
Subhash Jadavani344c16c2016-12-15 17:09:35 -08007911 /* cache important parameters from device descriptor for later use */
7912 ret = ufs_read_device_desc_data(hba);
7913 if (ret)
7914 goto out;
7915
Michal' Potomski833ea2a2017-05-31 15:25:11 +05307916 /* Init check for device descriptor sizes */
7917 ufshcd_init_desc_sizes(hba);
Yaniv Gardic58ab7a2016-03-10 17:37:10 +02007918 ufs_advertise_fixup_device(hba);
Yaniv Gardi37113102016-03-10 17:37:16 +02007919 ufshcd_tune_unipro_params(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02007920
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007921 ufshcd_apply_pm_quirks(hba);
Yaniv Gardi60f01872016-03-10 17:37:11 +02007922 ret = ufshcd_set_vccq_rail_unused(hba,
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -08007923 (hba->dev_info.quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
Yaniv Gardi60f01872016-03-10 17:37:11 +02007924 if (ret)
7925 goto out;
7926
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007927 /* UFS device is also active now */
7928 ufshcd_set_ufs_dev_active(hba);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05307929 ufshcd_force_reset_auto_bkops(hba);
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307930
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007931 if (ufshcd_get_max_pwr_mode(hba)) {
7932 dev_err(hba->dev,
7933 "%s: Failed getting max supported power mode\n",
7934 __func__);
7935 } else {
Subhash Jadavani88f99992016-12-13 15:52:21 -08007936 /*
7937 * Set the right value to bRefClkFreq before attempting to
7938 * switch to HS gears.
7939 */
7940 ufshcd_set_dev_ref_clk(hba);
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007941 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007942 if (ret) {
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007943 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7944 __func__, ret);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07007945 goto out;
7946 }
Dolev Raviv7eb584d2014-09-25 15:32:31 +03007947 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007948
Subhash Jadavani8a93dbd2016-12-12 17:59:44 -08007949 /*
7950 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7951 * and for removable UFS card as well, hence always set the parameter.
7952 * Note: Error handler may issue the device reset hence resetting
7953 * bActiveICCLevel as well so it is always safe to set this here.
7954 */
7955 ufshcd_set_active_icc_lvl(hba);
7956
Yaniv Gardi53c12d02016-02-01 15:02:45 +02007957 /* set the state as operational after switching to desired gear */
7958 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007959 /*
7960 * If we are in error handling context or in power management callbacks
7961 * context, no need to scan the host
7962 */
7963 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7964 bool flag;
7965
Yaniv Gardidc3c8d32016-02-01 15:02:46 +02007966 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7967 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03007968 hba->dev_info.f_power_on_wp_en = flag;
7969
Subhash Jadavani2a8fa602014-09-25 15:32:28 +03007970 /* Add required well known logical units to scsi mid layer */
7971 if (ufshcd_scsi_add_wlus(hba))
7972 goto out;
7973
Subhash Jadavani9c807702017-04-01 00:35:51 -07007974 /* Initialize devfreq after UFS device is detected */
7975 if (ufshcd_is_clkscaling_supported(hba)) {
7976 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7977 &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
7978 hba->clk_scaling.saved_pwr_info.is_valid = true;
7979 hba->clk_scaling.is_scaled_up = true;
7980 if (!hba->devfreq) {
7981 hba->devfreq = devfreq_add_device(hba->dev,
7982 &ufs_devfreq_profile,
7983 "simple_ondemand",
7984 gov_data);
7985 if (IS_ERR(hba->devfreq)) {
7986 ret = PTR_ERR(hba->devfreq);
7987 dev_err(hba->dev, "Unable to register with devfreq %d\n",
7988 ret);
7989 goto out;
7990 }
7991 }
7992 hba->clk_scaling.is_allowed = true;
7993 }
7994
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05307995 scsi_scan_host(hba->host);
7996 pm_runtime_put_sync(hba->dev);
7997 }
Yaniv Gardi3a4bf062014-09-25 15:32:27 +03007998
Subhash Jadavani9c807702017-04-01 00:35:51 -07007999 /*
8000 * Enable auto hibern8 if supported, after full host and
8001 * device initialization.
8002 */
8003 if (ufshcd_is_auto_hibern8_supported(hba))
8004 ufshcd_set_auto_hibern8_timer(hba,
8005 hba->hibern8_on_idle.delay_ms);
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +05308006out:
Subhash Jadavani0eebfea2017-07-19 16:53:47 -07008007 if (ret) {
8008 ufshcd_set_ufs_dev_poweroff(hba);
8009 ufshcd_set_link_off(hba);
Subhash Jadavanief542222017-08-02 16:23:55 -07008010 if (hba->extcon) {
8011 if (!ufshcd_is_card_online(hba))
8012 ufsdbg_clr_err_state(hba);
8013 ufshcd_set_card_offline(hba);
8014 }
8015 } else if (hba->extcon) {
8016 ufshcd_set_card_online(hba);
Subhash Jadavani0eebfea2017-07-19 16:53:47 -07008017 }
8018
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008019 /*
8020 * If we failed to initialize the device or the device is not
8021 * present, turn off the power/clocks etc.
8022 */
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008023 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008024 pm_runtime_put_sync(hba->dev);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008025
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008026 trace_ufshcd_init(dev_name(hba->dev), ret,
8027 ktime_to_us(ktime_sub(ktime_get(), start)),
8028 hba->curr_dev_pwr_mode, hba->uic_link_state);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008029 return ret;
8030}
8031
Subhash Jadavanief542222017-08-02 16:23:55 -07008032static void ufshcd_remove_device(struct ufs_hba *hba)
8033{
8034 struct scsi_device *sdev;
8035 struct scsi_device *sdev_cache[UFS_MAX_LUS];
8036 int sdev_count = 0, i;
8037 unsigned long flags;
8038
8039 ufshcd_hold_all(hba);
8040 /* Reset the host controller */
8041 spin_lock_irqsave(hba->host->host_lock, flags);
8042 hba->silence_err_logs = true;
8043 ufshcd_hba_stop(hba, false);
8044 spin_unlock_irqrestore(hba->host->host_lock, flags);
8045
8046 ufshcd_set_ufs_dev_poweroff(hba);
8047 ufshcd_set_link_off(hba);
8048 __ufshcd_shutdown_clkscaling(hba);
8049
8050 /* Complete requests that have door-bell cleared by h/w */
8051 ufshcd_complete_requests(hba);
8052
8053 /* remove all scsi devices */
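	/*
	 * Cache the scsi_device pointers first since removing a device
	 * also modifies the host's device list being walked here.
	 */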
8054 list_for_each_entry(sdev, &hba->host->__devices, siblings) {
8055 if (sdev_count < UFS_MAX_LUS) {
8056 sdev_cache[sdev_count] = sdev;
8057 sdev_count++;
8058 }
8059 }
8060
8061 for (i = 0; i < sdev_count; i++)
8062 scsi_remove_device(sdev_cache[i]);
8063
8064 spin_lock_irqsave(hba->host->host_lock, flags);
8065 hba->silence_err_logs = false;
8066 spin_unlock_irqrestore(hba->host->host_lock, flags);
8067
8068 ufshcd_release_all(hba);
8069}
8070
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008071static void ufshcd_card_detect_handler(struct work_struct *work)
8072{
8073 struct ufs_hba *hba;
8074
8075 hba = container_of(work, struct ufs_hba, card_detect_work);
Subhash Jadavanief542222017-08-02 16:23:55 -07008076
8077 if (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) {
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008078 pm_runtime_get_sync(hba->dev);
8079 ufshcd_detect_device(hba);
Subhash Jadavani2d7e4e82017-07-25 15:56:45 -07008080 /* ufshcd_probe_hba() calls pm_runtime_put_sync() on exit */
Subhash Jadavanief542222017-08-02 16:23:55 -07008081 } else if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device) {
8082 pm_runtime_get_sync(hba->dev);
8083 ufshcd_remove_device(hba);
8084 pm_runtime_put_sync(hba->dev);
8085 ufsdbg_clr_err_state(hba);
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008086 }
8087}
8088
8089static int ufshcd_card_detect_notifier(struct notifier_block *nb,
8090 unsigned long event, void *ptr)
8091{
8092 struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
8093
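	/* a non-zero extcon event indicates that the card is present */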
Subhash Jadavanief542222017-08-02 16:23:55 -07008094 if (event)
8095 ufshcd_set_card_online(hba);
8096 else
8097 ufshcd_set_card_offline(hba);
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008098
Subhash Jadavanief542222017-08-02 16:23:55 -07008099 if (ufshcd_is_card_offline(hba) && !hba->sdev_ufs_device)
8100 goto out;
8101
8102 /*
8103 * card insertion/removal are very infrequent events and having this
8104 * message helps if there is some issue with card detection/removal.
8105 */
8106 dev_info(hba->dev, "%s: card %s notification rcvd\n",
8107 __func__, ufshcd_is_card_online(hba) ? "inserted" : "removed");
8108
8109 schedule_work(&hba->card_detect_work);
8110out:
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008111 return NOTIFY_DONE;
8112}
8113
8114static int ufshcd_extcon_register(struct ufs_hba *hba)
8115{
8116 int ret;
8117
8118 if (!hba->extcon)
8119 return 0;
8120
8121 hba->card_detect_nb.notifier_call = ufshcd_card_detect_notifier;
8122 ret = extcon_register_notifier(hba->extcon,
8123 EXTCON_MECHANICAL,
8124 &hba->card_detect_nb);
8125 if (ret)
8126 dev_err(hba->dev, "%s: extcon_register_notifier() failed, ret %d\n",
8127 __func__, ret);
8128
8129 return ret;
8130}
8131
8132static int ufshcd_extcon_unregister(struct ufs_hba *hba)
8133{
8134 int ret;
8135
8136 if (!hba->extcon)
8137 return 0;
8138
8139 ret = extcon_unregister_notifier(hba->extcon, EXTCON_MECHANICAL,
8140 &hba->card_detect_nb);
8141 if (ret)
8142 dev_err(hba->dev, "%s: extcon_unregister_notifier() failed, ret %d\n",
8143 __func__, ret);
8144
8145 return ret;
8146}
8147
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008148/**
8149 * ufshcd_async_scan - asynchronous execution for probing hba
8150 * @data: data pointer to pass to this function
8151 * @cookie: cookie data
8152 */
8153static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8154{
8155 struct ufs_hba *hba = (struct ufs_hba *)data;
8156
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008157 /*
8158 * Don't allow clock gating and hibern8 entry so that device
8159 * detection is faster.
8160 */
8161 ufshcd_hold_all(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008162 ufshcd_probe_hba(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008163 ufshcd_release_all(hba);
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008164
8165 ufshcd_extcon_register(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008166}
8167
8168/**
8169 * ufshcd_query_ioctl - perform user read queries
8170 * @hba: per-adapter instance
8171 * @lun: used for lun specific queries
8172 * @buffer: user space buffer for reading and submitting query data and params
8173 * @return: 0 for success negative error code otherwise
8174 *
8175 * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
8176 * It will read the opcode, idn and buf_length parameters, and put the
8177 * response in the buffer field while updating the used size in buf_length.
8178 */
8179static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
8180{
8181 struct ufs_ioctl_query_data *ioctl_data;
8182 int err = 0;
8183 int length = 0;
8184 void *data_ptr;
8185 bool flag;
8186 u32 att;
8187 u8 index;
8188 u8 *desc = NULL;
8189
8190 ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
8191 if (!ioctl_data) {
8192 dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
8193 sizeof(struct ufs_ioctl_query_data));
8194 err = -ENOMEM;
8195 goto out;
8196 }
8197
8198 /* extract params from user buffer */
8199 err = copy_from_user(ioctl_data, buffer,
8200 sizeof(struct ufs_ioctl_query_data));
8201 if (err) {
8202 dev_err(hba->dev,
8203 "%s: Failed copying buffer from user, err %d\n",
8204 __func__, err);
8205 goto out_release_mem;
8206 }
8207
8208 /* verify legal parameters & send query */
8209 switch (ioctl_data->opcode) {
8210 case UPIU_QUERY_OPCODE_READ_DESC:
8211 switch (ioctl_data->idn) {
8212 case QUERY_DESC_IDN_DEVICE:
8213 case QUERY_DESC_IDN_CONFIGURAION:
8214 case QUERY_DESC_IDN_INTERCONNECT:
8215 case QUERY_DESC_IDN_GEOMETRY:
8216 case QUERY_DESC_IDN_POWER:
8217 index = 0;
8218 break;
8219 case QUERY_DESC_IDN_UNIT:
8220 if (!ufs_is_valid_unit_desc_lun(lun)) {
8221 dev_err(hba->dev,
8222 "%s: No unit descriptor for lun 0x%x\n",
8223 __func__, lun);
8224 err = -EINVAL;
8225 goto out_release_mem;
8226 }
8227 index = lun;
8228 break;
8229 default:
8230 goto out_einval;
8231 }
8232 length = min_t(int, QUERY_DESC_MAX_SIZE,
8233 ioctl_data->buf_size);
8234 desc = kzalloc(length, GFP_KERNEL);
8235 if (!desc) {
8236 dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
8237 __func__, length);
8238 err = -ENOMEM;
8239 goto out_release_mem;
8240 }
8241 err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
8242 ioctl_data->idn, index, 0, desc, &length);
8243 break;
8244 case UPIU_QUERY_OPCODE_READ_ATTR:
8245 switch (ioctl_data->idn) {
8246 case QUERY_ATTR_IDN_BOOT_LU_EN:
8247 case QUERY_ATTR_IDN_POWER_MODE:
8248 case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
8249 case QUERY_ATTR_IDN_OOO_DATA_EN:
8250 case QUERY_ATTR_IDN_BKOPS_STATUS:
8251 case QUERY_ATTR_IDN_PURGE_STATUS:
8252 case QUERY_ATTR_IDN_MAX_DATA_IN:
8253 case QUERY_ATTR_IDN_MAX_DATA_OUT:
8254 case QUERY_ATTR_IDN_REF_CLK_FREQ:
8255 case QUERY_ATTR_IDN_CONF_DESC_LOCK:
8256 case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
8257 case QUERY_ATTR_IDN_EE_CONTROL:
8258 case QUERY_ATTR_IDN_EE_STATUS:
8259 case QUERY_ATTR_IDN_SECONDS_PASSED:
8260 index = 0;
8261 break;
8262 case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
8263 case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
8264 index = lun;
8265 break;
8266 default:
8267 goto out_einval;
8268 }
8269 err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
8270 index, 0, &att);
8271 break;
8272
8273 case UPIU_QUERY_OPCODE_WRITE_ATTR:
8274 err = copy_from_user(&att,
8275 buffer + sizeof(struct ufs_ioctl_query_data),
8276 sizeof(u32));
8277 if (err) {
8278 dev_err(hba->dev,
8279 "%s: Failed copying buffer from user, err %d\n",
8280 __func__, err);
8281 goto out_release_mem;
8282 }
8283
8284 switch (ioctl_data->idn) {
8285 case QUERY_ATTR_IDN_BOOT_LU_EN:
8286 index = 0;
8287 if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
8288 dev_err(hba->dev,
8289 "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
8290 __func__, ioctl_data->opcode,
8291 (unsigned int)ioctl_data->idn, att);
8292 err = -EINVAL;
8293 goto out_release_mem;
8294 }
8295 break;
8296 default:
8297 goto out_einval;
8298 }
8299 err = ufshcd_query_attr(hba, ioctl_data->opcode,
8300 ioctl_data->idn, index, 0, &att);
8301 break;
8302
8303 case UPIU_QUERY_OPCODE_READ_FLAG:
8304 switch (ioctl_data->idn) {
8305 case QUERY_FLAG_IDN_FDEVICEINIT:
8306 case QUERY_FLAG_IDN_PERMANENT_WPE:
8307 case QUERY_FLAG_IDN_PWR_ON_WPE:
8308 case QUERY_FLAG_IDN_BKOPS_EN:
8309 case QUERY_FLAG_IDN_PURGE_ENABLE:
8310 case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
8311 case QUERY_FLAG_IDN_BUSY_RTC:
8312 break;
8313 default:
8314 goto out_einval;
8315 }
8316 err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
8317 ioctl_data->idn, &flag);
8318 break;
8319 default:
8320 goto out_einval;
8321 }
8322
8323 if (err) {
8324 dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
8325 ioctl_data->idn);
8326 goto out_release_mem;
8327 }
8328
8329 /*
8330 * copy response data
8331 * As we might end up reading less data than what is specified in
8332 * "ioctl_data->buf_size", update "ioctl_data->buf_size" to the
8333 * amount of data actually read.
8334 */
8335 switch (ioctl_data->opcode) {
8336 case UPIU_QUERY_OPCODE_READ_DESC:
8337 ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
8338 data_ptr = desc;
8339 break;
8340 case UPIU_QUERY_OPCODE_READ_ATTR:
8341 ioctl_data->buf_size = sizeof(u32);
8342 data_ptr = &att;
8343 break;
8344 case UPIU_QUERY_OPCODE_READ_FLAG:
8345 ioctl_data->buf_size = 1;
8346 data_ptr = &flag;
8347 break;
8348 case UPIU_QUERY_OPCODE_WRITE_ATTR:
8349 goto out_release_mem;
8350 default:
8351 goto out_einval;
8352 }
8353
8354 /* copy to user */
8355 err = copy_to_user(buffer, ioctl_data,
8356 sizeof(struct ufs_ioctl_query_data));
8357 if (err)
8358 dev_err(hba->dev, "%s: Failed copying back to user.\n",
8359 __func__);
8360 err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
8361 data_ptr, ioctl_data->buf_size);
8362 if (err)
8363 dev_err(hba->dev, "%s: err %d copying back to user.\n",
8364 __func__, err);
8365 goto out_release_mem;
8366
8367out_einval:
8368 dev_err(hba->dev,
8369 "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
8370 __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
8371 err = -EINVAL;
8372out_release_mem:
8373 kfree(ioctl_data);
8374 kfree(desc);
8375out:
8376 return err;
8377}
8378
8379/**
8380 * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
8381 * @dev: scsi device required for per LUN queries
8382 * @cmd: command opcode
8383 * @buffer: user space buffer for transferring data
8384 *
8385 * Supported commands:
8386 * UFS_IOCTL_QUERY
8387 */
8388static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
8389{
8390 struct ufs_hba *hba = shost_priv(dev->host);
8391 int err = 0;
8392
8393 BUG_ON(!hba);
8394 if (!buffer) {
8395 dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
8396 return -EINVAL;
8397 }
8398
8399 switch (cmd) {
8400 case UFS_IOCTL_QUERY:
8401 pm_runtime_get_sync(hba->dev);
8402 err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
8403 buffer);
8404 pm_runtime_put_sync(hba->dev);
8405 break;
8406 default:
8407 err = -ENOIOCTLCMD;
8408 dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
8409 cmd);
8410 break;
8411 }
8412
8413 return err;
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +05308414}
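
/*
 * Illustrative user space sketch (not part of the driver) of how
 * UFS_IOCTL_QUERY could be issued on a UFS SCSI device node, assuming the
 * struct ufs_ioctl_query_data header is immediately followed by the data
 * buffer, as handled in ufshcd_query_ioctl() above:
 *
 *	struct ufs_ioctl_query_data *qd =
 *		calloc(1, sizeof(*qd) + QUERY_DESC_MAX_SIZE);
 *
 *	qd->opcode = UPIU_QUERY_OPCODE_READ_DESC;
 *	qd->idn = QUERY_DESC_IDN_GEOMETRY;
 *	qd->buf_size = QUERY_DESC_MAX_SIZE;
 *	if (!ioctl(fd, UFS_IOCTL_QUERY, qd))
 *		qd->buf_size bytes of descriptor data now follow the header
 */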
8415
Yaniv Gardif550c652016-03-10 17:37:07 +02008416static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
8417{
8418 unsigned long flags;
8419 struct Scsi_Host *host;
8420 struct ufs_hba *hba;
8421 int index;
8422 bool found = false;
8423
8424 if (!scmd || !scmd->device || !scmd->device->host)
8425 return BLK_EH_NOT_HANDLED;
8426
8427 host = scmd->device->host;
8428 hba = shost_priv(host);
8429 if (!hba)
8430 return BLK_EH_NOT_HANDLED;
8431
8432 spin_lock_irqsave(host->host_lock, flags);
8433
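	/* Check whether the timed out command was actually issued to the UFS host controller */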
8434 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
8435 if (hba->lrb[index].cmd == scmd) {
8436 found = true;
8437 break;
8438 }
8439 }
8440
8441 spin_unlock_irqrestore(host->host_lock, flags);
8442
8443 /*
8444 * Bypass SCSI error handling and reset the block layer timer if this
8445 * SCSI command was not actually dispatched to UFS driver, otherwise
8446 * let SCSI layer handle the error as usual.
8447 */
8448 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
8449}
8450
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308451static struct scsi_host_template ufshcd_driver_template = {
8452 .module = THIS_MODULE,
8453 .name = UFSHCD,
8454 .proc_name = UFSHCD,
8455 .queuecommand = ufshcd_queuecommand,
8456 .slave_alloc = ufshcd_slave_alloc,
Akinobu Mitaeeda4742014-07-01 23:00:32 +09008457 .slave_configure = ufshcd_slave_configure,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308458 .slave_destroy = ufshcd_slave_destroy,
Sujit Reddy Thumma4264fd62014-06-29 09:40:20 +03008459 .change_queue_depth = ufshcd_change_queue_depth,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308460 .eh_abort_handler = ufshcd_abort,
Sujit Reddy Thumma3441da72014-05-26 10:59:14 +05308461 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8462 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
Yaniv Gardif550c652016-03-10 17:37:07 +02008463 .eh_timed_out = ufshcd_eh_timed_out,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008464 .ioctl = ufshcd_ioctl,
8465#ifdef CONFIG_COMPAT
8466 .compat_ioctl = ufshcd_ioctl,
8467#endif
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308468 .this_id = -1,
8469 .sg_tablesize = SG_ALL,
8470 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8471 .can_queue = UFSHCD_CAN_QUEUE,
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008472 .max_host_blocked = 1,
Christoph Hellwigc40ecc12014-11-13 14:25:11 +01008473 .track_queue_depth = 1,
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308474};
8475
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008476static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8477 int ua)
8478{
Bjorn Andersson7b16a072015-02-11 19:35:28 -08008479 int ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008480
Bjorn Andersson7b16a072015-02-11 19:35:28 -08008481 if (!vreg)
8482 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008483
Bjorn Andersson7b16a072015-02-11 19:35:28 -08008484 ret = regulator_set_load(vreg->reg, ua);
8485 if (ret < 0) {
8486 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8487 __func__, vreg->name, ua, ret);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008488 }
8489
8490 return ret;
8491}
8492
8493static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8494 struct ufs_vreg *vreg)
8495{
Yaniv Gardi60f01872016-03-10 17:37:11 +02008496 if (!vreg)
8497 return 0;
8498 else if (vreg->unused)
8499 return 0;
8500 else
8501 return ufshcd_config_vreg_load(hba->dev, vreg,
8502 UFS_VREG_LPM_LOAD_UA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008503}
8504
8505static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8506 struct ufs_vreg *vreg)
8507{
Yaniv Gardi60f01872016-03-10 17:37:11 +02008508 if (!vreg)
8509 return 0;
8510 else if (vreg->unused)
8511 return 0;
8512 else
8513 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008514}
8515
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008516static int ufshcd_config_vreg(struct device *dev,
8517 struct ufs_vreg *vreg, bool on)
8518{
8519 int ret = 0;
Gustavo A. R. Silvaa248dc62017-11-20 08:12:29 -06008520 struct regulator *reg;
8521 const char *name;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008522 int min_uV, uA_load;
8523
8524 BUG_ON(!vreg);
8525
Gustavo A. R. Silvaa248dc62017-11-20 08:12:29 -06008526 reg = vreg->reg;
8527 name = vreg->name;
8528
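	/* Rails without selectable voltages (e.g. fixed regulators) are left untouched here */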
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008529 if (regulator_count_voltages(reg) > 0) {
8530 min_uV = on ? vreg->min_uV : 0;
8531 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8532 if (ret) {
8533 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
8534 __func__, name, ret);
8535 goto out;
8536 }
8537
8538 uA_load = on ? vreg->max_uA : 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008539 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8540 if (ret)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008541 goto out;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008542 }
8543out:
8544 return ret;
8545}
8546
8547static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8548{
8549 int ret = 0;
8550
Yaniv Gardi60f01872016-03-10 17:37:11 +02008551 if (!vreg)
8552 goto out;
8553 else if (vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008554 goto out;
8555
8556 ret = ufshcd_config_vreg(dev, vreg, true);
8557 if (!ret)
8558 ret = regulator_enable(vreg->reg);
8559
8560 if (!ret)
8561 vreg->enabled = true;
8562 else
8563 dev_err(dev, "%s: %s enable failed, err=%d\n",
8564 __func__, vreg->name, ret);
8565out:
8566 return ret;
8567}
8568
8569static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8570{
8571 int ret = 0;
8572
Yaniv Gardi60f01872016-03-10 17:37:11 +02008573 if (!vreg)
8574 goto out;
8575 else if (!vreg->enabled || vreg->unused)
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008576 goto out;
8577
8578 ret = regulator_disable(vreg->reg);
8579
8580 if (!ret) {
8581 /* ignore errors on applying disable config */
8582 ufshcd_config_vreg(dev, vreg, false);
8583 vreg->enabled = false;
8584 } else {
8585 dev_err(dev, "%s: %s disable failed, err=%d\n",
8586 __func__, vreg->name, ret);
8587 }
8588out:
8589 return ret;
8590}
8591
8592static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8593{
8594 int ret = 0;
8595 struct device *dev = hba->dev;
8596 struct ufs_vreg_info *info = &hba->vreg_info;
8597
8598 if (!info)
8599 goto out;
8600
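	/* Toggle VCC, VCCQ and VCCQ2 in order; on any failure all device rails are switched back off below */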
8601 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8602 if (ret)
8603 goto out;
8604
8605 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8606 if (ret)
8607 goto out;
8608
8609 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8610 if (ret)
8611 goto out;
8612
8613out:
8614 if (ret) {
8615 ufshcd_toggle_vreg(dev, info->vccq2, false);
8616 ufshcd_toggle_vreg(dev, info->vccq, false);
8617 ufshcd_toggle_vreg(dev, info->vcc, false);
8618 }
8619 return ret;
8620}
8621
Raviv Shvili6a771a62014-09-25 15:32:24 +03008622static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8623{
8624 struct ufs_vreg_info *info = &hba->vreg_info;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008625 int ret = 0;
Raviv Shvili6a771a62014-09-25 15:32:24 +03008626
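	/* vdd_hba powers the host controller itself; refresh the secure configuration whenever it is toggled successfully */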
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008627 if (info->vdd_hba) {
8628 ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
Raviv Shvili6a771a62014-09-25 15:32:24 +03008629
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008630 if (!ret)
8631 ufshcd_vops_update_sec_cfg(hba, on);
8632 }
8633
8634 return ret;
Raviv Shvili6a771a62014-09-25 15:32:24 +03008635}
8636
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008637static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8638{
8639 int ret = 0;
8640
8641 if (!vreg)
8642 goto out;
8643
8644 vreg->reg = devm_regulator_get(dev, vreg->name);
8645 if (IS_ERR(vreg->reg)) {
8646 ret = PTR_ERR(vreg->reg);
8647 dev_err(dev, "%s: %s get failed, err=%d\n",
8648 __func__, vreg->name, ret);
8649 }
8650out:
8651 return ret;
8652}
8653
8654static int ufshcd_init_vreg(struct ufs_hba *hba)
8655{
8656 int ret = 0;
8657 struct device *dev = hba->dev;
8658 struct ufs_vreg_info *info = &hba->vreg_info;
8659
8660 if (!info)
8661 goto out;
8662
8663 ret = ufshcd_get_vreg(dev, info->vcc);
8664 if (ret)
8665 goto out;
8666
8667 ret = ufshcd_get_vreg(dev, info->vccq);
8668 if (ret)
8669 goto out;
8670
8671 ret = ufshcd_get_vreg(dev, info->vccq2);
8672out:
8673 return ret;
8674}
8675
Raviv Shvili6a771a62014-09-25 15:32:24 +03008676static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8677{
8678 struct ufs_vreg_info *info = &hba->vreg_info;
8679
8680 if (info)
8681 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8682
8683 return 0;
8684}
8685
Yaniv Gardi60f01872016-03-10 17:37:11 +02008686static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
8687{
8688 int ret = 0;
8689 struct ufs_vreg_info *info = &hba->vreg_info;
8690
8691 if (!info)
8692 goto out;
8693 else if (!info->vccq)
8694 goto out;
8695
8696 if (unused) {
8697 /* shut off the rail here */
8698 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
8699 /*
8700 * Mark this rail as no longer used, so it doesn't get enabled
8701 * later by mistake
8702 */
8703 if (!ret)
8704 info->vccq->unused = true;
8705 } else {
8706 /*
8707	 * The rail should already have been enabled, hence just make sure
8708	 * that the unused flag is cleared.
8709 */
8710 info->vccq->unused = false;
8711 }
8712out:
8713 return ret;
8714}
8715
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008716static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
8717 bool skip_ref_clk, bool is_gating_context)
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008718{
8719 int ret = 0;
8720 struct ufs_clk_info *clki;
8721 struct list_head *head = &hba->clk_list_head;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008722 unsigned long flags;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008723 ktime_t start = ktime_get();
8724 bool clk_state_changed = false;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008725
8726 if (!head || list_empty(head))
8727 goto out;
8728
Subhash Jadavani9c807702017-04-01 00:35:51 -07008729 /* call vendor specific bus vote before enabling the clocks */
8730 if (on) {
8731 ret = ufshcd_vops_set_bus_vote(hba, on);
8732 if (ret)
8733 return ret;
8734 }
8735
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008736 /*
8737 * vendor specific setup_clocks ops may depend on clocks managed by
8738 * this standard driver hence call the vendor specific setup_clocks
8739 * before disabling the clocks managed here.
8740 */
8741 if (!on) {
8742 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
8743 if (ret)
8744 return ret;
8745 }
8746
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008747 list_for_each_entry(clki, head, list) {
8748 if (!IS_ERR_OR_NULL(clki->clk)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008749 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
8750 continue;
8751
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008752 clk_state_changed = on ^ clki->enabled;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008753 if (on && !clki->enabled) {
8754 ret = clk_prepare_enable(clki->clk);
8755 if (ret) {
8756 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8757 __func__, clki->name, ret);
8758 goto out;
8759 }
8760 } else if (!on && clki->enabled) {
8761 clk_disable_unprepare(clki->clk);
8762 }
8763 clki->enabled = on;
8764 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8765 clki->name, on ? "en" : "dis");
8766 }
8767 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008768
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008769 /*
8770 * vendor specific setup_clocks ops may depend on clocks managed by
8771 * this standard driver hence call the vendor specific setup_clocks
8772 * after enabling the clocks managed here.
8773 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07008774 if (on) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008775 ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
Subhash Jadavani9c807702017-04-01 00:35:51 -07008776 if (ret)
8777 goto out;
8778 }
8779
8780 /*
8781 * call vendor specific bus vote to remove the vote after
8782 * disabling the clocks.
8783 */
8784 if (!on)
8785 ret = ufshcd_vops_set_bus_vote(hba, on);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008786
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008787out:
8788 if (ret) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07008789 if (on)
8790 /* Can't do much if this fails */
8791 (void) ufshcd_vops_set_bus_vote(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008792 list_for_each_entry(clki, head, list) {
8793 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8794 clk_disable_unprepare(clki->clk);
8795 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008796 } else if (!ret && on) {
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008797 spin_lock_irqsave(hba->host->host_lock, flags);
8798 hba->clk_gating.state = CLKS_ON;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008799 trace_ufshcd_clk_gating(dev_name(hba->dev),
8800 hba->clk_gating.state);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03008801 spin_unlock_irqrestore(hba->host->host_lock, flags);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008802 /* restore the secure configuration as clocks are enabled */
8803 ufshcd_vops_update_sec_cfg(hba, true);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008804 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008805
8806 if (clk_state_changed)
8807 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8808 (on ? "on" : "off"),
8809 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008810 return ret;
8811}
8812
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008813static int ufshcd_enable_clocks(struct ufs_hba *hba)
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008814{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008815 return ufshcd_setup_clocks(hba, true, false, false);
8816}
8817
8818static int ufshcd_disable_clocks(struct ufs_hba *hba,
8819 bool is_gating_context)
8820{
8821 return ufshcd_setup_clocks(hba, false, false, is_gating_context);
8822}
8823
8824static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
8825 bool is_gating_context)
8826{
8827 return ufshcd_setup_clocks(hba, false, true, is_gating_context);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008828}
8829
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008830static int ufshcd_init_clocks(struct ufs_hba *hba)
8831{
8832 int ret = 0;
8833 struct ufs_clk_info *clki;
8834 struct device *dev = hba->dev;
8835 struct list_head *head = &hba->clk_list_head;
8836
8837 if (!head || list_empty(head))
8838 goto out;
8839
8840 list_for_each_entry(clki, head, list) {
8841 if (!clki->name)
8842 continue;
8843
8844 clki->clk = devm_clk_get(dev, clki->name);
8845 if (IS_ERR(clki->clk)) {
8846 ret = PTR_ERR(clki->clk);
8847 dev_err(dev, "%s: %s clk get failed, %d\n",
8848 __func__, clki->name, ret);
8849 goto out;
8850 }
8851
8852 if (clki->max_freq) {
8853 ret = clk_set_rate(clki->clk, clki->max_freq);
8854 if (ret) {
8855 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8856 __func__, clki->name,
8857 clki->max_freq, ret);
8858 goto out;
8859 }
Sahitya Tummala856b3482014-09-25 15:32:34 +03008860 clki->curr_freq = clki->max_freq;
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008861 }
8862 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8863 clki->name, clk_get_rate(clki->clk));
8864 }
8865out:
8866 return ret;
8867}
8868
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008869static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8870{
8871 int err = 0;
8872
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008873 if (!hba->var || !hba->var->vops)
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008874 goto out;
8875
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008876 err = ufshcd_vops_init(hba);
8877 if (err)
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008878 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008879 __func__, ufshcd_get_var_name(hba), err);
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008880out:
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008881 return err;
8882}
8883
8884static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8885{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008886 if (!hba->var || !hba->var->vops)
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008887 return;
8888
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02008889 ufshcd_vops_exit(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03008890}
8891
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008892static int ufshcd_hba_init(struct ufs_hba *hba)
8893{
8894 int err;
8895
Raviv Shvili6a771a62014-09-25 15:32:24 +03008896 /*
8897 * Handle host controller power separately from the UFS device power
8898	 * rails as it helps to control the UFS host controller power
8899	 * collapse easily, which is different from UFS device power collapse.
8900	 * Also, enable the host controller power before we go ahead with the
8901	 * rest of the initialization here.
8902 */
8903 err = ufshcd_init_hba_vreg(hba);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008904 if (err)
8905 goto out;
8906
Raviv Shvili6a771a62014-09-25 15:32:24 +03008907 err = ufshcd_setup_hba_vreg(hba, true);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008908 if (err)
8909 goto out;
8910
Raviv Shvili6a771a62014-09-25 15:32:24 +03008911 err = ufshcd_init_clocks(hba);
8912 if (err)
8913 goto out_disable_hba_vreg;
8914
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008915 err = ufshcd_enable_clocks(hba);
Raviv Shvili6a771a62014-09-25 15:32:24 +03008916 if (err)
8917 goto out_disable_hba_vreg;
8918
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008919 err = ufshcd_init_vreg(hba);
8920 if (err)
8921 goto out_disable_clks;
8922
8923 err = ufshcd_setup_vreg(hba, true);
8924 if (err)
8925 goto out_disable_clks;
8926
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008927 err = ufshcd_variant_hba_init(hba);
8928 if (err)
8929 goto out_disable_vreg;
8930
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008931 hba->is_powered = true;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008932 goto out;
8933
8934out_disable_vreg:
8935 ufshcd_setup_vreg(hba, false);
Sujit Reddy Thummac6e79da2014-09-25 15:32:23 +03008936out_disable_clks:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008937 ufshcd_disable_clocks(hba, false);
Raviv Shvili6a771a62014-09-25 15:32:24 +03008938out_disable_hba_vreg:
8939 ufshcd_setup_hba_vreg(hba, false);
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008940out:
8941 return err;
8942}
8943
8944static void ufshcd_hba_exit(struct ufs_hba *hba)
8945{
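	/* Release resources in roughly the reverse order of ufshcd_hba_init() */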
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008946 if (hba->is_powered) {
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07008947 ufshcd_extcon_unregister(hba);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008948 ufshcd_variant_hba_exit(hba);
8949 ufshcd_setup_vreg(hba, false);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008950 if (ufshcd_is_clkscaling_supported(hba)) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07008951 if (hba->devfreq)
8952 ufshcd_suspend_clkscaling(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008953 destroy_workqueue(hba->clk_scaling.workq);
8954 }
8955 ufshcd_disable_clocks(hba, false);
Sujit Reddy Thumma1d337ec2014-09-25 15:32:26 +03008956 ufshcd_setup_hba_vreg(hba, false);
8957 hba->is_powered = false;
8958 }
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03008959}
8960
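/* Issue a REQUEST SENSE to the given SCSI device, e.g. to clear a pending unit attention condition */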
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008961static int
8962ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308963{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008964 unsigned char cmd[6] = {REQUEST_SENSE,
8965 0,
8966 0,
8967 0,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008968 UFSHCD_REQ_SENSE_SIZE,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008969 0};
8970 char *buffer;
8971 int ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308972
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008973 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008974 if (!buffer) {
8975 ret = -ENOMEM;
8976 goto out;
8977 }
8978
8979 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07008980 UFSHCD_REQ_SENSE_SIZE, NULL,
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008981 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
8982 if (ret)
8983 pr_err("%s: failed with err %d\n", __func__, ret);
8984
8985 kfree(buffer);
8986out:
8987 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308988}
8989
8990/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008991 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8992 * power mode
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05308993 * @hba: per adapter instance
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008994 * @pwr_mode: device power mode to set
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308995 *
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008996 * Returns 0 if requested power mode is set successfully
8997 * Returns non-zero if failed to set the requested power mode
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05308998 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03008999static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9000 enum ufs_dev_pwr_mode pwr_mode)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309001{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009002 unsigned char cmd[6] = { START_STOP };
9003 struct scsi_sense_hdr sshdr;
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03009004 struct scsi_device *sdp;
9005 unsigned long flags;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009006 int ret;
9007
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03009008 spin_lock_irqsave(hba->host->host_lock, flags);
9009 sdp = hba->sdev_ufs_device;
9010 if (sdp) {
9011 ret = scsi_device_get(sdp);
9012 if (!ret && !scsi_device_online(sdp)) {
9013 ret = -ENODEV;
9014 scsi_device_put(sdp);
9015 }
9016 } else {
9017 ret = -ENODEV;
9018 }
9019 spin_unlock_irqrestore(hba->host->host_lock, flags);
9020
9021 if (ret)
9022 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009023
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309024 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009025 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9026 * handling, which would wait for host to be resumed. Since we know
9027 * we are functional while we are here, skip host resume in error
9028 * handling context.
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309029 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009030 hba->host->eh_noresume = 1;
Subhash Jadavani23df2312016-12-16 12:54:30 -08009031 if (!hba->dev_info.is_ufs_dev_wlun_ua_cleared) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009032 ret = ufshcd_send_request_sense(hba, sdp);
9033 if (ret)
9034 goto out;
9035 /* Unit attention condition is cleared now */
Subhash Jadavani23df2312016-12-16 12:54:30 -08009036 hba->dev_info.is_ufs_dev_wlun_ua_cleared = 1;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009037 }
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309038
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009039 cmd[4] = pwr_mode << 4;
9040
9041 /*
9042	 * This function is generally called from the power management
9043	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
9044	 * already suspended children.
9045 */
9046 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
9047 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
9048 if (ret) {
9049 sdev_printk(KERN_WARNING, sdp,
Hannes Reineckeef613292014-10-24 14:27:00 +02009050 "START_STOP failed for power mode: %d, result %x\n",
9051 pwr_mode, ret);
Hannes Reinecke21045512015-01-08 07:43:46 +01009052 if (driver_byte(ret) & DRIVER_SENSE)
9053 scsi_print_sense_hdr(sdp, NULL, &sshdr);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009054 }
9055
9056 if (!ret)
9057 hba->curr_dev_pwr_mode = pwr_mode;
9058out:
Akinobu Mita7c48bfd2014-10-23 13:25:12 +03009059 scsi_device_put(sdp);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009060 hba->host->eh_noresume = 0;
9061 return ret;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309062}
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309063
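/*
 * Transition the UniPro link to the requested low power state. The link is
 * only turned off when auto-bkops does not need to stay enabled (or when the
 * caller asks to skip the bkops check).
 */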
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009064static int ufshcd_link_state_transition(struct ufs_hba *hba,
9065 enum uic_link_state req_link_state,
9066 int check_for_bkops)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309067{
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009068 int ret = 0;
9069
9070 if (req_link_state == hba->uic_link_state)
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309071 return 0;
9072
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009073 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9074 ret = ufshcd_uic_hibern8_enter(hba);
9075 if (!ret)
9076 ufshcd_set_link_hibern8(hba);
9077 else
9078 goto out;
9079 }
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309080 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009081 * If autobkops is enabled, link can't be turned off because
9082 * turning off the link would also turn off the device.
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309083 */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009084 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9085 (!check_for_bkops || (check_for_bkops &&
9086 !hba->auto_bkops_enabled))) {
9087 /*
Yaniv Gardif3099fb2016-03-10 17:37:17 +02009088	 * Let's make sure that the link is in low power mode; we do this
9089	 * currently by putting the link in Hibern8. Another way to put the
9090	 * link in low power mode is to send a DME end point reset to the
9091	 * device and then send the DME reset command to the local unipro.
9092	 * But putting the link in Hibern8 is much faster.
9093 */
9094 ret = ufshcd_uic_hibern8_enter(hba);
9095 if (ret)
9096 goto out;
9097 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009098 * Change controller state to "reset state" which
9099 * should also put the link in off/reset state
9100 */
Yaniv Gardi596585a2016-03-10 17:37:08 +02009101 ufshcd_hba_stop(hba, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009102 /*
9103 * TODO: Check if we need any delay to make sure that
9104 * controller is reset
9105 */
9106 ufshcd_set_link_off(hba);
9107 }
9108
9109out:
9110 return ret;
9111}
9112
9113static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9114{
9115 /*
Yaniv Gardib799fdf2016-03-10 17:37:18 +02009116 * It seems some UFS devices may keep drawing more than sleep current
9117	 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
9118	 * To avoid this situation, add a 2ms delay before putting these UFS
9119 * rails in LPM mode.
9120 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009121 if (!ufshcd_is_link_active(hba))
Yaniv Gardib799fdf2016-03-10 17:37:18 +02009122 usleep_range(2000, 2100);
9123
9124 /*
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009125	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
9126	 * save some power.
9127 *
9128	 * If the UFS device and link are in OFF state, all power supplies (VCC,
9129 * VCCQ, VCCQ2) can be turned off if power on write protect is not
9130 * required. If UFS link is inactive (Hibern8 or OFF state) and device
9131 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9132 *
9133 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9134 * in low power state which would save some power.
9135 */
9136 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9137 !hba->dev_info.is_lu_power_on_wp) {
9138 ufshcd_setup_vreg(hba, false);
9139 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9140 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9141 if (!ufshcd_is_link_active(hba)) {
9142 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9143 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9144 }
9145 }
9146}
9147
9148static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9149{
9150 int ret = 0;
9151
9152 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9153 !hba->dev_info.is_lu_power_on_wp) {
9154 ret = ufshcd_setup_vreg(hba, true);
9155 } else if (!ufshcd_is_ufs_dev_active(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009156 if (!ret && !ufshcd_is_link_active(hba)) {
9157 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9158 if (ret)
9159 goto vcc_disable;
9160 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9161 if (ret)
9162 goto vccq_lpm;
9163 }
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009164 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009165 }
9166 goto out;
9167
9168vccq_lpm:
9169 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9170vcc_disable:
9171 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9172out:
9173 return ret;
9174}
9175
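/*
 * The host controller rail is power collapsed only when the link is off, or
 * when it is in Hibern8 and power collapse during Hibern8 is allowed.
 */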
9176static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9177{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009178 if (ufshcd_is_link_off(hba) ||
9179 (ufshcd_is_link_hibern8(hba)
9180 && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009181 ufshcd_setup_hba_vreg(hba, false);
9182}
9183
9184static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9185{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009186 if (ufshcd_is_link_off(hba) ||
9187 (ufshcd_is_link_hibern8(hba)
9188 && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009189 ufshcd_setup_hba_vreg(hba, true);
9190}
9191
9192/**
9193 * ufshcd_suspend - helper function for suspend operations
9194 * @hba: per adapter instance
9195 * @pm_op: desired low power operation type
9196 *
9197 * This function will try to put the UFS device and link into low power
9198 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
9199 * (System PM level).
9200 *
9201 * If this function is called during shutdown, it will make sure that
9202 * both UFS device and UFS link is powered off.
9203	 * both the UFS device and the UFS link are powered off.
9204 * NOTE: UFS device & link must be active before we enter in this function.
9205 *
9206 * Returns 0 for success and non-zero for failure
9207 */
9208static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9209{
9210 int ret = 0;
9211 enum ufs_pm_level pm_lvl;
9212 enum ufs_dev_pwr_mode req_dev_pwr_mode;
9213 enum uic_link_state req_link_state;
9214
9215 hba->pm_op_in_progress = 1;
9216 if (!ufshcd_is_shutdown_pm(pm_op)) {
9217 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
9218 hba->rpm_lvl : hba->spm_lvl;
9219 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9220 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9221 } else {
9222 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9223 req_link_state = UIC_LINK_OFF_STATE;
9224 }
9225
9226 /*
9227 * If we can't transition into any of the low power modes
9228 * just gate the clocks.
9229 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009230 WARN_ON(hba->hibern8_on_idle.is_enabled &&
9231 hba->hibern8_on_idle.active_reqs);
9232 ufshcd_hold_all(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009233 hba->clk_gating.is_suspended = true;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009234 hba->hibern8_on_idle.is_suspended = true;
9235
9236 if (hba->clk_scaling.is_allowed) {
9237 cancel_work_sync(&hba->clk_scaling.suspend_work);
9238 cancel_work_sync(&hba->clk_scaling.resume_work);
9239 ufshcd_suspend_clkscaling(hba);
9240 }
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009241
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009242 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9243 req_link_state == UIC_LINK_ACTIVE_STATE) {
9244 goto disable_clks;
9245 }
9246
9247 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9248 (req_link_state == hba->uic_link_state))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009249 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009250
9251 /* UFS device & link must be active before we enter in this function */
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07009252 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba))
9253 goto set_vreg_lpm;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009254
9255 if (ufshcd_is_runtime_pm(pm_op)) {
Subhash Jadavani374a2462014-09-25 15:32:35 +03009256 if (ufshcd_can_autobkops_during_suspend(hba)) {
9257 /*
9258 * The device is idle with no requests in the queue,
9259 * allow background operations if bkops status shows
9260 * that performance might be impacted.
9261 */
9262 ret = ufshcd_urgent_bkops(hba);
9263 if (ret)
9264 goto enable_gating;
9265 } else {
9266 /* make sure that auto bkops is disabled */
9267 ufshcd_disable_auto_bkops(hba);
9268 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009269 }
9270
9271 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
9272 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
9273 !ufshcd_is_runtime_pm(pm_op))) {
9274 /* ensure that bkops is disabled */
9275 ufshcd_disable_auto_bkops(hba);
9276 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9277 if (ret)
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009278 goto enable_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009279 }
9280
9281 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
9282 if (ret)
9283 goto set_dev_active;
9284
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009285 if (ufshcd_is_link_hibern8(hba) &&
9286 ufshcd_is_hibern8_on_idle_allowed(hba))
9287 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
9288
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07009289set_vreg_lpm:
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009290 ufshcd_vreg_set_lpm(hba);
9291
9292disable_clks:
9293 /*
9294 * Call vendor specific suspend callback. As these callbacks may access
9295 * vendor specific host controller register space call them before the
9296 * host clocks are ON.
9297 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02009298 ret = ufshcd_vops_suspend(hba, pm_op);
9299 if (ret)
9300 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009301
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009302 if (!ufshcd_is_link_active(hba))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009303 ret = ufshcd_disable_clocks(hba, false);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009304 else
9305 /* If link is active, device ref_clk can't be switched off */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009306 ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
9307 if (ret)
9308 goto set_link_active;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009309
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009310 if (ufshcd_is_clkgating_allowed(hba)) {
9311 hba->clk_gating.state = CLKS_OFF;
9312 trace_ufshcd_clk_gating(dev_name(hba->dev),
9313 hba->clk_gating.state);
9314 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009315 /*
9316	 * Disable the host irq as there won't be any
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02009317 * host controller transaction expected till resume.
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009318 */
9319 ufshcd_disable_irq(hba);
9320 /* Put the host controller in low power mode if possible */
9321 ufshcd_hba_vreg_set_lpm(hba);
9322 goto out;
9323
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009324set_link_active:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009325 if (hba->clk_scaling.is_allowed)
9326 ufshcd_resume_clkscaling(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009327 ufshcd_vreg_set_hpm(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009328 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009329 ufshcd_set_link_active(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009330 } else if (ufshcd_is_link_off(hba)) {
9331 ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009332 ufshcd_host_reset_and_restore(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009333 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009334set_dev_active:
9335 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9336 ufshcd_disable_auto_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009337enable_gating:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009338 if (hba->clk_scaling.is_allowed)
9339 ufshcd_resume_clkscaling(hba);
9340 hba->hibern8_on_idle.is_suspended = false;
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009341 hba->clk_gating.is_suspended = false;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009342 ufshcd_release_all(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009343out:
9344 hba->pm_op_in_progress = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009345
9346 if (ret)
9347 ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
9348
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009349 return ret;
9350}
9351
9352/**
9353 * ufshcd_resume - helper function for resume operations
9354 * @hba: per adapter instance
9355 * @pm_op: runtime PM or system PM
9356 *
9357 * This function basically brings the UFS device, UniPro link and controller
9358 * to active state.
9359 *
9360 * Returns 0 for success and non-zero for failure
9361 */
9362static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9363{
9364 int ret;
9365 enum uic_link_state old_link_state;
9366
9367 hba->pm_op_in_progress = 1;
9368 old_link_state = hba->uic_link_state;
9369
9370 ufshcd_hba_vreg_set_hpm(hba);
9371 /* Make sure clocks are enabled before accessing controller */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009372 ret = ufshcd_enable_clocks(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009373 if (ret)
9374 goto out;
9375
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009376 /* enable the host irq as host controller would be active soon */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009377 ufshcd_enable_irq(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009378
9379 ret = ufshcd_vreg_set_hpm(hba);
9380 if (ret)
9381 goto disable_irq_and_vops_clks;
9382
9383 /*
9384 * Call vendor specific resume callback. As these callbacks may access
9385 * vendor specific host controller register space call them when the
9386 * host clocks are ON.
9387 */
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02009388 ret = ufshcd_vops_resume(hba, pm_op);
9389 if (ret)
9390 goto disable_vreg;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009391
Subhash Jadavanief542222017-08-02 16:23:55 -07009392 if (hba->extcon &&
9393 (ufshcd_is_card_offline(hba) ||
9394 (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device)))
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07009395 goto skip_dev_ops;
9396
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009397 if (ufshcd_is_link_hibern8(hba)) {
9398 ret = ufshcd_uic_hibern8_exit(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009399 if (!ret) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009400 ufshcd_set_link_active(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009401 if (ufshcd_is_hibern8_on_idle_allowed(hba))
9402 hba->hibern8_on_idle.state = HIBERN8_EXITED;
9403 } else {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009404 goto vendor_suspend;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009405 }
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009406 } else if (ufshcd_is_link_off(hba)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009407 /*
Subhash Jadavani9c807702017-04-01 00:35:51 -07009408 * A full initialization of the host and the device is required
9409 * since the link was put to off during suspend.
9410 */
9411 ret = ufshcd_reset_and_restore(hba);
9412 /*
9413 * ufshcd_reset_and_restore() should have already
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009414 * set the link state as active
9415 */
9416 if (ret || !ufshcd_is_link_active(hba))
9417 goto vendor_suspend;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009418 /* mark link state as hibern8 exited */
9419 if (ufshcd_is_hibern8_on_idle_allowed(hba))
9420 hba->hibern8_on_idle.state = HIBERN8_EXITED;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009421 }
9422
9423 if (!ufshcd_is_ufs_dev_active(hba)) {
9424 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9425 if (ret)
9426 goto set_old_link_state;
9427 }
9428
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009429 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9430 ufshcd_enable_auto_bkops(hba);
9431 else
9432 /*
9433 * If BKOPs operations are urgently needed at this moment then
9434 * keep auto-bkops enabled or else disable it.
9435 */
9436 ufshcd_urgent_bkops(hba);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009437
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009438 hba->clk_gating.is_suspended = false;
9439 hba->hibern8_on_idle.is_suspended = false;
9440
9441 if (hba->clk_scaling.is_allowed)
9442 ufshcd_resume_clkscaling(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +03009443
Subhash Jadavani9e7ed482017-05-08 18:29:45 -07009444skip_dev_ops:
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009445 /* Schedule clock gating in case of no access to UFS device yet */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009446 ufshcd_release_all(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009447 goto out;
9448
9449set_old_link_state:
9450 ufshcd_link_state_transition(hba, old_link_state, 0);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009451 if (ufshcd_is_link_hibern8(hba) &&
9452 ufshcd_is_hibern8_on_idle_allowed(hba))
9453 hba->hibern8_on_idle.state = HIBERN8_ENTERED;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009454vendor_suspend:
Yaniv Gardi0263bcd2015-10-28 13:15:48 +02009455 ufshcd_vops_suspend(hba, pm_op);
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009456disable_vreg:
9457 ufshcd_vreg_set_lpm(hba);
9458disable_irq_and_vops_clks:
9459 ufshcd_disable_irq(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009460 if (hba->clk_scaling.is_allowed)
9461 ufshcd_suspend_clkscaling(hba);
9462 ufshcd_disable_clocks(hba, false);
9463 if (ufshcd_is_clkgating_allowed(hba))
9464 hba->clk_gating.state = CLKS_OFF;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009465out:
9466 hba->pm_op_in_progress = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009467
9468 if (ret)
9469 ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
9470
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009471 return ret;
9472}
9473
9474/**
9475 * ufshcd_system_suspend - system suspend routine
9476 * @hba: per adapter instance
9478 *
9479 * Check the description of ufshcd_suspend() function for more details.
9480 *
9481 * Returns 0 for success and non-zero for failure
9482 */
9483int ufshcd_system_suspend(struct ufs_hba *hba)
9484{
9485 int ret = 0;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009486 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009487
9488 if (!hba || !hba->is_powered)
Dolev Raviv233b5942014-10-23 13:25:14 +03009489 return 0;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009490
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009491 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
9492 hba->curr_dev_pwr_mode) &&
9493 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
9494 hba->uic_link_state))
9495 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009496
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009497 if (pm_runtime_suspended(hba->dev)) {
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009498 /*
9499 * UFS device and/or UFS link low power states during runtime
9500	 * suspend seem to be different from what is expected during
9501	 * system suspend. Hence runtime resume the device & link and
9502	 * let the system suspend low power states take effect.
9503	 * TODO: If resume takes too long, we might optimize it in the
9504	 * future by not resuming everything if possible.
9505 */
9506 ret = ufshcd_runtime_resume(hba);
9507 if (ret)
9508 goto out;
9509 }
9510
9511 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
9512out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009513 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9514 ktime_to_us(ktime_sub(ktime_get(), start)),
9515 hba->curr_dev_pwr_mode, hba->uic_link_state);
Dolev Ravive7850602014-09-25 15:32:36 +03009516 if (!ret)
9517 hba->is_sys_suspended = true;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009518 return ret;
9519}
9520EXPORT_SYMBOL(ufshcd_system_suspend);
9521
9522/**
9523 * ufshcd_system_resume - system resume routine
9524 * @hba: per adapter instance
9525 *
9526 * Returns 0 for success and non-zero for failure
9527 */
9528
9529int ufshcd_system_resume(struct ufs_hba *hba)
9530{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009531 int ret = 0;
9532 ktime_t start = ktime_get();
9533
9534 if (!hba)
9535 return -EINVAL;
9536
9537 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009538 /*
9539 * Let the runtime resume take care of resuming
9540 * if runtime suspended.
9541 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009542 goto out;
9543 else
9544 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
9545out:
9546 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9547 ktime_to_us(ktime_sub(ktime_get(), start)),
9548 hba->curr_dev_pwr_mode, hba->uic_link_state);
9549 return ret;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009550}
9551EXPORT_SYMBOL(ufshcd_system_resume);
9552
9553/**
9554 * ufshcd_runtime_suspend - runtime suspend routine
9555 * @hba: per adapter instance
9556 *
9557 * Check the description of ufshcd_suspend() function for more details.
9558 *
9559 * Returns 0 for success and non-zero for failure
9560 */
9561int ufshcd_runtime_suspend(struct ufs_hba *hba)
9562{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009563 int ret = 0;
9564 ktime_t start = ktime_get();
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009565
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009566 if (!hba)
9567 return -EINVAL;
9568
9569 if (!hba->is_powered)
9570 goto out;
9571 else
9572 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
9573out:
9574 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9575 ktime_to_us(ktime_sub(ktime_get(), start)),
9576 hba->curr_dev_pwr_mode,
9577 hba->uic_link_state);
9578 return ret;
9579
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309580}
9581EXPORT_SYMBOL(ufshcd_runtime_suspend);
9582
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009583/**
9584 * ufshcd_runtime_resume - runtime resume routine
9585 * @hba: per adapter instance
9586 *
9587 * This function basically brings the UFS device, UniPro link and controller
9588 * to active state. Following operations are done in this function:
9589 *
9590 * 1. Turn on all the controller related clocks
9591 * 2. Bring the UniPro link out of Hibernate state
9592 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9593 * to active state.
9594 * 4. If auto-bkops is enabled on the device, disable it.
9595 *
9596	 * So the following would be the possible power state after this function returns
9597 * successfully:
9598 * S1: UFS device in Active state with VCC rail ON
9599 * UniPro link in Active state
9600 * All the UFS/UniPro controller clocks are ON
9601 *
9602 * Returns 0 for success and non-zero for failure
9603 */
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309604int ufshcd_runtime_resume(struct ufs_hba *hba)
9605{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009606 int ret = 0;
9607 ktime_t start = ktime_get();
9608
9609 if (!hba)
9610 return -EINVAL;
9611
9612 if (!hba->is_powered)
9613 goto out;
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009614 else
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009615 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9616out:
9617 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9618 ktime_to_us(ktime_sub(ktime_get(), start)),
9619 hba->curr_dev_pwr_mode,
9620 hba->uic_link_state);
9621 return ret;
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +05309622}
9623EXPORT_SYMBOL(ufshcd_runtime_resume);
9624
9625int ufshcd_runtime_idle(struct ufs_hba *hba)
9626{
9627 return 0;
9628}
9629EXPORT_SYMBOL(ufshcd_runtime_idle);
9630
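/*
 * Common sysfs store helper for the rpm_lvl and spm_lvl attributes: validate
 * the requested PM level and apply it under the host lock.
 */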
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009631static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
9632 struct device_attribute *attr,
9633 const char *buf, size_t count,
9634 bool rpm)
9635{
9636 struct ufs_hba *hba = dev_get_drvdata(dev);
9637 unsigned long flags, value;
9638
9639 if (kstrtoul(buf, 0, &value))
9640 return -EINVAL;
9641
9642 if (value >= UFS_PM_LVL_MAX)
9643 return -EINVAL;
9644
9645 spin_lock_irqsave(hba->host->host_lock, flags);
9646 if (rpm)
9647 hba->rpm_lvl = value;
9648 else
9649 hba->spm_lvl = value;
9650 ufshcd_apply_pm_quirks(hba);
9651 spin_unlock_irqrestore(hba->host->host_lock, flags);
9652 return count;
9653}
9654
9655static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
9656 struct device_attribute *attr, char *buf)
9657{
9658 struct ufs_hba *hba = dev_get_drvdata(dev);
9659 int curr_len;
9660 u8 lvl;
9661
9662 curr_len = snprintf(buf, PAGE_SIZE,
9663 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
9664 hba->rpm_lvl,
9665 ufschd_ufs_dev_pwr_mode_to_string(
9666 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
9667 ufschd_uic_link_state_to_string(
9668 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
9669
9670 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9671 "\nAll available Runtime PM levels info:\n");
9672 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9673 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9674 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
9675 lvl,
9676 ufschd_ufs_dev_pwr_mode_to_string(
9677 ufs_pm_lvl_states[lvl].dev_state),
9678 ufschd_uic_link_state_to_string(
9679 ufs_pm_lvl_states[lvl].link_state));
9680
9681 return curr_len;
9682}
9683
9684static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
9685 struct device_attribute *attr, const char *buf, size_t count)
9686{
9687 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
9688}
9689
9690static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
9691{
9692 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
9693 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
9694 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
9695 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
9696 hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9697 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
9698 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
9699}
9700
9701static ssize_t ufshcd_spm_lvl_show(struct device *dev,
9702 struct device_attribute *attr, char *buf)
9703{
9704 struct ufs_hba *hba = dev_get_drvdata(dev);
9705 int curr_len;
9706 u8 lvl;
9707
9708 curr_len = snprintf(buf, PAGE_SIZE,
9709 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
9710 hba->spm_lvl,
9711 ufschd_ufs_dev_pwr_mode_to_string(
9712 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
9713 ufschd_uic_link_state_to_string(
9714 ufs_pm_lvl_states[hba->spm_lvl].link_state));
9715
9716 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9717 "\nAll available System PM levels info:\n");
9718 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
9719 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
9720 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
9721 lvl,
9722 ufschd_ufs_dev_pwr_mode_to_string(
9723 ufs_pm_lvl_states[lvl].dev_state),
9724 ufschd_uic_link_state_to_string(
9725 ufs_pm_lvl_states[lvl].link_state));
9726
9727 return curr_len;
9728}
9729
9730static ssize_t ufshcd_spm_lvl_store(struct device *dev,
9731 struct device_attribute *attr, const char *buf, size_t count)
9732{
9733 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
9734}
9735
9736static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
9737{
9738 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
9739 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
9740 sysfs_attr_init(&hba->spm_lvl_attr.attr);
9741 hba->spm_lvl_attr.attr.name = "spm_lvl";
9742 hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
9743 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
9744 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
9745}
9746
9747static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
9748{
9749 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
9750 ufshcd_add_spm_lvl_sysfs_nodes(hba);
9751}
9752
Subhash Jadavanief542222017-08-02 16:23:55 -07009753static void __ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
Subhash Jadavani9c807702017-04-01 00:35:51 -07009754{
9755 bool suspend = false;
9756 unsigned long flags;
9757
9758 spin_lock_irqsave(hba->host->host_lock, flags);
9759 if (hba->clk_scaling.is_allowed) {
9760 hba->clk_scaling.is_allowed = false;
9761 suspend = true;
9762 }
9763 spin_unlock_irqrestore(hba->host->host_lock, flags);
9764
9765	/*
9766	 * Clock scaling work may have been scheduled earlier, hence make
9767	 * sure it doesn't race with shutdown.
9768 */
9769 if (ufshcd_is_clkscaling_supported(hba)) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07009770 cancel_work_sync(&hba->clk_scaling.suspend_work);
9771 cancel_work_sync(&hba->clk_scaling.resume_work);
9772 if (suspend)
9773 ufshcd_suspend_clkscaling(hba);
9774 }
9775
9776 /* Unregister so that devfreq_monitor can't race with shutdown */
Subhash Jadavanief542222017-08-02 16:23:55 -07009777 if (hba->devfreq) {
Subhash Jadavani9c807702017-04-01 00:35:51 -07009778 devfreq_remove_device(hba->devfreq);
Subhash Jadavanief542222017-08-02 16:23:55 -07009779 hba->devfreq = NULL;
9780 }
9781}
9782
9783static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
9784{
Sayali Lokhande378d03f2017-09-06 20:09:32 +05309785 if (!ufshcd_is_clkscaling_supported(hba))
9786 return;
Subhash Jadavanief542222017-08-02 16:23:55 -07009787 __ufshcd_shutdown_clkscaling(hba);
9788 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Subhash Jadavani9c807702017-04-01 00:35:51 -07009789}
9790
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309791/**
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009792 * ufshcd_shutdown - shutdown routine
9793 * @hba: per adapter instance
9794 *
9795 * This function would power off both UFS device and UFS link.
9796 *
9797 * Returns 0 always to allow force shutdown even in case of errors.
9798 */
9799int ufshcd_shutdown(struct ufs_hba *hba)
9800{
Subhash Jadavani9c807702017-04-01 00:35:51 -07009801 int ret = 0;
9802
9803 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9804 goto out;
9805
9806 pm_runtime_get_sync(hba->dev);
9807 ufshcd_hold_all(hba);
9808 ufshcd_mark_shutdown_ongoing(hba);
9809 ufshcd_shutdown_clkscaling(hba);
9810	/*
9811 * (1) Acquire the lock to stop any more requests
9812 * (2) Wait for all issued requests to complete
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009813 */
Subhash Jadavani9c807702017-04-01 00:35:51 -07009814 ufshcd_get_write_lock(hba);
9815 ufshcd_scsi_block_requests(hba);
9816 ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
9817 if (ret)
9818 dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
9819 __func__, ret);
9820	/* Requests may have errored out above, let them be handled */
9821 flush_work(&hba->eh_work);
9822	/* reqs issued from contexts other than shutdown will fail from now on */
9823 ufshcd_scsi_unblock_requests(hba);
9824 ufshcd_release_all(hba);
9825 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9826out:
9827 if (ret)
9828 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9829 /* allow force shutdown even in case of errors */
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009830 return 0;
9831}
9832EXPORT_SYMBOL(ufshcd_shutdown);
9833
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07009834/*
9835 * Values permitted 0, 1, 2.
9836 * 0 -> Disable IO latency histograms (default)
9837 * 1 -> Enable IO latency histograms
9838 * 2 -> Zero out IO latency histograms
9839 */
9840static ssize_t
9841latency_hist_store(struct device *dev, struct device_attribute *attr,
9842 const char *buf, size_t count)
9843{
9844 struct ufs_hba *hba = dev_get_drvdata(dev);
9845 long value;
9846
9847 if (kstrtol(buf, 0, &value))
9848 return -EINVAL;
9849 if (value == BLK_IO_LAT_HIST_ZERO)
9850 blk_zero_latency_hist(&hba->io_lat_s);
9851 else if (value == BLK_IO_LAT_HIST_ENABLE ||
9852 value == BLK_IO_LAT_HIST_DISABLE)
9853 hba->latency_hist_enabled = value;
9854 return count;
9855}
9856
9857ssize_t
9858latency_hist_show(struct device *dev, struct device_attribute *attr,
9859 char *buf)
9860{
9861 struct ufs_hba *hba = dev_get_drvdata(dev);
9862
9863 return blk_latency_hist_show(&hba->io_lat_s, buf);
9864}
9865
9866static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
9867 latency_hist_show, latency_hist_store);
9868
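/* Create the latency_hist sysfs attribute for this host */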
9869static void
9870ufshcd_init_latency_hist(struct ufs_hba *hba)
9871{
9872 if (device_create_file(hba->dev, &dev_attr_latency_hist))
9873 dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
9874}
9875
9876static void
9877ufshcd_exit_latency_hist(struct ufs_hba *hba)
9878{
9879	device_remove_file(hba->dev, &dev_attr_latency_hist);
9880}
9881
Subhash Jadavani57d104c2014-09-25 15:32:30 +03009882/**
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309883 * ufshcd_remove - de-allocate SCSI host and host memory space
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309884 * data structures
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309885 * @hba: per adapter instance
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309886 */
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309887void ufshcd_remove(struct ufs_hba *hba)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309888{
Akinobu Mitacfdf9c92013-07-30 00:36:03 +05309889 scsi_remove_host(hba->host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309890 /* disable interrupts */
Seungwon Jeon2fbd0092013-06-26 22:39:27 +05309891 ufshcd_disable_intr(hba, hba->intr_mask);
Yaniv Gardi596585a2016-03-10 17:37:08 +02009892 ufshcd_hba_stop(hba, true);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309893
Sahitya Tummala1ab27c92014-09-25 15:32:32 +03009894 ufshcd_exit_clk_gating(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009895 ufshcd_exit_hibern8_on_idle(hba);
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -07009896 ufshcd_exit_latency_hist(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009897 if (ufshcd_is_clkscaling_supported(hba)) {
9898 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
Kyle Yan65be4a52016-10-31 15:05:00 -07009899 if (hba->devfreq)
9900 devfreq_remove_device(hba->devfreq);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009901 }
Channagoud Kadabi8810e5f2017-02-17 16:01:05 -08009902
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +03009905 ufshcd_hba_exit(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009906 ufsdbg_remove_debugfs(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309907}
9908EXPORT_SYMBOL_GPL(ufshcd_remove);
9909
9910/**
Yaniv Gardi47555a52015-10-28 13:15:49 +02009911 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9912 * @hba: pointer to Host Bus Adapter (HBA)
9913 */
9914void ufshcd_dealloc_host(struct ufs_hba *hba)
9915{
9916 scsi_host_put(hba->host);
9917}
9918EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9919
9920/**
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +09009921 * ufshcd_set_dma_mask - Set dma mask based on the controller
9922 * addressing capability
9923 * @hba: per adapter instance
9924 *
9925 * Returns 0 for success, non-zero for failure
9926 */
9927static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9928{
9929 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9930 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9931 return 0;
9932 }
9933 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9934}
9935
9936/**
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009937 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309938 * @dev: pointer to device handle
9939 * @hba_handle: driver private handle
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309940 * Returns 0 on success, non-zero value on failure
9941 */
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009942int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309943{
9944 struct Scsi_Host *host;
9945 struct ufs_hba *hba;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009946 int err = 0;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309947
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309948 if (!dev) {
9949 dev_err(dev,
9950		"Invalid memory reference, dev is NULL\n");
9951 err = -ENODEV;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309952 goto out_error;
9953 }
9954
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309955 host = scsi_host_alloc(&ufshcd_driver_template,
9956 sizeof(struct ufs_hba));
9957 if (!host) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309958 dev_err(dev, "scsi_host_alloc failed\n");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309959 err = -ENOMEM;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309960 goto out_error;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309961 }
9962 hba = shost_priv(host);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +05309963 hba->host = host;
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +05309964 hba->dev = dev;
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +03009965 *hba_handle = hba;
9966
9967out_error:
9968 return err;
9969}
9970EXPORT_SYMBOL(ufshcd_alloc_host);
9971
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009972/**
9973 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
9974 * @hba: per adapter instance
9975 * @scale_up: True if scaling up and false if scaling down
9976 *
9977 * Returns true if scaling is required, false otherwise.
9978 */
9979static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
9980 bool scale_up)
Sahitya Tummala856b3482014-09-25 15:32:34 +03009981{
Sahitya Tummala856b3482014-09-25 15:32:34 +03009982 struct ufs_clk_info *clki;
9983 struct list_head *head = &hba->clk_list_head;
9984
9985 if (!head || list_empty(head))
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009986 return false;
Yaniv Gardif06fcc72015-10-28 13:15:51 +02009987
Sahitya Tummala856b3482014-09-25 15:32:34 +03009988 list_for_each_entry(clki, head, list) {
9989 if (!IS_ERR_OR_NULL(clki->clk)) {
9990 if (scale_up && clki->max_freq) {
9991 if (clki->curr_freq == clki->max_freq)
9992 continue;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009993 return true;
Sahitya Tummala856b3482014-09-25 15:32:34 +03009994 } else if (!scale_up && clki->min_freq) {
9995 if (clki->curr_freq == clki->min_freq)
9996 continue;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -07009997 return true;
Sahitya Tummala856b3482014-09-25 15:32:34 +03009998 }
9999 }
Sahitya Tummala856b3482014-09-25 15:32:34 +030010000 }
Yaniv Gardif06fcc72015-10-28 13:15:51 +020010001
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010002 return false;
10003}
Yaniv Gardif06fcc72015-10-28 13:15:51 +020010004
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010005/**
10006 * ufshcd_scale_gear - scale up/down UFS gear
10007 * @hba: per adapter instance
10008 * @scale_up: True for scaling up gear and false for scaling down
10009 *
10010 * Returns 0 for success,
10011 * Returns -EBUSY if scaling can't happen at this time
10012 * Returns non-zero for any other errors
10013 */
10014static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
10015{
10016 int ret = 0;
10017 struct ufs_pa_layer_attr new_pwr_info;
10018 u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
10019
10020 BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
10021
10022 if (scale_up) {
10023 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
10024 sizeof(struct ufs_pa_layer_attr));
Subhash Jadavanibe096032017-03-23 12:55:25 -070010025 /*
10026 * Some UFS devices may stop responding after switching from
10027		 * HS-G1 to HS-G3. These devices are found to work fine if the
10028		 * switch is done in two steps: HS-G1 to HS-G2 followed by
10029		 * HS-G2 to HS-G3. When the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
10030		 * quirk is enabled for such devices, this two-step gear switch
10031		 * workaround is applied.
10032 */
10033 if ((hba->dev_info.quirks &
10034 UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
10035 && (hba->pwr_info.gear_tx == UFS_HS_G1)
10036 && (new_pwr_info.gear_tx == UFS_HS_G3)) {
10037 /* scale up to G2 first */
10038 new_pwr_info.gear_tx = UFS_HS_G2;
10039 new_pwr_info.gear_rx = UFS_HS_G2;
10040 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
10041 if (ret)
10042 goto out;
10043
10044 /* scale up to G3 now */
10045 new_pwr_info.gear_tx = UFS_HS_G3;
10046 new_pwr_info.gear_rx = UFS_HS_G3;
Subhash Jadavanicfd76732017-04-18 11:06:22 -070010047 /* now, fall through to set the HS-G3 */
Subhash Jadavanibe096032017-03-23 12:55:25 -070010048 }
Subhash Jadavanicfd76732017-04-18 11:06:22 -070010049 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
10050 if (ret)
10051 goto out;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010052 } else {
10053 memcpy(&new_pwr_info, &hba->pwr_info,
10054 sizeof(struct ufs_pa_layer_attr));
10055
10056 if (hba->pwr_info.gear_tx > scale_down_gear
10057 || hba->pwr_info.gear_rx > scale_down_gear) {
10058 /* save the current power mode */
10059 memcpy(&hba->clk_scaling.saved_pwr_info.info,
10060 &hba->pwr_info,
10061 sizeof(struct ufs_pa_layer_attr));
10062
10063 /* scale down gear */
10064 new_pwr_info.gear_tx = scale_down_gear;
10065 new_pwr_info.gear_rx = scale_down_gear;
Subhash Jadavani4f0df17b2016-12-16 13:19:27 -080010066 if (!(hba->dev_info.quirks & UFS_DEVICE_NO_FASTAUTO)) {
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010067 new_pwr_info.pwr_tx = FASTAUTO_MODE;
10068 new_pwr_info.pwr_rx = FASTAUTO_MODE;
10069 }
10070 }
Subhash Jadavanibe096032017-03-23 12:55:25 -070010071 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010072 }
10073
Subhash Jadavanibe096032017-03-23 12:55:25 -070010074out:
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010075 if (ret)
10076 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
10077 __func__, ret,
10078 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
10079 new_pwr_info.gear_tx, new_pwr_info.gear_rx,
10080 scale_up);
10081
Sahitya Tummala856b3482014-09-25 15:32:34 +030010082 return ret;
10083}
10084
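/**
 * ufshcd_clock_scaling_prepare - quiesce the host before scaling
 * @hba: per adapter instance
 *
 * Blocks new SCSI requests, takes hba->lock for writing and waits for the
 * doorbell register to clear so that no requests are outstanding while
 * clocks and gear are being changed.
 *
 * Returns 0 on success, -EBUSY if the doorbell does not clear in time.
 */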
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010085static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
10086{
10087 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
10088 int ret = 0;
10089 /*
10090 * make sure that there are no outstanding requests when
10091 * clock scaling is in progress
10092 */
10093 ufshcd_scsi_block_requests(hba);
Subhash Jadavani9c807702017-04-01 00:35:51 -070010094 down_write(&hba->lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010095 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
10096 ret = -EBUSY;
Subhash Jadavani9c807702017-04-01 00:35:51 -070010097 up_write(&hba->lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010098 ufshcd_scsi_unblock_requests(hba);
10099 }
10100
10101 return ret;
10102}
10103
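/**
 * ufshcd_clock_scaling_unprepare - undo ufshcd_clock_scaling_prepare()
 * @hba: per adapter instance
 */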
10104static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
10105{
Subhash Jadavani9c807702017-04-01 00:35:51 -070010106 up_write(&hba->lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010107 ufshcd_scsi_unblock_requests(hba);
10108}
10109
10110/**
10111 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
10112 * @hba: per adapter instance
10113 * @scale_up: True for scaling up and false for scaling down
10114 *
10115 * Returns 0 for success,
10116 * Returns -EBUSY if scaling can't happen at this time
10117 * Returns non-zero for any other errors
10118 */
10119static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
10120{
10121 int ret = 0;
10122
Subhash Jadavanief542222017-08-02 16:23:55 -070010123 if (hba->extcon && ufshcd_is_card_offline(hba))
10124 return 0;
10125
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010126 /* let's not get into low power until clock scaling is completed */
Asutosh Das3da913a2017-03-24 10:32:16 +053010127 hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010128 ufshcd_hold_all(hba);
10129
10130 ret = ufshcd_clock_scaling_prepare(hba);
10131 if (ret)
10132 goto out;
10133
10134 /* scale down the gear before scaling down clocks */
10135 if (!scale_up) {
10136 ret = ufshcd_scale_gear(hba, false);
10137 if (ret)
10138 goto clk_scaling_unprepare;
10139 }
10140
Subhash Jadavani67c53302017-03-22 17:00:54 -070010141 /*
10142 * If auto hibern8 is supported then put the link in
10143 * hibern8 manually, this is to avoid auto hibern8
10144 * racing during clock frequency scaling sequence.
10145 */
10146 if (ufshcd_is_auto_hibern8_supported(hba)) {
10147 ret = ufshcd_uic_hibern8_enter(hba);
10148 if (ret)
10149			/* link will be in bad state, so no need to scale_up_gear */
10150 return ret;
10151 }
10152
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010153 ret = ufshcd_scale_clks(hba, scale_up);
10154 if (ret)
10155 goto scale_up_gear;
10156
Subhash Jadavani67c53302017-03-22 17:00:54 -070010157 if (ufshcd_is_auto_hibern8_supported(hba)) {
10158 ret = ufshcd_uic_hibern8_exit(hba);
10159 if (ret)
10160			/* link will be in bad state, so no need to scale_up_gear */
10161 return ret;
10162 }
10163
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010164 /* scale up the gear after scaling up clocks */
10165 if (scale_up) {
10166 ret = ufshcd_scale_gear(hba, true);
10167 if (ret) {
10168 ufshcd_scale_clks(hba, false);
10169 goto clk_scaling_unprepare;
10170 }
10171 }
10172
10173 if (!ret) {
10174 hba->clk_scaling.is_scaled_up = scale_up;
10175 if (scale_up)
10176 hba->clk_gating.delay_ms =
10177 hba->clk_gating.delay_ms_perf;
10178 else
10179 hba->clk_gating.delay_ms =
10180 hba->clk_gating.delay_ms_pwr_save;
10181 }
10182
10183 goto clk_scaling_unprepare;
10184
10185scale_up_gear:
10186 if (!scale_up)
10187 ufshcd_scale_gear(hba, true);
10188clk_scaling_unprepare:
10189 ufshcd_clock_scaling_unprepare(hba);
10190out:
Asutosh Das3da913a2017-03-24 10:32:16 +053010191 hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010192 ufshcd_release_all(hba);
10193 return ret;
10194}
10195
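/* Suspend the devfreq device and reset the clock scaling window */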
10196static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
10197{
10198 unsigned long flags;
10199
10200 devfreq_suspend_device(hba->devfreq);
10201 spin_lock_irqsave(hba->host->host_lock, flags);
10202 hba->clk_scaling.window_start_t = 0;
10203 spin_unlock_irqrestore(hba->host->host_lock, flags);
10204}
10205
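/**
 * ufshcd_suspend_clkscaling - suspend devfreq clock scaling
 * @hba: per adapter instance
 *
 * Marks clock scaling as suspended under the host lock and, if it wasn't
 * suspended already, suspends the devfreq device.
 */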
10206static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
10207{
10208 unsigned long flags;
10209 bool suspend = false;
10210
10211 if (!ufshcd_is_clkscaling_supported(hba))
10212 return;
10213
10214 spin_lock_irqsave(hba->host->host_lock, flags);
10215 if (!hba->clk_scaling.is_suspended) {
10216 suspend = true;
10217 hba->clk_scaling.is_suspended = true;
10218 }
10219 spin_unlock_irqrestore(hba->host->host_lock, flags);
10220
10221 if (suspend)
10222 __ufshcd_suspend_clkscaling(hba);
10223}
10224
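/**
 * ufshcd_resume_clkscaling - resume devfreq clock scaling
 * @hba: per adapter instance
 *
 * Clears the suspended flag under the host lock and, if scaling was
 * suspended, resumes the devfreq device.
 */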
10225static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
10226{
10227 unsigned long flags;
10228 bool resume = false;
10229
10230 if (!ufshcd_is_clkscaling_supported(hba))
10231 return;
10232
10233 spin_lock_irqsave(hba->host->host_lock, flags);
10234 if (hba->clk_scaling.is_suspended) {
10235 resume = true;
10236 hba->clk_scaling.is_suspended = false;
10237 }
10238 spin_unlock_irqrestore(hba->host->host_lock, flags);
10239
10240 if (resume)
10241 devfreq_resume_device(hba->devfreq);
10242}
10243
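/* sysfs show handler for the clkscale_enable attribute */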
10244static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
10245 struct device_attribute *attr, char *buf)
10246{
10247 struct ufs_hba *hba = dev_get_drvdata(dev);
10248
10249 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
10250}
10251
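/*
 * sysfs store handler for the clkscale_enable attribute: enables or
 * disables clock scaling. When scaling is disabled, any pending scaling
 * work is cancelled and the clocks/gear are scaled back up to maximum.
 */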
10252static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
10253 struct device_attribute *attr, const char *buf, size_t count)
10254{
10255 struct ufs_hba *hba = dev_get_drvdata(dev);
10256 u32 value;
10257 int err;
10258
10259 if (kstrtou32(buf, 0, &value))
10260 return -EINVAL;
10261
10262 value = !!value;
10263 if (value == hba->clk_scaling.is_allowed)
10264 goto out;
10265
10266 pm_runtime_get_sync(hba->dev);
10267 ufshcd_hold(hba, false);
10268
10269 cancel_work_sync(&hba->clk_scaling.suspend_work);
10270 cancel_work_sync(&hba->clk_scaling.resume_work);
10271
10272 hba->clk_scaling.is_allowed = value;
10273
10274 if (value) {
10275 ufshcd_resume_clkscaling(hba);
10276 } else {
10277 ufshcd_suspend_clkscaling(hba);
10278 err = ufshcd_devfreq_scale(hba, true);
10279 if (err)
10280 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
10281 __func__, err);
10282 }
10283
10284 ufshcd_release(hba, false);
10285 pm_runtime_put_sync(hba->dev);
10286out:
10287 return count;
10288}
10289
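/* Work handler that suspends clock scaling once there are no active requests */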
10290static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
10291{
10292 struct ufs_hba *hba = container_of(work, struct ufs_hba,
10293 clk_scaling.suspend_work);
10294 unsigned long irq_flags;
10295
10296 spin_lock_irqsave(hba->host->host_lock, irq_flags);
10297 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
10298 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10299 return;
10300 }
10301 hba->clk_scaling.is_suspended = true;
10302 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10303
10304 __ufshcd_suspend_clkscaling(hba);
10305}
10306
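/* Work handler that resumes clock scaling if it was previously suspended */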
10307static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
10308{
10309 struct ufs_hba *hba = container_of(work, struct ufs_hba,
10310 clk_scaling.resume_work);
10311 unsigned long irq_flags;
10312
10313 spin_lock_irqsave(hba->host->host_lock, irq_flags);
10314 if (!hba->clk_scaling.is_suspended) {
10315 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10316 return;
10317 }
10318 hba->clk_scaling.is_suspended = false;
10319 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10320
10321 devfreq_resume_device(hba->devfreq);
10322}
10323
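/**
 * ufshcd_devfreq_target - devfreq target callback
 * @dev: pointer to the device
 * @freq: requested frequency, 0 selects scale down and UINT_MAX scale up
 * @flags: devfreq flags
 *
 * Validates the request, skips it if error handling is in progress or no
 * frequency change is required, otherwise scales clocks and gear, traces
 * the time taken and may queue clk_scaling.suspend_work when there are no
 * active requests.
 *
 * Returns 0 on success, non-zero on failure.
 */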
Sahitya Tummala856b3482014-09-25 15:32:34 +030010324static int ufshcd_devfreq_target(struct device *dev,
10325 unsigned long *freq, u32 flags)
10326{
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010327 int ret = 0;
Sahitya Tummala856b3482014-09-25 15:32:34 +030010328 struct ufs_hba *hba = dev_get_drvdata(dev);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010329 unsigned long irq_flags;
10330 ktime_t start;
10331 bool scale_up, sched_clk_scaling_suspend_work = false;
Sahitya Tummala856b3482014-09-25 15:32:34 +030010332
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010333 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +030010334 return -EINVAL;
10335
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010336 if ((*freq > 0) && (*freq < UINT_MAX)) {
10337 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
10338 return -EINVAL;
10339 }
Sahitya Tummala856b3482014-09-25 15:32:34 +030010340
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010341 spin_lock_irqsave(hba->host->host_lock, irq_flags);
10342 if (ufshcd_eh_in_progress(hba)) {
10343 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10344 return 0;
10345 }
10346
10347 if (!hba->clk_scaling.active_reqs)
10348 sched_clk_scaling_suspend_work = true;
10349
10350	scale_up = (*freq == UINT_MAX);
10351 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
10352 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10353 ret = 0;
10354 goto out; /* no state change required */
10355 }
10356 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
10357
10358 start = ktime_get();
10359 ret = ufshcd_devfreq_scale(hba, scale_up);
10360 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
10361 (scale_up ? "up" : "down"),
10362 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
10363
10364out:
10365 if (sched_clk_scaling_suspend_work)
10366 queue_work(hba->clk_scaling.workq,
10367 &hba->clk_scaling.suspend_work);
10368
10369 return ret;
Sahitya Tummala856b3482014-09-25 15:32:34 +030010370}
10371
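/**
 * ufshcd_devfreq_get_dev_status - devfreq get_dev_status callback
 * @dev: pointer to the device
 * @stat: devfreq status to fill in
 *
 * Reports total and busy time for the current monitoring window and then
 * starts a new window.
 *
 * Returns 0 on success, -EINVAL if clock scaling is not supported.
 */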
10372static int ufshcd_devfreq_get_dev_status(struct device *dev,
10373 struct devfreq_dev_status *stat)
10374{
10375 struct ufs_hba *hba = dev_get_drvdata(dev);
10376 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
10377 unsigned long flags;
10378
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010379 if (!ufshcd_is_clkscaling_supported(hba))
Sahitya Tummala856b3482014-09-25 15:32:34 +030010380 return -EINVAL;
10381
10382 memset(stat, 0, sizeof(*stat));
10383
10384 spin_lock_irqsave(hba->host->host_lock, flags);
10385 if (!scaling->window_start_t)
10386 goto start_window;
10387
10388 if (scaling->is_busy_started)
10389 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
10390 scaling->busy_start_t));
10391
10392 stat->total_time = jiffies_to_usecs((long)jiffies -
10393 (long)scaling->window_start_t);
10394 stat->busy_time = scaling->tot_busy_t;
10395start_window:
10396 scaling->window_start_t = jiffies;
10397 scaling->tot_busy_t = 0;
10398
10399 if (hba->outstanding_reqs) {
10400 scaling->busy_start_t = ktime_get();
10401 scaling->is_busy_started = true;
10402 } else {
10403 scaling->busy_start_t = ktime_set(0, 0);
10404 scaling->is_busy_started = false;
10405 }
10406 spin_unlock_irqrestore(hba->host->host_lock, flags);
10407 return 0;
10408}
10409
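/* Create the clkscale_enable sysfs attribute for this host */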
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010410static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
10411{
10412 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
10413 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
10414 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
10415 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
10416 hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
10417 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
10418 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
10419}
Sahitya Tummala856b3482014-09-25 15:32:34 +030010420
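/*
 * Read the optional "lanes-per-direction" DT property; fall back to
 * UFSHCD_DEFAULT_LANES_PER_DIRECTION when it is absent.
 */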
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010421static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
10422{
10423 struct device *dev = hba->dev;
10424 int ret;
10425
10426 ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
10427 &hba->lanes_per_direction);
10428 if (ret) {
10429 dev_dbg(hba->dev,
10430 "%s: failed to read lanes-per-direction, ret=%d\n",
10431 __func__, ret);
10432 hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
10433 }
10434}

Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +030010435/**
10436 * ufshcd_init - Driver initialization routine
10437 * @hba: per-adapter instance
10438 * @mmio_base: base register address
10439 * @irq: Interrupt line of device
10440 * Returns 0 on success, non-zero value on failure
10441 */
10442int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10443{
10444 int err;
10445 struct Scsi_Host *host = hba->host;
10446 struct device *dev = hba->dev;
10447
10448 if (!mmio_base) {
10449 dev_err(hba->dev,
10450		"Invalid memory reference, mmio_base is NULL\n");
10451 err = -ENODEV;
10452 goto out_error;
10453 }
10454
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010455 hba->mmio_base = mmio_base;
10456 hba->irq = irq;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010457
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010458 ufshcd_init_lanes_per_dir(hba);
10459
Michal' Potomski833ea2a2017-05-31 15:25:11 +053010460 /* Set descriptor lengths to specification defaults */
10461 ufshcd_def_desc_sizes(hba);
10462
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +030010463 err = ufshcd_hba_init(hba);
Sujit Reddy Thumma5c0c28a2014-09-25 15:32:21 +030010464 if (err)
10465 goto out_error;
10466
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010467 /* Read capabilities registers */
10468 ufshcd_hba_capabilities(hba);
10469
10470 /* Get UFS version supported by the controller */
10471 hba->ufs_version = ufshcd_get_ufs_version(hba);
10472
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010473 /* print error message if ufs_version is not valid */
10474 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
10475 (hba->ufs_version != UFSHCI_VERSION_11) &&
10476 (hba->ufs_version != UFSHCI_VERSION_20) &&
10477 (hba->ufs_version != UFSHCI_VERSION_21))
10478 dev_err(hba->dev, "invalid UFS version 0x%x\n",
10479 hba->ufs_version);
10480
Seungwon Jeon2fbd0092013-06-26 22:39:27 +053010481 /* Get Interrupt bit mask per version */
10482 hba->intr_mask = ufshcd_get_intr_mask(hba);
10483
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010484 /* Enable debug prints */
10485 hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
10486
Akinobu Mitaca3d7bf2014-07-13 21:24:46 +090010487 err = ufshcd_set_dma_mask(hba);
10488 if (err) {
10489 dev_err(hba->dev, "set dma mask failed\n");
10490 goto out_disable;
10491 }
10492
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010493 /* Allocate memory for host memory space */
10494 err = ufshcd_memory_alloc(hba);
10495 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010496 dev_err(hba->dev, "Memory allocation failed\n");
10497 goto out_disable;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010498 }
10499
10500 /* Configure LRB */
10501 ufshcd_host_memory_configure(hba);
10502
10503 host->can_queue = hba->nutrs;
10504 host->cmd_per_lun = hba->nutrs;
10505 host->max_id = UFSHCD_MAX_ID;
Subhash Jadavani0ce147d2014-09-25 15:32:29 +030010506 host->max_lun = UFS_MAX_LUNS;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010507 host->max_channel = UFSHCD_MAX_CHANNEL;
10508 host->unique_id = host->host_no;
10509 host->max_cmd_len = MAX_CDB_SIZE;
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010510 host->set_dbd_for_caching = 1;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010511
Dolev Raviv7eb584d2014-09-25 15:32:31 +030010512 hba->max_pwr_info.is_valid = false;
10513
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010514	/* Initialize wait queue for task management */
Sujit Reddy Thummae2933132014-05-26 10:59:12 +053010515 init_waitqueue_head(&hba->tm_wq);
10516 init_waitqueue_head(&hba->tm_tag_wq);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010517
10518 /* Initialize work queues */
Sujit Reddy Thummae8e7f272014-05-26 10:59:15 +053010519 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
Sujit Reddy Thumma66ec6d52013-07-30 00:35:59 +053010520 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
Subhash Jadavani9e7ed482017-05-08 18:29:45 -070010521 INIT_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
Asutosh Das3923c232017-09-15 16:14:26 +053010522 INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010523
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053010524 /* Initialize UIC command mutex */
10525 mutex_init(&hba->uic_cmd_mutex);
10526
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +053010527 /* Initialize mutex for device management commands */
10528 mutex_init(&hba->dev_cmd.lock);
10529
Subhash Jadavani9c807702017-04-01 00:35:51 -070010530 init_rwsem(&hba->lock);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010531
Sujit Reddy Thumma5a0b0cb2013-07-30 00:35:57 +053010532 /* Initialize device management tag acquire wait queue */
10533 init_waitqueue_head(&hba->dev_cmd.tag_wq);
10534
Sahitya Tummala1ab27c92014-09-25 15:32:32 +030010535 ufshcd_init_clk_gating(hba);
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010536 ufshcd_init_hibern8_on_idle(hba);
Yaniv Gardi199ef132016-03-10 17:37:06 +020010537
10538 /*
10539 * In order to avoid any spurious interrupt immediately after
10540 * registering UFS controller interrupt handler, clear any pending UFS
10541 * interrupt status and disable all the UFS interrupts.
10542 */
10543 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10544 REG_INTERRUPT_STATUS);
10545 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10546 /*
10547 * Make sure that UFS interrupts are disabled and any pending interrupt
10548 * status is cleared before registering UFS interrupt handler.
10549 */
10550 mb();
10551
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010552 /* IRQ registration */
Can Guo8bf59ba2017-05-18 15:30:04 +080010553 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
10554 dev_name(dev), hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010555 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010556 dev_err(hba->dev, "request irq failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +030010557 goto exit_gating;
Subhash Jadavani57d104c2014-09-25 15:32:30 +030010558 } else {
10559 hba->is_irq_enabled = true;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010560 }
10561
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010562 err = scsi_add_host(host, hba->dev);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010563 if (err) {
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010564 dev_err(hba->dev, "scsi_add_host failed\n");
Sahitya Tummala1ab27c92014-09-25 15:32:32 +030010565 goto exit_gating;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010566 }
10567
Subhash Jadavani9c807702017-04-01 00:35:51 -070010568 /* Reset controller to power on reset (POR) state */
10569 ufshcd_vops_full_reset(hba);
10570
10571 /* reset connected UFS device */
10572 err = ufshcd_reset_device(hba);
10573 if (err)
10574 dev_warn(hba->dev, "%s: device reset failed. err %d\n",
10575 __func__, err);
10576
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053010577 /* Host controller enable */
10578 err = ufshcd_hba_enable(hba);
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010579 if (err) {
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053010580 dev_err(hba->dev, "Host controller enable failed\n");
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010581 ufshcd_print_host_regs(hba);
10582 ufshcd_print_host_state(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010583 goto out_remove_scsi_host;
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010584 }
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053010585
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010586 if (ufshcd_is_clkscaling_supported(hba)) {
10587 char wq_name[sizeof("ufs_clkscaling_00")];
10588
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010589 INIT_WORK(&hba->clk_scaling.suspend_work,
10590 ufshcd_clk_scaling_suspend_work);
10591 INIT_WORK(&hba->clk_scaling.resume_work,
10592 ufshcd_clk_scaling_resume_work);
10593
10594 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
10595 host->host_no);
10596 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
10597
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010598 ufshcd_clkscaling_init_sysfs(hba);
Sahitya Tummala856b3482014-09-25 15:32:34 +030010599 }
10600
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010601 /*
10602	 * If rpm_lvl and spm_lvl are not already set to valid levels,
10603 * set the default power management level for UFS runtime and system
10604 * suspend. Default power saving mode selected is keeping UFS link in
10605 * Hibern8 state and UFS device in sleep.
10606 */
10607 if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
10608 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10609 UFS_SLEEP_PWR_MODE,
10610 UIC_LINK_HIBERN8_STATE);
10611 if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
10612 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10613 UFS_SLEEP_PWR_MODE,
10614 UIC_LINK_HIBERN8_STATE);
10615
Sujit Reddy Thumma62694732013-07-30 00:36:00 +053010616 /* Hold auto suspend until async scan completes */
10617 pm_runtime_get_sync(dev);
10618
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -070010619 ufshcd_init_latency_hist(hba);
10620
Subhash Jadavani57d104c2014-09-25 15:32:30 +030010621 /*
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010622 * We are assuming that device wasn't put in sleep/power-down
10623	 * state during the boot stage, before the kernel took over.
10624 * This assumption helps avoid doing link startup twice during
10625 * ufshcd_probe_hba().
Subhash Jadavani57d104c2014-09-25 15:32:30 +030010626 */
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010627 ufshcd_set_ufs_dev_active(hba);
Subhash Jadavani57d104c2014-09-25 15:32:30 +030010628
Can Guob7147732017-04-18 16:22:56 +080010629 ufshcd_cmd_log_init(hba);
10630
Seungwon Jeon6ccf44fe2013-06-26 22:39:29 +053010631 async_schedule(ufshcd_async_scan, hba);
10632
Subhash Jadavanicce6fbc2016-08-11 11:35:26 -070010633 ufsdbg_add_debugfs(hba);
10634
10635 ufshcd_add_sysfs_nodes(hba);
10636
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010637 return 0;
10638
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010639out_remove_scsi_host:
10640 scsi_remove_host(hba->host);
Sahitya Tummala1ab27c92014-09-25 15:32:32 +030010641exit_gating:
10642 ufshcd_exit_clk_gating(hba);
Mohan Srinivasan0ef170d2016-08-25 18:31:01 -070010643 ufshcd_exit_latency_hist(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010644out_disable:
Subhash Jadavani57d104c2014-09-25 15:32:30 +030010645 hba->is_irq_enabled = false;
Sujit Reddy Thummaaa497612014-09-25 15:32:22 +030010646 ufshcd_hba_exit(hba);
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010647out_error:
10648 return err;
10649}
10650EXPORT_SYMBOL_GPL(ufshcd_init);
10651
Vinayak Holikatti3b1d0582013-02-25 21:44:32 +053010652MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10653MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
Vinayak Holikattie0eca632013-02-25 21:44:33 +053010654MODULE_DESCRIPTION("Generic UFS host controller driver Core");
Santosh Yaraganavi7a3e97b2012-02-29 12:11:50 +053010655MODULE_LICENSE("GPL");
10656MODULE_VERSION(UFSHCD_DRIVER_VERSION);