scsi: bnx2fc: Removal of unused variables
[linux-block.git] / drivers / scsi / ufs / ufshcd.c
CommitLineData
67351119 1// SPDX-License-Identifier: GPL-2.0-or-later
7a3e97b0 2/*
e0eca63e 3 * Universal Flash Storage Host controller driver Core
3b1d0580 4 * Copyright (C) 2011-2013 Samsung India Software Operations
52ac95fe 5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7a3e97b0 6 *
3b1d0580
VH
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
7a3e97b0
SY
10 */
11
6ccf44fe 12#include <linux/async.h>
856b3483 13#include <linux/devfreq.h>
b573d484 14#include <linux/nls.h>
54b879b7 15#include <linux/of.h>
ad448378 16#include <linux/bitfield.h>
fb276f77 17#include <linux/blk-pm.h>
e0eca63e 18#include "ufshcd.h"
c58ab7aa 19#include "ufs_quirks.h"
53b3d9c3 20#include "unipro.h"
cbb6813e 21#include "ufs-sysfs.h"
df032bf2 22#include "ufs_bsg.h"
3d17b9b5
AD
23#include <asm/unaligned.h>
24#include <linux/blkdev.h>
7a3e97b0 25
7ff5ab47 26#define CREATE_TRACE_POINTS
27#include <trace/events/ufs.h>
28
2fbd009b
SJ
29#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
30 UTP_TASK_REQ_COMPL |\
31 UFSHCD_ERROR_MASK)
6ccf44fe
SJ
32/* UIC command timeout, unit: ms */
33#define UIC_CMD_TIMEOUT 500
2fbd009b 34
5a0b0cb9
SRT
35/* NOP OUT retries waiting for NOP IN response */
36#define NOP_OUT_RETRIES 10
37/* Timeout after 30 msecs if NOP OUT hangs without response */
38#define NOP_OUT_TIMEOUT 30 /* msecs */
39
68078d5c 40/* Query request retries */
10fe5888 41#define QUERY_REQ_RETRIES 3
68078d5c 42/* Query request timeout */
10fe5888 43#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
68078d5c 44
e2933132
SRT
45/* Task management command timeout */
46#define TM_CMD_TIMEOUT 100 /* msecs */
47
64238fbd
YG
48/* maximum number of retries for a general UIC command */
49#define UFS_UIC_COMMAND_RETRIES 3
50
1d337ec2
SRT
51/* maximum number of link-startup retries */
52#define DME_LINKSTARTUP_RETRIES 3
53
87d0b4a6
YG
54/* Maximum retries for Hibern8 enter */
55#define UIC_HIBERN8_ENTER_RETRIES 3
56
1d337ec2
SRT
57/* maximum number of reset retries before giving up */
58#define MAX_HOST_RESET_RETRIES 5
59
68078d5c
DR
60/* Expose the flag value from utp_upiu_query.value */
61#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
62
7d568652
SJ
63/* Interrupt aggregation default timeout, unit: 40us */
64#define INT_AGGR_DEF_TO 0x02
65
49615ba1
SC
66/* default delay of autosuspend: 2000 ms */
67#define RPM_AUTOSUSPEND_DELAY_MS 2000
68
51dd905b
SC
69/* Default delay of RPM device flush delayed work */
70#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
71
09f17791
CG
72/* Default value of wait time before gating device ref clock */
73#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
74
aa497613
SRT
75#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
76 ({ \
77 int _ret; \
78 if (_on) \
79 _ret = ufshcd_enable_vreg(_dev, _vreg); \
80 else \
81 _ret = ufshcd_disable_vreg(_dev, _vreg); \
82 _ret; \
83 })
84
ba80917d
TW
85#define ufshcd_hex_dump(prefix_str, buf, len) do { \
86 size_t __len = (len); \
87 print_hex_dump(KERN_ERR, prefix_str, \
88 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
89 16, 4, buf, __len, false); \
90} while (0)
91
92int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
93 const char *prefix)
94{
d6724756
MG
95 u32 *regs;
96 size_t pos;
97
98 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
99 return -EINVAL;
ba80917d 100
cddaebaf 101 regs = kzalloc(len, GFP_ATOMIC);
ba80917d
TW
102 if (!regs)
103 return -ENOMEM;
104
d6724756
MG
105 for (pos = 0; pos < len; pos += 4)
106 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
107
ba80917d
TW
108 ufshcd_hex_dump(prefix, regs, len);
109 kfree(regs);
110
111 return 0;
112}
113EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
66cc820f 114
7a3e97b0
SY
115enum {
116 UFSHCD_MAX_CHANNEL = 0,
117 UFSHCD_MAX_ID = 1,
7a3e97b0
SY
118 UFSHCD_CMD_PER_LUN = 32,
119 UFSHCD_CAN_QUEUE = 32,
120};
121
122/* UFSHCD states */
123enum {
7a3e97b0
SY
124 UFSHCD_STATE_RESET,
125 UFSHCD_STATE_ERROR,
3441da7d 126 UFSHCD_STATE_OPERATIONAL,
141f8165 127 UFSHCD_STATE_EH_SCHEDULED,
3441da7d
SRT
128};
129
130/* UFSHCD error handling flags */
131enum {
132 UFSHCD_EH_IN_PROGRESS = (1 << 0),
7a3e97b0
SY
133};
134
e8e7f271
SRT
135/* UFSHCD UIC layer error flags */
136enum {
137 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
9a47ec7c
YG
138 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
139 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
140 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
141 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
142 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
e8e7f271
SRT
143};
144
3441da7d 145#define ufshcd_set_eh_in_progress(h) \
9c490d2d 146 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
3441da7d 147#define ufshcd_eh_in_progress(h) \
9c490d2d 148 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
3441da7d 149#define ufshcd_clear_eh_in_progress(h) \
9c490d2d 150 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
3441da7d 151
cbb6813e 152struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
57d104c1
SJ
153 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
154 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
155 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
156 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
157 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
158 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
159};
160
161static inline enum ufs_dev_pwr_mode
162ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
163{
164 return ufs_pm_lvl_states[lvl].dev_state;
165}
166
167static inline enum uic_link_state
168ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
169{
170 return ufs_pm_lvl_states[lvl].link_state;
171}
172
0c8f7586 173static inline enum ufs_pm_level
174ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
175 enum uic_link_state link_state)
176{
177 enum ufs_pm_level lvl;
178
179 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
180 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
181 (ufs_pm_lvl_states[lvl].link_state == link_state))
182 return lvl;
183 }
184
185 /* if no match found, return the level 0 */
186 return UFS_PM_LVL_0;
187}
188
56d4a186
SJ
189static struct ufs_dev_fix ufs_fixups[] = {
190 /* UFS cards deviations table */
c0a18ee0
SC
191 UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
192 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
56d4a186 193 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
ed0b40ff
SC
194 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
195 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
56d4a186 196 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
ed0b40ff
SC
197 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
198 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
199 UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
200 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
56d4a186
SJ
201 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
202 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
203 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
204 UFS_DEVICE_QUIRK_PA_TACTIVATE),
205 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
206 UFS_DEVICE_QUIRK_PA_TACTIVATE),
56d4a186
SJ
207 END_FIX
208};
209
9333d775 210static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
3441da7d 211static void ufshcd_async_scan(void *data, async_cookie_t cookie);
e8e7f271 212static int ufshcd_reset_and_restore(struct ufs_hba *hba);
e7d38257 213static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
e8e7f271 214static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
1d337ec2 215static void ufshcd_hba_exit(struct ufs_hba *hba);
1b9e2141 216static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
1ab27c9c
ST
217static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
218 bool skip_ref_clk);
219static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
1ab27c9c 220static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
cad2e03d 221static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
57d104c1 222static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
fcb0c4b0
ST
223static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
224static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
401f1e44 225static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
fcb0c4b0 226static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
57d104c1 227static irqreturn_t ufshcd_intr(int irq, void *__hba);
874237f7
YG
228static int ufshcd_change_power_mode(struct ufs_hba *hba,
229 struct ufs_pa_layer_attr *pwr_mode);
3d17b9b5
AD
230static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
231static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
232static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
233static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
234static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
235
14497328
YG
236static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
237{
238 return tag >= 0 && tag < hba->nutrs;
239}
57d104c1 240
5231d38c 241static inline void ufshcd_enable_irq(struct ufs_hba *hba)
57d104c1 242{
57d104c1 243 if (!hba->is_irq_enabled) {
5231d38c 244 enable_irq(hba->irq);
57d104c1
SJ
245 hba->is_irq_enabled = true;
246 }
57d104c1
SJ
247}
248
249static inline void ufshcd_disable_irq(struct ufs_hba *hba)
250{
251 if (hba->is_irq_enabled) {
5231d38c 252 disable_irq(hba->irq);
57d104c1
SJ
253 hba->is_irq_enabled = false;
254 }
255}
3441da7d 256
3d17b9b5
AD
257static inline void ufshcd_wb_config(struct ufs_hba *hba)
258{
259 int ret;
260
79e3520f 261 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5
AD
262 return;
263
264 ret = ufshcd_wb_ctrl(hba, true);
265 if (ret)
266 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
267 else
268 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
269 ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
270 if (ret)
271 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
272 __func__, ret);
273 ufshcd_wb_toggle_flush(hba, true);
274}
275
38135535
SJ
276static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
277{
278 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
279 scsi_unblock_requests(hba->host);
280}
281
282static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
283{
284 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
285 scsi_block_requests(hba->host);
286}
287
6667e6d9
OS
288static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
289 const char *str)
290{
291 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
292
293 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
294}
295
296static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
297 const char *str)
298{
299 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
300
301 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
302}
303
304static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
305 const char *str)
306{
6667e6d9 307 int off = (int)tag - hba->nutrs;
391e388f 308 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
6667e6d9 309
391e388f
CH
310 trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
311 &descp->input_param1);
6667e6d9
OS
312}
313
aa5c6979
SC
314static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
315 struct uic_command *ucmd,
316 const char *str)
317{
318 u32 cmd;
319
320 if (!trace_ufshcd_uic_command_enabled())
321 return;
322
323 if (!strcmp(str, "send"))
324 cmd = ucmd->command;
325 else
326 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
327
328 trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
329 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
330 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
331 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
332}
333
1a07f2d9
LS
334static void ufshcd_add_command_trace(struct ufs_hba *hba,
335 unsigned int tag, const char *str)
336{
337 sector_t lba = -1;
338 u8 opcode = 0;
339 u32 intr, doorbell;
e7c3b379 340 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
e4d2add7 341 struct scsi_cmnd *cmd = lrbp->cmd;
1a07f2d9
LS
342 int transfer_len = -1;
343
e7c3b379
OS
344 if (!trace_ufshcd_command_enabled()) {
345 /* trace UPIU W/O tracing command */
e4d2add7 346 if (cmd)
e7c3b379 347 ufshcd_add_cmd_upiu_trace(hba, tag, str);
1a07f2d9 348 return;
e7c3b379 349 }
1a07f2d9 350
e4d2add7 351 if (cmd) { /* data phase exists */
e7c3b379
OS
352 /* trace UPIU also */
353 ufshcd_add_cmd_upiu_trace(hba, tag, str);
e4d2add7 354 opcode = cmd->cmnd[0];
1a07f2d9
LS
355 if ((opcode == READ_10) || (opcode == WRITE_10)) {
356 /*
357 * Currently we only fully trace read(10) and write(10)
358 * commands
359 */
e4d2add7
BVA
360 if (cmd->request && cmd->request->bio)
361 lba = cmd->request->bio->bi_iter.bi_sector;
1a07f2d9
LS
362 transfer_len = be32_to_cpu(
363 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
364 }
365 }
366
367 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
368 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
369 trace_ufshcd_command(dev_name(hba->dev), str, tag,
370 doorbell, transfer_len, intr, lba, opcode);
371}
372
ff8e20c6
DR
373static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
374{
375 struct ufs_clk_info *clki;
376 struct list_head *head = &hba->clk_list_head;
377
566ec9ad 378 if (list_empty(head))
ff8e20c6
DR
379 return;
380
381 list_for_each_entry(clki, head, list) {
382 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
383 clki->max_freq)
384 dev_err(hba->dev, "clk: %s, rate: %u\n",
385 clki->name, clki->curr_freq);
386 }
387}
388
48d5b973
SC
389static void ufshcd_print_err_hist(struct ufs_hba *hba,
390 struct ufs_err_reg_hist *err_hist,
391 char *err_name)
ff8e20c6
DR
392{
393 int i;
27752647 394 bool found = false;
ff8e20c6 395
48d5b973
SC
396 for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
397 int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
ff8e20c6 398
645728a6 399 if (err_hist->tstamp[p] == 0)
ff8e20c6 400 continue;
c5397f13 401 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
ff8e20c6 402 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
27752647 403 found = true;
ff8e20c6 404 }
27752647
SC
405
406 if (!found)
fd1fb4d5 407 dev_err(hba->dev, "No record of %s\n", err_name);
ff8e20c6
DR
408}
409
66cc820f
DR
410static void ufshcd_print_host_regs(struct ufs_hba *hba)
411{
ba80917d 412 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
66cc820f
DR
413 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
414 hba->ufs_version, hba->capabilities);
415 dev_err(hba->dev,
416 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
417 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
ff8e20c6
DR
418 dev_err(hba->dev,
419 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
420 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
421 hba->ufs_stats.hibern8_exit_cnt);
422
48d5b973
SC
423 ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
424 ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
425 ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
426 ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
427 ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
d3c615bf
SC
428 ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
429 "auto_hibern8_err");
8808b4e9
SC
430 ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
431 ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
432 "link_startup_fail");
433 ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
434 ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
435 "suspend_fail");
436 ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
437 ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
438 ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
ff8e20c6
DR
439
440 ufshcd_print_clk_freqs(hba);
441
7c486d91 442 ufshcd_vops_dbg_register_dump(hba);
66cc820f
DR
443}
444
445static
446void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
447{
448 struct ufshcd_lrb *lrbp;
7fabb77b 449 int prdt_length;
66cc820f
DR
450 int tag;
451
452 for_each_set_bit(tag, &bitmap, hba->nutrs) {
453 lrbp = &hba->lrb[tag];
454
ff8e20c6
DR
455 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
456 tag, ktime_to_us(lrbp->issue_time_stamp));
09017188
ZL
457 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
458 tag, ktime_to_us(lrbp->compl_time_stamp));
ff8e20c6
DR
459 dev_err(hba->dev,
460 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
461 tag, (u64)lrbp->utrd_dma_addr);
462
66cc820f
DR
463 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
464 sizeof(struct utp_transfer_req_desc));
ff8e20c6
DR
465 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
466 (u64)lrbp->ucd_req_dma_addr);
66cc820f
DR
467 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
468 sizeof(struct utp_upiu_req));
ff8e20c6
DR
469 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
470 (u64)lrbp->ucd_rsp_dma_addr);
66cc820f
DR
471 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
472 sizeof(struct utp_upiu_rsp));
66cc820f 473
7fabb77b
GB
474 prdt_length = le16_to_cpu(
475 lrbp->utr_descriptor_ptr->prd_table_length);
476 dev_err(hba->dev,
477 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
478 tag, prdt_length,
479 (u64)lrbp->ucd_prdt_dma_addr);
480
481 if (pr_prdt)
66cc820f 482 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
7fabb77b 483 sizeof(struct ufshcd_sg_entry) * prdt_length);
66cc820f
DR
484 }
485}
486
487static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
488{
66cc820f
DR
489 int tag;
490
491 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
391e388f
CH
492 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
493
66cc820f 494 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
391e388f 495 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
66cc820f
DR
496 }
497}
498
6ba65588
GB
499static void ufshcd_print_host_state(struct ufs_hba *hba)
500{
501 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
7252a360
BVA
502 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
503 hba->outstanding_reqs, hba->outstanding_tasks);
6ba65588
GB
504 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
505 hba->saved_err, hba->saved_uic_err);
506 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
507 hba->curr_dev_pwr_mode, hba->uic_link_state);
508 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
509 hba->pm_op_in_progress, hba->is_sys_suspended);
510 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
511 hba->auto_bkops_enabled, hba->host->host_self_blocked);
512 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
513 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
514 hba->eh_flags, hba->req_abort_count);
515 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
516 hba->capabilities, hba->caps);
517 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
518 hba->dev_quirks);
519}
520
ff8e20c6
DR
521/**
522 * ufshcd_print_pwr_info - print power params as saved in hba
523 * power info
524 * @hba: per-adapter instance
525 */
526static void ufshcd_print_pwr_info(struct ufs_hba *hba)
527{
528 static const char * const names[] = {
529 "INVALID MODE",
530 "FAST MODE",
531 "SLOW_MODE",
532 "INVALID MODE",
533 "FASTAUTO_MODE",
534 "SLOWAUTO_MODE",
535 "INVALID MODE",
536 };
537
538 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
539 __func__,
540 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
541 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
542 names[hba->pwr_info.pwr_rx],
543 names[hba->pwr_info.pwr_tx],
544 hba->pwr_info.hs_rate);
545}
546
5c955c10
SC
547void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
548{
549 if (!us)
550 return;
551
552 if (us < 10)
553 udelay(us);
554 else
555 usleep_range(us, us + tolerance);
556}
557EXPORT_SYMBOL_GPL(ufshcd_delay_us);
558
5cac1095 559/**
5a0b0cb9 560 * ufshcd_wait_for_register - wait for register value to change
5cac1095
BVA
561 * @hba: per-adapter interface
562 * @reg: mmio register offset
563 * @mask: mask to apply to the read register value
564 * @val: value to wait for
565 * @interval_us: polling interval in microseconds
566 * @timeout_ms: timeout in milliseconds
5a0b0cb9 567 *
5cac1095
BVA
568 * Return:
569 * -ETIMEDOUT on error, zero on success.
5a0b0cb9 570 */
596585a2
YG
571int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
572 u32 val, unsigned long interval_us,
5cac1095 573 unsigned long timeout_ms)
5a0b0cb9
SRT
574{
575 int err = 0;
576 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
577
578 /* ignore bits that we don't intend to wait on */
579 val = val & mask;
580
581 while ((ufshcd_readl(hba, reg) & mask) != val) {
5cac1095 582 usleep_range(interval_us, interval_us + 50);
5a0b0cb9
SRT
583 if (time_after(jiffies, timeout)) {
584 if ((ufshcd_readl(hba, reg) & mask) != val)
585 err = -ETIMEDOUT;
586 break;
587 }
588 }
589
590 return err;
591}
592
2fbd009b
SJ
593/**
594 * ufshcd_get_intr_mask - Get the interrupt bit mask
8aa29f19 595 * @hba: Pointer to adapter instance
2fbd009b
SJ
596 *
597 * Returns interrupt bit mask per version
598 */
599static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
600{
c01848c6
YG
601 u32 intr_mask = 0;
602
603 switch (hba->ufs_version) {
604 case UFSHCI_VERSION_10:
605 intr_mask = INTERRUPT_MASK_ALL_VER_10;
606 break;
c01848c6
YG
607 case UFSHCI_VERSION_11:
608 case UFSHCI_VERSION_20:
609 intr_mask = INTERRUPT_MASK_ALL_VER_11;
610 break;
c01848c6
YG
611 case UFSHCI_VERSION_21:
612 default:
613 intr_mask = INTERRUPT_MASK_ALL_VER_21;
031d1e0f 614 break;
c01848c6
YG
615 }
616
617 return intr_mask;
2fbd009b
SJ
618}
619
7a3e97b0
SY
620/**
621 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
8aa29f19 622 * @hba: Pointer to adapter instance
7a3e97b0
SY
623 *
624 * Returns UFSHCI version supported by the controller
625 */
626static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
627{
0263bcd0
YG
628 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
629 return ufshcd_vops_get_ufs_hci_version(hba);
9949e702 630
b873a275 631 return ufshcd_readl(hba, REG_UFS_VERSION);
7a3e97b0
SY
632}
633
634/**
635 * ufshcd_is_device_present - Check if any device connected to
636 * the host controller
5c0c28a8 637 * @hba: pointer to adapter instance
7a3e97b0 638 *
c9e6010b 639 * Returns true if device present, false if no device detected
7a3e97b0 640 */
c9e6010b 641static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
7a3e97b0 642{
5c0c28a8 643 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
c9e6010b 644 DEVICE_PRESENT) ? true : false;
7a3e97b0
SY
645}
646
647/**
648 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
8aa29f19 649 * @lrbp: pointer to local command reference block
7a3e97b0
SY
650 *
651 * This function is used to get the OCS field from UTRD
652 * Returns the OCS field in the UTRD
653 */
654static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
655{
e8c8e82a 656 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
7a3e97b0
SY
657}
658
7a3e97b0
SY
659/**
660 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
661 * @hba: per adapter instance
662 * @pos: position of the bit to be cleared
663 */
664static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
665{
87183841
AA
666 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
667 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
668 else
669 ufshcd_writel(hba, ~(1 << pos),
670 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
1399c5b0
AA
671}
672
673/**
674 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
675 * @hba: per adapter instance
676 * @pos: position of the bit to be cleared
677 */
678static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
679{
87183841
AA
680 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
681 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
682 else
683 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
7a3e97b0
SY
684}
685
a48353f6
YG
686/**
687 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
688 * @hba: per adapter instance
689 * @tag: position of the bit to be cleared
690 */
691static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
692{
693 __clear_bit(tag, &hba->outstanding_reqs);
694}
695
7a3e97b0
SY
696/**
697 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
698 * @reg: Register value of host controller status
699 *
700 * Returns integer, 0 on Success and positive value if failed
701 */
702static inline int ufshcd_get_lists_status(u32 reg)
703{
6cf16115 704 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
7a3e97b0
SY
705}
706
707/**
708 * ufshcd_get_uic_cmd_result - Get the UIC command result
709 * @hba: Pointer to adapter instance
710 *
711 * This function gets the result of UIC command completion
712 * Returns 0 on success, non zero value on error
713 */
714static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
715{
b873a275 716 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
7a3e97b0
SY
717 MASK_UIC_COMMAND_RESULT;
718}
719
12b4fdb4
SJ
720/**
721 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
722 * @hba: Pointer to adapter instance
723 *
724 * This function gets UIC command argument3
725 * Returns 0 on success, non zero value on error
726 */
727static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
728{
729 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
730}
731
7a3e97b0 732/**
5a0b0cb9 733 * ufshcd_get_req_rsp - returns the TR response transaction type
7a3e97b0 734 * @ucd_rsp_ptr: pointer to response UPIU
7a3e97b0
SY
735 */
736static inline int
5a0b0cb9 737ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
7a3e97b0 738{
5a0b0cb9 739 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
7a3e97b0
SY
740}
741
742/**
743 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
744 * @ucd_rsp_ptr: pointer to response UPIU
745 *
746 * This function gets the response status and scsi_status from response UPIU
747 * Returns the response result code.
748 */
749static inline int
750ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
751{
752 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
753}
754
1c2623c5
SJ
755/*
756 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
757 * from response UPIU
758 * @ucd_rsp_ptr: pointer to response UPIU
759 *
760 * Return the data segment length.
761 */
762static inline unsigned int
763ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
764{
765 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
766 MASK_RSP_UPIU_DATA_SEG_LEN;
767}
768
66ec6d59
SRT
769/**
770 * ufshcd_is_exception_event - Check if the device raised an exception event
771 * @ucd_rsp_ptr: pointer to response UPIU
772 *
773 * The function checks if the device raised an exception event indicated in
774 * the Device Information field of response UPIU.
775 *
776 * Returns true if exception is raised, false otherwise.
777 */
778static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
779{
780 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
781 MASK_RSP_EXCEPTION_EVENT ? true : false;
782}
783
7a3e97b0 784/**
7d568652 785 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
7a3e97b0 786 * @hba: per adapter instance
7a3e97b0
SY
787 */
788static inline void
7d568652 789ufshcd_reset_intr_aggr(struct ufs_hba *hba)
7a3e97b0 790{
7d568652
SJ
791 ufshcd_writel(hba, INT_AGGR_ENABLE |
792 INT_AGGR_COUNTER_AND_TIMER_RESET,
793 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
794}
795
796/**
797 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
798 * @hba: per adapter instance
799 * @cnt: Interrupt aggregation counter threshold
800 * @tmout: Interrupt aggregation timeout value
801 */
802static inline void
803ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
804{
805 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
806 INT_AGGR_COUNTER_THLD_VAL(cnt) |
807 INT_AGGR_TIMEOUT_VAL(tmout),
808 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
7a3e97b0
SY
809}
810
b852190e
YG
811/**
812 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
813 * @hba: per adapter instance
814 */
815static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
816{
817 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
818}
819
7a3e97b0
SY
820/**
821 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
822 * When run-stop registers are set to 1, it indicates the
823 * host controller that it can process the requests
824 * @hba: per adapter instance
825 */
826static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
827{
b873a275
SJ
828 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
829 REG_UTP_TASK_REQ_LIST_RUN_STOP);
830 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
831 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
7a3e97b0
SY
832}
833
7a3e97b0
SY
834/**
835 * ufshcd_hba_start - Start controller initialization sequence
836 * @hba: per adapter instance
837 */
838static inline void ufshcd_hba_start(struct ufs_hba *hba)
839{
b873a275 840 ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
7a3e97b0
SY
841}
842
843/**
844 * ufshcd_is_hba_active - Get controller state
845 * @hba: per adapter instance
846 *
c9e6010b 847 * Returns false if controller is active, true otherwise
7a3e97b0 848 */
c9e6010b 849static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
7a3e97b0 850{
4a8eec2b
TK
851 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
852 ? false : true;
7a3e97b0
SY
853}
854
37113106
YG
855u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
856{
857 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
858 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
859 (hba->ufs_version == UFSHCI_VERSION_11))
860 return UFS_UNIPRO_VER_1_41;
861 else
862 return UFS_UNIPRO_VER_1_6;
863}
864EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
865
866static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
867{
868 /*
869 * If both host and device support UniPro ver1.6 or later, PA layer
870 * parameters tuning happens during link startup itself.
871 *
872 * We can manually tune PA layer parameters if either host or device
873 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
874 * logic simple, we will only do manual tuning if local unipro version
875 * doesn't support ver1.6 or later.
876 */
877 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
878 return true;
879 else
880 return false;
881}
882
394b949f
SJ
883/**
884 * ufshcd_set_clk_freq - set UFS controller clock frequencies
885 * @hba: per adapter instance
886 * @scale_up: If True, set max possible frequency othewise set low frequency
887 *
888 * Returns 0 if successful
889 * Returns < 0 for any other errors
890 */
891static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
a3cd5ec5 892{
893 int ret = 0;
894 struct ufs_clk_info *clki;
895 struct list_head *head = &hba->clk_list_head;
a3cd5ec5 896
566ec9ad 897 if (list_empty(head))
a3cd5ec5 898 goto out;
899
a3cd5ec5 900 list_for_each_entry(clki, head, list) {
901 if (!IS_ERR_OR_NULL(clki->clk)) {
902 if (scale_up && clki->max_freq) {
903 if (clki->curr_freq == clki->max_freq)
904 continue;
905
a3cd5ec5 906 ret = clk_set_rate(clki->clk, clki->max_freq);
907 if (ret) {
908 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
909 __func__, clki->name,
910 clki->max_freq, ret);
911 break;
912 }
913 trace_ufshcd_clk_scaling(dev_name(hba->dev),
914 "scaled up", clki->name,
915 clki->curr_freq,
916 clki->max_freq);
917
918 clki->curr_freq = clki->max_freq;
919
920 } else if (!scale_up && clki->min_freq) {
921 if (clki->curr_freq == clki->min_freq)
922 continue;
923
a3cd5ec5 924 ret = clk_set_rate(clki->clk, clki->min_freq);
925 if (ret) {
926 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
927 __func__, clki->name,
928 clki->min_freq, ret);
929 break;
930 }
931 trace_ufshcd_clk_scaling(dev_name(hba->dev),
932 "scaled down", clki->name,
933 clki->curr_freq,
934 clki->min_freq);
935 clki->curr_freq = clki->min_freq;
936 }
937 }
938 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
939 clki->name, clk_get_rate(clki->clk));
940 }
941
394b949f
SJ
942out:
943 return ret;
944}
945
946/**
947 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
948 * @hba: per adapter instance
949 * @scale_up: True if scaling up and false if scaling down
950 *
951 * Returns 0 if successful
952 * Returns < 0 for any other errors
953 */
954static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
955{
956 int ret = 0;
957 ktime_t start = ktime_get();
958
959 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
960 if (ret)
961 goto out;
962
963 ret = ufshcd_set_clk_freq(hba, scale_up);
964 if (ret)
965 goto out;
966
a3cd5ec5 967 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
394b949f
SJ
968 if (ret)
969 ufshcd_set_clk_freq(hba, !scale_up);
a3cd5ec5 970
971out:
394b949f 972 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
a3cd5ec5 973 (scale_up ? "up" : "down"),
974 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
975 return ret;
976}
977
978/**
979 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
980 * @hba: per adapter instance
981 * @scale_up: True if scaling up and false if scaling down
982 *
983 * Returns true if scaling is required, false otherwise.
984 */
985static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
986 bool scale_up)
987{
988 struct ufs_clk_info *clki;
989 struct list_head *head = &hba->clk_list_head;
990
566ec9ad 991 if (list_empty(head))
a3cd5ec5 992 return false;
993
994 list_for_each_entry(clki, head, list) {
995 if (!IS_ERR_OR_NULL(clki->clk)) {
996 if (scale_up && clki->max_freq) {
997 if (clki->curr_freq == clki->max_freq)
998 continue;
999 return true;
1000 } else if (!scale_up && clki->min_freq) {
1001 if (clki->curr_freq == clki->min_freq)
1002 continue;
1003 return true;
1004 }
1005 }
1006 }
1007
1008 return false;
1009}
1010
1011static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1012 u64 wait_timeout_us)
1013{
1014 unsigned long flags;
1015 int ret = 0;
1016 u32 tm_doorbell;
1017 u32 tr_doorbell;
1018 bool timeout = false, do_last_check = false;
1019 ktime_t start;
1020
1021 ufshcd_hold(hba, false);
1022 spin_lock_irqsave(hba->host->host_lock, flags);
1023 /*
1024 * Wait for all the outstanding tasks/transfer requests.
1025 * Verify by checking the doorbell registers are clear.
1026 */
1027 start = ktime_get();
1028 do {
1029 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1030 ret = -EBUSY;
1031 goto out;
1032 }
1033
1034 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1035 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1036 if (!tm_doorbell && !tr_doorbell) {
1037 timeout = false;
1038 break;
1039 } else if (do_last_check) {
1040 break;
1041 }
1042
1043 spin_unlock_irqrestore(hba->host->host_lock, flags);
1044 schedule();
1045 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1046 wait_timeout_us) {
1047 timeout = true;
1048 /*
1049 * We might have scheduled out for long time so make
1050 * sure to check if doorbells are cleared by this time
1051 * or not.
1052 */
1053 do_last_check = true;
1054 }
1055 spin_lock_irqsave(hba->host->host_lock, flags);
1056 } while (tm_doorbell || tr_doorbell);
1057
1058 if (timeout) {
1059 dev_err(hba->dev,
1060 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1061 __func__, tm_doorbell, tr_doorbell);
1062 ret = -EBUSY;
1063 }
1064out:
1065 spin_unlock_irqrestore(hba->host->host_lock, flags);
1066 ufshcd_release(hba);
1067 return ret;
1068}
1069
1070/**
1071 * ufshcd_scale_gear - scale up/down UFS gear
1072 * @hba: per adapter instance
1073 * @scale_up: True for scaling up gear and false for scaling down
1074 *
1075 * Returns 0 for success,
1076 * Returns -EBUSY if scaling can't happen at this time
1077 * Returns non-zero for any other errors
1078 */
1079static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1080{
1081 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1082 int ret = 0;
1083 struct ufs_pa_layer_attr new_pwr_info;
1084
1085 if (scale_up) {
1086 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1087 sizeof(struct ufs_pa_layer_attr));
1088 } else {
1089 memcpy(&new_pwr_info, &hba->pwr_info,
1090 sizeof(struct ufs_pa_layer_attr));
1091
1092 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1093 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1094 /* save the current power mode */
1095 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1096 &hba->pwr_info,
1097 sizeof(struct ufs_pa_layer_attr));
1098
1099 /* scale down gear */
1100 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1101 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1102 }
1103 }
1104
1105 /* check if the power mode needs to be changed or not? */
6a9df818 1106 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
a3cd5ec5 1107 if (ret)
1108 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1109 __func__, ret,
1110 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1111 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1112
1113 return ret;
1114}
1115
1116static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1117{
1118 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1119 int ret = 0;
1120 /*
1121 * make sure that there are no outstanding requests when
1122 * clock scaling is in progress
1123 */
38135535 1124 ufshcd_scsi_block_requests(hba);
a3cd5ec5 1125 down_write(&hba->clk_scaling_lock);
1126 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1127 ret = -EBUSY;
1128 up_write(&hba->clk_scaling_lock);
38135535 1129 ufshcd_scsi_unblock_requests(hba);
a3cd5ec5 1130 }
1131
1132 return ret;
1133}
1134
1135static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1136{
1137 up_write(&hba->clk_scaling_lock);
38135535 1138 ufshcd_scsi_unblock_requests(hba);
a3cd5ec5 1139}
1140
1141/**
1142 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1143 * @hba: per adapter instance
1144 * @scale_up: True for scaling up and false for scalin down
1145 *
1146 * Returns 0 for success,
1147 * Returns -EBUSY if scaling can't happen at this time
1148 * Returns non-zero for any other errors
1149 */
1150static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1151{
1152 int ret = 0;
1153
401f1e44 1154 /* let's not get into low power until clock scaling is completed */
1155 ufshcd_hold(hba, false);
1156
a3cd5ec5 1157 ret = ufshcd_clock_scaling_prepare(hba);
1158 if (ret)
394b949f 1159 goto out;
a3cd5ec5 1160
1161 /* scale down the gear before scaling down clocks */
1162 if (!scale_up) {
1163 ret = ufshcd_scale_gear(hba, false);
1164 if (ret)
394b949f 1165 goto out_unprepare;
a3cd5ec5 1166 }
1167
1168 ret = ufshcd_scale_clks(hba, scale_up);
1169 if (ret) {
1170 if (!scale_up)
1171 ufshcd_scale_gear(hba, true);
394b949f 1172 goto out_unprepare;
a3cd5ec5 1173 }
1174
1175 /* scale up the gear after scaling up clocks */
1176 if (scale_up) {
1177 ret = ufshcd_scale_gear(hba, true);
3d17b9b5 1178 if (ret) {
a3cd5ec5 1179 ufshcd_scale_clks(hba, false);
3d17b9b5
AD
1180 goto out_unprepare;
1181 }
a3cd5ec5 1182 }
1183
3d17b9b5
AD
1184 /* Enable Write Booster if we have scaled up else disable it */
1185 up_write(&hba->clk_scaling_lock);
1186 ufshcd_wb_ctrl(hba, scale_up);
1187 down_write(&hba->clk_scaling_lock);
1188
394b949f 1189out_unprepare:
a3cd5ec5 1190 ufshcd_clock_scaling_unprepare(hba);
394b949f 1191out:
401f1e44 1192 ufshcd_release(hba);
a3cd5ec5 1193 return ret;
1194}
1195
401f1e44 1196static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1197{
1198 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1199 clk_scaling.suspend_work);
1200 unsigned long irq_flags;
1201
1202 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1203 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1204 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1205 return;
1206 }
1207 hba->clk_scaling.is_suspended = true;
1208 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1209
1210 __ufshcd_suspend_clkscaling(hba);
1211}
1212
1213static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1214{
1215 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1216 clk_scaling.resume_work);
1217 unsigned long irq_flags;
1218
1219 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1220 if (!hba->clk_scaling.is_suspended) {
1221 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1222 return;
1223 }
1224 hba->clk_scaling.is_suspended = false;
1225 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1226
1227 devfreq_resume_device(hba->devfreq);
1228}
1229
a3cd5ec5 1230static int ufshcd_devfreq_target(struct device *dev,
1231 unsigned long *freq, u32 flags)
1232{
1233 int ret = 0;
1234 struct ufs_hba *hba = dev_get_drvdata(dev);
1235 ktime_t start;
401f1e44 1236 bool scale_up, sched_clk_scaling_suspend_work = false;
092b4558
BA
1237 struct list_head *clk_list = &hba->clk_list_head;
1238 struct ufs_clk_info *clki;
a3cd5ec5 1239 unsigned long irq_flags;
1240
1241 if (!ufshcd_is_clkscaling_supported(hba))
1242 return -EINVAL;
1243
91831d33
AD
1244 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1245 /* Override with the closest supported frequency */
1246 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
a3cd5ec5 1247 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1248 if (ufshcd_eh_in_progress(hba)) {
1249 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1250 return 0;
1251 }
1252
401f1e44 1253 if (!hba->clk_scaling.active_reqs)
1254 sched_clk_scaling_suspend_work = true;
1255
092b4558
BA
1256 if (list_empty(clk_list)) {
1257 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1258 goto out;
1259 }
1260
91831d33 1261 /* Decide based on the rounded-off frequency and update */
092b4558 1262 scale_up = (*freq == clki->max_freq) ? true : false;
91831d33
AD
1263 if (!scale_up)
1264 *freq = clki->min_freq;
1265 /* Update the frequency */
401f1e44 1266 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1267 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1268 ret = 0;
1269 goto out; /* no state change required */
a3cd5ec5 1270 }
1271 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1272
1273 start = ktime_get();
a3cd5ec5 1274 ret = ufshcd_devfreq_scale(hba, scale_up);
1275
a3cd5ec5 1276 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1277 (scale_up ? "up" : "down"),
1278 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1279
401f1e44 1280out:
1281 if (sched_clk_scaling_suspend_work)
1282 queue_work(hba->clk_scaling.workq,
1283 &hba->clk_scaling.suspend_work);
1284
a3cd5ec5 1285 return ret;
1286}
1287
7252a360
BVA
1288static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1289{
1290 int *busy = priv;
1291
1292 WARN_ON_ONCE(reserved);
1293 (*busy)++;
1294 return false;
1295}
1296
1297/* Whether or not any tag is in use by a request that is in progress. */
1298static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1299{
1300 struct request_queue *q = hba->cmd_queue;
1301 int busy = 0;
1302
1303 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1304 return busy;
1305}
a3cd5ec5 1306
1307static int ufshcd_devfreq_get_dev_status(struct device *dev,
1308 struct devfreq_dev_status *stat)
1309{
1310 struct ufs_hba *hba = dev_get_drvdata(dev);
1311 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1312 unsigned long flags;
91831d33
AD
1313 struct list_head *clk_list = &hba->clk_list_head;
1314 struct ufs_clk_info *clki;
b1bf66d1 1315 ktime_t curr_t;
a3cd5ec5 1316
1317 if (!ufshcd_is_clkscaling_supported(hba))
1318 return -EINVAL;
1319
1320 memset(stat, 0, sizeof(*stat));
1321
1322 spin_lock_irqsave(hba->host->host_lock, flags);
b1bf66d1 1323 curr_t = ktime_get();
a3cd5ec5 1324 if (!scaling->window_start_t)
1325 goto start_window;
1326
91831d33
AD
1327 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1328 /*
1329 * If current frequency is 0, then the ondemand governor considers
1330 * there's no initial frequency set. And it always requests to set
1331 * to max. frequency.
1332 */
1333 stat->current_frequency = clki->curr_freq;
a3cd5ec5 1334 if (scaling->is_busy_started)
b1bf66d1
SC
1335 scaling->tot_busy_t += ktime_us_delta(curr_t,
1336 scaling->busy_start_t);
a3cd5ec5 1337
b1bf66d1 1338 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
a3cd5ec5 1339 stat->busy_time = scaling->tot_busy_t;
1340start_window:
b1bf66d1 1341 scaling->window_start_t = curr_t;
a3cd5ec5 1342 scaling->tot_busy_t = 0;
1343
1344 if (hba->outstanding_reqs) {
b1bf66d1 1345 scaling->busy_start_t = curr_t;
a3cd5ec5 1346 scaling->is_busy_started = true;
1347 } else {
1348 scaling->busy_start_t = 0;
1349 scaling->is_busy_started = false;
1350 }
1351 spin_unlock_irqrestore(hba->host->host_lock, flags);
1352 return 0;
1353}
1354
deac444f
BA
1355static int ufshcd_devfreq_init(struct ufs_hba *hba)
1356{
092b4558
BA
1357 struct list_head *clk_list = &hba->clk_list_head;
1358 struct ufs_clk_info *clki;
deac444f
BA
1359 struct devfreq *devfreq;
1360 int ret;
1361
092b4558
BA
1362 /* Skip devfreq if we don't have any clocks in the list */
1363 if (list_empty(clk_list))
1364 return 0;
1365
1366 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1367 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1368 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1369
90b8491c
SC
1370 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1371 &hba->vps->ondemand_data);
092b4558 1372 devfreq = devfreq_add_device(hba->dev,
90b8491c 1373 &hba->vps->devfreq_profile,
deac444f 1374 DEVFREQ_GOV_SIMPLE_ONDEMAND,
90b8491c 1375 &hba->vps->ondemand_data);
deac444f
BA
1376 if (IS_ERR(devfreq)) {
1377 ret = PTR_ERR(devfreq);
1378 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
092b4558
BA
1379
1380 dev_pm_opp_remove(hba->dev, clki->min_freq);
1381 dev_pm_opp_remove(hba->dev, clki->max_freq);
deac444f
BA
1382 return ret;
1383 }
1384
1385 hba->devfreq = devfreq;
1386
1387 return 0;
1388}
1389
092b4558
BA
1390static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1391{
1392 struct list_head *clk_list = &hba->clk_list_head;
1393 struct ufs_clk_info *clki;
1394
1395 if (!hba->devfreq)
1396 return;
1397
1398 devfreq_remove_device(hba->devfreq);
1399 hba->devfreq = NULL;
1400
1401 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1402 dev_pm_opp_remove(hba->dev, clki->min_freq);
1403 dev_pm_opp_remove(hba->dev, clki->max_freq);
1404}
1405
401f1e44 1406static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1407{
1408 unsigned long flags;
1409
1410 devfreq_suspend_device(hba->devfreq);
1411 spin_lock_irqsave(hba->host->host_lock, flags);
1412 hba->clk_scaling.window_start_t = 0;
1413 spin_unlock_irqrestore(hba->host->host_lock, flags);
1414}
a3cd5ec5 1415
a508253d
GB
1416static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1417{
401f1e44 1418 unsigned long flags;
1419 bool suspend = false;
1420
fcb0c4b0
ST
1421 if (!ufshcd_is_clkscaling_supported(hba))
1422 return;
1423
401f1e44 1424 spin_lock_irqsave(hba->host->host_lock, flags);
1425 if (!hba->clk_scaling.is_suspended) {
1426 suspend = true;
1427 hba->clk_scaling.is_suspended = true;
1428 }
1429 spin_unlock_irqrestore(hba->host->host_lock, flags);
1430
1431 if (suspend)
1432 __ufshcd_suspend_clkscaling(hba);
a508253d
GB
1433}
1434
1435static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1436{
401f1e44 1437 unsigned long flags;
1438 bool resume = false;
1439
1440 if (!ufshcd_is_clkscaling_supported(hba))
1441 return;
1442
1443 spin_lock_irqsave(hba->host->host_lock, flags);
1444 if (hba->clk_scaling.is_suspended) {
1445 resume = true;
1446 hba->clk_scaling.is_suspended = false;
1447 }
1448 spin_unlock_irqrestore(hba->host->host_lock, flags);
1449
1450 if (resume)
1451 devfreq_resume_device(hba->devfreq);
fcb0c4b0
ST
1452}
1453
1454static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1455 struct device_attribute *attr, char *buf)
1456{
1457 struct ufs_hba *hba = dev_get_drvdata(dev);
1458
1459 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1460}
1461
1462static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1463 struct device_attribute *attr, const char *buf, size_t count)
1464{
1465 struct ufs_hba *hba = dev_get_drvdata(dev);
1466 u32 value;
1467 int err;
1468
1469 if (kstrtou32(buf, 0, &value))
1470 return -EINVAL;
1471
1472 value = !!value;
1473 if (value == hba->clk_scaling.is_allowed)
1474 goto out;
1475
1476 pm_runtime_get_sync(hba->dev);
1477 ufshcd_hold(hba, false);
1478
401f1e44 1479 cancel_work_sync(&hba->clk_scaling.suspend_work);
1480 cancel_work_sync(&hba->clk_scaling.resume_work);
1481
1482 hba->clk_scaling.is_allowed = value;
1483
fcb0c4b0
ST
1484 if (value) {
1485 ufshcd_resume_clkscaling(hba);
1486 } else {
1487 ufshcd_suspend_clkscaling(hba);
a3cd5ec5 1488 err = ufshcd_devfreq_scale(hba, true);
fcb0c4b0
ST
1489 if (err)
1490 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1491 __func__, err);
1492 }
fcb0c4b0
ST
1493
1494 ufshcd_release(hba);
1495 pm_runtime_put_sync(hba->dev);
1496out:
1497 return count;
a508253d
GB
1498}
1499
a3cd5ec5 1500static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1501{
1502 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1503 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1504 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1505 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1506 hba->clk_scaling.enable_attr.attr.mode = 0644;
1507 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1508 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1509}
1510
1ab27c9c
ST
1511static void ufshcd_ungate_work(struct work_struct *work)
1512{
1513 int ret;
1514 unsigned long flags;
1515 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1516 clk_gating.ungate_work);
1517
1518 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1519
1520 spin_lock_irqsave(hba->host->host_lock, flags);
1521 if (hba->clk_gating.state == CLKS_ON) {
1522 spin_unlock_irqrestore(hba->host->host_lock, flags);
1523 goto unblock_reqs;
1524 }
1525
1526 spin_unlock_irqrestore(hba->host->host_lock, flags);
1527 ufshcd_setup_clocks(hba, true);
1528
8b0bbf00
SC
1529 ufshcd_enable_irq(hba);
1530
1ab27c9c
ST
1531 /* Exit from hibern8 */
1532 if (ufshcd_can_hibern8_during_gating(hba)) {
1533 /* Prevent gating in this path */
1534 hba->clk_gating.is_suspended = true;
1535 if (ufshcd_is_link_hibern8(hba)) {
1536 ret = ufshcd_uic_hibern8_exit(hba);
1537 if (ret)
1538 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1539 __func__, ret);
1540 else
1541 ufshcd_set_link_active(hba);
1542 }
1543 hba->clk_gating.is_suspended = false;
1544 }
1545unblock_reqs:
38135535 1546 ufshcd_scsi_unblock_requests(hba);
1ab27c9c
ST
1547}
1548
1549/**
1550 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1551 * Also, exit from hibern8 mode and set the link as active.
1552 * @hba: per adapter instance
1553 * @async: This indicates whether caller should ungate clocks asynchronously.
1554 */
1555int ufshcd_hold(struct ufs_hba *hba, bool async)
1556{
1557 int rc = 0;
1558 unsigned long flags;
1559
1560 if (!ufshcd_is_clkgating_allowed(hba))
1561 goto out;
1ab27c9c
ST
1562 spin_lock_irqsave(hba->host->host_lock, flags);
1563 hba->clk_gating.active_reqs++;
1564
53c12d0e
YG
1565 if (ufshcd_eh_in_progress(hba)) {
1566 spin_unlock_irqrestore(hba->host->host_lock, flags);
1567 return 0;
1568 }
1569
856b3483 1570start:
1ab27c9c
ST
1571 switch (hba->clk_gating.state) {
1572 case CLKS_ON:
f2a785ac
VG
1573 /*
1574 * Wait for the ungate work to complete if in progress.
1575 * Though the clocks may be in ON state, the link could
1576 * still be in hibner8 state if hibern8 is allowed
1577 * during clock gating.
1578 * Make sure we exit hibern8 state also in addition to
1579 * clocks being ON.
1580 */
1581 if (ufshcd_can_hibern8_during_gating(hba) &&
1582 ufshcd_is_link_hibern8(hba)) {
c63d6099
CG
1583 if (async) {
1584 rc = -EAGAIN;
1585 hba->clk_gating.active_reqs--;
1586 break;
1587 }
f2a785ac
VG
1588 spin_unlock_irqrestore(hba->host->host_lock, flags);
1589 flush_work(&hba->clk_gating.ungate_work);
1590 spin_lock_irqsave(hba->host->host_lock, flags);
1591 goto start;
1592 }
1ab27c9c
ST
1593 break;
1594 case REQ_CLKS_OFF:
1595 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1596 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1597 trace_ufshcd_clk_gating(dev_name(hba->dev),
1598 hba->clk_gating.state);
1ab27c9c
ST
1599 break;
1600 }
1601 /*
9c490d2d 1602 * If we are here, it means gating work is either done or
1ab27c9c
ST
1603 * currently running. Hence, fall through to cancel gating
1604 * work and to enable clocks.
1605 */
30eb2e4c 1606 /* fallthrough */
1ab27c9c 1607 case CLKS_OFF:
38135535 1608 ufshcd_scsi_block_requests(hba);
1ab27c9c 1609 hba->clk_gating.state = REQ_CLKS_ON;
7ff5ab47 1610 trace_ufshcd_clk_gating(dev_name(hba->dev),
1611 hba->clk_gating.state);
10e5e375
VV
1612 queue_work(hba->clk_gating.clk_gating_workq,
1613 &hba->clk_gating.ungate_work);
1ab27c9c
ST
1614 /*
1615 * fall through to check if we should wait for this
1616 * work to be done or not.
1617 */
30eb2e4c 1618 /* fallthrough */
1ab27c9c
ST
1619 case REQ_CLKS_ON:
1620 if (async) {
1621 rc = -EAGAIN;
1622 hba->clk_gating.active_reqs--;
1623 break;
1624 }
1625
1626 spin_unlock_irqrestore(hba->host->host_lock, flags);
1627 flush_work(&hba->clk_gating.ungate_work);
1628 /* Make sure state is CLKS_ON before returning */
856b3483 1629 spin_lock_irqsave(hba->host->host_lock, flags);
1ab27c9c
ST
1630 goto start;
1631 default:
1632 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1633 __func__, hba->clk_gating.state);
1634 break;
1635 }
1636 spin_unlock_irqrestore(hba->host->host_lock, flags);
1637out:
1638 return rc;
1639}
6e3fd44d 1640EXPORT_SYMBOL_GPL(ufshcd_hold);
1ab27c9c
ST
1641
1642static void ufshcd_gate_work(struct work_struct *work)
1643{
1644 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1645 clk_gating.gate_work.work);
1646 unsigned long flags;
1647
1648 spin_lock_irqsave(hba->host->host_lock, flags);
3f0c06de
VG
1649 /*
1650 * In case you are here to cancel this work the gating state
1651 * would be marked as REQ_CLKS_ON. In this case save time by
1652 * skipping the gating work and exit after changing the clock
1653 * state to CLKS_ON.
1654 */
1655 if (hba->clk_gating.is_suspended ||
18f01374 1656 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1ab27c9c 1657 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1658 trace_ufshcd_clk_gating(dev_name(hba->dev),
1659 hba->clk_gating.state);
1ab27c9c
ST
1660 goto rel_lock;
1661 }
1662
1663 if (hba->clk_gating.active_reqs
1664 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
7252a360 1665 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1ab27c9c
ST
1666 || hba->active_uic_cmd || hba->uic_async_done)
1667 goto rel_lock;
1668
1669 spin_unlock_irqrestore(hba->host->host_lock, flags);
1670
1671 /* put the link into hibern8 mode before turning off clocks */
1672 if (ufshcd_can_hibern8_during_gating(hba)) {
1673 if (ufshcd_uic_hibern8_enter(hba)) {
1674 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1675 trace_ufshcd_clk_gating(dev_name(hba->dev),
1676 hba->clk_gating.state);
1ab27c9c
ST
1677 goto out;
1678 }
1679 ufshcd_set_link_hibern8(hba);
1680 }
1681
8b0bbf00
SC
1682 ufshcd_disable_irq(hba);
1683
1ab27c9c
ST
1684 if (!ufshcd_is_link_active(hba))
1685 ufshcd_setup_clocks(hba, false);
1686 else
1687 /* If link is active, device ref_clk can't be switched off */
1688 __ufshcd_setup_clocks(hba, false, true);
1689
1690 /*
1691 * In case you are here to cancel this work the gating state
1692 * would be marked as REQ_CLKS_ON. In this case keep the state
1693 * as REQ_CLKS_ON which would anyway imply that clocks are off
1694 * and a request to turn them on is pending. By doing this way,
1695 * we keep the state machine in tact and this would ultimately
1696 * prevent from doing cancel work multiple times when there are
1697 * new requests arriving before the current cancel work is done.
1698 */
1699 spin_lock_irqsave(hba->host->host_lock, flags);
7ff5ab47 1700 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1ab27c9c 1701 hba->clk_gating.state = CLKS_OFF;
7ff5ab47 1702 trace_ufshcd_clk_gating(dev_name(hba->dev),
1703 hba->clk_gating.state);
1704 }
1ab27c9c
ST
1705rel_lock:
1706 spin_unlock_irqrestore(hba->host->host_lock, flags);
1707out:
1708 return;
1709}
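/*
 * A rough sketch of the clock gating state machine implemented by
 * ufshcd_hold(), __ufshcd_release() and the two work items:
 *
 *   CLKS_ON --__ufshcd_release()--> REQ_CLKS_OFF --gate_work--> CLKS_OFF
 *   CLKS_OFF --ufshcd_hold()--> REQ_CLKS_ON --ungate_work--> CLKS_ON
 *
 * gate_work bails out and restores CLKS_ON if the state is no longer
 * REQ_CLKS_OFF when it starts, and only commits CLKS_OFF if the state is
 * still REQ_CLKS_OFF after the clocks have been switched off.
 */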
1710
1711/* host lock must be held before calling this variant */
1712static void __ufshcd_release(struct ufs_hba *hba)
1713{
1714 if (!ufshcd_is_clkgating_allowed(hba))
1715 return;
1716
1717 hba->clk_gating.active_reqs--;
1718
1719 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1720 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
7252a360 1721 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
53c12d0e
YG
1722 || hba->active_uic_cmd || hba->uic_async_done
1723 || ufshcd_eh_in_progress(hba))
1ab27c9c
ST
1724 return;
1725
1726 hba->clk_gating.state = REQ_CLKS_OFF;
7ff5ab47 1727 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
f4bb7704
EG
1728 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1729 &hba->clk_gating.gate_work,
1730 msecs_to_jiffies(hba->clk_gating.delay_ms));
1ab27c9c
ST
1731}
1732
1733void ufshcd_release(struct ufs_hba *hba)
1734{
1735 unsigned long flags;
1736
1737 spin_lock_irqsave(hba->host->host_lock, flags);
1738 __ufshcd_release(hba);
1739 spin_unlock_irqrestore(hba->host->host_lock, flags);
1740}
6e3fd44d 1741EXPORT_SYMBOL_GPL(ufshcd_release);
1ab27c9c
ST
1742
1743static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1744 struct device_attribute *attr, char *buf)
1745{
1746 struct ufs_hba *hba = dev_get_drvdata(dev);
1747
1748 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1749}
1750
1751static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1752 struct device_attribute *attr, const char *buf, size_t count)
1753{
1754 struct ufs_hba *hba = dev_get_drvdata(dev);
1755 unsigned long flags, value;
1756
1757 if (kstrtoul(buf, 0, &value))
1758 return -EINVAL;
1759
1760 spin_lock_irqsave(hba->host->host_lock, flags);
1761 hba->clk_gating.delay_ms = value;
1762 spin_unlock_irqrestore(hba->host->host_lock, flags);
1763 return count;
1764}
1765
b427411a
ST
1766static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1767 struct device_attribute *attr, char *buf)
1768{
1769 struct ufs_hba *hba = dev_get_drvdata(dev);
1770
1771 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1772}
1773
1774static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1775 struct device_attribute *attr, const char *buf, size_t count)
1776{
1777 struct ufs_hba *hba = dev_get_drvdata(dev);
1778 unsigned long flags;
1779 u32 value;
1780
1781 if (kstrtou32(buf, 0, &value))
1782 return -EINVAL;
1783
1784 value = !!value;
1785 if (value == hba->clk_gating.is_enabled)
1786 goto out;
1787
1788 if (value) {
1789 ufshcd_release(hba);
1790 } else {
1791 spin_lock_irqsave(hba->host->host_lock, flags);
1792 hba->clk_gating.active_reqs++;
1793 spin_unlock_irqrestore(hba->host->host_lock, flags);
1794 }
1795
1796 hba->clk_gating.is_enabled = value;
1797out:
1798 return count;
1799}
1800
eebcc196
VG
1801static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1802{
1803 char wq_name[sizeof("ufs_clkscaling_00")];
1804
1805 if (!ufshcd_is_clkscaling_supported(hba))
1806 return;
1807
1808 INIT_WORK(&hba->clk_scaling.suspend_work,
1809 ufshcd_clk_scaling_suspend_work);
1810 INIT_WORK(&hba->clk_scaling.resume_work,
1811 ufshcd_clk_scaling_resume_work);
1812
1813 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1814 hba->host->host_no);
1815 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1816
1817 ufshcd_clkscaling_init_sysfs(hba);
1818}
1819
1820static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1821{
1822 if (!ufshcd_is_clkscaling_supported(hba))
1823 return;
1824
1825 destroy_workqueue(hba->clk_scaling.workq);
1826 ufshcd_devfreq_remove(hba);
1827}
1828
1ab27c9c
ST
1829static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1830{
10e5e375
VV
1831 char wq_name[sizeof("ufs_clk_gating_00")];
1832
1ab27c9c
ST
1833 if (!ufshcd_is_clkgating_allowed(hba))
1834 return;
1835
1836 hba->clk_gating.delay_ms = 150;
1837 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1838 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1839
10e5e375
VV
1840 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1841 hba->host->host_no);
1842 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1843 WQ_MEM_RECLAIM);
1844
b427411a
ST
1845 hba->clk_gating.is_enabled = true;
1846
1ab27c9c
ST
1847 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1848 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1849 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1850 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
b427411a 1851 hba->clk_gating.delay_attr.attr.mode = 0644;
1ab27c9c
ST
1852 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1853 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
b427411a
ST
1854
1855 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1856 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1857 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1858 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1859 hba->clk_gating.enable_attr.attr.mode = 0644;
1860 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1861 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1ab27c9c
ST
1862}
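/*
 * Example use of the two sysfs attributes created above (the exact sysfs
 * path is platform dependent and is only shown here as an illustration):
 *
 *   cat /sys/devices/.../clkgate_delay_ms
 *   echo 100 > /sys/devices/.../clkgate_delay_ms	# gate 100ms after idle
 *   echo 0 > /sys/devices/.../clkgate_enable		# keep clocks ungated
 */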
1863
1864static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1865{
1866 if (!ufshcd_is_clkgating_allowed(hba))
1867 return;
1868 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
b427411a 1869 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
97cd6805
AM
1870 cancel_work_sync(&hba->clk_gating.ungate_work);
1871 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
10e5e375 1872 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1ab27c9c
ST
1873}
1874
856b3483
ST
1875/* Must be called with host lock acquired */
1876static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1877{
401f1e44 1878 bool queue_resume_work = false;
b1bf66d1 1879 ktime_t curr_t = ktime_get();
401f1e44 1880
fcb0c4b0 1881 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1882 return;
1883
401f1e44 1884 if (!hba->clk_scaling.active_reqs++)
1885 queue_resume_work = true;
1886
1887 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1888 return;
1889
1890 if (queue_resume_work)
1891 queue_work(hba->clk_scaling.workq,
1892 &hba->clk_scaling.resume_work);
1893
1894 if (!hba->clk_scaling.window_start_t) {
b1bf66d1 1895 hba->clk_scaling.window_start_t = curr_t;
401f1e44 1896 hba->clk_scaling.tot_busy_t = 0;
1897 hba->clk_scaling.is_busy_started = false;
1898 }
1899
856b3483 1900 if (!hba->clk_scaling.is_busy_started) {
b1bf66d1 1901 hba->clk_scaling.busy_start_t = curr_t;
856b3483
ST
1902 hba->clk_scaling.is_busy_started = true;
1903 }
1904}
1905
1906static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1907{
1908 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1909
fcb0c4b0 1910 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1911 return;
1912
1913 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1914 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1915 scaling->busy_start_t));
8b0e1953 1916 scaling->busy_start_t = 0;
856b3483
ST
1917 scaling->is_busy_started = false;
1918 }
1919}
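/*
 * The two helpers above feed the devfreq based clock scaling logic:
 * ufshcd_clk_scaling_start_busy() opens a busy window when a request is
 * issued, and ufshcd_clk_scaling_update_busy() closes it and accumulates
 * tot_busy_t once no transfer requests are outstanding.
 */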
7a3e97b0
SY
1920/**
1921 * ufshcd_send_command - Send SCSI or device management commands
1922 * @hba: per adapter instance
1923 * @task_tag: Task tag of the command
1924 */
1925static inline
1926void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1927{
ff8e20c6 1928 hba->lrb[task_tag].issue_time_stamp = ktime_get();
09017188 1929 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
eacf36f5 1930 ufshcd_add_command_trace(hba, task_tag, "send");
856b3483 1931 ufshcd_clk_scaling_start_busy(hba);
7a3e97b0 1932 __set_bit(task_tag, &hba->outstanding_reqs);
b873a275 1933 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
ad1a1b9c
GB
1934 /* Make sure that doorbell is committed immediately */
1935 wmb();
7a3e97b0
SY
1936}
1937
1938/**
1939 * ufshcd_copy_sense_data - Copy sense data in case of check condition
8aa29f19 1940 * @lrbp: pointer to local reference block
7a3e97b0
SY
1941 */
1942static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1943{
1944 int len;
1c2623c5
SJ
1945 if (lrbp->sense_buffer &&
1946 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
e3ce73d6
YG
1947 int len_to_copy;
1948
5a0b0cb9 1949 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
09a5a24f 1950 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
e3ce73d6 1951
09a5a24f
AA
1952 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1953 len_to_copy);
7a3e97b0
SY
1954 }
1955}
1956
68078d5c
DR
1957/**
1958 * ufshcd_copy_query_response() - Copy the Query Response and the data
1959 * descriptor
1960 * @hba: per adapter instance
8aa29f19 1961 * @lrbp: pointer to local reference block
68078d5c
DR
1962 */
1963static
c6d4a831 1964int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
68078d5c
DR
1965{
1966 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1967
68078d5c 1968 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
68078d5c 1969
68078d5c 1970 /* Get the descriptor */
1c90836f
AA
1971 if (hba->dev_cmd.query.descriptor &&
1972 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
d44a5f98 1973 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
68078d5c 1974 GENERAL_UPIU_REQUEST_SIZE;
c6d4a831
DR
1975 u16 resp_len;
1976 u16 buf_len;
68078d5c
DR
1977
1978 /* data segment length */
c6d4a831 1979 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
68078d5c 1980 MASK_QUERY_DATA_SEG_LEN;
ea2aab24
SRT
1981 buf_len = be16_to_cpu(
1982 hba->dev_cmd.query.request.upiu_req.length);
c6d4a831
DR
1983 if (likely(buf_len >= resp_len)) {
1984 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1985 } else {
1986 dev_warn(hba->dev,
3d4881d1
BH
1987 "%s: rsp size %d is bigger than buffer size %d",
1988 __func__, resp_len, buf_len);
c6d4a831
DR
1989 return -EINVAL;
1990 }
68078d5c 1991 }
c6d4a831
DR
1992
1993 return 0;
68078d5c
DR
1994}
1995
7a3e97b0
SY
1996/**
1997 * ufshcd_hba_capabilities - Read controller capabilities
1998 * @hba: per adapter instance
1999 */
2000static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2001{
b873a275 2002 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
7a3e97b0
SY
2003
2004 /* nutrs and nutmrs are 0 based values */
2005 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2006 hba->nutmrs =
2007 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2008}
2009
2010/**
6ccf44fe
SJ
2011 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2012 * to accept UIC commands
7a3e97b0 2013 * @hba: per adapter instance
6ccf44fe
SJ
2014 * Return true if the controller is ready to accept UIC commands, else false
2015 */
2016static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2017{
2018 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2019 return true;
2020 else
2021 return false;
2022}
2023
53b3d9c3
SJ
2024/**
2025 * ufshcd_get_upmcrs - Get the power mode change request status
2026 * @hba: Pointer to adapter instance
2027 *
2028 * This function gets the UPMCRS field of the HCS register.
2029 * Returns the value of the UPMCRS field.
2030 */
2031static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2032{
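	/* UPMCRS is bits 10:8 of the HCS register, hence the shift by 8 and the 0x7 mask */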
2033 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2034}
2035
6ccf44fe
SJ
2036/**
2037 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2038 * @hba: per adapter instance
2039 * @uic_cmd: UIC command
2040 *
2041 * Mutex must be held.
7a3e97b0
SY
2042 */
2043static inline void
6ccf44fe 2044ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
7a3e97b0 2045{
6ccf44fe
SJ
2046 WARN_ON(hba->active_uic_cmd);
2047
2048 hba->active_uic_cmd = uic_cmd;
2049
7a3e97b0 2050 /* Write Args */
6ccf44fe
SJ
2051 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2052 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2053 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
7a3e97b0 2054
aa5c6979
SC
2055 ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2056
7a3e97b0 2057 /* Write UIC Cmd */
6ccf44fe 2058 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
b873a275 2059 REG_UIC_COMMAND);
7a3e97b0
SY
2060}
2061
6ccf44fe
SJ
2062/**
2063 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2064 * @hba: per adapter instance
8aa29f19 2065 * @uic_cmd: UIC command
6ccf44fe
SJ
2066 *
2067 * Must be called with mutex held.
2068 * Returns 0 only if success.
2069 */
2070static int
2071ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2072{
2073 int ret;
2074 unsigned long flags;
2075
2076 if (wait_for_completion_timeout(&uic_cmd->done,
2077 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2078 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2079 else
2080 ret = -ETIMEDOUT;
2081
2082 spin_lock_irqsave(hba->host->host_lock, flags);
2083 hba->active_uic_cmd = NULL;
2084 spin_unlock_irqrestore(hba->host->host_lock, flags);
2085
2086 return ret;
2087}
2088
2089/**
2090 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2091 * @hba: per adapter instance
2092 * @uic_cmd: UIC command
d75f7fe4 2093 * @completion: initialize the completion only if this is set to true
6ccf44fe
SJ
2094 *
2095 * Identical to ufshcd_send_uic_cmd() except for locking: must be called
57d104c1 2096 * with mutex held and host_lock locked.
6ccf44fe
SJ
2097 * Returns 0 only if success.
2098 */
2099static int
d75f7fe4
YG
2100__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2101 bool completion)
6ccf44fe 2102{
6ccf44fe
SJ
2103 if (!ufshcd_ready_for_uic_cmd(hba)) {
2104 dev_err(hba->dev,
2105 "Controller not ready to accept UIC commands\n");
2106 return -EIO;
2107 }
2108
d75f7fe4
YG
2109 if (completion)
2110 init_completion(&uic_cmd->done);
6ccf44fe 2111
6ccf44fe 2112 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
6ccf44fe 2113
57d104c1 2114 return 0;
6ccf44fe
SJ
2115}
2116
2117/**
2118 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2119 * @hba: per adapter instance
2120 * @uic_cmd: UIC command
2121 *
2122 * Returns 0 only if success.
2123 */
e77044c5 2124int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
6ccf44fe
SJ
2125{
2126 int ret;
57d104c1 2127 unsigned long flags;
6ccf44fe 2128
1ab27c9c 2129 ufshcd_hold(hba, false);
6ccf44fe 2130 mutex_lock(&hba->uic_cmd_mutex);
cad2e03d
YG
2131 ufshcd_add_delay_before_dme_cmd(hba);
2132
57d104c1 2133 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 2134 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
57d104c1
SJ
2135 spin_unlock_irqrestore(hba->host->host_lock, flags);
2136 if (!ret)
2137 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2138
6ccf44fe
SJ
2139 mutex_unlock(&hba->uic_cmd_mutex);
2140
1ab27c9c 2141 ufshcd_release(hba);
6ccf44fe
SJ
2142 return ret;
2143}
2144
7a3e97b0
SY
2145/**
2146 * ufshcd_map_sg - Map scatter-gather list to prdt
8aa29f19
BVA
2147 * @hba: per adapter instance
2148 * @lrbp: pointer to local reference block
7a3e97b0
SY
2149 *
2150 * Returns 0 in case of success, non-zero value in case of failure
2151 */
75b1cc4a 2152static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0
SY
2153{
2154 struct ufshcd_sg_entry *prd_table;
2155 struct scatterlist *sg;
2156 struct scsi_cmnd *cmd;
2157 int sg_segments;
2158 int i;
2159
2160 cmd = lrbp->cmd;
2161 sg_segments = scsi_dma_map(cmd);
2162 if (sg_segments < 0)
2163 return sg_segments;
2164
2165 if (sg_segments) {
26f968d7
AA
2166
2167 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2168 lrbp->utr_descriptor_ptr->prd_table_length =
2169 cpu_to_le16((sg_segments *
2170 sizeof(struct ufshcd_sg_entry)));
2171 else
2172 lrbp->utr_descriptor_ptr->prd_table_length =
2173 cpu_to_le16((u16) (sg_segments));
7a3e97b0
SY
2174
2175 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2176
2177 scsi_for_each_sg(cmd, sg, sg_segments, i) {
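			/* The PRDT data byte count field is zero based, hence length - 1 */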
2178 prd_table[i].size =
2179 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2180 prd_table[i].base_addr =
2181 cpu_to_le32(lower_32_bits(sg->dma_address));
2182 prd_table[i].upper_addr =
2183 cpu_to_le32(upper_32_bits(sg->dma_address));
52ac95fe 2184 prd_table[i].reserved = 0;
7a3e97b0
SY
2185 }
2186 } else {
2187 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2188 }
2189
2190 return 0;
2191}
2192
2193/**
2fbd009b 2194 * ufshcd_enable_intr - enable interrupts
7a3e97b0 2195 * @hba: per adapter instance
2fbd009b 2196 * @intrs: interrupt bits
7a3e97b0 2197 */
2fbd009b 2198static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
7a3e97b0 2199{
2fbd009b
SJ
2200 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2201
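	/*
	 * UFSHCI 1.0 needs special handling: keep only the already-set bits
	 * covered by INTERRUPT_MASK_RW_VER_10 and OR in the newly requested
	 * interrupts. Later versions use a plain read-modify-write.
	 */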
2202 if (hba->ufs_version == UFSHCI_VERSION_10) {
2203 u32 rw;
2204 rw = set & INTERRUPT_MASK_RW_VER_10;
2205 set = rw | ((set ^ intrs) & intrs);
2206 } else {
2207 set |= intrs;
2208 }
2209
2210 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2211}
2212
2213/**
2214 * ufshcd_disable_intr - disable interrupts
2215 * @hba: per adapter instance
2216 * @intrs: interrupt bits
2217 */
2218static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2219{
2220 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2221
2222 if (hba->ufs_version == UFSHCI_VERSION_10) {
2223 u32 rw;
2224 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2225 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2226 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2227
2228 } else {
2229 set &= ~intrs;
7a3e97b0 2230 }
2fbd009b
SJ
2231
2232 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
7a3e97b0
SY
2233}
2234
5a0b0cb9
SRT
2235/**
2236 * ufshcd_prepare_req_desc_hdr() - Fills the UTP transfer request descriptor
2237 * header according to the request
2238 * @lrbp: pointer to local reference block
2239 * @upiu_flags: flags required in the header
2240 * @cmd_dir: requests data direction
2241 */
2242static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
300bb13f 2243 u32 *upiu_flags, enum dma_data_direction cmd_dir)
5a0b0cb9
SRT
2244{
2245 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2246 u32 data_direction;
2247 u32 dword_0;
2248
2249 if (cmd_dir == DMA_FROM_DEVICE) {
2250 data_direction = UTP_DEVICE_TO_HOST;
2251 *upiu_flags = UPIU_CMD_FLAGS_READ;
2252 } else if (cmd_dir == DMA_TO_DEVICE) {
2253 data_direction = UTP_HOST_TO_DEVICE;
2254 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2255 } else {
2256 data_direction = UTP_NO_DATA_TRANSFER;
2257 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2258 }
2259
2260 dword_0 = data_direction | (lrbp->command_type
2261 << UPIU_COMMAND_TYPE_OFFSET);
2262 if (lrbp->intr_cmd)
2263 dword_0 |= UTP_REQ_DESC_INT_CMD;
2264
2265 /* Transfer request descriptor header fields */
2266 req_desc->header.dword_0 = cpu_to_le32(dword_0);
52ac95fe
YG
2267 /* dword_1 is reserved, hence it is set to 0 */
2268 req_desc->header.dword_1 = 0;
5a0b0cb9
SRT
2269 /*
2270 * assigning invalid value for command status. Controller
2271 * updates OCS on command completion, with the command
2272 * status
2273 */
2274 req_desc->header.dword_2 =
2275 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
52ac95fe
YG
2276 /* dword_3 is reserved, hence it is set to 0 */
2277 req_desc->header.dword_3 = 0;
51047266
YG
2278
2279 req_desc->prd_table_length = 0;
5a0b0cb9
SRT
2280}
2281
2282/**
2283 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2284 * for scsi commands
8aa29f19
BVA
2285 * @lrbp: local reference block pointer
2286 * @upiu_flags: flags
5a0b0cb9
SRT
2287 */
2288static
2289void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2290{
1b21b8f0 2291 struct scsi_cmnd *cmd = lrbp->cmd;
5a0b0cb9 2292 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
52ac95fe 2293 unsigned short cdb_len;
5a0b0cb9
SRT
2294
2295 /* command descriptor fields */
2296 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2297 UPIU_TRANSACTION_COMMAND, upiu_flags,
2298 lrbp->lun, lrbp->task_tag);
2299 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2300 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2301
2302 /* Total EHS length and Data segment length will be zero */
2303 ucd_req_ptr->header.dword_2 = 0;
2304
1b21b8f0 2305 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
5a0b0cb9 2306
1b21b8f0 2307 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
a851b2bd 2308 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1b21b8f0 2309 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
52ac95fe
YG
2310
2311 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2312}
2313
68078d5c
DR
2314/**
2315 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2316 * for query requests
2317 * @hba: UFS hba
2318 * @lrbp: local reference block pointer
2319 * @upiu_flags: flags
2320 */
2321static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2322 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2323{
2324 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2325 struct ufs_query *query = &hba->dev_cmd.query;
e8c8e82a 2326 u16 len = be16_to_cpu(query->request.upiu_req.length);
68078d5c
DR
2327
2328 /* Query request header */
2329 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2330 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2331 lrbp->lun, lrbp->task_tag);
2332 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2333 0, query->request.query_func, 0, 0);
2334
6861285c
ZL
2335	/* Data segment length is only needed for WRITE_DESC */
2336 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2337 ucd_req_ptr->header.dword_2 =
2338 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2339 else
2340 ucd_req_ptr->header.dword_2 = 0;
68078d5c
DR
2341
2342 /* Copy the Query Request buffer as is */
2343 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2344 QUERY_OSF_SIZE);
68078d5c
DR
2345
2346 /* Copy the Descriptor */
c6d4a831 2347 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
220d17a6 2348 memcpy(ucd_req_ptr + 1, query->descriptor, len);
c6d4a831 2349
51047266 2350 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
68078d5c
DR
2351}
2352
5a0b0cb9
SRT
2353static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2354{
2355 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2356
2357 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2358
2359 /* command descriptor fields */
2360 ucd_req_ptr->header.dword_0 =
2361 UPIU_HEADER_DWORD(
2362 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
51047266
YG
2363 /* clear rest of the fields of basic header */
2364 ucd_req_ptr->header.dword_1 = 0;
2365 ucd_req_ptr->header.dword_2 = 0;
2366
2367 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2368}
2369
7a3e97b0 2370/**
300bb13f
JP
2371 * ufshcd_comp_devman_upiu - prepare the UFS Protocol Information Unit (UPIU)
2372 * for device management purposes
8aa29f19
BVA
2373 * @hba: per adapter instance
2374 * @lrbp: pointer to local reference block
7a3e97b0 2375 */
300bb13f 2376static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0 2377{
7a3e97b0 2378 u32 upiu_flags;
5a0b0cb9 2379 int ret = 0;
7a3e97b0 2380
83dc7e3d 2381 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2382 (hba->ufs_version == UFSHCI_VERSION_11))
300bb13f 2383 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
83dc7e3d 2384 else
2385 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2386
2387 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2388 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2389 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2390 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2391 ufshcd_prepare_utp_nop_upiu(lrbp);
2392 else
2393 ret = -EINVAL;
2394
2395 return ret;
2396}
2397
2398/**
2399 * ufshcd_comp_scsi_upiu - prepare the UFS Protocol Information Unit (UPIU)
2400 * for SCSI purposes
8aa29f19
BVA
2401 * @hba: per adapter instance
2402 * @lrbp: pointer to local reference block
300bb13f
JP
2403 */
2404static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2405{
2406 u32 upiu_flags;
2407 int ret = 0;
2408
83dc7e3d 2409 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2410 (hba->ufs_version == UFSHCI_VERSION_11))
300bb13f 2411 lrbp->command_type = UTP_CMD_TYPE_SCSI;
83dc7e3d 2412 else
2413 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2414
2415 if (likely(lrbp->cmd)) {
2416 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2417 lrbp->cmd->sc_data_direction);
2418 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2419 } else {
2420 ret = -EINVAL;
2421 }
5a0b0cb9
SRT
2422
2423 return ret;
7a3e97b0
SY
2424}
2425
2a8fa600
SJ
2426/**
2427 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
8aa29f19 2428 * @upiu_wlun_id: UPIU W-LUN id
2a8fa600
SJ
2429 *
2430 * Returns SCSI W-LUN id
2431 */
2432static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2433{
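	/* e.g. the UPIU device W-LUN 0xD0 maps to SCSI W-LUN 0xc150 (0x50 | SCSI_W_LUN_BASE) */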
2434 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2435}
2436
4d2b8d40
BVA
2437static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2438{
2439 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2440 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2441 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2442 i * sizeof(struct utp_transfer_cmd_desc);
2443 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2444 response_upiu);
2445 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2446
2447 lrb->utr_descriptor_ptr = utrdlp + i;
2448 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2449 i * sizeof(struct utp_transfer_req_desc);
2450 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2451 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2452 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2453 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2454 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2455 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2456}
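/*
 * Each UTP transfer command descriptor holds, in order, the request UPIU,
 * the response UPIU and the PRDT; ufshcd_init_lrb() above simply records
 * the CPU and DMA addresses of those regions in the per-tag lrb.
 */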
2457
7a3e97b0
SY
2458/**
2459 * ufshcd_queuecommand - main entry point for SCSI requests
8aa29f19 2460 * @host: SCSI host pointer
7a3e97b0 2461 * @cmd: command from SCSI Midlayer
7a3e97b0
SY
2462 *
2463 * Returns 0 for success, non-zero in case of failure
2464 */
2465static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2466{
2467 struct ufshcd_lrb *lrbp;
2468 struct ufs_hba *hba;
2469 unsigned long flags;
2470 int tag;
2471 int err = 0;
2472
2473 hba = shost_priv(host);
2474
2475 tag = cmd->request->tag;
14497328
YG
2476 if (!ufshcd_valid_tag(hba, tag)) {
2477 dev_err(hba->dev,
2478 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2479 __func__, tag, cmd, cmd->request);
2480 BUG();
2481 }
7a3e97b0 2482
a3cd5ec5 2483 if (!down_read_trylock(&hba->clk_scaling_lock))
2484 return SCSI_MLQUEUE_HOST_BUSY;
2485
3441da7d
SRT
2486 spin_lock_irqsave(hba->host->host_lock, flags);
2487 switch (hba->ufshcd_state) {
2488 case UFSHCD_STATE_OPERATIONAL:
2489 break;
141f8165 2490 case UFSHCD_STATE_EH_SCHEDULED:
3441da7d 2491 case UFSHCD_STATE_RESET:
7a3e97b0 2492 err = SCSI_MLQUEUE_HOST_BUSY;
3441da7d
SRT
2493 goto out_unlock;
2494 case UFSHCD_STATE_ERROR:
2495 set_host_byte(cmd, DID_ERROR);
2496 cmd->scsi_done(cmd);
2497 goto out_unlock;
2498 default:
2499 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2500 __func__, hba->ufshcd_state);
2501 set_host_byte(cmd, DID_BAD_TARGET);
2502 cmd->scsi_done(cmd);
2503 goto out_unlock;
7a3e97b0 2504 }
53c12d0e
YG
2505
2506 /* if error handling is in progress, don't issue commands */
2507 if (ufshcd_eh_in_progress(hba)) {
2508 set_host_byte(cmd, DID_ERROR);
2509 cmd->scsi_done(cmd);
2510 goto out_unlock;
2511 }
3441da7d 2512 spin_unlock_irqrestore(hba->host->host_lock, flags);
7a3e97b0 2513
7fabb77b
GB
2514 hba->req_abort_count = 0;
2515
1ab27c9c
ST
2516 err = ufshcd_hold(hba, true);
2517 if (err) {
2518 err = SCSI_MLQUEUE_HOST_BUSY;
1ab27c9c
ST
2519 goto out;
2520 }
2521 WARN_ON(hba->clk_gating.state != CLKS_ON);
2522
7a3e97b0
SY
2523 lrbp = &hba->lrb[tag];
2524
5a0b0cb9 2525 WARN_ON(lrbp->cmd);
7a3e97b0 2526 lrbp->cmd = cmd;
09a5a24f 2527 lrbp->sense_bufflen = UFS_SENSE_SIZE;
7a3e97b0
SY
2528 lrbp->sense_buffer = cmd->sense_buffer;
2529 lrbp->task_tag = tag;
0ce147d4 2530 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
b852190e 2531 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
e0b299e3 2532 lrbp->req_abort_skip = false;
7a3e97b0 2533
300bb13f
JP
2534 ufshcd_comp_scsi_upiu(hba, lrbp);
2535
75b1cc4a 2536 err = ufshcd_map_sg(hba, lrbp);
5a0b0cb9
SRT
2537 if (err) {
2538 lrbp->cmd = NULL;
17c7d35f 2539 ufshcd_release(hba);
7a3e97b0 2540 goto out;
5a0b0cb9 2541 }
ad1a1b9c
GB
2542 /* Make sure descriptors are ready before ringing the doorbell */
2543 wmb();
7a3e97b0
SY
2544
2545 /* issue command to the controller */
2546 spin_lock_irqsave(hba->host->host_lock, flags);
5905d464 2547 ufshcd_vops_setup_xfer_req(hba, tag, true);
7a3e97b0 2548 ufshcd_send_command(hba, tag);
3441da7d 2549out_unlock:
7a3e97b0
SY
2550 spin_unlock_irqrestore(hba->host->host_lock, flags);
2551out:
a3cd5ec5 2552 up_read(&hba->clk_scaling_lock);
7a3e97b0
SY
2553 return err;
2554}
2555
5a0b0cb9
SRT
2556static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2557 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2558{
2559 lrbp->cmd = NULL;
2560 lrbp->sense_bufflen = 0;
2561 lrbp->sense_buffer = NULL;
2562 lrbp->task_tag = tag;
2563 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
5a0b0cb9
SRT
2564 lrbp->intr_cmd = true; /* No interrupt aggregation */
2565 hba->dev_cmd.type = cmd_type;
2566
300bb13f 2567 return ufshcd_comp_devman_upiu(hba, lrbp);
5a0b0cb9
SRT
2568}
2569
2570static int
2571ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2572{
2573 int err = 0;
2574 unsigned long flags;
2575 u32 mask = 1 << tag;
2576
2577 /* clear outstanding transaction before retry */
2578 spin_lock_irqsave(hba->host->host_lock, flags);
2579 ufshcd_utrl_clear(hba, tag);
2580 spin_unlock_irqrestore(hba->host->host_lock, flags);
2581
2582 /*
2583	 * wait for the h/w to clear the corresponding bit in the doorbell.
2584 * max. wait is 1 sec.
2585 */
2586 err = ufshcd_wait_for_register(hba,
2587 REG_UTP_TRANSFER_REQ_DOOR_BELL,
5cac1095 2588 mask, ~mask, 1000, 1000);
5a0b0cb9
SRT
2589
2590 return err;
2591}
2592
c6d4a831
DR
2593static int
2594ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2595{
2596 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2597
2598 /* Get the UPIU response */
2599 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2600 UPIU_RSP_CODE_OFFSET;
2601 return query_res->response;
2602}
2603
5a0b0cb9
SRT
2604/**
2605 * ufshcd_dev_cmd_completion() - handles device management command responses
2606 * @hba: per adapter instance
2607 * @lrbp: pointer to local reference block
2608 */
2609static int
2610ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2611{
2612 int resp;
2613 int err = 0;
2614
ff8e20c6 2615 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
2616 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2617
2618 switch (resp) {
2619 case UPIU_TRANSACTION_NOP_IN:
2620 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2621 err = -EINVAL;
2622 dev_err(hba->dev, "%s: unexpected response %x\n",
2623 __func__, resp);
2624 }
2625 break;
68078d5c 2626 case UPIU_TRANSACTION_QUERY_RSP:
c6d4a831
DR
2627 err = ufshcd_check_query_response(hba, lrbp);
2628 if (!err)
2629 err = ufshcd_copy_query_response(hba, lrbp);
68078d5c 2630 break;
5a0b0cb9
SRT
2631 case UPIU_TRANSACTION_REJECT_UPIU:
2632 /* TODO: handle Reject UPIU Response */
2633 err = -EPERM;
2634 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2635 __func__);
2636 break;
2637 default:
2638 err = -EINVAL;
2639 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2640 __func__, resp);
2641 break;
2642 }
2643
2644 return err;
2645}
2646
2647static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2648 struct ufshcd_lrb *lrbp, int max_timeout)
2649{
2650 int err = 0;
2651 unsigned long time_left;
2652 unsigned long flags;
2653
2654 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2655 msecs_to_jiffies(max_timeout));
2656
ad1a1b9c
GB
2657 /* Make sure descriptors are ready before ringing the doorbell */
2658 wmb();
5a0b0cb9
SRT
2659 spin_lock_irqsave(hba->host->host_lock, flags);
2660 hba->dev_cmd.complete = NULL;
2661 if (likely(time_left)) {
2662 err = ufshcd_get_tr_ocs(lrbp);
2663 if (!err)
2664 err = ufshcd_dev_cmd_completion(hba, lrbp);
2665 }
2666 spin_unlock_irqrestore(hba->host->host_lock, flags);
2667
2668 if (!time_left) {
2669 err = -ETIMEDOUT;
a48353f6
YG
2670 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2671 __func__, lrbp->task_tag);
5a0b0cb9 2672 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
a48353f6 2673 /* successfully cleared the command, retry if needed */
5a0b0cb9 2674 err = -EAGAIN;
a48353f6
YG
2675 /*
2676 * in case of an error, after clearing the doorbell,
2677 * we also need to clear the outstanding_request
2678 * field in hba
2679 */
2680 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
5a0b0cb9
SRT
2681 }
2682
2683 return err;
2684}
2685
5a0b0cb9
SRT
2686/**
2687 * ufshcd_exec_dev_cmd - API for sending device management requests
8aa29f19
BVA
2688 * @hba: UFS hba
2689 * @cmd_type: specifies the type (NOP, Query...)
2690 * @timeout: timeout in milliseconds
5a0b0cb9 2691 *
68078d5c
DR
2692 * NOTE: Since there is only one available tag for device management commands,
2693 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
5a0b0cb9
SRT
2694 */
2695static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2696 enum dev_cmd_type cmd_type, int timeout)
2697{
7252a360
BVA
2698 struct request_queue *q = hba->cmd_queue;
2699 struct request *req;
5a0b0cb9
SRT
2700 struct ufshcd_lrb *lrbp;
2701 int err;
2702 int tag;
2703 struct completion wait;
2704 unsigned long flags;
2705
a3cd5ec5 2706 down_read(&hba->clk_scaling_lock);
2707
5a0b0cb9
SRT
2708 /*
2709 * Get free slot, sleep if slots are unavailable.
2710 * Even though we use wait_event() which sleeps indefinitely,
2711 * the maximum wait time is bounded by SCSI request timeout.
2712 */
7252a360 2713 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
bb14dd15
DC
2714 if (IS_ERR(req)) {
2715 err = PTR_ERR(req);
2716 goto out_unlock;
2717 }
7252a360
BVA
2718 tag = req->tag;
2719 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5a0b0cb9
SRT
2720
2721 init_completion(&wait);
2722 lrbp = &hba->lrb[tag];
2723 WARN_ON(lrbp->cmd);
2724 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2725 if (unlikely(err))
2726 goto out_put_tag;
2727
2728 hba->dev_cmd.complete = &wait;
2729
6667e6d9 2730 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
e3dfdc53
YG
2731 /* Make sure descriptors are ready before ringing the doorbell */
2732 wmb();
5a0b0cb9 2733 spin_lock_irqsave(hba->host->host_lock, flags);
5905d464 2734 ufshcd_vops_setup_xfer_req(hba, tag, false);
5a0b0cb9
SRT
2735 ufshcd_send_command(hba, tag);
2736 spin_unlock_irqrestore(hba->host->host_lock, flags);
2737
2738 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2739
6667e6d9
OS
2740 ufshcd_add_query_upiu_trace(hba, tag,
2741 err ? "query_complete_err" : "query_complete");
2742
5a0b0cb9 2743out_put_tag:
7252a360 2744 blk_put_request(req);
bb14dd15 2745out_unlock:
a3cd5ec5 2746 up_read(&hba->clk_scaling_lock);
5a0b0cb9
SRT
2747 return err;
2748}
2749
d44a5f98
DR
2750/**
2751 * ufshcd_init_query() - init the query response and request parameters
2752 * @hba: per-adapter instance
2753 * @request: address of the request pointer to be initialized
2754 * @response: address of the response pointer to be initialized
2755 * @opcode: operation to perform
2756 * @idn: flag idn to access
2757 * @index: LU number to access
2758 * @selector: query/flag/descriptor further identification
2759 */
2760static inline void ufshcd_init_query(struct ufs_hba *hba,
2761 struct ufs_query_req **request, struct ufs_query_res **response,
2762 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2763{
2764 *request = &hba->dev_cmd.query.request;
2765 *response = &hba->dev_cmd.query.response;
2766 memset(*request, 0, sizeof(struct ufs_query_req));
2767 memset(*response, 0, sizeof(struct ufs_query_res));
2768 (*request)->upiu_req.opcode = opcode;
2769 (*request)->upiu_req.idn = idn;
2770 (*request)->upiu_req.index = index;
2771 (*request)->upiu_req.selector = selector;
2772}
2773
dc3c8d3a 2774static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1f34eedf 2775 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
dc3c8d3a
YG
2776{
2777 int ret;
2778 int retries;
2779
2780 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1f34eedf 2781 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
dc3c8d3a
YG
2782 if (ret)
2783 dev_dbg(hba->dev,
2784 "%s: failed with error %d, retries %d\n",
2785 __func__, ret, retries);
2786 else
2787 break;
2788 }
2789
2790 if (ret)
2791 dev_err(hba->dev,
2792 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2793 __func__, opcode, idn, ret, retries);
2794 return ret;
2795}
2796
68078d5c
DR
2797/**
2798 * ufshcd_query_flag() - API function for sending flag query requests
8aa29f19
BVA
2799 * @hba: per-adapter instance
2800 * @opcode: flag query to perform
2801 * @idn: flag idn to access
1f34eedf 2802 * @index: flag index to access
8aa29f19 2803 * @flag_res: the flag value after the query request completes
68078d5c
DR
2804 *
2805 * Returns 0 for success, non-zero in case of failure
2806 */
dc3c8d3a 2807int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1f34eedf 2808 enum flag_idn idn, u8 index, bool *flag_res)
68078d5c 2809{
d44a5f98
DR
2810 struct ufs_query_req *request = NULL;
2811 struct ufs_query_res *response = NULL;
1f34eedf 2812 int err, selector = 0;
e5ad406c 2813 int timeout = QUERY_REQ_TIMEOUT;
68078d5c
DR
2814
2815 BUG_ON(!hba);
2816
1ab27c9c 2817 ufshcd_hold(hba, false);
68078d5c 2818 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
2819 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2820 selector);
68078d5c
DR
2821
2822 switch (opcode) {
2823 case UPIU_QUERY_OPCODE_SET_FLAG:
2824 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2825 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2826 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2827 break;
2828 case UPIU_QUERY_OPCODE_READ_FLAG:
2829 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2830 if (!flag_res) {
2831 /* No dummy reads */
2832 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2833 __func__);
2834 err = -EINVAL;
2835 goto out_unlock;
2836 }
2837 break;
2838 default:
2839 dev_err(hba->dev,
2840 "%s: Expected query flag opcode but got = %d\n",
2841 __func__, opcode);
2842 err = -EINVAL;
2843 goto out_unlock;
2844 }
68078d5c 2845
e5ad406c 2846 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
68078d5c
DR
2847
2848 if (err) {
2849 dev_err(hba->dev,
2850 "%s: Sending flag query for idn %d failed, err = %d\n",
2851 __func__, idn, err);
2852 goto out_unlock;
2853 }
2854
2855 if (flag_res)
e8c8e82a 2856 *flag_res = (be32_to_cpu(response->upiu_res.value) &
68078d5c
DR
2857 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2858
2859out_unlock:
2860 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 2861 ufshcd_release(hba);
68078d5c
DR
2862 return err;
2863}
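/*
 * Typical use of the flag query API (a sketch only; the flag IDN below is
 * just an example):
 *
 *	bool flag_res;
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
 */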
2864
66ec6d59
SRT
2865/**
2866 * ufshcd_query_attr - API function for sending attribute requests
8aa29f19
BVA
2867 * @hba: per-adapter instance
2868 * @opcode: attribute opcode
2869 * @idn: attribute idn to access
2870 * @index: index field
2871 * @selector: selector field
2872 * @attr_val: the attribute value after the query request completes
66ec6d59
SRT
2873 *
2874 * Returns 0 for success, non-zero in case of failure
2875*/
ec92b59c
SN
2876int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2877 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
66ec6d59 2878{
d44a5f98
DR
2879 struct ufs_query_req *request = NULL;
2880 struct ufs_query_res *response = NULL;
66ec6d59
SRT
2881 int err;
2882
2883 BUG_ON(!hba);
2884
1ab27c9c 2885 ufshcd_hold(hba, false);
66ec6d59
SRT
2886 if (!attr_val) {
2887 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2888 __func__, opcode);
2889 err = -EINVAL;
2890 goto out;
2891 }
2892
2893 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
2894 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2895 selector);
66ec6d59
SRT
2896
2897 switch (opcode) {
2898 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2899 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
e8c8e82a 2900 request->upiu_req.value = cpu_to_be32(*attr_val);
66ec6d59
SRT
2901 break;
2902 case UPIU_QUERY_OPCODE_READ_ATTR:
2903 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2904 break;
2905 default:
2906 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2907 __func__, opcode);
2908 err = -EINVAL;
2909 goto out_unlock;
2910 }
2911
d44a5f98 2912 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
66ec6d59
SRT
2913
2914 if (err) {
4b761b58
YG
2915 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2916 __func__, opcode, idn, index, err);
66ec6d59
SRT
2917 goto out_unlock;
2918 }
2919
e8c8e82a 2920 *attr_val = be32_to_cpu(response->upiu_res.value);
66ec6d59
SRT
2921
2922out_unlock:
2923 mutex_unlock(&hba->dev_cmd.lock);
2924out:
1ab27c9c 2925 ufshcd_release(hba);
66ec6d59
SRT
2926 return err;
2927}
2928
5e86ae44
YG
2929/**
2930 * ufshcd_query_attr_retry() - API function for sending query
2931 * attribute with retries
2932 * @hba: per-adapter instance
2933 * @opcode: attribute opcode
2934 * @idn: attribute idn to access
2935 * @index: index field
2936 * @selector: selector field
2937 * @attr_val: the attribute value after the query request
2938 * completes
2939 *
2940 * Returns 0 for success, non-zero in case of failure
2941*/
2942static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2943 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2944 u32 *attr_val)
2945{
2946 int ret = 0;
2947 u32 retries;
2948
68c9fcfd 2949 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
5e86ae44
YG
2950 ret = ufshcd_query_attr(hba, opcode, idn, index,
2951 selector, attr_val);
2952 if (ret)
2953 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2954 __func__, ret, retries);
2955 else
2956 break;
2957 }
2958
2959 if (ret)
2960 dev_err(hba->dev,
2961 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2962 __func__, idn, ret, QUERY_REQ_RETRIES);
2963 return ret;
2964}
2965
a70e91b8 2966static int __ufshcd_query_descriptor(struct ufs_hba *hba,
d44a5f98
DR
2967 enum query_opcode opcode, enum desc_idn idn, u8 index,
2968 u8 selector, u8 *desc_buf, int *buf_len)
2969{
2970 struct ufs_query_req *request = NULL;
2971 struct ufs_query_res *response = NULL;
2972 int err;
2973
2974 BUG_ON(!hba);
2975
1ab27c9c 2976 ufshcd_hold(hba, false);
d44a5f98
DR
2977 if (!desc_buf) {
2978 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2979 __func__, opcode);
2980 err = -EINVAL;
2981 goto out;
2982 }
2983
a4b0e8a4 2984 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
d44a5f98
DR
2985 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2986 __func__, *buf_len);
2987 err = -EINVAL;
2988 goto out;
2989 }
2990
2991 mutex_lock(&hba->dev_cmd.lock);
2992 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2993 selector);
2994 hba->dev_cmd.query.descriptor = desc_buf;
ea2aab24 2995 request->upiu_req.length = cpu_to_be16(*buf_len);
d44a5f98
DR
2996
2997 switch (opcode) {
2998 case UPIU_QUERY_OPCODE_WRITE_DESC:
2999 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3000 break;
3001 case UPIU_QUERY_OPCODE_READ_DESC:
3002 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3003 break;
3004 default:
3005 dev_err(hba->dev,
3006 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3007 __func__, opcode);
3008 err = -EINVAL;
3009 goto out_unlock;
3010 }
3011
3012 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3013
3014 if (err) {
4b761b58
YG
3015 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3016 __func__, opcode, idn, index, err);
d44a5f98
DR
3017 goto out_unlock;
3018 }
3019
ea2aab24 3020 *buf_len = be16_to_cpu(response->upiu_res.length);
d44a5f98
DR
3021
3022out_unlock:
cfcbae38 3023 hba->dev_cmd.query.descriptor = NULL;
d44a5f98
DR
3024 mutex_unlock(&hba->dev_cmd.lock);
3025out:
1ab27c9c 3026 ufshcd_release(hba);
d44a5f98
DR
3027 return err;
3028}
3029
a70e91b8 3030/**
8aa29f19
BVA
3031 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3032 * @hba: per-adapter instance
3033 * @opcode: attribute opcode
3034 * @idn: attribute idn to access
3035 * @index: index field
3036 * @selector: selector field
3037 * @desc_buf: the buffer that contains the descriptor
3038 * @buf_len: length parameter passed to the device
a70e91b8
YG
3039 *
3040 * Returns 0 for success, non-zero in case of failure.
3041 * The buf_len parameter will contain, on return, the length parameter
3042 * received on the response.
3043 */
2238d31c
SN
3044int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3045 enum query_opcode opcode,
3046 enum desc_idn idn, u8 index,
3047 u8 selector,
3048 u8 *desc_buf, int *buf_len)
a70e91b8
YG
3049{
3050 int err;
3051 int retries;
3052
3053 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3054 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3055 selector, desc_buf, buf_len);
3056 if (!err || err == -EINVAL)
3057 break;
3058 }
3059
3060 return err;
3061}
a70e91b8 3062
a4b0e8a4
PM
3063/**
3064 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3065 * @hba: Pointer to adapter instance
3066 * @desc_id: descriptor idn value
3067 * @desc_len: mapped desc length (out)
a4b0e8a4 3068 */
7a0bf85b
BH
3069void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3070 int *desc_len)
a4b0e8a4 3071{
7a0bf85b
BH
3072 if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3073 desc_id == QUERY_DESC_IDN_RFU_1)
a4b0e8a4 3074 *desc_len = 0;
7a0bf85b
BH
3075 else
3076 *desc_len = hba->desc_size[desc_id];
a4b0e8a4
PM
3077}
3078EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3079
7a0bf85b 3080static void ufshcd_update_desc_length(struct ufs_hba *hba,
72fb690e 3081 enum desc_idn desc_id, int desc_index,
7a0bf85b
BH
3082 unsigned char desc_len)
3083{
3084 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
72fb690e
BH
3085 desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3086		/* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3087		 * than the RPMB unit descriptor. However, both descriptors share
3088		 * the same desc_idn, so to cover both with a single length we
3089		 * choose the normal unit descriptor length, selected by desc_index.
3090 */
7a0bf85b
BH
3091 hba->desc_size[desc_id] = desc_len;
3092}
3093
da461cec
SJ
3094/**
3095 * ufshcd_read_desc_param - read the specified descriptor parameter
3096 * @hba: Pointer to adapter instance
3097 * @desc_id: descriptor idn value
3098 * @desc_index: descriptor index
3099 * @param_offset: offset of the parameter to read
3100 * @param_read_buf: pointer to buffer where parameter would be read
3101 * @param_size: sizeof(param_read_buf)
3102 *
3103 * Return 0 in case of success, non-zero otherwise
3104 */
45bced87
SN
3105int ufshcd_read_desc_param(struct ufs_hba *hba,
3106 enum desc_idn desc_id,
3107 int desc_index,
3108 u8 param_offset,
3109 u8 *param_read_buf,
3110 u8 param_size)
da461cec
SJ
3111{
3112 int ret;
3113 u8 *desc_buf;
a4b0e8a4 3114 int buff_len;
da461cec
SJ
3115 bool is_kmalloc = true;
3116
a4b0e8a4
PM
3117 /* Safety check */
3118 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
da461cec
SJ
3119 return -EINVAL;
3120
7a0bf85b
BH
3121 /* Get the length of descriptor */
3122 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3123 if (!buff_len) {
3124 dev_err(hba->dev, "%s: Failed to get desc length", __func__);
3125 return -EINVAL;
a4b0e8a4
PM
3126 }
3127
3128 /* Check whether we need temp memory */
3129 if (param_offset != 0 || param_size < buff_len) {
da461cec
SJ
3130 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3131 if (!desc_buf)
3132 return -ENOMEM;
a4b0e8a4
PM
3133 } else {
3134 desc_buf = param_read_buf;
3135 is_kmalloc = false;
da461cec
SJ
3136 }
3137
a4b0e8a4 3138 /* Request for full descriptor */
a70e91b8 3139 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
a4b0e8a4
PM
3140 desc_id, desc_index, 0,
3141 desc_buf, &buff_len);
da461cec 3142
bde44bb6 3143 if (ret) {
3144 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3145 __func__, desc_id, desc_index, param_offset, ret);
da461cec
SJ
3146 goto out;
3147 }
3148
bde44bb6 3149 /* Sanity check */
3150 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3151 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3152 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3153 ret = -EINVAL;
3154 goto out;
3155 }
3156
7a0bf85b
BH
3157 /* Update descriptor length */
3158 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
72fb690e 3159 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
7a0bf85b 3160
a4b0e8a4 3161	/* Make sure we do not copy more data than is available */
cbe193f6
BH
3162 if (is_kmalloc && (param_offset + param_size) > buff_len)
3163 param_size = buff_len - param_offset;
bde44bb6 3164
da461cec
SJ
3165 if (is_kmalloc)
3166 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3167out:
3168 if (is_kmalloc)
3169 kfree(desc_buf);
3170 return ret;
3171}
3172
4b828fe1
TW
3173/**
3174 * struct uc_string_id - unicode string
3175 *
3176 * @len: size of this descriptor inclusive
3177 * @type: descriptor type
3178 * @uc: unicode string character
3179 */
3180struct uc_string_id {
3181 u8 len;
3182 u8 type;
ec38c0ad 3183 wchar_t uc[];
4b828fe1
TW
3184} __packed;
3185
3186/* replace non-printable or non-ASCII characters with spaces */
3187static inline char ufshcd_remove_non_printable(u8 ch)
3188{
3189 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3190}
3191
b573d484
YG
3192/**
3193 * ufshcd_read_string_desc - read string descriptor
3194 * @hba: pointer to adapter instance
3195 * @desc_index: descriptor index
4b828fe1
TW
3196 * @buf: pointer to buffer where descriptor would be read,
3197 * the caller should free the memory.
b573d484 3198 * @ascii: if true convert from unicode to ascii characters
4b828fe1 3199 * null terminated string.
b573d484 3200 *
4b828fe1
TW
3201 * Return:
3202 * * string size on success.
3203 * * -ENOMEM: on allocation failure
3204 * * -EINVAL: on a wrong parameter
b573d484 3205 */
4b828fe1
TW
3206int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3207 u8 **buf, bool ascii)
b573d484 3208{
4b828fe1
TW
3209 struct uc_string_id *uc_str;
3210 u8 *str;
3211 int ret;
b573d484 3212
4b828fe1
TW
3213 if (!buf)
3214 return -EINVAL;
b573d484 3215
4b828fe1
TW
3216 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3217 if (!uc_str)
3218 return -ENOMEM;
b573d484 3219
c4607a09
BH
3220 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3221 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
4b828fe1
TW
3222 if (ret < 0) {
3223 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3224 QUERY_REQ_RETRIES, ret);
3225 str = NULL;
3226 goto out;
3227 }
3228
3229 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3230 dev_dbg(hba->dev, "String Desc is of zero length\n");
3231 str = NULL;
3232 ret = 0;
b573d484
YG
3233 goto out;
3234 }
3235
3236 if (ascii) {
4b828fe1 3237 ssize_t ascii_len;
b573d484 3238 int i;
b573d484 3239 /* remove header and divide by 2 to move from UTF16 to UTF8 */
4b828fe1
TW
3240 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3241 str = kzalloc(ascii_len, GFP_KERNEL);
3242 if (!str) {
3243 ret = -ENOMEM;
fcbefc3b 3244 goto out;
b573d484
YG
3245 }
3246
3247 /*
3248 * the descriptor contains string in UTF16 format
3249 * we need to convert to utf-8 so it can be displayed
3250 */
4b828fe1
TW
3251 ret = utf16s_to_utf8s(uc_str->uc,
3252 uc_str->len - QUERY_DESC_HDR_SIZE,
3253 UTF16_BIG_ENDIAN, str, ascii_len);
b573d484
YG
3254
3255 /* replace non-printable or non-ASCII characters with spaces */
4b828fe1
TW
3256 for (i = 0; i < ret; i++)
3257 str[i] = ufshcd_remove_non_printable(str[i]);
b573d484 3258
4b828fe1
TW
3259 str[ret++] = '\0';
3260
3261 } else {
5f57704d 3262 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
4b828fe1
TW
3263 if (!str) {
3264 ret = -ENOMEM;
3265 goto out;
3266 }
4b828fe1 3267 ret = uc_str->len;
b573d484
YG
3268 }
3269out:
4b828fe1
TW
3270 *buf = str;
3271 kfree(uc_str);
3272 return ret;
b573d484 3273}
b573d484 3274
da461cec
SJ
3275/**
3276 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3277 * @hba: Pointer to adapter instance
3278 * @lun: lun id
3279 * @param_offset: offset of the parameter to read
3280 * @param_read_buf: pointer to buffer where parameter would be read
3281 * @param_size: sizeof(param_read_buf)
3282 *
3283 * Return 0 in case of success, non-zero otherwise
3284 */
3285static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3286 int lun,
3287 enum unit_desc_param param_offset,
3288 u8 *param_read_buf,
3289 u32 param_size)
3290{
3291 /*
3292 * Unit descriptors are only available for general purpose LUs (LUN id
3293 * from 0 to 7) and RPMB Well known LU.
3294 */
1baa8011 3295 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
da461cec
SJ
3296 return -EOPNOTSUPP;
3297
3298 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3299 param_offset, param_read_buf, param_size);
3300}
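/*
 * Example (a sketch): read bLUQueueDepth from the unit descriptor of LUN 0.
 *
 *	u8 lun_qdepth;
 *	ret = ufshcd_read_unit_desc_param(hba, 0, UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					  &lun_qdepth, sizeof(lun_qdepth));
 */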
3301
09f17791
CG
3302static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3303{
3304 int err = 0;
3305 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3306
3307 if (hba->dev_info.wspecversion >= 0x300) {
3308 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3309 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3310 &gating_wait);
3311 if (err)
3312 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3313 err, gating_wait);
3314
3315 if (gating_wait == 0) {
3316 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3317 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3318 gating_wait);
3319 }
3320
3321 hba->dev_info.clk_gating_wait_us = gating_wait;
3322 }
3323
3324 return err;
3325}
3326
7a3e97b0
SY
3327/**
3328 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3329 * @hba: per adapter instance
3330 *
3331 * 1. Allocate DMA memory for Command Descriptor array
3332 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3333 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3334 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3335 * (UTMRDL)
3336 * 4. Allocate memory for local reference block(lrb).
3337 *
3338 * Returns 0 for success, non-zero in case of failure
3339 */
3340static int ufshcd_memory_alloc(struct ufs_hba *hba)
3341{
3342 size_t utmrdl_size, utrdl_size, ucdl_size;
3343
3344 /* Allocate memory for UTP command descriptors */
3345 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2953f850
SJ
3346 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3347 ucdl_size,
3348 &hba->ucdl_dma_addr,
3349 GFP_KERNEL);
7a3e97b0
SY
3350
3351 /*
3352 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3353 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3354 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3355 * be aligned to 128 bytes as well
3356 */
3357 if (!hba->ucdl_base_addr ||
3358 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3359 dev_err(hba->dev,
7a3e97b0
SY
3360 "Command Descriptor Memory allocation failed\n");
3361 goto out;
3362 }
3363
3364 /*
3365 * Allocate memory for UTP Transfer descriptors
3366 * UFSHCI requires 1024 byte alignment of UTRD
3367 */
3368 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2953f850
SJ
3369 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3370 utrdl_size,
3371 &hba->utrdl_dma_addr,
3372 GFP_KERNEL);
7a3e97b0
SY
3373 if (!hba->utrdl_base_addr ||
3374 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3375 dev_err(hba->dev,
7a3e97b0
SY
3376 "Transfer Descriptor Memory allocation failed\n");
3377 goto out;
3378 }
3379
3380 /*
3381 * Allocate memory for UTP Task Management descriptors
3382 * UFSHCI requires 1024 byte alignment of UTMRD
3383 */
3384 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2953f850
SJ
3385 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3386 utmrdl_size,
3387 &hba->utmrdl_dma_addr,
3388 GFP_KERNEL);
7a3e97b0
SY
3389 if (!hba->utmrdl_base_addr ||
3390 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3391 dev_err(hba->dev,
7a3e97b0
SY
3392 "Task Management Descriptor Memory allocation failed\n");
3393 goto out;
3394 }
3395
3396 /* Allocate memory for local reference block */
a86854d0
KC
3397 hba->lrb = devm_kcalloc(hba->dev,
3398 hba->nutrs, sizeof(struct ufshcd_lrb),
2953f850 3399 GFP_KERNEL);
7a3e97b0 3400 if (!hba->lrb) {
3b1d0580 3401 dev_err(hba->dev, "LRB Memory allocation failed\n");
7a3e97b0
SY
3402 goto out;
3403 }
3404 return 0;
3405out:
7a3e97b0
SY
3406 return -ENOMEM;
3407}
3408
3409/**
3410 * ufshcd_host_memory_configure - configure local reference block with
3411 * memory offsets
3412 * @hba: per adapter instance
3413 *
3414 * Configure Host memory space
3415 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3416 * address.
3417 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3418 * and PRDT offset.
3419 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3420 * into local reference block.
3421 */
3422static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3423{
7a3e97b0
SY
3424 struct utp_transfer_req_desc *utrdlp;
3425 dma_addr_t cmd_desc_dma_addr;
3426 dma_addr_t cmd_desc_element_addr;
3427 u16 response_offset;
3428 u16 prdt_offset;
3429 int cmd_desc_size;
3430 int i;
3431
3432 utrdlp = hba->utrdl_base_addr;
7a3e97b0
SY
3433
3434 response_offset =
3435 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3436 prdt_offset =
3437 offsetof(struct utp_transfer_cmd_desc, prd_table);
3438
3439 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3440 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3441
3442 for (i = 0; i < hba->nutrs; i++) {
3443 /* Configure UTRD with command descriptor base address */
3444 cmd_desc_element_addr =
3445 (cmd_desc_dma_addr + (cmd_desc_size * i));
3446 utrdlp[i].command_desc_base_addr_lo =
3447 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3448 utrdlp[i].command_desc_base_addr_hi =
3449 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3450
3451 /* Response upiu and prdt offset should be in double words */
26f968d7
AA
3452 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3453 utrdlp[i].response_upiu_offset =
3454 cpu_to_le16(response_offset);
3455 utrdlp[i].prd_table_offset =
3456 cpu_to_le16(prdt_offset);
3457 utrdlp[i].response_upiu_length =
3458 cpu_to_le16(ALIGNED_UPIU_SIZE);
3459 } else {
3460 utrdlp[i].response_upiu_offset =
3461 cpu_to_le16(response_offset >> 2);
3462 utrdlp[i].prd_table_offset =
3463 cpu_to_le16(prdt_offset >> 2);
3464 utrdlp[i].response_upiu_length =
3465 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3466 }
7a3e97b0 3467
4d2b8d40 3468 ufshcd_init_lrb(hba, &hba->lrb[i], i);
7a3e97b0
SY
3469 }
3470}
3471
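/*
 * Worked example (illustration only, assuming the usual UCD layout in which
 * the command UPIU occupies the first ALIGNED_UPIU_SIZE = 512 bytes):
 * response_offset is then 512 and prdt_offset is 1024. A standard host gets
 * the dword-granular values 512 >> 2 = 128 and 1024 >> 2 = 256 programmed
 * into each UTRD, while a host with UFSHCD_QUIRK_PRDT_BYTE_GRAN set gets the
 * raw byte offsets instead.
 */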
3472/**
3473 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3474 * @hba: per adapter instance
3475 *
3476 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3477 * in order to initialize the Unipro link startup procedure.
3478 * Once the Unipro links are up, the device connected to the controller
3479 * is detected.
3480 *
3481 * Returns 0 on success, non-zero value on failure
3482 */
3483static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3484{
6ccf44fe
SJ
3485 struct uic_command uic_cmd = {0};
3486 int ret;
7a3e97b0 3487
6ccf44fe 3488 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
7a3e97b0 3489
6ccf44fe
SJ
3490 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3491 if (ret)
ff8e20c6 3492 dev_dbg(hba->dev,
6ccf44fe
SJ
3493 "dme-link-startup: error code %d\n", ret);
3494 return ret;
7a3e97b0 3495}
39bf2d83
AA
3496/**
3497 * ufshcd_dme_reset - UIC command for DME_RESET
3498 * @hba: per adapter instance
3499 *
3500 * DME_RESET command is issued in order to reset UniPro stack.
3501 * This function now deals with cold reset.
3502 *
3503 * Returns 0 on success, non-zero value on failure
3504 */
3505static int ufshcd_dme_reset(struct ufs_hba *hba)
3506{
3507 struct uic_command uic_cmd = {0};
3508 int ret;
3509
3510 uic_cmd.command = UIC_CMD_DME_RESET;
3511
3512 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3513 if (ret)
3514 dev_err(hba->dev,
3515 "dme-reset: error code %d\n", ret);
3516
3517 return ret;
3518}
3519
3520/**
3521 * ufshcd_dme_enable - UIC command for DME_ENABLE
3522 * @hba: per adapter instance
3523 *
3524 * DME_ENABLE command is issued in order to enable UniPro stack.
3525 *
3526 * Returns 0 on success, non-zero value on failure
3527 */
3528static int ufshcd_dme_enable(struct ufs_hba *hba)
3529{
3530 struct uic_command uic_cmd = {0};
3531 int ret;
3532
3533 uic_cmd.command = UIC_CMD_DME_ENABLE;
3534
3535 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3536 if (ret)
3537 dev_err(hba->dev,
 3538 "dme-enable: error code %d\n", ret);
3539
3540 return ret;
3541}
7a3e97b0 3542
cad2e03d
YG
3543static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3544{
3545 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3546 unsigned long min_sleep_time_us;
3547
3548 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3549 return;
3550
3551 /*
3552 * last_dme_cmd_tstamp will be 0 only for 1st call to
3553 * this function
3554 */
3555 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3556 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3557 } else {
3558 unsigned long delta =
3559 (unsigned long) ktime_to_us(
3560 ktime_sub(ktime_get(),
3561 hba->last_dme_cmd_tstamp));
3562
3563 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3564 min_sleep_time_us =
3565 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3566 else
3567 return; /* no more delay required */
3568 }
3569
3570 /* allow sleep for extra 50us if needed */
3571 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3572}
3573
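/*
 * Worked example (illustration only): with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
 * set and the previous DME command issued 300 us ago, delta = 300 is below
 * MIN_DELAY_BEFORE_DME_CMDS_US, so the next command is held off for roughly
 * 700-750 us; once 1000 us or more have elapsed, no extra sleep is taken.
 */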
12b4fdb4
SJ
3574/**
3575 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3576 * @hba: per adapter instance
3577 * @attr_sel: uic command argument1
3578 * @attr_set: attribute set type as uic command argument2
3579 * @mib_val: setting value as uic command argument3
3580 * @peer: indicate whether peer or local
3581 *
3582 * Returns 0 on success, non-zero value on failure
3583 */
3584int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3585 u8 attr_set, u32 mib_val, u8 peer)
3586{
3587 struct uic_command uic_cmd = {0};
3588 static const char *const action[] = {
3589 "dme-set",
3590 "dme-peer-set"
3591 };
3592 const char *set = action[!!peer];
3593 int ret;
64238fbd 3594 int retries = UFS_UIC_COMMAND_RETRIES;
12b4fdb4
SJ
3595
3596 uic_cmd.command = peer ?
3597 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3598 uic_cmd.argument1 = attr_sel;
3599 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3600 uic_cmd.argument3 = mib_val;
3601
64238fbd
YG
3602 do {
3603 /* for peer attributes we retry upon failure */
3604 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3605 if (ret)
3606 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3607 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3608 } while (ret && peer && --retries);
3609
f37e9f8c 3610 if (ret)
64238fbd 3611 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
f37e9f8c
YG
3612 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3613 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4
SJ
3614
3615 return ret;
3616}
3617EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3618
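/*
 * Usage sketch (illustration only): callers normally reach this function
 * through the ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers rather than
 * calling ufshcd_dme_set_attr() directly, e.g. when programming power mode
 * attributes later in this file:
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
 *	ufshcd_dme_peer_set(hba,
 *		UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
 *				UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), 0);
 *
 * Both wrappers funnel into this function with the local/peer flag set
 * accordingly.
 */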
3619/**
3620 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3621 * @hba: per adapter instance
3622 * @attr_sel: uic command argument1
3623 * @mib_val: the value of the attribute as returned by the UIC command
3624 * @peer: indicate whether peer or local
3625 *
3626 * Returns 0 on success, non-zero value on failure
3627 */
3628int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3629 u32 *mib_val, u8 peer)
3630{
3631 struct uic_command uic_cmd = {0};
3632 static const char *const action[] = {
3633 "dme-get",
3634 "dme-peer-get"
3635 };
3636 const char *get = action[!!peer];
3637 int ret;
64238fbd 3638 int retries = UFS_UIC_COMMAND_RETRIES;
874237f7
YG
3639 struct ufs_pa_layer_attr orig_pwr_info;
3640 struct ufs_pa_layer_attr temp_pwr_info;
3641 bool pwr_mode_change = false;
3642
3643 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3644 orig_pwr_info = hba->pwr_info;
3645 temp_pwr_info = orig_pwr_info;
3646
3647 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3648 orig_pwr_info.pwr_rx == FAST_MODE) {
3649 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3650 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3651 pwr_mode_change = true;
3652 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3653 orig_pwr_info.pwr_rx == SLOW_MODE) {
3654 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3655 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3656 pwr_mode_change = true;
3657 }
3658 if (pwr_mode_change) {
3659 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3660 if (ret)
3661 goto out;
3662 }
3663 }
12b4fdb4
SJ
3664
3665 uic_cmd.command = peer ?
3666 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3667 uic_cmd.argument1 = attr_sel;
3668
64238fbd
YG
3669 do {
3670 /* for peer attributes we retry upon failure */
3671 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3672 if (ret)
3673 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3674 get, UIC_GET_ATTR_ID(attr_sel), ret);
3675 } while (ret && peer && --retries);
3676
f37e9f8c 3677 if (ret)
64238fbd 3678 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
f37e9f8c
YG
3679 get, UIC_GET_ATTR_ID(attr_sel),
3680 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4 3681
64238fbd 3682 if (mib_val && !ret)
12b4fdb4 3683 *mib_val = uic_cmd.argument3;
874237f7
YG
3684
3685 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3686 && pwr_mode_change)
3687 ufshcd_change_power_mode(hba, &orig_pwr_info);
12b4fdb4
SJ
3688out:
3689 return ret;
3690}
3691EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3692
53b3d9c3 3693/**
57d104c1
SJ
 3694 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 3695 * state) and waits for them to take effect.
3696 *
53b3d9c3 3697 * @hba: per adapter instance
57d104c1
SJ
3698 * @cmd: UIC command to execute
3699 *
3700 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 3701 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 3702 * and device UniPro links, and hence their final completion is indicated by
3703 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3704 * addition to normal UIC command completion Status (UCCS). This function only
3705 * returns after the relevant status bits indicate the completion.
53b3d9c3
SJ
3706 *
3707 * Returns 0 on success, non-zero value on failure
3708 */
57d104c1 3709static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
53b3d9c3 3710{
57d104c1 3711 struct completion uic_async_done;
53b3d9c3
SJ
3712 unsigned long flags;
3713 u8 status;
3714 int ret;
d75f7fe4 3715 bool reenable_intr = false;
53b3d9c3 3716
53b3d9c3 3717 mutex_lock(&hba->uic_cmd_mutex);
57d104c1 3718 init_completion(&uic_async_done);
cad2e03d 3719 ufshcd_add_delay_before_dme_cmd(hba);
53b3d9c3
SJ
3720
3721 spin_lock_irqsave(hba->host->host_lock, flags);
57d104c1 3722 hba->uic_async_done = &uic_async_done;
d75f7fe4
YG
3723 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3724 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3725 /*
3726 * Make sure UIC command completion interrupt is disabled before
3727 * issuing UIC command.
3728 */
3729 wmb();
3730 reenable_intr = true;
57d104c1 3731 }
d75f7fe4
YG
3732 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3733 spin_unlock_irqrestore(hba->host->host_lock, flags);
57d104c1
SJ
3734 if (ret) {
3735 dev_err(hba->dev,
3736 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3737 cmd->command, cmd->argument3, ret);
53b3d9c3
SJ
3738 goto out;
3739 }
3740
57d104c1 3741 if (!wait_for_completion_timeout(hba->uic_async_done,
53b3d9c3
SJ
3742 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3743 dev_err(hba->dev,
57d104c1
SJ
3744 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3745 cmd->command, cmd->argument3);
53b3d9c3
SJ
3746 ret = -ETIMEDOUT;
3747 goto out;
3748 }
3749
3750 status = ufshcd_get_upmcrs(hba);
3751 if (status != PWR_LOCAL) {
3752 dev_err(hba->dev,
479da360 3753 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
57d104c1 3754 cmd->command, status);
53b3d9c3
SJ
3755 ret = (status != PWR_OK) ? status : -1;
3756 }
3757out:
7942f7b5
VG
3758 if (ret) {
3759 ufshcd_print_host_state(hba);
3760 ufshcd_print_pwr_info(hba);
3761 ufshcd_print_host_regs(hba);
3762 }
3763
53b3d9c3 3764 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 3765 hba->active_uic_cmd = NULL;
57d104c1 3766 hba->uic_async_done = NULL;
d75f7fe4
YG
3767 if (reenable_intr)
3768 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
53b3d9c3
SJ
3769 spin_unlock_irqrestore(hba->host->host_lock, flags);
3770 mutex_unlock(&hba->uic_cmd_mutex);
1ab27c9c 3771
53b3d9c3
SJ
3772 return ret;
3773}
3774
57d104c1
SJ
3775/**
 3776 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3777 * using DME_SET primitives.
3778 * @hba: per adapter instance
 3779 * @mode: power mode value
3780 *
3781 * Returns 0 on success, non-zero value on failure
3782 */
3783static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3784{
3785 struct uic_command uic_cmd = {0};
1ab27c9c 3786 int ret;
57d104c1 3787
c3a2f9ee
YG
3788 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3789 ret = ufshcd_dme_set(hba,
3790 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3791 if (ret) {
3792 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3793 __func__, ret);
3794 goto out;
3795 }
3796 }
3797
57d104c1
SJ
3798 uic_cmd.command = UIC_CMD_DME_SET;
3799 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3800 uic_cmd.argument3 = mode;
1ab27c9c
ST
3801 ufshcd_hold(hba, false);
3802 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3803 ufshcd_release(hba);
57d104c1 3804
c3a2f9ee 3805out:
1ab27c9c 3806 return ret;
57d104c1
SJ
3807}
3808
087c5efa 3809int ufshcd_link_recovery(struct ufs_hba *hba)
53c12d0e
YG
3810{
3811 int ret;
3812 unsigned long flags;
3813
3814 spin_lock_irqsave(hba->host->host_lock, flags);
3815 hba->ufshcd_state = UFSHCD_STATE_RESET;
3816 ufshcd_set_eh_in_progress(hba);
3817 spin_unlock_irqrestore(hba->host->host_lock, flags);
3818
ebdd1dfd
CG
3819 /* Reset the attached device */
3820 ufshcd_vops_device_reset(hba);
3821
53c12d0e
YG
3822 ret = ufshcd_host_reset_and_restore(hba);
3823
3824 spin_lock_irqsave(hba->host->host_lock, flags);
3825 if (ret)
3826 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3827 ufshcd_clear_eh_in_progress(hba);
3828 spin_unlock_irqrestore(hba->host->host_lock, flags);
3829
3830 if (ret)
3831 dev_err(hba->dev, "%s: link recovery failed, err %d",
3832 __func__, ret);
3833
3834 return ret;
3835}
087c5efa 3836EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
53c12d0e 3837
87d0b4a6 3838static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
57d104c1 3839{
87d0b4a6 3840 int ret;
57d104c1 3841 struct uic_command uic_cmd = {0};
911a0771 3842 ktime_t start = ktime_get();
57d104c1 3843
ee32c909
KK
3844 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3845
57d104c1 3846 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
87d0b4a6 3847 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771 3848 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3849 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
87d0b4a6 3850
53c12d0e 3851 if (ret) {
6d303e4b
SJ
3852 int err;
3853
87d0b4a6
YG
3854 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3855 __func__, ret);
3856
53c12d0e 3857 /*
6d303e4b
SJ
3858 * If link recovery fails then return error code returned from
3859 * ufshcd_link_recovery().
3860 * If link recovery succeeds then return -EAGAIN to attempt
3861 * hibern8 enter retry again.
53c12d0e 3862 */
6d303e4b
SJ
3863 err = ufshcd_link_recovery(hba);
3864 if (err) {
3865 dev_err(hba->dev, "%s: link recovery failed", __func__);
3866 ret = err;
3867 } else {
3868 ret = -EAGAIN;
3869 }
ee32c909
KK
3870 } else
3871 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3872 POST_CHANGE);
53c12d0e 3873
87d0b4a6
YG
3874 return ret;
3875}
3876
3877static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3878{
3879 int ret = 0, retries;
57d104c1 3880
87d0b4a6
YG
3881 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3882 ret = __ufshcd_uic_hibern8_enter(hba);
6d303e4b 3883 if (!ret)
87d0b4a6
YG
3884 goto out;
3885 }
3886out:
3887 return ret;
57d104c1
SJ
3888}
3889
9d19bf7a 3890int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
57d104c1
SJ
3891{
3892 struct uic_command uic_cmd = {0};
3893 int ret;
911a0771 3894 ktime_t start = ktime_get();
57d104c1 3895
ee32c909
KK
3896 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3897
57d104c1
SJ
3898 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3899 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771 3900 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3901 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3902
57d104c1 3903 if (ret) {
53c12d0e
YG
3904 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3905 __func__, ret);
3906 ret = ufshcd_link_recovery(hba);
ff8e20c6 3907 } else {
ee32c909
KK
3908 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3909 POST_CHANGE);
ff8e20c6
DR
3910 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3911 hba->ufs_stats.hibern8_exit_cnt++;
3912 }
57d104c1
SJ
3913
3914 return ret;
3915}
9d19bf7a 3916EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
57d104c1 3917
ba7af5ec
SC
3918void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3919{
3920 unsigned long flags;
be7594a4 3921 bool update = false;
ba7af5ec 3922
be7594a4 3923 if (!ufshcd_is_auto_hibern8_supported(hba))
ba7af5ec
SC
3924 return;
3925
3926 spin_lock_irqsave(hba->host->host_lock, flags);
be7594a4
CG
3927 if (hba->ahit != ahit) {
3928 hba->ahit = ahit;
3929 update = true;
3930 }
ba7af5ec 3931 spin_unlock_irqrestore(hba->host->host_lock, flags);
be7594a4
CG
3932
3933 if (update && !pm_runtime_suspended(hba->dev)) {
3934 pm_runtime_get_sync(hba->dev);
3935 ufshcd_hold(hba, false);
3936 ufshcd_auto_hibern8_enable(hba);
3937 ufshcd_release(hba);
3938 pm_runtime_put(hba->dev);
3939 }
ba7af5ec
SC
3940}
3941EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
3942
71d848b8 3943void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
ad448378
AH
3944{
3945 unsigned long flags;
3946
ee5f1042 3947 if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
ad448378
AH
3948 return;
3949
3950 spin_lock_irqsave(hba->host->host_lock, flags);
3951 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3952 spin_unlock_irqrestore(hba->host->host_lock, flags);
3953}
3954
5064636c
YG
3955 /**
3956 * ufshcd_init_pwr_info - setting the POR (power on reset)
3957 * values in hba power info
3958 * @hba: per-adapter instance
3959 */
3960static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3961{
3962 hba->pwr_info.gear_rx = UFS_PWM_G1;
3963 hba->pwr_info.gear_tx = UFS_PWM_G1;
3964 hba->pwr_info.lane_rx = 1;
3965 hba->pwr_info.lane_tx = 1;
3966 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3967 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3968 hba->pwr_info.hs_rate = 0;
3969}
3970
d3e89bac 3971/**
7eb584db
DR
3972 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3973 * @hba: per-adapter instance
d3e89bac 3974 */
7eb584db 3975static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
d3e89bac 3976{
7eb584db
DR
3977 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3978
3979 if (hba->max_pwr_info.is_valid)
3980 return 0;
3981
2349b533 3982 pwr_info->pwr_tx = FAST_MODE;
3983 pwr_info->pwr_rx = FAST_MODE;
7eb584db 3984 pwr_info->hs_rate = PA_HS_MODE_B;
d3e89bac
SJ
3985
3986 /* Get the connected lane count */
7eb584db
DR
3987 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3988 &pwr_info->lane_rx);
3989 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3990 &pwr_info->lane_tx);
3991
3992 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3993 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3994 __func__,
3995 pwr_info->lane_rx,
3996 pwr_info->lane_tx);
3997 return -EINVAL;
3998 }
d3e89bac
SJ
3999
4000 /*
4001 * First, get the maximum gears of HS speed.
4002 * If a zero value, it means there is no HSGEAR capability.
4003 * Then, get the maximum gears of PWM speed.
4004 */
7eb584db
DR
4005 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4006 if (!pwr_info->gear_rx) {
4007 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4008 &pwr_info->gear_rx);
4009 if (!pwr_info->gear_rx) {
4010 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4011 __func__, pwr_info->gear_rx);
4012 return -EINVAL;
4013 }
2349b533 4014 pwr_info->pwr_rx = SLOW_MODE;
d3e89bac
SJ
4015 }
4016
7eb584db
DR
4017 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4018 &pwr_info->gear_tx);
4019 if (!pwr_info->gear_tx) {
d3e89bac 4020 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
7eb584db
DR
4021 &pwr_info->gear_tx);
4022 if (!pwr_info->gear_tx) {
4023 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4024 __func__, pwr_info->gear_tx);
4025 return -EINVAL;
4026 }
2349b533 4027 pwr_info->pwr_tx = SLOW_MODE;
7eb584db
DR
4028 }
4029
4030 hba->max_pwr_info.is_valid = true;
4031 return 0;
4032}
4033
4034static int ufshcd_change_power_mode(struct ufs_hba *hba,
4035 struct ufs_pa_layer_attr *pwr_mode)
4036{
4037 int ret;
4038
4039 /* if already configured to the requested pwr_mode */
4040 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4041 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4042 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4043 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4044 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4045 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4046 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4047 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4048 return 0;
d3e89bac
SJ
4049 }
4050
4051 /*
4052 * Configure attributes for power mode change with below.
4053 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4054 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4055 * - PA_HSSERIES
4056 */
7eb584db
DR
4057 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4058 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4059 pwr_mode->lane_rx);
4060 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4061 pwr_mode->pwr_rx == FAST_MODE)
d3e89bac 4062 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
7eb584db
DR
4063 else
4064 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
d3e89bac 4065
7eb584db
DR
4066 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4067 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4068 pwr_mode->lane_tx);
4069 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4070 pwr_mode->pwr_tx == FAST_MODE)
d3e89bac 4071 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
7eb584db
DR
4072 else
4073 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
d3e89bac 4074
7eb584db
DR
4075 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4076 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4077 pwr_mode->pwr_rx == FAST_MODE ||
4078 pwr_mode->pwr_tx == FAST_MODE)
4079 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4080 pwr_mode->hs_rate);
d3e89bac 4081
08342537
CG
4082 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4083 DL_FC0ProtectionTimeOutVal_Default);
4084 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4085 DL_TC0ReplayTimeOutVal_Default);
4086 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4087 DL_AFC0ReqTimeOutVal_Default);
4088 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4089 DL_FC1ProtectionTimeOutVal_Default);
4090 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4091 DL_TC1ReplayTimeOutVal_Default);
4092 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4093 DL_AFC1ReqTimeOutVal_Default);
4094
4095 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4096 DL_FC0ProtectionTimeOutVal_Default);
4097 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4098 DL_TC0ReplayTimeOutVal_Default);
4099 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4100 DL_AFC0ReqTimeOutVal_Default);
4101
7eb584db
DR
4102 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4103 | pwr_mode->pwr_tx);
4104
4105 if (ret) {
d3e89bac 4106 dev_err(hba->dev,
7eb584db
DR
4107 "%s: power mode change failed %d\n", __func__, ret);
4108 } else {
0263bcd0
YG
4109 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4110 pwr_mode);
7eb584db
DR
4111
4112 memcpy(&hba->pwr_info, pwr_mode,
4113 sizeof(struct ufs_pa_layer_attr));
4114 }
4115
4116 return ret;
4117}
4118
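/*
 * Worked example (illustration only, assuming the UniPro encoding
 * FAST_MODE = 1): the argument passed to ufshcd_uic_change_pwr_mode() above
 * carries the RX mode in the upper nibble and the TX mode in the lower one,
 * so a FAST/FAST request is sent as pwr_rx << 4 | pwr_tx = 0x11, and its
 * completion is then awaited in ufshcd_uic_pwr_ctrl().
 */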
4119/**
4120 * ufshcd_config_pwr_mode - configure a new power mode
4121 * @hba: per-adapter instance
4122 * @desired_pwr_mode: desired power configuration
4123 */
0d846e70 4124int ufshcd_config_pwr_mode(struct ufs_hba *hba,
7eb584db
DR
4125 struct ufs_pa_layer_attr *desired_pwr_mode)
4126{
4127 struct ufs_pa_layer_attr final_params = { 0 };
4128 int ret;
4129
0263bcd0
YG
4130 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4131 desired_pwr_mode, &final_params);
4132
4133 if (ret)
7eb584db
DR
4134 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4135
4136 ret = ufshcd_change_power_mode(hba, &final_params);
d3e89bac
SJ
4137
4138 return ret;
4139}
0d846e70 4140EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
d3e89bac 4141
68078d5c
DR
4142/**
4143 * ufshcd_complete_dev_init() - checks device readiness
8aa29f19 4144 * @hba: per-adapter instance
68078d5c
DR
4145 *
4146 * Set fDeviceInit flag and poll until device toggles it.
4147 */
4148static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4149{
dc3c8d3a
YG
4150 int i;
4151 int err;
7dfdcc39 4152 bool flag_res = true;
68078d5c 4153
dc3c8d3a 4154 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1f34eedf 4155 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
68078d5c
DR
4156 if (err) {
4157 dev_err(hba->dev,
4158 "%s setting fDeviceInit flag failed with error %d\n",
4159 __func__, err);
4160 goto out;
4161 }
4162
dc3c8d3a
YG
4163 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4164 for (i = 0; i < 1000 && !err && flag_res; i++)
4165 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1f34eedf 4166 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
dc3c8d3a 4167
68078d5c
DR
4168 if (err)
4169 dev_err(hba->dev,
4170 "%s reading fDeviceInit flag failed with error %d\n",
4171 __func__, err);
4172 else if (flag_res)
4173 dev_err(hba->dev,
4174 "%s fDeviceInit was not cleared by the device\n",
4175 __func__);
4176
4177out:
4178 return err;
4179}
4180
7a3e97b0
SY
4181/**
4182 * ufshcd_make_hba_operational - Make UFS controller operational
4183 * @hba: per adapter instance
4184 *
4185 * To bring UFS host controller to operational state,
5c0c28a8
SRT
4186 * 1. Enable required interrupts
4187 * 2. Configure interrupt aggregation
897efe62 4188 * 3. Program UTRL and UTMRL base address
5c0c28a8 4189 * 4. Configure run-stop-registers
7a3e97b0
SY
4190 *
4191 * Returns 0 on success, non-zero value on failure
4192 */
9d19bf7a 4193int ufshcd_make_hba_operational(struct ufs_hba *hba)
7a3e97b0
SY
4194{
4195 int err = 0;
4196 u32 reg;
4197
6ccf44fe
SJ
4198 /* Enable required interrupts */
4199 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4200
4201 /* Configure interrupt aggregation */
b852190e
YG
4202 if (ufshcd_is_intr_aggr_allowed(hba))
4203 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4204 else
4205 ufshcd_disable_intr_aggr(hba);
6ccf44fe
SJ
4206
4207 /* Configure UTRL and UTMRL base address registers */
4208 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4209 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4210 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4211 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4212 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4213 REG_UTP_TASK_REQ_LIST_BASE_L);
4214 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4215 REG_UTP_TASK_REQ_LIST_BASE_H);
4216
897efe62
YG
4217 /*
4218 * Make sure base address and interrupt setup are updated before
4219 * enabling the run/stop registers below.
4220 */
4221 wmb();
4222
7a3e97b0
SY
4223 /*
4224 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
7a3e97b0 4225 */
5c0c28a8 4226 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
7a3e97b0
SY
4227 if (!(ufshcd_get_lists_status(reg))) {
4228 ufshcd_enable_run_stop_reg(hba);
4229 } else {
3b1d0580 4230 dev_err(hba->dev,
7a3e97b0
SY
4231 "Host controller not ready to process requests");
4232 err = -EIO;
4233 goto out;
4234 }
4235
7a3e97b0
SY
4236out:
4237 return err;
4238}
9d19bf7a 4239EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
7a3e97b0 4240
596585a2
YG
4241/**
4242 * ufshcd_hba_stop - Send controller to reset state
4243 * @hba: per adapter instance
596585a2 4244 */
5cac1095 4245static inline void ufshcd_hba_stop(struct ufs_hba *hba)
596585a2 4246{
5cac1095 4247 unsigned long flags;
596585a2
YG
4248 int err;
4249
5cac1095
BVA
4250 /*
 4251 * Obtain the host lock to prevent the controller from being disabled
4252 * while the UFS interrupt handler is active on another CPU.
4253 */
4254 spin_lock_irqsave(hba->host->host_lock, flags);
596585a2 4255 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
5cac1095
BVA
4256 spin_unlock_irqrestore(hba->host->host_lock, flags);
4257
596585a2
YG
4258 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4259 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
5cac1095 4260 10, 1);
596585a2
YG
4261 if (err)
4262 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4263}
4264
7a3e97b0 4265/**
39bf2d83 4266 * ufshcd_hba_execute_hce - initialize the controller
7a3e97b0
SY
4267 * @hba: per adapter instance
4268 *
4269 * The controller resets itself and controller firmware initialization
4270 * sequence kicks off. When controller is ready it will set
4271 * the Host Controller Enable bit to 1.
4272 *
4273 * Returns 0 on success, non-zero value on failure
4274 */
39bf2d83 4275static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
7a3e97b0
SY
4276{
4277 int retry;
4278
596585a2 4279 if (!ufshcd_is_hba_active(hba))
7a3e97b0 4280 /* change controller state to "reset state" */
5cac1095 4281 ufshcd_hba_stop(hba);
7a3e97b0 4282
57d104c1
SJ
4283 /* UniPro link is disabled at this point */
4284 ufshcd_set_link_off(hba);
4285
0263bcd0 4286 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
5c0c28a8 4287
7a3e97b0
SY
4288 /* start controller initialization sequence */
4289 ufshcd_hba_start(hba);
4290
4291 /*
4292 * To initialize a UFS host controller HCE bit must be set to 1.
4293 * During initialization the HCE bit value changes from 1->0->1.
4294 * When the host controller completes initialization sequence
4295 * it sets the value of HCE bit to 1. The same HCE bit is read back
4296 * to check if the controller has completed initialization sequence.
 4297 * So without this delay, the HCE = 1 value set in the previous
 4298 * instruction might be read back.
4299 * This delay can be changed based on the controller.
4300 */
90b8491c 4301 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
7a3e97b0
SY
4302
4303 /* wait for the host controller to complete initialization */
9fc305ef 4304 retry = 50;
7a3e97b0
SY
4305 while (ufshcd_is_hba_active(hba)) {
4306 if (retry) {
4307 retry--;
4308 } else {
3b1d0580 4309 dev_err(hba->dev,
7a3e97b0
SY
4310 "Controller enable failed\n");
4311 return -EIO;
4312 }
9fc305ef 4313 usleep_range(1000, 1100);
7a3e97b0 4314 }
5c0c28a8 4315
1d337ec2 4316 /* enable UIC related interrupts */
57d104c1 4317 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
1d337ec2 4318
0263bcd0 4319 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
5c0c28a8 4320
7a3e97b0
SY
4321 return 0;
4322}
39bf2d83
AA
4323
4324int ufshcd_hba_enable(struct ufs_hba *hba)
4325{
4326 int ret;
4327
4328 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4329 ufshcd_set_link_off(hba);
4330 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4331
4332 /* enable UIC related interrupts */
4333 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4334 ret = ufshcd_dme_reset(hba);
4335 if (!ret) {
4336 ret = ufshcd_dme_enable(hba);
4337 if (!ret)
4338 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4339 if (ret)
4340 dev_err(hba->dev,
4341 "Host controller enable failed with non-hce\n");
4342 }
4343 } else {
4344 ret = ufshcd_hba_execute_hce(hba);
4345 }
4346
4347 return ret;
4348}
9d19bf7a
SC
4349EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4350
7ca38cf3
YG
4351static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4352{
ba0320fb 4353 int tx_lanes = 0, i, err = 0;
7ca38cf3
YG
4354
4355 if (!peer)
4356 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4357 &tx_lanes);
4358 else
4359 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4360 &tx_lanes);
4361 for (i = 0; i < tx_lanes; i++) {
4362 if (!peer)
4363 err = ufshcd_dme_set(hba,
4364 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4365 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4366 0);
4367 else
4368 err = ufshcd_dme_peer_set(hba,
4369 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4370 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4371 0);
4372 if (err) {
4373 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4374 __func__, peer, i, err);
4375 break;
4376 }
4377 }
4378
4379 return err;
4380}
4381
4382static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4383{
4384 return ufshcd_disable_tx_lcc(hba, true);
4385}
4386
a5fe372d
SC
4387void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4388 u32 reg)
8808b4e9
SC
4389{
4390 reg_hist->reg[reg_hist->pos] = reg;
4391 reg_hist->tstamp[reg_hist->pos] = ktime_get();
4392 reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4393}
a5fe372d 4394EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
8808b4e9 4395
7a3e97b0 4396/**
6ccf44fe 4397 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
4398 * @hba: per adapter instance
4399 *
6ccf44fe 4400 * Returns 0 for success, non-zero in case of failure
7a3e97b0 4401 */
6ccf44fe 4402static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 4403{
6ccf44fe 4404 int ret;
1d337ec2 4405 int retries = DME_LINKSTARTUP_RETRIES;
7caf489b 4406 bool link_startup_again = false;
7a3e97b0 4407
7caf489b 4408 /*
4409 * If UFS device isn't active then we will have to issue link startup
 4410 * 2 times to make sure the device state moves to active.
4411 */
4412 if (!ufshcd_is_ufs_dev_active(hba))
4413 link_startup_again = true;
7a3e97b0 4414
7caf489b 4415link_startup:
1d337ec2 4416 do {
0263bcd0 4417 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
6ccf44fe 4418
1d337ec2 4419 ret = ufshcd_dme_link_startup(hba);
5c0c28a8 4420
1d337ec2
SRT
4421 /* check if device is detected by inter-connect layer */
4422 if (!ret && !ufshcd_is_device_present(hba)) {
8808b4e9
SC
4423 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4424 0);
1d337ec2
SRT
4425 dev_err(hba->dev, "%s: Device not present\n", __func__);
4426 ret = -ENXIO;
4427 goto out;
4428 }
6ccf44fe 4429
1d337ec2
SRT
4430 /*
4431 * DME link lost indication is only received when link is up,
4432 * but we can't be sure if the link is up until link startup
4433 * succeeds. So reset the local Uni-Pro and try again.
4434 */
8808b4e9
SC
4435 if (ret && ufshcd_hba_enable(hba)) {
4436 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4437 (u32)ret);
1d337ec2 4438 goto out;
8808b4e9 4439 }
1d337ec2
SRT
4440 } while (ret && retries--);
4441
8808b4e9 4442 if (ret) {
1d337ec2 4443 /* failed to get the link up... retire */
8808b4e9
SC
4444 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4445 (u32)ret);
5c0c28a8 4446 goto out;
8808b4e9 4447 }
5c0c28a8 4448
7caf489b 4449 if (link_startup_again) {
4450 link_startup_again = false;
4451 retries = DME_LINKSTARTUP_RETRIES;
4452 goto link_startup;
4453 }
4454
d2aebb9b 4455 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4456 ufshcd_init_pwr_info(hba);
4457 ufshcd_print_pwr_info(hba);
4458
7ca38cf3
YG
4459 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4460 ret = ufshcd_disable_device_tx_lcc(hba);
4461 if (ret)
4462 goto out;
4463 }
4464
5c0c28a8 4465 /* Include any host controller configuration via UIC commands */
0263bcd0
YG
4466 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4467 if (ret)
4468 goto out;
7a3e97b0 4469
5c0c28a8 4470 ret = ufshcd_make_hba_operational(hba);
6ccf44fe 4471out:
7942f7b5 4472 if (ret) {
6ccf44fe 4473 dev_err(hba->dev, "link startup failed %d\n", ret);
7942f7b5
VG
4474 ufshcd_print_host_state(hba);
4475 ufshcd_print_pwr_info(hba);
4476 ufshcd_print_host_regs(hba);
4477 }
6ccf44fe 4478 return ret;
7a3e97b0
SY
4479}
4480
5a0b0cb9
SRT
4481/**
4482 * ufshcd_verify_dev_init() - Verify device initialization
4483 * @hba: per-adapter instance
4484 *
4485 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4486 * device Transport Protocol (UTP) layer is ready after a reset.
4487 * If the UTP layer at the device side is not initialized, it may
4488 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4489 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4490 */
4491static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4492{
4493 int err = 0;
4494 int retries;
4495
1ab27c9c 4496 ufshcd_hold(hba, false);
5a0b0cb9
SRT
4497 mutex_lock(&hba->dev_cmd.lock);
4498 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4499 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4500 NOP_OUT_TIMEOUT);
4501
4502 if (!err || err == -ETIMEDOUT)
4503 break;
4504
4505 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4506 }
4507 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 4508 ufshcd_release(hba);
5a0b0cb9
SRT
4509
4510 if (err)
4511 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4512 return err;
4513}
4514
0ce147d4
SJ
4515/**
4516 * ufshcd_set_queue_depth - set lun queue depth
4517 * @sdev: pointer to SCSI device
4518 *
4519 * Read bLUQueueDepth value and activate scsi tagged command
4520 * queueing. For WLUN, queue depth is set to 1. For best-effort
4521 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4522 * value that host can queue.
4523 */
4524static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4525{
4526 int ret = 0;
4527 u8 lun_qdepth;
4528 struct ufs_hba *hba;
4529
4530 hba = shost_priv(sdev->host);
4531
4532 lun_qdepth = hba->nutrs;
dbd34a61
SM
4533 ret = ufshcd_read_unit_desc_param(hba,
4534 ufshcd_scsi_to_upiu_lun(sdev->lun),
4535 UNIT_DESC_PARAM_LU_Q_DEPTH,
4536 &lun_qdepth,
4537 sizeof(lun_qdepth));
0ce147d4
SJ
4538
 4539 /* Some WLUNs don't support unit descriptors */
4540 if (ret == -EOPNOTSUPP)
4541 lun_qdepth = 1;
4542 else if (!lun_qdepth)
4543 /* eventually, we can figure out the real queue depth */
4544 lun_qdepth = hba->nutrs;
4545 else
4546 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4547
4548 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4549 __func__, lun_qdepth);
db5ed4df 4550 scsi_change_queue_depth(sdev, lun_qdepth);
0ce147d4
SJ
4551}
4552
57d104c1
SJ
4553/*
4554 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4555 * @hba: per-adapter instance
4556 * @lun: UFS device lun id
4557 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4558 *
 4559 * Returns 0 in case of success, with the LU's write protect status returned in
 4560 * the @b_lu_write_protect parameter.
4561 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4562 * Returns -EINVAL in case of invalid parameters passed to this function.
4563 */
4564static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4565 u8 lun,
4566 u8 *b_lu_write_protect)
4567{
4568 int ret;
4569
4570 if (!b_lu_write_protect)
4571 ret = -EINVAL;
4572 /*
4573 * According to UFS device spec, RPMB LU can't be write
4574 * protected so skip reading bLUWriteProtect parameter for
4575 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4576 */
1baa8011 4577 else if (lun >= hba->dev_info.max_lu_supported)
57d104c1
SJ
4578 ret = -ENOTSUPP;
4579 else
4580 ret = ufshcd_read_unit_desc_param(hba,
4581 lun,
4582 UNIT_DESC_PARAM_LU_WR_PROTECT,
4583 b_lu_write_protect,
4584 sizeof(*b_lu_write_protect));
4585 return ret;
4586}
4587
4588/**
4589 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4590 * status
4591 * @hba: per-adapter instance
4592 * @sdev: pointer to SCSI device
4593 *
4594 */
4595static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4596 struct scsi_device *sdev)
4597{
4598 if (hba->dev_info.f_power_on_wp_en &&
4599 !hba->dev_info.is_lu_power_on_wp) {
4600 u8 b_lu_write_protect;
4601
4602 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4603 &b_lu_write_protect) &&
4604 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4605 hba->dev_info.is_lu_power_on_wp = true;
4606 }
4607}
4608
7a3e97b0
SY
4609/**
4610 * ufshcd_slave_alloc - handle initial SCSI device configurations
4611 * @sdev: pointer to SCSI device
4612 *
4613 * Returns success
4614 */
4615static int ufshcd_slave_alloc(struct scsi_device *sdev)
4616{
4617 struct ufs_hba *hba;
4618
4619 hba = shost_priv(sdev->host);
7a3e97b0
SY
4620
4621 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4622 sdev->use_10_for_ms = 1;
a3a76391
CG
4623
4624 /* DBD field should be set to 1 in mode sense(10) */
4625 sdev->set_dbd_for_ms = 1;
7a3e97b0 4626
e8e7f271
SRT
4627 /* allow SCSI layer to restart the device in case of errors */
4628 sdev->allow_restart = 1;
4264fd61 4629
b2a6c522
SRT
4630 /* REPORT SUPPORTED OPERATION CODES is not supported */
4631 sdev->no_report_opcodes = 1;
4632
84af7e8b
SRT
4633 /* WRITE_SAME command is not supported */
4634 sdev->no_write_same = 1;
e8e7f271 4635
0ce147d4 4636 ufshcd_set_queue_depth(sdev);
4264fd61 4637
57d104c1
SJ
4638 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4639
7a3e97b0
SY
4640 return 0;
4641}
4642
4264fd61
SRT
4643/**
4644 * ufshcd_change_queue_depth - change queue depth
4645 * @sdev: pointer to SCSI device
4646 * @depth: required depth to set
4264fd61 4647 *
db5ed4df 4648 * Change queue depth and make sure the max. limits are not crossed.
4264fd61 4649 */
db5ed4df 4650static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4264fd61
SRT
4651{
4652 struct ufs_hba *hba = shost_priv(sdev->host);
4653
4654 if (depth > hba->nutrs)
4655 depth = hba->nutrs;
db5ed4df 4656 return scsi_change_queue_depth(sdev, depth);
4264fd61
SRT
4657}
4658
eeda4749
AM
4659/**
4660 * ufshcd_slave_configure - adjust SCSI device configurations
4661 * @sdev: pointer to SCSI device
4662 */
4663static int ufshcd_slave_configure(struct scsi_device *sdev)
4664{
49615ba1 4665 struct ufs_hba *hba = shost_priv(sdev->host);
eeda4749
AM
4666 struct request_queue *q = sdev->request_queue;
4667
4668 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
49615ba1
SC
4669
4670 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4671 sdev->rpm_autosuspend = 1;
4672
eeda4749
AM
4673 return 0;
4674}
4675
7a3e97b0
SY
4676/**
4677 * ufshcd_slave_destroy - remove SCSI device configurations
4678 * @sdev: pointer to SCSI device
4679 */
4680static void ufshcd_slave_destroy(struct scsi_device *sdev)
4681{
4682 struct ufs_hba *hba;
4683
4684 hba = shost_priv(sdev->host);
0ce147d4 4685 /* Drop the reference as it won't be needed anymore */
7c48bfd0
AM
4686 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4687 unsigned long flags;
4688
4689 spin_lock_irqsave(hba->host->host_lock, flags);
0ce147d4 4690 hba->sdev_ufs_device = NULL;
7c48bfd0
AM
4691 spin_unlock_irqrestore(hba->host->host_lock, flags);
4692 }
7a3e97b0
SY
4693}
4694
7a3e97b0
SY
4695/**
4696 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
8aa29f19 4697 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
4698 * @scsi_status: SCSI command status
4699 *
 4700 * Returns value based on SCSI command status
4701 */
4702static inline int
4703ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4704{
4705 int result = 0;
4706
4707 switch (scsi_status) {
7a3e97b0 4708 case SAM_STAT_CHECK_CONDITION:
1c2623c5 4709 ufshcd_copy_sense_data(lrbp);
30eb2e4c 4710 /* fallthrough */
1c2623c5 4711 case SAM_STAT_GOOD:
7a3e97b0
SY
4712 result |= DID_OK << 16 |
4713 COMMAND_COMPLETE << 8 |
1c2623c5 4714 scsi_status;
7a3e97b0
SY
4715 break;
4716 case SAM_STAT_TASK_SET_FULL:
1c2623c5 4717 case SAM_STAT_BUSY:
7a3e97b0 4718 case SAM_STAT_TASK_ABORTED:
1c2623c5
SJ
4719 ufshcd_copy_sense_data(lrbp);
4720 result |= scsi_status;
7a3e97b0
SY
4721 break;
4722 default:
4723 result |= DID_ERROR << 16;
4724 break;
4725 } /* end of switch */
4726
4727 return result;
4728}
4729
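/*
 * Worked example (illustration only, assuming the usual DID_OK = 0 and
 * COMMAND_COMPLETE = 0 encodings): the result word built above carries the
 * host byte in bits 16-23, the message byte in bits 8-15 and the SCSI status
 * in bits 0-7, so a CHECK CONDITION (0x02) completes as result = 0x00000002,
 * while an OCS failure below is reported as DID_ERROR << 16 = 0x00070000.
 */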
4730/**
4731 * ufshcd_transfer_rsp_status - Get overall status of the response
4732 * @hba: per adapter instance
8aa29f19 4733 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
4734 *
4735 * Returns result of the command to notify SCSI midlayer
4736 */
4737static inline int
4738ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4739{
4740 int result = 0;
4741 int scsi_status;
4742 int ocs;
4743
4744 /* overall command status of utrd */
4745 ocs = ufshcd_get_tr_ocs(lrbp);
4746
d779a6e9
KK
4747 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4748 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
4749 MASK_RSP_UPIU_RESULT)
4750 ocs = OCS_SUCCESS;
4751 }
4752
7a3e97b0
SY
4753 switch (ocs) {
4754 case OCS_SUCCESS:
5a0b0cb9 4755 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
ff8e20c6 4756 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
4757 switch (result) {
4758 case UPIU_TRANSACTION_RESPONSE:
4759 /*
4760 * get the response UPIU result to extract
4761 * the SCSI command status
4762 */
4763 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4764
4765 /*
4766 * get the result based on SCSI status response
4767 * to notify the SCSI midlayer of the command status
4768 */
4769 scsi_status = result & MASK_SCSI_STATUS;
4770 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
66ec6d59 4771
f05ac2e5
YG
4772 /*
4773 * Currently we are only supporting BKOPs exception
4774 * events hence we can ignore BKOPs exception event
4775 * during power management callbacks. BKOPs exception
4776 * event is not expected to be raised in runtime suspend
4777 * callback as it allows the urgent bkops.
4778 * During system suspend, we are anyway forcefully
4779 * disabling the bkops and if urgent bkops is needed
4780 * it will be enabled on system resume. Long term
4781 * solution could be to abort the system suspend if
4782 * UFS device needs urgent BKOPs.
4783 */
4784 if (!hba->pm_op_in_progress &&
2824ec9f
SL
4785 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4786 schedule_work(&hba->eeh_work)) {
4787 /*
4788 * Prevent suspend once eeh_work is scheduled
4789 * to avoid deadlock between ufshcd_suspend
4790 * and exception event handler.
4791 */
4792 pm_runtime_get_noresume(hba->dev);
4793 }
5a0b0cb9
SRT
4794 break;
4795 case UPIU_TRANSACTION_REJECT_UPIU:
4796 /* TODO: handle Reject UPIU Response */
4797 result = DID_ERROR << 16;
3b1d0580 4798 dev_err(hba->dev,
5a0b0cb9
SRT
4799 "Reject UPIU not fully implemented\n");
4800 break;
4801 default:
5a0b0cb9
SRT
4802 dev_err(hba->dev,
4803 "Unexpected request response code = %x\n",
4804 result);
e0347d89 4805 result = DID_ERROR << 16;
7a3e97b0
SY
4806 break;
4807 }
7a3e97b0
SY
4808 break;
4809 case OCS_ABORTED:
4810 result |= DID_ABORT << 16;
4811 break;
e8e7f271
SRT
4812 case OCS_INVALID_COMMAND_STATUS:
4813 result |= DID_REQUEUE << 16;
4814 break;
7a3e97b0
SY
4815 case OCS_INVALID_CMD_TABLE_ATTR:
4816 case OCS_INVALID_PRDT_ATTR:
4817 case OCS_MISMATCH_DATA_BUF_SIZE:
4818 case OCS_MISMATCH_RESP_UPIU_SIZE:
4819 case OCS_PEER_COMM_FAILURE:
4820 case OCS_FATAL_ERROR:
4821 default:
4822 result |= DID_ERROR << 16;
3b1d0580 4823 dev_err(hba->dev,
ff8e20c6
DR
4824 "OCS error from controller = %x for tag %d\n",
4825 ocs, lrbp->task_tag);
4826 ufshcd_print_host_regs(hba);
6ba65588 4827 ufshcd_print_host_state(hba);
7a3e97b0
SY
4828 break;
4829 } /* end of switch */
4830
2df74b69 4831 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
66cc820f 4832 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
7a3e97b0
SY
4833 return result;
4834}
4835
6ccf44fe
SJ
4836/**
4837 * ufshcd_uic_cmd_compl - handle completion of uic command
4838 * @hba: per adapter instance
53b3d9c3 4839 * @intr_status: interrupt status generated by the controller
9333d775
VG
4840 *
4841 * Returns
4842 * IRQ_HANDLED - If interrupt is valid
4843 * IRQ_NONE - If invalid interrupt
6ccf44fe 4844 */
9333d775 4845static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 4846{
9333d775
VG
4847 irqreturn_t retval = IRQ_NONE;
4848
53b3d9c3 4849 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
4850 hba->active_uic_cmd->argument2 |=
4851 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
4852 hba->active_uic_cmd->argument3 =
4853 ufshcd_get_dme_attr_val(hba);
6ccf44fe 4854 complete(&hba->active_uic_cmd->done);
9333d775 4855 retval = IRQ_HANDLED;
6ccf44fe 4856 }
53b3d9c3 4857
9333d775 4858 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
57d104c1 4859 complete(hba->uic_async_done);
9333d775
VG
4860 retval = IRQ_HANDLED;
4861 }
aa5c6979
SC
4862
4863 if (retval == IRQ_HANDLED)
4864 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
4865 "complete");
9333d775 4866 return retval;
6ccf44fe
SJ
4867}
4868
7a3e97b0 4869/**
9a47ec7c 4870 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
7a3e97b0 4871 * @hba: per adapter instance
9a47ec7c 4872 * @completed_reqs: requests to complete
7a3e97b0 4873 */
9a47ec7c
YG
4874static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4875 unsigned long completed_reqs)
7a3e97b0 4876{
5a0b0cb9
SRT
4877 struct ufshcd_lrb *lrbp;
4878 struct scsi_cmnd *cmd;
7a3e97b0
SY
4879 int result;
4880 int index;
e9d501b1 4881
e9d501b1
DR
4882 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4883 lrbp = &hba->lrb[index];
4884 cmd = lrbp->cmd;
4885 if (cmd) {
1a07f2d9 4886 ufshcd_add_command_trace(hba, index, "complete");
e9d501b1
DR
4887 result = ufshcd_transfer_rsp_status(hba, lrbp);
4888 scsi_dma_unmap(cmd);
4889 cmd->result = result;
4890 /* Mark completed command as NULL in LRB */
4891 lrbp->cmd = NULL;
74a527a2 4892 lrbp->compl_time_stamp = ktime_get();
e9d501b1
DR
4893 /* Do not touch lrbp after scsi done */
4894 cmd->scsi_done(cmd);
1ab27c9c 4895 __ufshcd_release(hba);
300bb13f
JP
4896 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4897 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
74a527a2 4898 lrbp->compl_time_stamp = ktime_get();
1a07f2d9
LS
4899 if (hba->dev_cmd.complete) {
4900 ufshcd_add_command_trace(hba, index,
4901 "dev_complete");
e9d501b1 4902 complete(hba->dev_cmd.complete);
1a07f2d9 4903 }
e9d501b1 4904 }
401f1e44 4905 if (ufshcd_is_clkscaling_supported(hba))
4906 hba->clk_scaling.active_reqs--;
e9d501b1 4907 }
7a3e97b0
SY
4908
4909 /* clear corresponding bits of completed commands */
4910 hba->outstanding_reqs ^= completed_reqs;
4911
856b3483 4912 ufshcd_clk_scaling_update_busy(hba);
7a3e97b0
SY
4913}
4914
9a47ec7c
YG
4915/**
4916 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4917 * @hba: per adapter instance
9333d775
VG
4918 *
4919 * Returns
4920 * IRQ_HANDLED - If interrupt is valid
4921 * IRQ_NONE - If invalid interrupt
9a47ec7c 4922 */
9333d775 4923static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
9a47ec7c
YG
4924{
4925 unsigned long completed_reqs;
4926 u32 tr_doorbell;
4927
4928 /* Resetting interrupt aggregation counters first and reading the
4929 * DOOR_BELL afterward allows us to handle all the completed requests.
 4930 * In order to prevent starvation of other interrupts, the DB is read once
 4931 * after reset. The downside of this solution is the possibility of a
 4932 * false interrupt if the device completes another request after resetting
4933 * aggregation and before reading the DB.
4934 */
b638b5eb
AA
4935 if (ufshcd_is_intr_aggr_allowed(hba) &&
4936 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
9a47ec7c
YG
4937 ufshcd_reset_intr_aggr(hba);
4938
4939 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4940 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4941
9333d775
VG
4942 if (completed_reqs) {
4943 __ufshcd_transfer_req_compl(hba, completed_reqs);
4944 return IRQ_HANDLED;
4945 } else {
4946 return IRQ_NONE;
4947 }
9a47ec7c
YG
4948}
4949
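/*
 * Worked example (illustration only) of the completion math above: if
 * hba->outstanding_reqs is 0b1011 (tags 0, 1 and 3 issued) and the doorbell
 * register now reads 0b0001 (only tag 0 still owned by the controller), then
 * completed_reqs = 0b1011 ^ 0b0001 = 0b1010, i.e. tags 1 and 3 are handed to
 * __ufshcd_transfer_req_compl().
 */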
66ec6d59
SRT
4950/**
4951 * ufshcd_disable_ee - disable exception event
4952 * @hba: per-adapter instance
4953 * @mask: exception event to disable
4954 *
4955 * Disables exception event in the device so that the EVENT_ALERT
4956 * bit is not set.
4957 *
4958 * Returns zero on success, non-zero error value on failure.
4959 */
4960static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4961{
4962 int err = 0;
4963 u32 val;
4964
4965 if (!(hba->ee_ctrl_mask & mask))
4966 goto out;
4967
4968 val = hba->ee_ctrl_mask & ~mask;
d7e2ddd5 4969 val &= MASK_EE_STATUS;
5e86ae44 4970 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
66ec6d59
SRT
4971 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4972 if (!err)
4973 hba->ee_ctrl_mask &= ~mask;
4974out:
4975 return err;
4976}
4977
4978/**
4979 * ufshcd_enable_ee - enable exception event
4980 * @hba: per-adapter instance
4981 * @mask: exception event to enable
4982 *
4983 * Enable corresponding exception event in the device to allow
4984 * device to alert host in critical scenarios.
4985 *
4986 * Returns zero on success, non-zero error value on failure.
4987 */
4988static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4989{
4990 int err = 0;
4991 u32 val;
4992
4993 if (hba->ee_ctrl_mask & mask)
4994 goto out;
4995
4996 val = hba->ee_ctrl_mask | mask;
d7e2ddd5 4997 val &= MASK_EE_STATUS;
5e86ae44 4998 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
66ec6d59
SRT
4999 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5000 if (!err)
5001 hba->ee_ctrl_mask |= mask;
5002out:
5003 return err;
5004}
5005
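/*
 * Usage sketch (illustration only): the BKOPS code below pairs these two
 * helpers, e.g.
 *
 *	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
 *
 * ORs the urgent-BKOPS bit into hba->ee_ctrl_mask and writes the 16-bit
 * masked value to the exception event control attribute
 * (QUERY_ATTR_IDN_EE_CONTROL), while ufshcd_disable_ee() clears the same bit
 * again once auto-BKOPS has been enabled.
 */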
5006/**
5007 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5008 * @hba: per-adapter instance
5009 *
5010 * Allow device to manage background operations on its own. Enabling
5011 * this might lead to inconsistent latencies during normal data transfers
5012 * as the device is allowed to manage its own way of handling background
5013 * operations.
5014 *
5015 * Returns zero on success, non-zero on failure.
5016 */
5017static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5018{
5019 int err = 0;
5020
5021 if (hba->auto_bkops_enabled)
5022 goto out;
5023
dc3c8d3a 5024 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1f34eedf 5025 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
66ec6d59
SRT
5026 if (err) {
5027 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5028 __func__, err);
5029 goto out;
5030 }
5031
5032 hba->auto_bkops_enabled = true;
7ff5ab47 5033 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
66ec6d59
SRT
5034
5035 /* No need of URGENT_BKOPS exception from the device */
5036 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5037 if (err)
5038 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5039 __func__, err);
5040out:
5041 return err;
5042}
5043
5044/**
 5045 * ufshcd_disable_auto_bkops - prevent the device from doing background operations
5046 * @hba: per-adapter instance
5047 *
5048 * Disabling background operations improves command response latency but
 5049 * has the drawback of the device moving into a critical state where it is
 5050 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5051 * host is idle so that BKOPS are managed effectively without any negative
5052 * impacts.
5053 *
5054 * Returns zero on success, non-zero on failure.
5055 */
5056static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5057{
5058 int err = 0;
5059
5060 if (!hba->auto_bkops_enabled)
5061 goto out;
5062
5063 /*
5064 * If host assisted BKOPs is to be enabled, make sure
5065 * urgent bkops exception is allowed.
5066 */
5067 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5068 if (err) {
5069 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5070 __func__, err);
5071 goto out;
5072 }
5073
dc3c8d3a 5074 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
1f34eedf 5075 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
66ec6d59
SRT
5076 if (err) {
5077 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5078 __func__, err);
5079 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5080 goto out;
5081 }
5082
5083 hba->auto_bkops_enabled = false;
7ff5ab47 5084 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
24366c2a 5085 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5086out:
5087 return err;
5088}
5089
5090/**
4e768e76 5091 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
66ec6d59
SRT
5092 * @hba: per adapter instance
5093 *
5094 * After a device reset the device may toggle the BKOPS_EN flag
5095 * to its default value. The s/w tracking variables should be updated
4e768e76 5096 * as well. This function changes the auto-bkops state based on
5097 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
66ec6d59 5098 */
4e768e76 5099static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
66ec6d59 5100{
4e768e76 5101 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5102 hba->auto_bkops_enabled = false;
5103 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5104 ufshcd_enable_auto_bkops(hba);
5105 } else {
5106 hba->auto_bkops_enabled = true;
5107 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5108 ufshcd_disable_auto_bkops(hba);
5109 }
7b6668d8 5110 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
24366c2a 5111 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5112}
5113
5114static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5115{
5e86ae44 5116 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5117 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5118}
5119
5120/**
57d104c1 5121 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
66ec6d59 5122 * @hba: per-adapter instance
57d104c1 5123 * @status: bkops_status value
66ec6d59 5124 *
57d104c1
SJ
5125 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5126 * flag in the device to permit background operations if the device's
5127 * bkops_status is greater than or equal to the "status" argument passed to
5128 * this function; disable it otherwise.
5129 *
5130 * Returns 0 for success, non-zero in case of failure.
5131 *
5132 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5133 * to know whether auto bkops is enabled or disabled after this function
5134 * returns control to it.
66ec6d59 5135 */
57d104c1
SJ
5136static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5137 enum bkops_status status)
66ec6d59
SRT
5138{
5139 int err;
57d104c1 5140 u32 curr_status = 0;
66ec6d59 5141
57d104c1 5142 err = ufshcd_get_bkops_status(hba, &curr_status);
66ec6d59
SRT
5143 if (err) {
5144 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5145 __func__, err);
5146 goto out;
57d104c1
SJ
5147 } else if (curr_status > BKOPS_STATUS_MAX) {
5148 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5149 __func__, curr_status);
5150 err = -EINVAL;
5151 goto out;
66ec6d59
SRT
5152 }
5153
57d104c1 5154 if (curr_status >= status)
66ec6d59 5155 err = ufshcd_enable_auto_bkops(hba);
57d104c1
SJ
5156 else
5157 err = ufshcd_disable_auto_bkops(hba);
66ec6d59
SRT
5158out:
5159 return err;
5160}
5161
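/*
 * Illustrative sketch, not part of the driver: a hypothetical idle-time
 * caller of ufshcd_bkops_ctrl(). Auto-BKOPS ends up enabled only if the
 * device reports bkops_status >= BKOPS_STATUS_PERF_IMPACT; the outcome can
 * be read back from hba->auto_bkops_enabled as noted in the comment above.
 */
static int __maybe_unused ufshcd_example_bkops_on_idle(struct ufs_hba *hba)
{
	int err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);

	if (!err)
		dev_dbg(hba->dev, "%s: auto bkops %s\n", __func__,
			hba->auto_bkops_enabled ? "enabled" : "disabled");
	return err;
}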
57d104c1
SJ
5162/**
5163 * ufshcd_urgent_bkops - handle urgent bkops exception event
5164 * @hba: per-adapter instance
5165 *
5166 * Enable fBackgroundOpsEn flag in the device to permit background
5167 * operations.
5168 *
5169 * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is
5170 * not enabled, and a negative error value for any other failure.
5171 */
5172static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5173{
afdfff59 5174 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
57d104c1
SJ
5175}
5176
66ec6d59
SRT
5177static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5178{
5e86ae44 5179 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5180 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5181}
5182
afdfff59
YG
5183static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5184{
5185 int err;
5186 u32 curr_status = 0;
5187
5188 if (hba->is_urgent_bkops_lvl_checked)
5189 goto enable_auto_bkops;
5190
5191 err = ufshcd_get_bkops_status(hba, &curr_status);
5192 if (err) {
5193 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5194 __func__, err);
5195 goto out;
5196 }
5197
5198 /*
5199 * We are seeing that some devices raise the urgent bkops
5200 * exception event even when the BKOPS status doesn't indicate
5201 * performance impacted or critical. Handle such devices by
5202 * determining their urgent bkops status at runtime.
5203 */
5204 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5205 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5206 __func__, curr_status);
5207 /* update the current status as the urgent bkops level */
5208 hba->urgent_bkops_lvl = curr_status;
5209 hba->is_urgent_bkops_lvl_checked = true;
5210 }
5211
5212enable_auto_bkops:
5213 err = ufshcd_enable_auto_bkops(hba);
5214out:
5215 if (err < 0)
5216 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5217 __func__, err);
5218}
5219
3d17b9b5
AD
5220static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5221{
5222 int ret;
6f8d5a6a 5223 u8 index;
3d17b9b5
AD
5224 enum query_opcode opcode;
5225
79e3520f 5226 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5
AD
5227 return 0;
5228
5229 if (!(enable ^ hba->wb_enabled))
5230 return 0;
5231 if (enable)
5232 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5233 else
5234 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5235
e31011ab 5236 index = ufshcd_wb_get_query_index(hba);
3d17b9b5 5237 ret = ufshcd_query_flag_retry(hba, opcode,
6f8d5a6a 5238 QUERY_FLAG_IDN_WB_EN, index, NULL);
3d17b9b5
AD
5239 if (ret) {
5240 dev_err(hba->dev, "%s write booster %s failed %d\n",
5241 __func__, enable ? "enable" : "disable", ret);
5242 return ret;
5243 }
5244
5245 hba->wb_enabled = enable;
5246 dev_dbg(hba->dev, "%s write booster %s %d\n",
5247 __func__, enable ? "enable" : "disable", ret);
5248
5249 return ret;
5250}
5251
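/*
 * Illustrative sketch, not part of the driver: toggling WriteBooster with
 * ufshcd_wb_ctrl(). The call is a no-op when WB is not allowed for this
 * device or is already in the requested state, so a hypothetical caller can
 * invoke it unconditionally.
 */
static int __maybe_unused ufshcd_example_wb_enable(struct ufs_hba *hba)
{
	int ret = ufshcd_wb_ctrl(hba, true);

	if (!ret)
		dev_dbg(hba->dev, "%s: wb_enabled=%d\n", __func__,
			hba->wb_enabled);
	return ret;
}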
5252static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5253{
5254 int val;
6f8d5a6a 5255 u8 index;
3d17b9b5
AD
5256
5257 if (set)
5258 val = UPIU_QUERY_OPCODE_SET_FLAG;
5259 else
5260 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5261
e31011ab 5262 index = ufshcd_wb_get_query_index(hba);
3d17b9b5 5263 return ufshcd_query_flag_retry(hba, val,
6f8d5a6a
SC
5264 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5265 index, NULL);
3d17b9b5
AD
5266}
5267
5268static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5269{
5270 if (enable)
5271 ufshcd_wb_buf_flush_enable(hba);
5272 else
5273 ufshcd_wb_buf_flush_disable(hba);
5274
5275}
5276
5277static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5278{
5279 int ret;
6f8d5a6a 5280 u8 index;
3d17b9b5 5281
79e3520f 5282 if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
3d17b9b5
AD
5283 return 0;
5284
e31011ab 5285 index = ufshcd_wb_get_query_index(hba);
3d17b9b5 5286 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1f34eedf 5287 QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
6f8d5a6a 5288 index, NULL);
3d17b9b5
AD
5289 if (ret)
5290 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5291 __func__, ret);
5292 else
5293 hba->wb_buf_flush_enabled = true;
5294
5295 dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5296 return ret;
5297}
5298
5299static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5300{
5301 int ret;
6f8d5a6a 5302 u8 index;
3d17b9b5 5303
79e3520f 5304 if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
3d17b9b5
AD
5305 return 0;
5306
e31011ab 5307 index = ufshcd_wb_get_query_index(hba);
3d17b9b5 5308 ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
6f8d5a6a
SC
5309 QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5310 index, NULL);
3d17b9b5
AD
5311 if (ret) {
5312 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5313 __func__, ret);
5314 } else {
5315 hba->wb_buf_flush_enabled = false;
5316 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5317 }
5318
5319 return ret;
5320}
5321
5322static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5323 u32 avail_buf)
5324{
5325 u32 cur_buf;
5326 int ret;
e31011ab 5327 u8 index;
3d17b9b5 5328
e31011ab 5329 index = ufshcd_wb_get_query_index(hba);
3d17b9b5
AD
5330 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5331 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
e31011ab 5332 index, 0, &cur_buf);
3d17b9b5
AD
5333 if (ret) {
5334 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5335 __func__, ret);
5336 return false;
5337 }
5338
5339 if (!cur_buf) {
5340 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5341 cur_buf);
5342 return false;
5343 }
d14734ae
SC
5344 /* Keep vcc on and continue flushing while the available buffer is below the threshold */
5345 if (avail_buf < hba->vps->wb_flush_threshold)
3d17b9b5
AD
5346 return true;
5347
5348 return false;
5349}
5350
51dd905b 5351static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
3d17b9b5
AD
5352{
5353 int ret;
5354 u32 avail_buf;
e31011ab 5355 u8 index;
3d17b9b5 5356
79e3520f 5357 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5
AD
5358 return false;
5359 /*
5360 * The UFS device needs the vcc to be ON to flush.
5361 * With user-space reduction enabled, it's enough to decide whether to
5362 * flush by checking only the available buffer; the threshold used here
5363 * corresponds to the buffer being more than 90% full.
5364 * With user-space preservation enabled, the current buffer
5365 * should be checked too because the wb buffer size can shrink
5366 * as the disk fills up. This info is provided by the current
5367 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5368 * keeping vcc on when the current buffer is empty.
5369 */
e31011ab 5370 index = ufshcd_wb_get_query_index(hba);
3d17b9b5
AD
5371 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5372 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
e31011ab 5373 index, 0, &avail_buf);
3d17b9b5
AD
5374 if (ret) {
5375 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5376 __func__, ret);
5377 return false;
5378 }
5379
5380 if (!hba->dev_info.b_presrv_uspc_en) {
d14734ae 5381 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
3d17b9b5
AD
5382 return true;
5383 return false;
5384 }
5385
5386 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5387}
5388
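/*
 * Illustrative sketch, not part of the driver: how a suspend path might use
 * ufshcd_wb_need_flush() to decide whether VCC must stay powered so the
 * WriteBooster buffer can keep flushing. The real suspend wiring lives
 * elsewhere in this file; this hypothetical helper only shows the intent.
 */
static bool __maybe_unused ufshcd_example_keep_vcc_for_wb_flush(struct ufs_hba *hba)
{
	/* true while the remaining WB buffer is below the relevant threshold */
	return ufshcd_wb_need_flush(hba);
}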
51dd905b
SC
5389static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5390{
5391 struct ufs_hba *hba = container_of(to_delayed_work(work),
5392 struct ufs_hba,
5393 rpm_dev_flush_recheck_work);
5394 /*
5395 * To prevent unnecessary VCC power drain after device finishes
5396 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5397 * after a certain delay to recheck the threshold by next runtime
5398 * suspend.
5399 */
5400 pm_runtime_get_sync(hba->dev);
5401 pm_runtime_put_sync(hba->dev);
5402}
5403
66ec6d59
SRT
5404/**
5405 * ufshcd_exception_event_handler - handle exceptions raised by device
5406 * @work: pointer to work data
5407 *
5408 * Read bExceptionEventStatus attribute from the device and handle the
5409 * exception event accordingly.
5410 */
5411static void ufshcd_exception_event_handler(struct work_struct *work)
5412{
5413 struct ufs_hba *hba;
5414 int err;
5415 u32 status = 0;
5416 hba = container_of(work, struct ufs_hba, eeh_work);
5417
62694735 5418 pm_runtime_get_sync(hba->dev);
03e1d28e 5419 ufshcd_scsi_block_requests(hba);
66ec6d59
SRT
5420 err = ufshcd_get_ee_status(hba, &status);
5421 if (err) {
5422 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5423 __func__, err);
5424 goto out;
5425 }
5426
5427 status &= hba->ee_ctrl_mask;
afdfff59
YG
5428
5429 if (status & MASK_EE_URGENT_BKOPS)
5430 ufshcd_bkops_exception_event_handler(hba);
5431
66ec6d59 5432out:
03e1d28e 5433 ufshcd_scsi_unblock_requests(hba);
2824ec9f
SL
5434 /*
5435 * pm_runtime_get_noresume is called while scheduling
5436 * eeh_work to avoid suspend racing with exception work.
5437 * Hence decrement usage counter using pm_runtime_put_noidle
5438 * to allow suspend on completion of exception event handler.
5439 */
5440 pm_runtime_put_noidle(hba->dev);
5441 pm_runtime_put(hba->dev);
66ec6d59
SRT
5442 return;
5443}
5444
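/*
 * Illustrative sketch, not part of the driver: only exception events that
 * the host armed in hba->ee_ctrl_mask are acted on; other bits read from
 * wExceptionEventStatus are ignored, exactly as the handler above does.
 * Hypothetical predicate.
 */
static bool __maybe_unused ufshcd_example_ee_is_urgent_bkops(struct ufs_hba *hba,
							     u32 ee_status)
{
	return (ee_status & hba->ee_ctrl_mask & MASK_EE_URGENT_BKOPS) != 0;
}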
9a47ec7c
YG
5445/* Complete requests that have door-bell cleared */
5446static void ufshcd_complete_requests(struct ufs_hba *hba)
5447{
5448 ufshcd_transfer_req_compl(hba);
5449 ufshcd_tmc_handler(hba);
5450}
5451
583fa62d
YG
5452/**
5453 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5454 * to recover from DL NAC errors.
5455 * @hba: per-adapter instance
5456 *
5457 * Returns true if error handling is required, false otherwise
5458 */
5459static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5460{
5461 unsigned long flags;
5462 bool err_handling = true;
5463
5464 spin_lock_irqsave(hba->host->host_lock, flags);
5465 /*
5466 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
5467 * device fatal errors and/or DL NAC & REPLAY timeout errors.
5468 */
5469 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5470 goto out;
5471
5472 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5473 ((hba->saved_err & UIC_ERROR) &&
5474 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5475 goto out;
5476
5477 if ((hba->saved_err & UIC_ERROR) &&
5478 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5479 int err;
5480 /*
5481 * wait for 50ms to see if we can get any other errors or not.
5482 */
5483 spin_unlock_irqrestore(hba->host->host_lock, flags);
5484 msleep(50);
5485 spin_lock_irqsave(hba->host->host_lock, flags);
5486
5487 /*
5488 * Now check whether any severe errors other than the
5489 * DL NAC error have been raised.
5490 */
5491 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5492 ((hba->saved_err & UIC_ERROR) &&
5493 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5494 goto out;
5495
5496 /*
5497 * As DL NAC is the only error received so far, send out NOP
5498 * command to confirm if link is still active or not.
5499 * - If we don't get any response then do error recovery.
5500 * - If we get response then clear the DL NAC error bit.
5501 */
5502
5503 spin_unlock_irqrestore(hba->host->host_lock, flags);
5504 err = ufshcd_verify_dev_init(hba);
5505 spin_lock_irqsave(hba->host->host_lock, flags);
5506
5507 if (err)
5508 goto out;
5509
5510 /* Link seems to be alive hence ignore the DL NAC errors */
5511 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5512 hba->saved_err &= ~UIC_ERROR;
5513 /* clear NAC error */
5514 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5515 if (!hba->saved_uic_err) {
5516 err_handling = false;
5517 goto out;
5518 }
5519 }
5520out:
5521 spin_unlock_irqrestore(hba->host->host_lock, flags);
5522 return err_handling;
5523}
5524
7a3e97b0 5525/**
e8e7f271
SRT
5526 * ufshcd_err_handler - handle UFS errors that require s/w attention
5527 * @work: pointer to work structure
7a3e97b0 5528 */
e8e7f271 5529static void ufshcd_err_handler(struct work_struct *work)
7a3e97b0
SY
5530{
5531 struct ufs_hba *hba;
e8e7f271
SRT
5532 unsigned long flags;
5533 u32 err_xfer = 0;
5534 u32 err_tm = 0;
5535 int err = 0;
5536 int tag;
9a47ec7c 5537 bool needs_reset = false;
e8e7f271
SRT
5538
5539 hba = container_of(work, struct ufs_hba, eh_work);
7a3e97b0 5540
62694735 5541 pm_runtime_get_sync(hba->dev);
1ab27c9c 5542 ufshcd_hold(hba, false);
e8e7f271
SRT
5543
5544 spin_lock_irqsave(hba->host->host_lock, flags);
9a47ec7c 5545 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
e8e7f271 5546 goto out;
e8e7f271
SRT
5547
5548 hba->ufshcd_state = UFSHCD_STATE_RESET;
5549 ufshcd_set_eh_in_progress(hba);
5550
5551 /* Complete requests that have door-bell cleared by h/w */
9a47ec7c 5552 ufshcd_complete_requests(hba);
583fa62d
YG
5553
5554 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5555 bool ret;
5556
5557 spin_unlock_irqrestore(hba->host->host_lock, flags);
5558 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5559 ret = ufshcd_quirk_dl_nac_errors(hba);
5560 spin_lock_irqsave(hba->host->host_lock, flags);
5561 if (!ret)
5562 goto skip_err_handling;
5563 }
9a47ec7c 5564 if ((hba->saved_err & INT_FATAL_ERRORS) ||
82174440 5565 (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
9a47ec7c
YG
5566 ((hba->saved_err & UIC_ERROR) &&
5567 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5568 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5569 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5570 needs_reset = true;
e8e7f271 5571
9a47ec7c
YG
5572 /*
5573 * if host reset is required then skip clearing the pending
2df74b69
CG
5574 * transfers forcefully because they will get cleared during
5575 * host reset and restore
9a47ec7c
YG
5576 */
5577 if (needs_reset)
5578 goto skip_pending_xfer_clear;
5579
5580 /* release lock as clear command might sleep */
5581 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 5582 /* Clear pending transfer requests */
9a47ec7c
YG
5583 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5584 if (ufshcd_clear_cmd(hba, tag)) {
5585 err_xfer = true;
5586 goto lock_skip_pending_xfer_clear;
5587 }
5588 }
e8e7f271
SRT
5589
5590 /* Clear pending task management requests */
9a47ec7c
YG
5591 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5592 if (ufshcd_clear_tm_cmd(hba, tag)) {
5593 err_tm = true;
5594 goto lock_skip_pending_xfer_clear;
5595 }
5596 }
e8e7f271 5597
9a47ec7c 5598lock_skip_pending_xfer_clear:
e8e7f271 5599 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 5600
9a47ec7c
YG
5601 /* Complete the requests that are cleared by s/w */
5602 ufshcd_complete_requests(hba);
5603
5604 if (err_xfer || err_tm)
5605 needs_reset = true;
5606
5607skip_pending_xfer_clear:
e8e7f271 5608 /* Fatal errors need reset */
9a47ec7c
YG
5609 if (needs_reset) {
5610 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5611
5612 /*
5613 * ufshcd_reset_and_restore() does the link reinitialization
5614 * which will need atleast one empty doorbell slot to send the
5615 * device management commands (NOP and query commands).
5616 * If there is no slot empty at this moment then free up last
5617 * slot forcefully.
5618 */
5619 if (hba->outstanding_reqs == max_doorbells)
5620 __ufshcd_transfer_req_compl(hba,
5621 (1UL << (hba->nutrs - 1)));
5622
5623 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 5624 err = ufshcd_reset_and_restore(hba);
9a47ec7c 5625 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271
SRT
5626 if (err) {
5627 dev_err(hba->dev, "%s: reset and restore failed\n",
5628 __func__);
5629 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5630 }
5631 /*
5632 * Inform scsi mid-layer that we did reset and allow to handle
5633 * Unit Attention properly.
5634 */
5635 scsi_report_bus_reset(hba->host, 0);
5636 hba->saved_err = 0;
5637 hba->saved_uic_err = 0;
5638 }
9a47ec7c 5639
583fa62d 5640skip_err_handling:
9a47ec7c
YG
5641 if (!needs_reset) {
5642 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5643 if (hba->saved_err || hba->saved_uic_err)
5644 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5645 __func__, hba->saved_err, hba->saved_uic_err);
5646 }
5647
e8e7f271
SRT
5648 ufshcd_clear_eh_in_progress(hba);
5649
5650out:
9a47ec7c 5651 spin_unlock_irqrestore(hba->host->host_lock, flags);
38135535 5652 ufshcd_scsi_unblock_requests(hba);
1ab27c9c 5653 ufshcd_release(hba);
62694735 5654 pm_runtime_put_sync(hba->dev);
7a3e97b0
SY
5655}
5656
5657/**
e8e7f271
SRT
5658 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5659 * @hba: per-adapter instance
9333d775
VG
5660 *
5661 * Returns
5662 * IRQ_HANDLED - If interrupt is valid
5663 * IRQ_NONE - If invalid interrupt
7a3e97b0 5664 */
9333d775 5665static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
7a3e97b0
SY
5666{
5667 u32 reg;
9333d775 5668 irqreturn_t retval = IRQ_NONE;
7a3e97b0 5669
fb7b45f0
DR
5670 /* PHY layer lane error */
5671 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5672 /* Ignore LINERESET indication, as this is not an error */
5673 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
9333d775 5674 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
fb7b45f0
DR
5675 /*
5676 * To know whether this error is fatal or not, DB timeout
5677 * must be checked but this error is handled separately.
5678 */
5679 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
48d5b973 5680 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
9333d775 5681 retval |= IRQ_HANDLED;
ff8e20c6 5682 }
fb7b45f0 5683
e8e7f271
SRT
5684 /* PA_INIT_ERROR is fatal and needs UIC reset */
5685 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
9333d775
VG
5686 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5687 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
48d5b973 5688 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
ff8e20c6 5689
9333d775
VG
5690 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5691 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5692 else if (hba->dev_quirks &
5693 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5694 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5695 hba->uic_error |=
5696 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5697 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5698 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5699 }
5700 retval |= IRQ_HANDLED;
583fa62d 5701 }
e8e7f271
SRT
5702
5703 /* UIC NL/TL/DME errors needs software retry */
5704 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
9333d775
VG
5705 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5706 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
48d5b973 5707 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
e8e7f271 5708 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
9333d775 5709 retval |= IRQ_HANDLED;
ff8e20c6 5710 }
e8e7f271
SRT
5711
5712 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
9333d775
VG
5713 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5714 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
48d5b973 5715 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
e8e7f271 5716 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
9333d775 5717 retval |= IRQ_HANDLED;
ff8e20c6 5718 }
e8e7f271
SRT
5719
5720 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
9333d775
VG
5721 if ((reg & UIC_DME_ERROR) &&
5722 (reg & UIC_DME_ERROR_CODE_MASK)) {
48d5b973 5723 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
e8e7f271 5724 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
9333d775 5725 retval |= IRQ_HANDLED;
ff8e20c6 5726 }
e8e7f271
SRT
5727
5728 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5729 __func__, hba->uic_error);
9333d775 5730 return retval;
e8e7f271
SRT
5731}
5732
82174440
SC
5733static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5734 u32 intr_mask)
5735{
5a244e0e
SC
5736 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5737 !ufshcd_is_auto_hibern8_enabled(hba))
82174440
SC
5738 return false;
5739
5740 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5741 return false;
5742
5743 if (hba->active_uic_cmd &&
5744 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5745 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5746 return false;
5747
5748 return true;
5749}
5750
e8e7f271
SRT
5751/**
5752 * ufshcd_check_errors - Check for errors that need s/w attention
5753 * @hba: per-adapter instance
9333d775
VG
5754 *
5755 * Returns
5756 * IRQ_HANDLED - If interrupt is valid
5757 * IRQ_NONE - If invalid interrupt
e8e7f271 5758 */
9333d775 5759static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
e8e7f271
SRT
5760{
5761 bool queue_eh_work = false;
9333d775 5762 irqreturn_t retval = IRQ_NONE;
e8e7f271 5763
d3c615bf
SC
5764 if (hba->errors & INT_FATAL_ERRORS) {
5765 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
e8e7f271 5766 queue_eh_work = true;
d3c615bf 5767 }
7a3e97b0
SY
5768
5769 if (hba->errors & UIC_ERROR) {
e8e7f271 5770 hba->uic_error = 0;
9333d775 5771 retval = ufshcd_update_uic_error(hba);
e8e7f271
SRT
5772 if (hba->uic_error)
5773 queue_eh_work = true;
7a3e97b0 5774 }
e8e7f271 5775
82174440
SC
5776 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5777 dev_err(hba->dev,
5778 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5779 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5780 "Enter" : "Exit",
5781 hba->errors, ufshcd_get_upmcrs(hba));
d3c615bf
SC
5782 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5783 hba->errors);
82174440
SC
5784 queue_eh_work = true;
5785 }
5786
e8e7f271 5787 if (queue_eh_work) {
9a47ec7c
YG
5788 /*
5789 * Update the transfer error masks to sticky bits; do this
5790 * irrespective of the current ufshcd_state.
5791 */
5792 hba->saved_err |= hba->errors;
5793 hba->saved_uic_err |= hba->uic_error;
5794
e8e7f271
SRT
5795 /* handle fatal errors only when link is functional */
5796 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5797 /* block commands from scsi mid-layer */
38135535 5798 ufshcd_scsi_block_requests(hba);
e8e7f271 5799
141f8165 5800 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
66cc820f
DR
5801
5802 /* dump controller state before resetting */
5803 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5804 bool pr_prdt = !!(hba->saved_err &
5805 SYSTEM_BUS_FATAL_ERROR);
5806
5807 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5808 __func__, hba->saved_err,
5809 hba->saved_uic_err);
5810
5811 ufshcd_print_host_regs(hba);
5812 ufshcd_print_pwr_info(hba);
5813 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5814 ufshcd_print_trs(hba, hba->outstanding_reqs,
5815 pr_prdt);
5816 }
e8e7f271
SRT
5817 schedule_work(&hba->eh_work);
5818 }
9333d775 5819 retval |= IRQ_HANDLED;
3441da7d 5820 }
e8e7f271
SRT
5821 /*
5822 * if (!queue_eh_work) -
5823 * Other errors are either non-fatal where host recovers
5824 * itself without s/w intervention or errors that will be
5825 * handled by the SCSI core layer.
5826 */
9333d775 5827 return retval;
7a3e97b0
SY
5828}
5829
69a6c269
BVA
5830struct ctm_info {
5831 struct ufs_hba *hba;
5832 unsigned long pending;
5833 unsigned int ncpl;
5834};
5835
5836static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5837{
5838 struct ctm_info *const ci = priv;
5839 struct completion *c;
5840
5841 WARN_ON_ONCE(reserved);
5842 if (test_bit(req->tag, &ci->pending))
5843 return true;
5844 ci->ncpl++;
5845 c = req->end_io_data;
5846 if (c)
5847 complete(c);
5848 return true;
5849}
5850
7a3e97b0
SY
5851/**
5852 * ufshcd_tmc_handler - handle task management function completion
5853 * @hba: per adapter instance
9333d775
VG
5854 *
5855 * Returns
5856 * IRQ_HANDLED - If interrupt is valid
5857 * IRQ_NONE - If invalid interrupt
7a3e97b0 5858 */
9333d775 5859static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
7a3e97b0 5860{
69a6c269
BVA
5861 struct request_queue *q = hba->tmf_queue;
5862 struct ctm_info ci = {
5863 .hba = hba,
5864 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5865 };
7a3e97b0 5866
69a6c269
BVA
5867 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5868 return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
7a3e97b0
SY
5869}
5870
5871/**
5872 * ufshcd_sl_intr - Interrupt service routine
5873 * @hba: per adapter instance
5874 * @intr_status: contains interrupts generated by the controller
9333d775
VG
5875 *
5876 * Returns
5877 * IRQ_HANDLED - If interrupt is valid
5878 * IRQ_NONE - If invalid interrupt
7a3e97b0 5879 */
9333d775 5880static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
7a3e97b0 5881{
9333d775
VG
5882 irqreturn_t retval = IRQ_NONE;
5883
7a3e97b0 5884 hba->errors = UFSHCD_ERROR_MASK & intr_status;
82174440
SC
5885
5886 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5887 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5888
7a3e97b0 5889 if (hba->errors)
9333d775 5890 retval |= ufshcd_check_errors(hba);
7a3e97b0 5891
53b3d9c3 5892 if (intr_status & UFSHCD_UIC_MASK)
9333d775 5893 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
7a3e97b0
SY
5894
5895 if (intr_status & UTP_TASK_REQ_COMPL)
9333d775 5896 retval |= ufshcd_tmc_handler(hba);
7a3e97b0
SY
5897
5898 if (intr_status & UTP_TRANSFER_REQ_COMPL)
9333d775
VG
5899 retval |= ufshcd_transfer_req_compl(hba);
5900
5901 return retval;
7a3e97b0
SY
5902}
5903
5904/**
5905 * ufshcd_intr - Main interrupt service routine
5906 * @irq: irq number
5907 * @__hba: pointer to adapter instance
5908 *
9333d775
VG
5909 * Returns
5910 * IRQ_HANDLED - If interrupt is valid
5911 * IRQ_NONE - If invalid interrupt
7a3e97b0
SY
5912 */
5913static irqreturn_t ufshcd_intr(int irq, void *__hba)
5914{
d75f7fe4 5915 u32 intr_status, enabled_intr_status;
7a3e97b0
SY
5916 irqreturn_t retval = IRQ_NONE;
5917 struct ufs_hba *hba = __hba;
7f6ba4f1 5918 int retries = hba->nutrs;
7a3e97b0
SY
5919
5920 spin_lock(hba->host->host_lock);
b873a275 5921 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7a3e97b0 5922
7f6ba4f1
VG
5923 /*
5924 * There can be at most hba->nutrs requests in flight. In the worst
5925 * case, if the requests finish one by one after the interrupt status
5926 * is read, handle them by re-checking the interrupt status in a loop
5927 * until all of the requests have been processed before returning.
5928 */
5929 do {
5930 enabled_intr_status =
5931 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5932 if (intr_status)
5933 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
9333d775
VG
5934 if (enabled_intr_status)
5935 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7f6ba4f1
VG
5936
5937 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5938 } while (intr_status && --retries);
d75f7fe4 5939
9333d775
VG
5940 if (retval == IRQ_NONE) {
5941 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
5942 __func__, intr_status);
5943 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
5944 }
5945
7a3e97b0
SY
5946 spin_unlock(hba->host->host_lock);
5947 return retval;
5948}
5949
e2933132
SRT
5950static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5951{
5952 int err = 0;
5953 u32 mask = 1 << tag;
5954 unsigned long flags;
5955
5956 if (!test_bit(tag, &hba->outstanding_tasks))
5957 goto out;
5958
5959 spin_lock_irqsave(hba->host->host_lock, flags);
1399c5b0 5960 ufshcd_utmrl_clear(hba, tag);
e2933132
SRT
5961 spin_unlock_irqrestore(hba->host->host_lock, flags);
5962
5963 /* poll for max. 1 sec to clear door bell register by h/w */
5964 err = ufshcd_wait_for_register(hba,
5965 REG_UTP_TASK_REQ_DOOR_BELL,
5cac1095 5966 mask, 0, 1000, 1000);
e2933132
SRT
5967out:
5968 return err;
5969}
5970
c6049cd9
CH
5971static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5972 struct utp_task_req_desc *treq, u8 tm_function)
7a3e97b0 5973{
69a6c269 5974 struct request_queue *q = hba->tmf_queue;
c6049cd9 5975 struct Scsi_Host *host = hba->host;
69a6c269
BVA
5976 DECLARE_COMPLETION_ONSTACK(wait);
5977 struct request *req;
7a3e97b0 5978 unsigned long flags;
c6049cd9 5979 int free_slot, task_tag, err;
7a3e97b0 5980
e2933132
SRT
5981 /*
5982 * Get a free slot: blk_get_request() sleeps until one of the
5983 * reserved tags becomes available. The completion wait further
5984 * below is bounded by %TM_CMD_TIMEOUT.
5985 */
69a6c269
BVA
5986 req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
5987 req->end_io_data = &wait;
5988 free_slot = req->tag;
5989 WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
1ab27c9c 5990 ufshcd_hold(hba, false);
7a3e97b0 5991
e2933132 5992 spin_lock_irqsave(host->host_lock, flags);
e2933132 5993 task_tag = hba->nutrs + free_slot;
7a3e97b0 5994
c6049cd9
CH
5995 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5996
5997 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
d2877be4
KK
5998 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5999
7a3e97b0
SY
6000 /* send command to the controller */
6001 __set_bit(free_slot, &hba->outstanding_tasks);
897efe62
YG
6002
6003 /* Make sure descriptors are ready before ringing the task doorbell */
6004 wmb();
6005
b873a275 6006 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
ad1a1b9c
GB
6007 /* Make sure that doorbell is committed immediately */
6008 wmb();
7a3e97b0
SY
6009
6010 spin_unlock_irqrestore(host->host_lock, flags);
6011
6667e6d9
OS
6012 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6013
7a3e97b0 6014 /* wait until the task management command is completed */
69a6c269 6015 err = wait_for_completion_io_timeout(&wait,
e2933132 6016 msecs_to_jiffies(TM_CMD_TIMEOUT));
7a3e97b0 6017 if (!err) {
69a6c269
BVA
6018 /*
6019 * Make sure that ufshcd_compl_tm() does not trigger a
6020 * use-after-free.
6021 */
6022 req->end_io_data = NULL;
6667e6d9 6023 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
e2933132
SRT
6024 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6025 __func__, tm_function);
6026 if (ufshcd_clear_tm_cmd(hba, free_slot))
6027 dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
6028 __func__, free_slot);
6029 err = -ETIMEDOUT;
6030 } else {
c6049cd9
CH
6031 err = 0;
6032 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
6033
6667e6d9 6034 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
7a3e97b0 6035 }
e2933132 6036
b557217c
SC
6037 spin_lock_irqsave(hba->host->host_lock, flags);
6038 __clear_bit(free_slot, &hba->outstanding_tasks);
6039 spin_unlock_irqrestore(hba->host->host_lock, flags);
6040
69a6c269 6041 blk_put_request(req);
e2933132 6042
1ab27c9c 6043 ufshcd_release(hba);
7a3e97b0
SY
6044 return err;
6045}
6046
c6049cd9
CH
6047/**
6048 * ufshcd_issue_tm_cmd - issues task management commands to controller
6049 * @hba: per adapter instance
6050 * @lun_id: LUN ID to which TM command is sent
6051 * @task_id: task ID to which the TM command is applicable
6052 * @tm_function: task management function opcode
6053 * @tm_response: task management service response return value
6054 *
6055 * Returns non-zero value on error, zero on success.
6056 */
6057static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6058 u8 tm_function, u8 *tm_response)
6059{
6060 struct utp_task_req_desc treq = { { 0 }, };
6061 int ocs_value, err;
6062
6063 /* Configure task request descriptor */
6064 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6065 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6066
6067 /* Configure task request UPIU */
6068 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6069 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6070 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6071
6072 /*
6073 * The host shall provide the same value for LUN field in the basic
6074 * header and for Input Parameter.
6075 */
6076 treq.input_param1 = cpu_to_be32(lun_id);
6077 treq.input_param2 = cpu_to_be32(task_id);
6078
6079 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6080 if (err == -ETIMEDOUT)
6081 return err;
6082
6083 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6084 if (ocs_value != OCS_SUCCESS)
6085 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6086 __func__, ocs_value);
6087 else if (tm_response)
6088 *tm_response = be32_to_cpu(treq.output_param1) &
6089 MASK_TM_SERVICE_RESP;
6090 return err;
6091}
6092
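/*
 * Illustrative sketch, not part of the driver: using ufshcd_issue_tm_cmd()
 * to ask the device whether a tagged command is still pending, much like
 * ufshcd_abort() does further below. Hypothetical helper, minimal error
 * handling.
 */
static int __maybe_unused ufshcd_example_query_task(struct ufs_hba *hba,
						    struct ufshcd_lrb *lrbp)
{
	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				  UFS_QUERY_TASK, &resp);
	if (err)
		return err;

	return resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED ? 1 : 0;
}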
5e0a86ee
AA
6093/**
6094 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6095 * @hba: per-adapter instance
6096 * @req_upiu: upiu request
6097 * @rsp_upiu: upiu reply
5e0a86ee
AA
6098 * @desc_buff: pointer to descriptor buffer, NULL if NA
6099 * @buff_len: descriptor size, 0 if NA
d0e9760d 6100 * @cmd_type: specifies the type (NOP, Query...)
5e0a86ee
AA
6101 * @desc_op: descriptor operation
6102 *
6103 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6104 * Therefore, they "ride" the device management infrastructure: they use its
6105 * tag and task work queues.
6106 *
6107 * Since there is only one available tag for device management commands,
6108 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6109 */
6110static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6111 struct utp_upiu_req *req_upiu,
6112 struct utp_upiu_req *rsp_upiu,
6113 u8 *desc_buff, int *buff_len,
7f674c38 6114 enum dev_cmd_type cmd_type,
5e0a86ee
AA
6115 enum query_opcode desc_op)
6116{
7252a360
BVA
6117 struct request_queue *q = hba->cmd_queue;
6118 struct request *req;
5e0a86ee
AA
6119 struct ufshcd_lrb *lrbp;
6120 int err = 0;
6121 int tag;
6122 struct completion wait;
6123 unsigned long flags;
6124 u32 upiu_flags;
6125
6126 down_read(&hba->clk_scaling_lock);
6127
7252a360 6128 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
bb14dd15
DC
6129 if (IS_ERR(req)) {
6130 err = PTR_ERR(req);
6131 goto out_unlock;
6132 }
7252a360
BVA
6133 tag = req->tag;
6134 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5e0a86ee
AA
6135
6136 init_completion(&wait);
6137 lrbp = &hba->lrb[tag];
6138 WARN_ON(lrbp->cmd);
6139
6140 lrbp->cmd = NULL;
6141 lrbp->sense_bufflen = 0;
6142 lrbp->sense_buffer = NULL;
6143 lrbp->task_tag = tag;
6144 lrbp->lun = 0;
6145 lrbp->intr_cmd = true;
6146 hba->dev_cmd.type = cmd_type;
6147
6148 switch (hba->ufs_version) {
6149 case UFSHCI_VERSION_10:
6150 case UFSHCI_VERSION_11:
6151 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6152 break;
6153 default:
6154 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6155 break;
6156 }
6157
6158 /* update the task tag in the request upiu */
6159 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6160
6161 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6162
6163 /* just copy the upiu request as it is */
6164 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6165 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6166 /* The Data Segment Area is optional depending upon the query
6167 * function value. For WRITE DESCRIPTOR, the data segment
6168 * follows right after the TSF.
6169 */
6170 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6171 *buff_len = 0;
6172 }
6173
6174 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6175
6176 hba->dev_cmd.complete = &wait;
6177
6178 /* Make sure descriptors are ready before ringing the doorbell */
6179 wmb();
6180 spin_lock_irqsave(hba->host->host_lock, flags);
6181 ufshcd_send_command(hba, tag);
6182 spin_unlock_irqrestore(hba->host->host_lock, flags);
6183
6184 /*
6185 * Ignore the return value here - ufshcd_check_query_response is
6186 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6187 * Read the response directly, ignoring all errors.
6188 */
6189 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6190
6191 /* just copy the upiu response as it is */
6192 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
4bbbe242
AA
6193 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6194 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6195 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6196 MASK_QUERY_DATA_SEG_LEN;
6197
6198 if (*buff_len >= resp_len) {
6199 memcpy(desc_buff, descp, resp_len);
6200 *buff_len = resp_len;
6201 } else {
3d4881d1
BH
6202 dev_warn(hba->dev,
6203 "%s: rsp size %d is bigger than buffer size %d",
6204 __func__, resp_len, *buff_len);
4bbbe242
AA
6205 *buff_len = 0;
6206 err = -EINVAL;
6207 }
6208 }
5e0a86ee 6209
7252a360 6210 blk_put_request(req);
bb14dd15 6211out_unlock:
5e0a86ee
AA
6212 up_read(&hba->clk_scaling_lock);
6213 return err;
6214}
6215
6216/**
6217 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6218 * @hba: per-adapter instance
6219 * @req_upiu: upiu request
6220 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6221 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6222 * @desc_buff: pointer to descriptor buffer, NULL if NA
6223 * @buff_len: descriptor size, 0 if NA
6224 * @desc_op: descriptor operation
6225 *
6226 * Supports UTP Transfer requests (NOP and query), and UTP Task
6227 * Management requests.
6228 * It is up to the caller to fill the UPIU content properly, as it will
6229 * be copied without any further input validation.
6230 */
6231int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6232 struct utp_upiu_req *req_upiu,
6233 struct utp_upiu_req *rsp_upiu,
6234 int msgcode,
6235 u8 *desc_buff, int *buff_len,
6236 enum query_opcode desc_op)
6237{
6238 int err;
7f674c38 6239 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
5e0a86ee
AA
6240 struct utp_task_req_desc treq = { { 0 }, };
6241 int ocs_value;
6242 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6243
5e0a86ee
AA
6244 switch (msgcode) {
6245 case UPIU_TRANSACTION_NOP_OUT:
6246 cmd_type = DEV_CMD_TYPE_NOP;
6247 /* fall through */
6248 case UPIU_TRANSACTION_QUERY_REQ:
6249 ufshcd_hold(hba, false);
6250 mutex_lock(&hba->dev_cmd.lock);
6251 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6252 desc_buff, buff_len,
6253 cmd_type, desc_op);
6254 mutex_unlock(&hba->dev_cmd.lock);
6255 ufshcd_release(hba);
6256
6257 break;
6258 case UPIU_TRANSACTION_TASK_REQ:
6259 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6260 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6261
6262 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6263
6264 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6265 if (err == -ETIMEDOUT)
6266 break;
6267
6268 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6269 if (ocs_value != OCS_SUCCESS) {
6270 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6271 ocs_value);
6272 break;
6273 }
6274
6275 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6276
6277 break;
6278 default:
6279 err = -EINVAL;
6280
6281 break;
6282 }
6283
5e0a86ee
AA
6284 return err;
6285}
6286
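/*
 * Illustrative sketch, not part of the driver: pushing a NOP OUT through
 * ufshcd_exec_raw_upiu_cmd(), roughly as the UFS BSG layer does. The UPIU
 * is filled by the caller and copied verbatim, as the comment above warns.
 * Hypothetical helper.
 */
static int __maybe_unused ufshcd_example_raw_nop_out(struct ufs_hba *hba)
{
	struct utp_upiu_req req_upiu = { };
	struct utp_upiu_req rsp_upiu = { };

	/* The transaction code lives in the topmost byte of dword_0 */
	req_upiu.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);

	return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, NULL, UPIU_QUERY_OPCODE_NOP);
}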
7a3e97b0 6287/**
3441da7d
SRT
6288 * ufshcd_eh_device_reset_handler - device reset handler registered to
6289 * scsi layer.
7a3e97b0
SY
6290 * @cmd: SCSI command pointer
6291 *
6292 * Returns SUCCESS/FAILED
6293 */
3441da7d 6294static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7a3e97b0
SY
6295{
6296 struct Scsi_Host *host;
6297 struct ufs_hba *hba;
6298 unsigned int tag;
6299 u32 pos;
6300 int err;
e2933132
SRT
6301 u8 resp = 0xF;
6302 struct ufshcd_lrb *lrbp;
3441da7d 6303 unsigned long flags;
7a3e97b0
SY
6304
6305 host = cmd->device->host;
6306 hba = shost_priv(host);
6307 tag = cmd->request->tag;
6308
e2933132
SRT
6309 lrbp = &hba->lrb[tag];
6310 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6311 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3441da7d
SRT
6312 if (!err)
6313 err = resp;
7a3e97b0 6314 goto out;
e2933132 6315 }
7a3e97b0 6316
3441da7d
SRT
6317 /* clear the commands that were pending for corresponding LUN */
6318 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6319 if (hba->lrb[pos].lun == lrbp->lun) {
6320 err = ufshcd_clear_cmd(hba, pos);
6321 if (err)
6322 break;
7a3e97b0 6323 }
3441da7d
SRT
6324 }
6325 spin_lock_irqsave(host->host_lock, flags);
6326 ufshcd_transfer_req_compl(hba);
6327 spin_unlock_irqrestore(host->host_lock, flags);
7fabb77b 6328
7a3e97b0 6329out:
7fabb77b 6330 hba->req_abort_count = 0;
8808b4e9 6331 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
3441da7d
SRT
6332 if (!err) {
6333 err = SUCCESS;
6334 } else {
6335 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6336 err = FAILED;
6337 }
7a3e97b0
SY
6338 return err;
6339}
6340
e0b299e3
GB
6341static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6342{
6343 struct ufshcd_lrb *lrbp;
6344 int tag;
6345
6346 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6347 lrbp = &hba->lrb[tag];
6348 lrbp->req_abort_skip = true;
6349 }
6350}
6351
7a3e97b0
SY
6352/**
6353 * ufshcd_abort - abort a specific command
6354 * @cmd: SCSI command pointer
6355 *
f20810d8
SRT
6356 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6357 * management command, and in the host controller by clearing the door-bell
6358 * register. There can be a race between the controller sending the command to
6359 * the device and the abort being issued. To avoid that, first issue
6360 * UFS_QUERY_TASK to check if the command was really issued, then try to abort it.
6361 *
7a3e97b0
SY
6362 * Returns SUCCESS/FAILED
6363 */
6364static int ufshcd_abort(struct scsi_cmnd *cmd)
6365{
6366 struct Scsi_Host *host;
6367 struct ufs_hba *hba;
6368 unsigned long flags;
6369 unsigned int tag;
f20810d8
SRT
6370 int err = 0;
6371 int poll_cnt;
e2933132
SRT
6372 u8 resp = 0xF;
6373 struct ufshcd_lrb *lrbp;
e9d501b1 6374 u32 reg;
7a3e97b0
SY
6375
6376 host = cmd->device->host;
6377 hba = shost_priv(host);
6378 tag = cmd->request->tag;
e7d38257 6379 lrbp = &hba->lrb[tag];
14497328
YG
6380 if (!ufshcd_valid_tag(hba, tag)) {
6381 dev_err(hba->dev,
6382 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6383 __func__, tag, cmd, cmd->request);
6384 BUG();
6385 }
7a3e97b0 6386
e7d38257
DR
6387 /*
6388 * Task abort to the device W-LUN is illegal. When this command
6389 * fails, due to the spec violation, the SCSI error handler's next
6390 * step will be to send a LU reset which, again, is a spec violation.
6391 * To avoid these unnecessary/illegal steps we skip straight to the
6392 * last error handling stage: reset and restore.
6393 */
6394 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6395 return ufshcd_eh_host_reset_handler(cmd);
6396
1ab27c9c 6397 ufshcd_hold(hba, false);
14497328 6398 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
f20810d8 6399 /* If command is already aborted/completed, return SUCCESS */
14497328
YG
6400 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6401 dev_err(hba->dev,
6402 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6403 __func__, tag, hba->outstanding_reqs, reg);
f20810d8 6404 goto out;
14497328 6405 }
7a3e97b0 6406
e9d501b1
DR
6407 if (!(reg & (1 << tag))) {
6408 dev_err(hba->dev,
6409 "%s: cmd was completed, but without a notifying intr, tag = %d",
6410 __func__, tag);
6411 }
6412
66cc820f
DR
6413 /* Print Transfer Request of aborted task */
6414 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
66cc820f 6415
7fabb77b
GB
6416 /*
6417 * Print detailed info about aborted request.
6418 * As more than one request might get aborted at the same time,
6419 * print full information only for the first aborted request in order
6420 * to reduce repeated printouts. For other aborted requests only print
6421 * basic details.
6422 */
6423 scsi_print_command(hba->lrb[tag].cmd);
6424 if (!hba->req_abort_count) {
8808b4e9 6425 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
7fabb77b 6426 ufshcd_print_host_regs(hba);
6ba65588 6427 ufshcd_print_host_state(hba);
7fabb77b
GB
6428 ufshcd_print_pwr_info(hba);
6429 ufshcd_print_trs(hba, 1 << tag, true);
6430 } else {
6431 ufshcd_print_trs(hba, 1 << tag, false);
6432 }
6433 hba->req_abort_count++;
e0b299e3
GB
6434
6435 /* Skip task abort in case previous aborts failed and report failure */
6436 if (lrbp->req_abort_skip) {
6437 err = -EIO;
6438 goto out;
6439 }
6440
f20810d8
SRT
6441 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6442 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6443 UFS_QUERY_TASK, &resp);
6444 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6445 /* cmd pending in the device */
ff8e20c6
DR
6446 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6447 __func__, tag);
f20810d8
SRT
6448 break;
6449 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
f20810d8
SRT
6450 /*
6451 * cmd not pending in the device, check if it is
6452 * in transition.
6453 */
ff8e20c6
DR
6454 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6455 __func__, tag);
f20810d8
SRT
6456 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6457 if (reg & (1 << tag)) {
6458 /* sleep for max. 200us to stabilize */
6459 usleep_range(100, 200);
6460 continue;
6461 }
6462 /* command completed already */
ff8e20c6
DR
6463 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6464 __func__, tag);
f20810d8
SRT
6465 goto out;
6466 } else {
ff8e20c6
DR
6467 dev_err(hba->dev,
6468 "%s: no response from device. tag = %d, err %d\n",
6469 __func__, tag, err);
f20810d8
SRT
6470 if (!err)
6471 err = resp; /* service response error */
6472 goto out;
6473 }
6474 }
6475
6476 if (!poll_cnt) {
6477 err = -EBUSY;
7a3e97b0
SY
6478 goto out;
6479 }
7a3e97b0 6480
e2933132
SRT
6481 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6482 UFS_ABORT_TASK, &resp);
6483 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
ff8e20c6 6484 if (!err) {
f20810d8 6485 err = resp; /* service response error */
ff8e20c6
DR
6486 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6487 __func__, tag, err);
6488 }
7a3e97b0 6489 goto out;
e2933132 6490 }
7a3e97b0 6491
f20810d8 6492 err = ufshcd_clear_cmd(hba, tag);
ff8e20c6
DR
6493 if (err) {
6494 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6495 __func__, tag, err);
f20810d8 6496 goto out;
ff8e20c6 6497 }
f20810d8 6498
7a3e97b0
SY
6499 scsi_dma_unmap(cmd);
6500
6501 spin_lock_irqsave(host->host_lock, flags);
a48353f6 6502 ufshcd_outstanding_req_clear(hba, tag);
7a3e97b0
SY
6503 hba->lrb[tag].cmd = NULL;
6504 spin_unlock_irqrestore(host->host_lock, flags);
5a0b0cb9 6505
7a3e97b0 6506out:
f20810d8
SRT
6507 if (!err) {
6508 err = SUCCESS;
6509 } else {
6510 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
e0b299e3 6511 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
f20810d8
SRT
6512 err = FAILED;
6513 }
6514
1ab27c9c
ST
6515 /*
6516 * This ufshcd_release() corresponds to the original scsi cmd that got
6517 * aborted here (as we won't get any IRQ for it).
6518 */
6519 ufshcd_release(hba);
7a3e97b0
SY
6520 return err;
6521}
6522
3441da7d
SRT
6523/**
6524 * ufshcd_host_reset_and_restore - reset and restore host controller
6525 * @hba: per-adapter instance
6526 *
6527 * Note that host controller reset may issue DME_RESET to
6528 * both the local and remote (device) UniPro stacks, and the attributes
6529 * are reset to their default state.
6530 *
6531 * Returns zero on success, non-zero on failure
6532 */
6533static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6534{
6535 int err;
3441da7d
SRT
6536 unsigned long flags;
6537
2df74b69
CG
6538 /*
6539 * Stop the host controller and complete the requests
6540 * cleared by h/w
6541 */
5cac1095
BVA
6542 ufshcd_hba_stop(hba);
6543
3441da7d 6544 spin_lock_irqsave(hba->host->host_lock, flags);
2df74b69
CG
6545 hba->silence_err_logs = true;
6546 ufshcd_complete_requests(hba);
6547 hba->silence_err_logs = false;
3441da7d
SRT
6548 spin_unlock_irqrestore(hba->host->host_lock, flags);
6549
a3cd5ec5 6550 /* scale up clocks to max frequency before full reinitialization */
394b949f 6551 ufshcd_set_clk_freq(hba, true);
a3cd5ec5 6552
3441da7d
SRT
6553 err = ufshcd_hba_enable(hba);
6554 if (err)
6555 goto out;
6556
6557 /* Establish the link again and restore the device */
1b9e2141 6558 err = ufshcd_probe_hba(hba, false);
1d337ec2
SRT
6559
6560 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3441da7d
SRT
6561 err = -EIO;
6562out:
6563 if (err)
6564 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
8808b4e9 6565 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
3441da7d
SRT
6566 return err;
6567}
6568
6569/**
6570 * ufshcd_reset_and_restore - reset and re-initialize host/device
6571 * @hba: per-adapter instance
6572 *
6573 * Reset and recover device, host and re-establish link. This
6574 * is helpful to recover the communication in fatal error conditions.
6575 *
6576 * Returns zero on success, non-zero on failure
6577 */
6578static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6579{
6580 int err = 0;
1d337ec2 6581 int retries = MAX_HOST_RESET_RETRIES;
3441da7d 6582
1d337ec2 6583 do {
d8d9f793
BA
6584 /* Reset the attached device */
6585 ufshcd_vops_device_reset(hba);
6586
1d337ec2
SRT
6587 err = ufshcd_host_reset_and_restore(hba);
6588 } while (err && --retries);
3441da7d 6589
3441da7d
SRT
6590 return err;
6591}
6592
6593/**
6594 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
8aa29f19 6595 * @cmd: SCSI command pointer
3441da7d
SRT
6596 *
6597 * Returns SUCCESS/FAILED
6598 */
6599static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6600{
6601 int err;
6602 unsigned long flags;
6603 struct ufs_hba *hba;
6604
6605 hba = shost_priv(cmd->device->host);
6606
1ab27c9c 6607 ufshcd_hold(hba, false);
3441da7d
SRT
6608 /*
6609 * Check if there is any race with fatal error handling.
6610 * If so, wait for it to complete. Even though fatal error
6611 * handling does reset and restore in some cases, don't assume
6612 * anything out of it. We are just avoiding race here.
6613 */
6614 do {
6615 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 6616 if (!(work_pending(&hba->eh_work) ||
8dc0da79
ZL
6617 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6618 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
3441da7d
SRT
6619 break;
6620 spin_unlock_irqrestore(hba->host->host_lock, flags);
6621 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
e8e7f271 6622 flush_work(&hba->eh_work);
3441da7d
SRT
6623 } while (1);
6624
6625 hba->ufshcd_state = UFSHCD_STATE_RESET;
6626 ufshcd_set_eh_in_progress(hba);
6627 spin_unlock_irqrestore(hba->host->host_lock, flags);
6628
6629 err = ufshcd_reset_and_restore(hba);
6630
6631 spin_lock_irqsave(hba->host->host_lock, flags);
6632 if (!err) {
6633 err = SUCCESS;
6634 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6635 } else {
6636 err = FAILED;
6637 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6638 }
6639 ufshcd_clear_eh_in_progress(hba);
6640 spin_unlock_irqrestore(hba->host->host_lock, flags);
6641
1ab27c9c 6642 ufshcd_release(hba);
3441da7d
SRT
6643 return err;
6644}
6645
3a4bf06d
YG
6646/**
6647 * ufshcd_get_max_icc_level - calculate the ICC level
6648 * @sup_curr_uA: max. current supported by the regulator
6649 * @start_scan: row at the desc table to start scan from
6650 * @buff: power descriptor buffer
6651 *
6652 * Returns calculated max ICC level for specific regulator
6653 */
6654static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6655{
6656 int i;
6657 int curr_uA;
6658 u16 data;
6659 u16 unit;
6660
6661 for (i = start_scan; i >= 0; i--) {
d79713f9 6662 data = be16_to_cpup((__be16 *)&buff[2 * i]);
3a4bf06d
YG
6663 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6664 ATTR_ICC_LVL_UNIT_OFFSET;
6665 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6666 switch (unit) {
6667 case UFSHCD_NANO_AMP:
6668 curr_uA = curr_uA / 1000;
6669 break;
6670 case UFSHCD_MILI_AMP:
6671 curr_uA = curr_uA * 1000;
6672 break;
6673 case UFSHCD_AMP:
6674 curr_uA = curr_uA * 1000 * 1000;
6675 break;
6676 case UFSHCD_MICRO_AMP:
6677 default:
6678 break;
6679 }
6680 if (sup_curr_uA >= curr_uA)
6681 break;
6682 }
6683 if (i < 0) {
6684 i = 0;
6685 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6686 }
6687
6688 return (u32)i;
6689}
6690
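/*
 * Illustrative sketch, not part of the driver: the unit/value decoding that
 * ufshcd_get_max_icc_level() applies to a single 16-bit power descriptor
 * entry, pulled out as a hypothetical helper for clarity. For example, a
 * value of 500 with unit UFSHCD_MILI_AMP decodes to 500000 uA.
 */
static int __maybe_unused ufshcd_example_icc_entry_to_uA(u16 data)
{
	u16 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET;
	int curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;

	switch (unit) {
	case UFSHCD_NANO_AMP:
		return curr_uA / 1000;
	case UFSHCD_MILI_AMP:
		return curr_uA * 1000;
	case UFSHCD_AMP:
		return curr_uA * 1000 * 1000;
	case UFSHCD_MICRO_AMP:
	default:
		return curr_uA;
	}
}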
6691/**
6692 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
6693 * In case regulators are not initialized we'll return 0
6694 * @hba: per-adapter instance
6695 * @desc_buf: power descriptor buffer to extract ICC levels from.
6696 * @len: length of desc_buf
6697 *
6698 * Returns calculated ICC level
6699 */
6700static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6701 u8 *desc_buf, int len)
6702{
6703 u32 icc_level = 0;
6704
6705 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6706 !hba->vreg_info.vccq2) {
6707 dev_err(hba->dev,
6708 "%s: Regulator capability was not set, actvIccLevel=%d",
6709 __func__, icc_level);
6710 goto out;
6711 }
6712
0487fff7 6713 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
3a4bf06d
YG
6714 icc_level = ufshcd_get_max_icc_level(
6715 hba->vreg_info.vcc->max_uA,
6716 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6717 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6718
0487fff7 6719 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
3a4bf06d
YG
6720 icc_level = ufshcd_get_max_icc_level(
6721 hba->vreg_info.vccq->max_uA,
6722 icc_level,
6723 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6724
0487fff7 6725 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
3a4bf06d
YG
6726 icc_level = ufshcd_get_max_icc_level(
6727 hba->vreg_info.vccq2->max_uA,
6728 icc_level,
6729 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6730out:
6731 return icc_level;
6732}
6733
e89860f1 6734static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
3a4bf06d
YG
6735{
6736 int ret;
7a0bf85b 6737 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
bbe21d7a 6738 u8 *desc_buf;
e89860f1 6739 u32 icc_level;
bbe21d7a
KC
6740
6741 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6742 if (!desc_buf)
6743 return;
3a4bf06d 6744
c4607a09
BH
6745 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
6746 desc_buf, buff_len);
3a4bf06d
YG
6747 if (ret) {
6748 dev_err(hba->dev,
6749 "%s: Failed reading power descriptor.len = %d ret = %d",
6750 __func__, buff_len, ret);
bbe21d7a 6751 goto out;
3a4bf06d
YG
6752 }
6753
e89860f1
CG
6754 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
6755 buff_len);
6756 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
3a4bf06d 6757
dbd34a61 6758 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
e89860f1 6759 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
3a4bf06d
YG
6760
6761 if (ret)
6762 dev_err(hba->dev,
6763 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
e89860f1 6764 __func__, icc_level, ret);
3a4bf06d 6765
bbe21d7a
KC
6766out:
6767 kfree(desc_buf);
3a4bf06d
YG
6768}
6769
fb276f77
CG
6770static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
6771{
6772 scsi_autopm_get_device(sdev);
6773 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
6774 if (sdev->rpm_autosuspend)
6775 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
6776 RPM_AUTOSUSPEND_DELAY_MS);
6777 scsi_autopm_put_device(sdev);
6778}
6779
2a8fa600
SJ
6780/**
6781 * ufshcd_scsi_add_wlus - Adds required W-LUs
6782 * @hba: per-adapter instance
6783 *
6784 * UFS device specification requires the UFS devices to support 4 well known
6785 * logical units:
6786 * "REPORT_LUNS" (address: 01h)
6787 * "UFS Device" (address: 50h)
6788 * "RPMB" (address: 44h)
6789 * "BOOT" (address: 30h)
6790 * UFS device's power management needs to be controlled by the "POWER CONDITION"
6791 * field of the SSU (START STOP UNIT) command. But this "power condition" field
6792 * will take effect only when it's sent to the "UFS device" well known logical
6793 * unit, hence we require the scsi_device instance to represent this logical unit
6794 * in order for the UFS host driver to send the SSU command for power management.
8aa29f19 6795 *
2a8fa600
SJ
6796 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6797 * Block) LU so user space process can control this LU. User space may also
6798 * want to have access to BOOT LU.
8aa29f19 6799 *
2a8fa600
SJ
6800 * This function adds scsi device instances for each of the well known LUs
6801 * (except "REPORT LUNS" LU).
6802 *
6803 * Returns zero on success (all required W-LUs are added successfully),
6804 * non-zero error value on failure (if failed to add any of the required W-LU).
6805 */
6806static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6807{
6808 int ret = 0;
7c48bfd0
AM
6809 struct scsi_device *sdev_rpmb;
6810 struct scsi_device *sdev_boot;
2a8fa600
SJ
6811
6812 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6813 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6814 if (IS_ERR(hba->sdev_ufs_device)) {
6815 ret = PTR_ERR(hba->sdev_ufs_device);
6816 hba->sdev_ufs_device = NULL;
6817 goto out;
6818 }
fb276f77 6819 ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
7c48bfd0 6820 scsi_device_put(hba->sdev_ufs_device);
2a8fa600 6821
7c48bfd0 6822 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
2a8fa600 6823 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7c48bfd0
AM
6824 if (IS_ERR(sdev_rpmb)) {
6825 ret = PTR_ERR(sdev_rpmb);
3d21fbde 6826 goto remove_sdev_ufs_device;
2a8fa600 6827 }
fb276f77 6828 ufshcd_blk_pm_runtime_init(sdev_rpmb);
7c48bfd0 6829 scsi_device_put(sdev_rpmb);
3d21fbde
HK
6830
6831 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6832 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
fb276f77 6833 if (IS_ERR(sdev_boot)) {
3d21fbde 6834 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
fb276f77
CG
6835 } else {
6836 ufshcd_blk_pm_runtime_init(sdev_boot);
3d21fbde 6837 scsi_device_put(sdev_boot);
fb276f77 6838 }
2a8fa600
SJ
6839 goto out;
6840
2a8fa600
SJ
6841remove_sdev_ufs_device:
6842 scsi_remove_device(hba->sdev_ufs_device);
6843out:
6844 return ret;
6845}
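/*
 * Note (cross-reference, not new behaviour): once this helper succeeds, the
 * "UFS Device" W-LU is reachable through hba->sdev_ufs_device, which is the
 * scsi_device that ufshcd_set_dev_pwr_mode() later uses to issue the
 * START STOP UNIT command for power management.
 */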
6846
3d17b9b5
AD
6847static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
6848{
6f8d5a6a
SC
6849 u8 lun;
6850 u32 d_lu_wb_buf_alloc;
6851
817d7e14
SC
6852 if (!ufshcd_is_wb_allowed(hba))
6853 return;
6854
7a0bf85b
BH
6855 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
6856 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
817d7e14
SC
6857 goto wb_disabled;
6858
3d17b9b5
AD
6859 hba->dev_info.d_ext_ufs_feature_sup =
6860 get_unaligned_be32(desc_buf +
6861 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
817d7e14
SC
6862
6863 if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
6864 goto wb_disabled;
6865
3d17b9b5
AD
6866 /*
6867 * WB may be supported but not configured while provisioning.
6868 * The spec says that in dedicated wb buffer mode
6869 * a max of 1 lun would have wb buffer configured.
6870 * Both shared and dedicated buffer modes are handled below.
6871 */
6872 hba->dev_info.b_wb_buffer_type =
6873 desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
6874
3d17b9b5
AD
6875 hba->dev_info.b_presrv_uspc_en =
6876 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
6877
6f8d5a6a
SC
6878 if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
6879 hba->dev_info.d_wb_alloc_units =
6880 get_unaligned_be32(desc_buf +
6881 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
6882 if (!hba->dev_info.d_wb_alloc_units)
6883 goto wb_disabled;
6884 } else {
6885 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
6886 d_lu_wb_buf_alloc = 0;
6887 ufshcd_read_unit_desc_param(hba,
6888 lun,
6889 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
6890 (u8 *)&d_lu_wb_buf_alloc,
6891 sizeof(d_lu_wb_buf_alloc));
6892 if (d_lu_wb_buf_alloc) {
6893 hba->dev_info.wb_dedicated_lu = lun;
6894 break;
6895 }
6896 }
817d7e14 6897
6f8d5a6a
SC
6898 if (!d_lu_wb_buf_alloc)
6899 goto wb_disabled;
6900 }
817d7e14
SC
6901 return;
6902
6903wb_disabled:
6904 hba->caps &= ~UFSHCD_CAP_WB_EN;
6905}
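/*
 * Decision summary for the probe above (illustrative): in shared mode
 * (b_wb_buffer_type == WB_BUF_MODE_SHARED) a non-zero
 * DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS value keeps WB enabled; in
 * dedicated mode the first LU reporting a non-zero
 * UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS becomes wb_dedicated_lu. Any other
 * combination falls through to wb_disabled and clears UFSHCD_CAP_WB_EN.
 */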
6906
8db269a5 6907void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
817d7e14
SC
6908{
6909 struct ufs_dev_fix *f;
6910 struct ufs_dev_info *dev_info = &hba->dev_info;
6911
8db269a5
SC
6912 if (!fixups)
6913 return;
6914
6915 for (f = fixups; f->quirk; f++) {
817d7e14
SC
6916 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
6917 f->wmanufacturerid == UFS_ANY_VENDOR) &&
6918 ((dev_info->model &&
6919 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
6920 !strcmp(f->model, UFS_ANY_MODEL)))
6921 hba->dev_quirks |= f->quirk;
6922 }
3d17b9b5 6923}
8db269a5 6924EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
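/*
 * Minimal sketch of a fixup table a vendor driver could pass to
 * ufshcd_fixup_dev_quirks(). The field names follow the matching loop
 * above; the single entry shown is purely illustrative and the table is
 * not referenced anywhere in this file.
 */
static struct ufs_dev_fix example_dev_fixups[] __maybe_unused = {
	{ .wmanufacturerid = UFS_ANY_VENDOR, .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ } /* terminator: the loop above stops when f->quirk == 0 */
};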
3d17b9b5 6925
c28c00ba
SC
6926static void ufs_fixup_device_setup(struct ufs_hba *hba)
6927{
6928 /* fix by general quirk table */
8db269a5 6929 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
c28c00ba
SC
6930
6931 /* allow vendors to fix quirks */
6932 ufshcd_vops_fixup_dev_quirks(hba);
6933}
6934
09750066 6935static int ufs_get_device_desc(struct ufs_hba *hba)
c58ab7aa
YG
6936{
6937 int err;
6938 u8 model_index;
bbe21d7a 6939 u8 *desc_buf;
09750066 6940 struct ufs_dev_info *dev_info = &hba->dev_info;
4b828fe1 6941
458a45f5 6942 desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
bbe21d7a
KC
6943 if (!desc_buf) {
6944 err = -ENOMEM;
6945 goto out;
6946 }
c58ab7aa 6947
c4607a09 6948 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7a0bf85b 6949 hba->desc_size[QUERY_DESC_IDN_DEVICE]);
c58ab7aa
YG
6950 if (err) {
6951 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6952 __func__, err);
6953 goto out;
6954 }
6955
6956 /*
6957 * getting vendor (manufacturerID) and Bank Index in big endian
6958 * format
6959 */
09750066 6960 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
c58ab7aa
YG
6961 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6962
09f17791
CG
6963 /* getting Specification Version in big endian format */
6964 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
6965 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
6966
c58ab7aa 6967 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
3d17b9b5 6968
4b828fe1 6969 err = ufshcd_read_string_desc(hba, model_index,
09750066 6970 &dev_info->model, SD_ASCII_STD);
4b828fe1 6971 if (err < 0) {
c58ab7aa
YG
6972 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6973 __func__, err);
6974 goto out;
6975 }
6976
817d7e14
SC
6977 ufs_fixup_device_setup(hba);
6978
6979 /*
6980 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices, or for UFS
6981 * devices with the UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES quirk enabled.
6982 */
6983 if (dev_info->wspecversion >= 0x310 ||
c7cee3e7 6984 dev_info->wspecversion == 0x220 ||
817d7e14
SC
6985 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
6986 ufshcd_wb_probe(hba, desc_buf);
6987
4b828fe1
TW
6988 /*
6989 * ufshcd_read_string_desc returns the size of the string on success,
6990 * so reset the error value here.
6991 */
6992 err = 0;
c58ab7aa
YG
6993
6994out:
bbe21d7a 6995 kfree(desc_buf);
c58ab7aa
YG
6996 return err;
6997}
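/*
 * Example of the wSpecVersion values read above: the attribute is BCD-like,
 * so 0x310 corresponds to a UFS 3.1 device and 0x220 to a UFS 2.2 device,
 * which is why the WriteBooster probe in ufs_get_device_desc() accepts
 * either value (or the EXTENDED_FEATURES quirk).
 */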
6998
09750066 6999static void ufs_put_device_desc(struct ufs_hba *hba)
4b828fe1 7000{
09750066
BH
7001 struct ufs_dev_info *dev_info = &hba->dev_info;
7002
7003 kfree(dev_info->model);
7004 dev_info->model = NULL;
4b828fe1
TW
7005}
7006
37113106
YG
7007/**
7008 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7009 * @hba: per-adapter instance
7010 *
7011 * PA_TActivate parameter can be tuned manually if UniPro version is less than
7012 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7013 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7014 * the hibern8 exit latency.
7015 *
7016 * Returns zero on success, non-zero error value on failure.
7017 */
7018static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7019{
7020 int ret = 0;
7021 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7022
7023 ret = ufshcd_dme_peer_get(hba,
7024 UIC_ARG_MIB_SEL(
7025 RX_MIN_ACTIVATETIME_CAPABILITY,
7026 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7027 &peer_rx_min_activatetime);
7028 if (ret)
7029 goto out;
7030
7031 /* make sure proper unit conversion is applied */
7032 tuned_pa_tactivate =
7033 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7034 / PA_TACTIVATE_TIME_UNIT_US);
7035 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7036 tuned_pa_tactivate);
7037
7038out:
7039 return ret;
7040}
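/*
 * Worked example for the conversion above, assuming the usual unipro.h
 * definitions RX_MIN_ACTIVATETIME_UNIT_US = 100 and
 * PA_TACTIVATE_TIME_UNIT_US = 10 (check the header for the authoritative
 * values): a peer capability of 2 units corresponds to 200 us, which is
 * programmed as PA_TACTIVATE = 200 / 10 = 20.
 */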
7041
7042/**
7043 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7044 * @hba: per-adapter instance
7045 *
7046 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7047 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7048 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7049 * This optimal value can help reduce the hibern8 exit latency.
7050 *
7051 * Returns zero on success, non-zero error value on failure.
7052 */
7053static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7054{
7055 int ret = 0;
7056 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7057 u32 max_hibern8_time, tuned_pa_hibern8time;
7058
7059 ret = ufshcd_dme_get(hba,
7060 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7061 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7062 &local_tx_hibern8_time_cap);
7063 if (ret)
7064 goto out;
7065
7066 ret = ufshcd_dme_peer_get(hba,
7067 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7068 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7069 &peer_rx_hibern8_time_cap);
7070 if (ret)
7071 goto out;
7072
7073 max_hibern8_time = max(local_tx_hibern8_time_cap,
7074 peer_rx_hibern8_time_cap);
7075 /* make sure proper unit conversion is applied */
7076 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7077 / PA_HIBERN8_TIME_UNIT_US);
7078 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7079 tuned_pa_hibern8time);
7080out:
7081 return ret;
7082}
7083
c6a6db43 7084/**
7085 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7086 * less than device PA_TACTIVATE time.
7087 * @hba: per-adapter instance
7088 *
7089 * Some UFS devices require host PA_TACTIVATE to be lower than device
7090 * PA_TACTIVATE, so we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE
7091 * quirk for such devices.
7092 *
7093 * Returns zero on success, non-zero error value on failure.
7094 */
7095static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7096{
7097 int ret = 0;
7098 u32 granularity, peer_granularity;
7099 u32 pa_tactivate, peer_pa_tactivate;
7100 u32 pa_tactivate_us, peer_pa_tactivate_us;
7101 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7102
7103 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7104 &granularity);
7105 if (ret)
7106 goto out;
7107
7108 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7109 &peer_granularity);
7110 if (ret)
7111 goto out;
7112
7113 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7114 (granularity > PA_GRANULARITY_MAX_VAL)) {
7115 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7116 __func__, granularity);
7117 return -EINVAL;
7118 }
7119
7120 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7121 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7122 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7123 __func__, peer_granularity);
7124 return -EINVAL;
7125 }
7126
7127 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7128 if (ret)
7129 goto out;
7130
7131 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7132 &peer_pa_tactivate);
7133 if (ret)
7134 goto out;
7135
7136 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7137 peer_pa_tactivate_us = peer_pa_tactivate *
7138 gran_to_us_table[peer_granularity - 1];
7139
7140 if (pa_tactivate_us > peer_pa_tactivate_us) {
7141 u32 new_peer_pa_tactivate;
7142
7143 new_peer_pa_tactivate = pa_tactivate_us /
7144 gran_to_us_table[peer_granularity - 1];
7145 new_peer_pa_tactivate++;
7146 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7147 new_peer_pa_tactivate);
7148 }
7149
7150out:
7151 return ret;
7152}
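/*
 * Worked example (illustrative values): with host PA_GRANULARITY = 3
 * (8 us per step, per gran_to_us_table[] above) and host PA_TACTIVATE = 2,
 * the host time is 16 us. If the peer uses PA_GRANULARITY = 1 (1 us per
 * step) and PA_TACTIVATE = 5 (5 us), the peer value is raised to
 * 16 / 1 + 1 = 17 steps so the device-side time exceeds the host's.
 */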
7153
09750066 7154static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
37113106
YG
7155{
7156 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7157 ufshcd_tune_pa_tactivate(hba);
7158 ufshcd_tune_pa_hibern8time(hba);
7159 }
7160
e91ed9e0
CG
7161 ufshcd_vops_apply_dev_quirks(hba);
7162
37113106
YG
7163 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7164 /* set 1ms timeout for PA_TACTIVATE */
7165 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
c6a6db43 7166
7167 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7168 ufshcd_quirk_tune_host_pa_tactivate(hba);
37113106
YG
7169}
7170
ff8e20c6
DR
7171static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7172{
ff8e20c6
DR
7173 hba->ufs_stats.hibern8_exit_cnt = 0;
7174 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7fabb77b 7175 hba->req_abort_count = 0;
ff8e20c6
DR
7176}
7177
731f0621
BH
7178static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7179{
7180 int err;
7181 size_t buff_len;
7182 u8 *desc_buf;
7183
7a0bf85b 7184 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
731f0621
BH
7185 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7186 if (!desc_buf) {
7187 err = -ENOMEM;
7188 goto out;
7189 }
7190
c4607a09
BH
7191 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7192 desc_buf, buff_len);
731f0621
BH
7193 if (err) {
7194 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7195 __func__, err);
7196 goto out;
7197 }
7198
7199 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7200 hba->dev_info.max_lu_supported = 32;
7201 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7202 hba->dev_info.max_lu_supported = 8;
7203
7204out:
7205 kfree(desc_buf);
7206 return err;
7207}
7208
9e1e8a75
SJ
7209static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7210 {19200000, REF_CLK_FREQ_19_2_MHZ},
7211 {26000000, REF_CLK_FREQ_26_MHZ},
7212 {38400000, REF_CLK_FREQ_38_4_MHZ},
7213 {52000000, REF_CLK_FREQ_52_MHZ},
7214 {0, REF_CLK_FREQ_INVAL},
7215};
7216
7217static enum ufs_ref_clk_freq
7218ufs_get_bref_clk_from_hz(unsigned long freq)
7219{
7220 int i;
7221
7222 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7223 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7224 return ufs_ref_clk_freqs[i].val;
7225
7226 return REF_CLK_FREQ_INVAL;
7227}
7228
7229void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7230{
7231 unsigned long freq;
7232
7233 freq = clk_get_rate(refclk);
7234
7235 hba->dev_ref_clk_freq =
7236 ufs_get_bref_clk_from_hz(freq);
7237
7238 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7239 dev_err(hba->dev,
7240 "invalid ref_clk setting = %ld\n", freq);
7241}
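/*
 * Example: a device-tree "ref_clk" fixed-rate clock of 26000000 Hz maps to
 * REF_CLK_FREQ_26_MHZ through ufs_ref_clk_freqs[] above; any rate not in
 * that table yields REF_CLK_FREQ_INVAL and triggers the error print.
 */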
7242
7243static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7244{
7245 int err;
7246 u32 ref_clk;
7247 u32 freq = hba->dev_ref_clk_freq;
7248
7249 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7250 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7251
7252 if (err) {
7253 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7254 err);
7255 goto out;
7256 }
7257
7258 if (ref_clk == freq)
7259 goto out; /* nothing to update */
7260
7261 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7262 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7263
7264 if (err) {
7265 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7266 ufs_ref_clk_freqs[freq].freq_hz);
7267 goto out;
7268 }
7269
7270 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7271 ufs_ref_clk_freqs[freq].freq_hz);
7272
7273out:
7274 return err;
7275}
7276
1b9e2141
BH
7277static int ufshcd_device_params_init(struct ufs_hba *hba)
7278{
7279 bool flag;
7a0bf85b 7280 int ret, i;
1b9e2141 7281
7a0bf85b
BH
7282 /* Init device descriptor sizes */
7283 for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7284 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
1b9e2141 7285
731f0621
BH
7286 /* Init UFS geometry descriptor related parameters */
7287 ret = ufshcd_device_geo_params_init(hba);
7288 if (ret)
7289 goto out;
7290
1b9e2141
BH
7291 /* Check and apply UFS device quirks */
7292 ret = ufs_get_device_desc(hba);
7293 if (ret) {
7294 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7295 __func__, ret);
7296 goto out;
7297 }
7298
09f17791
CG
7299 ufshcd_get_ref_clk_gating_wait(hba);
7300
1b9e2141 7301 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1f34eedf 7302 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
1b9e2141
BH
7303 hba->dev_info.f_power_on_wp_en = flag;
7304
2b35b2ad
BH
7305 /* Probe maximum power mode co-supported by both UFS host and device */
7306 if (ufshcd_get_max_pwr_mode(hba))
7307 dev_err(hba->dev,
7308 "%s: Failed getting max supported power mode\n",
7309 __func__);
1b9e2141
BH
7310out:
7311 return ret;
7312}
7313
7314/**
7315 * ufshcd_add_lus - probe and add UFS logical units
7316 * @hba: per-adapter instance
7317 */
7318static int ufshcd_add_lus(struct ufs_hba *hba)
7319{
7320 int ret;
7321
1b9e2141
BH
7322 /* Add required well known logical units to scsi mid layer */
7323 ret = ufshcd_scsi_add_wlus(hba);
7324 if (ret)
7325 goto out;
7326
7327 /* Initialize devfreq after UFS device is detected */
7328 if (ufshcd_is_clkscaling_supported(hba)) {
7329 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7330 &hba->pwr_info,
7331 sizeof(struct ufs_pa_layer_attr));
7332 hba->clk_scaling.saved_pwr_info.is_valid = true;
7333 if (!hba->devfreq) {
7334 ret = ufshcd_devfreq_init(hba);
7335 if (ret)
7336 goto out;
7337 }
7338
7339 hba->clk_scaling.is_allowed = true;
7340 }
7341
7342 ufs_bsg_probe(hba);
7343 scsi_scan_host(hba->host);
7344 pm_runtime_put_sync(hba->dev);
7345
1b9e2141
BH
7346out:
7347 return ret;
7348}
7349
6ccf44fe 7350/**
1d337ec2
SRT
7351 * ufshcd_probe_hba - probe hba to detect device and initialize
7352 * @hba: per-adapter instance
1b9e2141 7353 * @async: asynchronous execution or not
1d337ec2
SRT
7354 *
7355 * Execute link-startup and verify device initialization
6ccf44fe 7356 */
1b9e2141 7357static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
6ccf44fe 7358{
6ccf44fe 7359 int ret;
7ff5ab47 7360 ktime_t start = ktime_get();
6ccf44fe
SJ
7361
7362 ret = ufshcd_link_startup(hba);
5a0b0cb9
SRT
7363 if (ret)
7364 goto out;
7365
ff8e20c6
DR
7366 /* Debug counters initialization */
7367 ufshcd_clear_dbg_ufs_stats(hba);
7368
57d104c1
SJ
7369 /* UniPro link is active now */
7370 ufshcd_set_link_active(hba);
d3e89bac 7371
1b9e2141 7372 /* Verify device initialization by sending NOP OUT UPIU */
5a0b0cb9
SRT
7373 ret = ufshcd_verify_dev_init(hba);
7374 if (ret)
7375 goto out;
68078d5c 7376
1b9e2141 7377 /* Initiate UFS initialization, and wait until it completes */
68078d5c
DR
7378 ret = ufshcd_complete_dev_init(hba);
7379 if (ret)
7380 goto out;
5a0b0cb9 7381
1b9e2141
BH
7382 /*
7383 * Initialize UFS device parameters used by the driver; these
7384 * parameters are associated with UFS descriptors.
7385 */
7386 if (async) {
7387 ret = ufshcd_device_params_init(hba);
7388 if (ret)
7389 goto out;
93fdd5ac
TW
7390 }
7391
09750066 7392 ufshcd_tune_unipro_params(hba);
4b828fe1 7393
57d104c1
SJ
7394 /* UFS device is also active now */
7395 ufshcd_set_ufs_dev_active(hba);
66ec6d59 7396 ufshcd_force_reset_auto_bkops(hba);
57d104c1
SJ
7397 hba->wlun_dev_clr_ua = true;
7398
2b35b2ad
BH
7399 /* Gear up to HS gear if supported */
7400 if (hba->max_pwr_info.is_valid) {
9e1e8a75
SJ
7401 /*
7402 * Set the right value to bRefClkFreq before attempting to
7403 * switch to HS gears.
7404 */
7405 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7406 ufshcd_set_dev_ref_clk(hba);
7eb584db 7407 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8643ae66 7408 if (ret) {
7eb584db
DR
7409 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7410 __func__, ret);
8643ae66
DL
7411 goto out;
7412 }
6a9df818 7413 ufshcd_print_pwr_info(hba);
7eb584db 7414 }
57d104c1 7415
e89860f1
CG
7416 /*
7417 * bActiveICCLevel is volatile for the UFS device (as per the latest v2.1
7418 * spec) and for removable UFS cards as well, hence always set the parameter.
7419 * Note: the error handler may issue a device reset, which resets
7420 * bActiveICCLevel as well, so it is always safe to set this here.
7421 */
7422 ufshcd_set_active_icc_lvl(hba);
7423
53c12d0e
YG
7424 /* set the state as operational after switching to desired gear */
7425 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
a4b0e8a4 7426
3d17b9b5 7427 ufshcd_wb_config(hba);
71d848b8
CG
7428 /* Enable Auto-Hibernate if configured */
7429 ufshcd_auto_hibern8_enable(hba);
7430
5a0b0cb9 7431out:
1d337ec2 7432
7ff5ab47 7433 trace_ufshcd_init(dev_name(hba->dev), ret,
7434 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 7435 hba->curr_dev_pwr_mode, hba->uic_link_state);
1d337ec2
SRT
7436 return ret;
7437}
7438
7439/**
7440 * ufshcd_async_scan - asynchronous execution for probing hba
7441 * @data: data pointer to pass to this function
7442 * @cookie: cookie data
7443 */
7444static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7445{
7446 struct ufs_hba *hba = (struct ufs_hba *)data;
1b9e2141 7447 int ret;
1d337ec2 7448
1b9e2141
BH
7449 /* Initialize hba, detect and initialize UFS device */
7450 ret = ufshcd_probe_hba(hba, true);
7451 if (ret)
7452 goto out;
7453
7454 /* Probe and add UFS logical units */
7455 ret = ufshcd_add_lus(hba);
7456out:
7457 /*
7458 * If we failed to initialize the device or the device is not
7459 * present, turn off the power/clocks etc.
7460 */
7461 if (ret) {
7462 pm_runtime_put_sync(hba->dev);
7463 ufshcd_exit_clk_scaling(hba);
7464 ufshcd_hba_exit(hba);
7465 }
6ccf44fe
SJ
7466}
7467
d829fc8a
SN
7468static const struct attribute_group *ufshcd_driver_groups[] = {
7469 &ufs_sysfs_unit_descriptor_group,
ec92b59c 7470 &ufs_sysfs_lun_attributes_group,
d829fc8a
SN
7471 NULL,
7472};
7473
90b8491c
SC
7474static struct ufs_hba_variant_params ufs_hba_vps = {
7475 .hba_enable_delay_us = 1000,
d14734ae 7476 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
90b8491c
SC
7477 .devfreq_profile.polling_ms = 100,
7478 .devfreq_profile.target = ufshcd_devfreq_target,
7479 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
7480 .ondemand_data.upthreshold = 70,
7481 .ondemand_data.downdifferential = 5,
7482};
7483
7a3e97b0
SY
7484static struct scsi_host_template ufshcd_driver_template = {
7485 .module = THIS_MODULE,
7486 .name = UFSHCD,
7487 .proc_name = UFSHCD,
7488 .queuecommand = ufshcd_queuecommand,
7489 .slave_alloc = ufshcd_slave_alloc,
eeda4749 7490 .slave_configure = ufshcd_slave_configure,
7a3e97b0 7491 .slave_destroy = ufshcd_slave_destroy,
4264fd61 7492 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 7493 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
7494 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7495 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7a3e97b0
SY
7496 .this_id = -1,
7497 .sg_tablesize = SG_ALL,
7498 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7499 .can_queue = UFSHCD_CAN_QUEUE,
552a990c 7500 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
1ab27c9c 7501 .max_host_blocked = 1,
c40ecc12 7502 .track_queue_depth = 1,
d829fc8a 7503 .sdev_groups = ufshcd_driver_groups,
4af14d11 7504 .dma_boundary = PAGE_SIZE - 1,
49615ba1 7505 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
7a3e97b0
SY
7506};
7507
57d104c1
SJ
7508static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7509 int ua)
7510{
7b16a07c 7511 int ret;
57d104c1 7512
7b16a07c
BA
7513 if (!vreg)
7514 return 0;
57d104c1 7515
0487fff7
SC
7516 /*
7517 * "set_load" operation shall be required on those regulators
7518 * which specifically configured current limitation. Otherwise
7519 * zero max_uA may cause unexpected behavior when regulator is
7520 * enabled or set as high power mode.
7521 */
7522 if (!vreg->max_uA)
7523 return 0;
7524
7b16a07c
BA
7525 ret = regulator_set_load(vreg->reg, ua);
7526 if (ret < 0) {
7527 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7528 __func__, vreg->name, ua, ret);
57d104c1
SJ
7529 }
7530
7531 return ret;
7532}
7533
7534static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7535 struct ufs_vreg *vreg)
7536{
73067981 7537 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
57d104c1
SJ
7538}
7539
7540static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7541 struct ufs_vreg *vreg)
7542{
7c7cfdcf
AH
7543 if (!vreg)
7544 return 0;
7545
73067981 7546 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
57d104c1
SJ
7547}
7548
aa497613
SRT
7549static int ufshcd_config_vreg(struct device *dev,
7550 struct ufs_vreg *vreg, bool on)
7551{
7552 int ret = 0;
72753590
GS
7553 struct regulator *reg;
7554 const char *name;
aa497613
SRT
7555 int min_uV, uA_load;
7556
7557 BUG_ON(!vreg);
7558
72753590
GS
7559 reg = vreg->reg;
7560 name = vreg->name;
7561
aa497613 7562 if (regulator_count_voltages(reg) > 0) {
90d88f47
AD
7563 uA_load = on ? vreg->max_uA : 0;
7564 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7565 if (ret)
7566 goto out;
7567
3b141e8c
SC
7568 if (vreg->min_uV && vreg->max_uV) {
7569 min_uV = on ? vreg->min_uV : 0;
7570 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7571 if (ret) {
7572 dev_err(dev,
7573 "%s: %s set voltage failed, err=%d\n",
aa497613 7574 __func__, name, ret);
3b141e8c
SC
7575 goto out;
7576 }
aa497613 7577 }
aa497613
SRT
7578 }
7579out:
7580 return ret;
7581}
7582
7583static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7584{
7585 int ret = 0;
7586
73067981 7587 if (!vreg || vreg->enabled)
aa497613
SRT
7588 goto out;
7589
7590 ret = ufshcd_config_vreg(dev, vreg, true);
7591 if (!ret)
7592 ret = regulator_enable(vreg->reg);
7593
7594 if (!ret)
7595 vreg->enabled = true;
7596 else
7597 dev_err(dev, "%s: %s enable failed, err=%d\n",
7598 __func__, vreg->name, ret);
7599out:
7600 return ret;
7601}
7602
7603static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7604{
7605 int ret = 0;
7606
73067981 7607 if (!vreg || !vreg->enabled)
aa497613
SRT
7608 goto out;
7609
7610 ret = regulator_disable(vreg->reg);
7611
7612 if (!ret) {
7613 /* ignore errors on applying disable config */
7614 ufshcd_config_vreg(dev, vreg, false);
7615 vreg->enabled = false;
7616 } else {
7617 dev_err(dev, "%s: %s disable failed, err=%d\n",
7618 __func__, vreg->name, ret);
7619 }
7620out:
7621 return ret;
7622}
7623
7624static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7625{
7626 int ret = 0;
7627 struct device *dev = hba->dev;
7628 struct ufs_vreg_info *info = &hba->vreg_info;
7629
aa497613
SRT
7630 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7631 if (ret)
7632 goto out;
7633
7634 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7635 if (ret)
7636 goto out;
7637
7638 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7639 if (ret)
7640 goto out;
7641
7642out:
7643 if (ret) {
7644 ufshcd_toggle_vreg(dev, info->vccq2, false);
7645 ufshcd_toggle_vreg(dev, info->vccq, false);
7646 ufshcd_toggle_vreg(dev, info->vcc, false);
7647 }
7648 return ret;
7649}
7650
6a771a65
RS
7651static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7652{
7653 struct ufs_vreg_info *info = &hba->vreg_info;
7654
60b7b823 7655 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6a771a65
RS
7656}
7657
aa497613
SRT
7658static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7659{
7660 int ret = 0;
7661
7662 if (!vreg)
7663 goto out;
7664
7665 vreg->reg = devm_regulator_get(dev, vreg->name);
7666 if (IS_ERR(vreg->reg)) {
7667 ret = PTR_ERR(vreg->reg);
7668 dev_err(dev, "%s: %s get failed, err=%d\n",
7669 __func__, vreg->name, ret);
7670 }
7671out:
7672 return ret;
7673}
7674
7675static int ufshcd_init_vreg(struct ufs_hba *hba)
7676{
7677 int ret = 0;
7678 struct device *dev = hba->dev;
7679 struct ufs_vreg_info *info = &hba->vreg_info;
7680
aa497613
SRT
7681 ret = ufshcd_get_vreg(dev, info->vcc);
7682 if (ret)
7683 goto out;
7684
7685 ret = ufshcd_get_vreg(dev, info->vccq);
7686 if (ret)
7687 goto out;
7688
7689 ret = ufshcd_get_vreg(dev, info->vccq2);
7690out:
7691 return ret;
7692}
7693
6a771a65
RS
7694static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7695{
7696 struct ufs_vreg_info *info = &hba->vreg_info;
7697
7698 if (info)
7699 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7700
7701 return 0;
7702}
7703
57d104c1
SJ
7704static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7705 bool skip_ref_clk)
c6e79dac
SRT
7706{
7707 int ret = 0;
7708 struct ufs_clk_info *clki;
7709 struct list_head *head = &hba->clk_list_head;
1ab27c9c 7710 unsigned long flags;
911a0771 7711 ktime_t start = ktime_get();
7712 bool clk_state_changed = false;
c6e79dac 7713
566ec9ad 7714 if (list_empty(head))
c6e79dac
SRT
7715 goto out;
7716
38f3242e
CG
7717 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7718 if (ret)
7719 return ret;
1e879e8f 7720
c6e79dac
SRT
7721 list_for_each_entry(clki, head, list) {
7722 if (!IS_ERR_OR_NULL(clki->clk)) {
57d104c1
SJ
7723 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7724 continue;
7725
911a0771 7726 clk_state_changed = on ^ clki->enabled;
c6e79dac
SRT
7727 if (on && !clki->enabled) {
7728 ret = clk_prepare_enable(clki->clk);
7729 if (ret) {
7730 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7731 __func__, clki->name, ret);
7732 goto out;
7733 }
7734 } else if (!on && clki->enabled) {
7735 clk_disable_unprepare(clki->clk);
7736 }
7737 clki->enabled = on;
7738 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7739 clki->name, on ? "en" : "dis");
7740 }
7741 }
1ab27c9c 7742
38f3242e
CG
7743 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7744 if (ret)
7745 return ret;
1e879e8f 7746
c6e79dac
SRT
7747out:
7748 if (ret) {
7749 list_for_each_entry(clki, head, list) {
7750 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7751 clk_disable_unprepare(clki->clk);
7752 }
7ff5ab47 7753 } else if (!ret && on) {
1ab27c9c
ST
7754 spin_lock_irqsave(hba->host->host_lock, flags);
7755 hba->clk_gating.state = CLKS_ON;
7ff5ab47 7756 trace_ufshcd_clk_gating(dev_name(hba->dev),
7757 hba->clk_gating.state);
1ab27c9c 7758 spin_unlock_irqrestore(hba->host->host_lock, flags);
c6e79dac 7759 }
7ff5ab47 7760
911a0771 7761 if (clk_state_changed)
7762 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7763 (on ? "on" : "off"),
7764 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
c6e79dac
SRT
7765 return ret;
7766}
7767
57d104c1
SJ
7768static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7769{
7770 return __ufshcd_setup_clocks(hba, on, false);
7771}
7772
c6e79dac
SRT
7773static int ufshcd_init_clocks(struct ufs_hba *hba)
7774{
7775 int ret = 0;
7776 struct ufs_clk_info *clki;
7777 struct device *dev = hba->dev;
7778 struct list_head *head = &hba->clk_list_head;
7779
566ec9ad 7780 if (list_empty(head))
c6e79dac
SRT
7781 goto out;
7782
7783 list_for_each_entry(clki, head, list) {
7784 if (!clki->name)
7785 continue;
7786
7787 clki->clk = devm_clk_get(dev, clki->name);
7788 if (IS_ERR(clki->clk)) {
7789 ret = PTR_ERR(clki->clk);
7790 dev_err(dev, "%s: %s clk get failed, %d\n",
7791 __func__, clki->name, ret);
7792 goto out;
7793 }
7794
9e1e8a75
SJ
7795 /*
7796 * Parse device ref clk freq as per device tree "ref_clk".
7797 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7798 * in ufshcd_alloc_host().
7799 */
7800 if (!strcmp(clki->name, "ref_clk"))
7801 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7802
c6e79dac
SRT
7803 if (clki->max_freq) {
7804 ret = clk_set_rate(clki->clk, clki->max_freq);
7805 if (ret) {
7806 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7807 __func__, clki->name,
7808 clki->max_freq, ret);
7809 goto out;
7810 }
856b3483 7811 clki->curr_freq = clki->max_freq;
c6e79dac
SRT
7812 }
7813 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7814 clki->name, clk_get_rate(clki->clk));
7815 }
7816out:
7817 return ret;
7818}
7819
5c0c28a8
SRT
7820static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7821{
7822 int err = 0;
7823
7824 if (!hba->vops)
7825 goto out;
7826
0263bcd0
YG
7827 err = ufshcd_vops_init(hba);
7828 if (err)
7829 goto out;
5c0c28a8 7830
0263bcd0
YG
7831 err = ufshcd_vops_setup_regulators(hba, true);
7832 if (err)
7833 goto out_exit;
5c0c28a8
SRT
7834
7835 goto out;
7836
5c0c28a8 7837out_exit:
0263bcd0 7838 ufshcd_vops_exit(hba);
5c0c28a8
SRT
7839out:
7840 if (err)
7841 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
0263bcd0 7842 __func__, ufshcd_get_var_name(hba), err);
5c0c28a8
SRT
7843 return err;
7844}
7845
7846static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7847{
7848 if (!hba->vops)
7849 return;
7850
0263bcd0 7851 ufshcd_vops_setup_regulators(hba, false);
5c0c28a8 7852
0263bcd0 7853 ufshcd_vops_exit(hba);
5c0c28a8
SRT
7854}
7855
aa497613
SRT
7856static int ufshcd_hba_init(struct ufs_hba *hba)
7857{
7858 int err;
7859
6a771a65
RS
7860 /*
7861 * Handle host controller power separately from the UFS device power
7862 * rails, as it helps to control the UFS host controller power
7863 * collapse easily, which is different from UFS device power collapse.
7864 * Also, enable the host controller power before we go ahead with the rest
7865 * of the initialization here.
7866 */
7867 err = ufshcd_init_hba_vreg(hba);
aa497613
SRT
7868 if (err)
7869 goto out;
7870
6a771a65 7871 err = ufshcd_setup_hba_vreg(hba, true);
aa497613
SRT
7872 if (err)
7873 goto out;
7874
6a771a65
RS
7875 err = ufshcd_init_clocks(hba);
7876 if (err)
7877 goto out_disable_hba_vreg;
7878
7879 err = ufshcd_setup_clocks(hba, true);
7880 if (err)
7881 goto out_disable_hba_vreg;
7882
c6e79dac
SRT
7883 err = ufshcd_init_vreg(hba);
7884 if (err)
7885 goto out_disable_clks;
7886
7887 err = ufshcd_setup_vreg(hba, true);
7888 if (err)
7889 goto out_disable_clks;
7890
aa497613
SRT
7891 err = ufshcd_variant_hba_init(hba);
7892 if (err)
7893 goto out_disable_vreg;
7894
1d337ec2 7895 hba->is_powered = true;
aa497613
SRT
7896 goto out;
7897
7898out_disable_vreg:
7899 ufshcd_setup_vreg(hba, false);
c6e79dac
SRT
7900out_disable_clks:
7901 ufshcd_setup_clocks(hba, false);
6a771a65
RS
7902out_disable_hba_vreg:
7903 ufshcd_setup_hba_vreg(hba, false);
aa497613
SRT
7904out:
7905 return err;
7906}
7907
7908static void ufshcd_hba_exit(struct ufs_hba *hba)
7909{
1d337ec2
SRT
7910 if (hba->is_powered) {
7911 ufshcd_variant_hba_exit(hba);
7912 ufshcd_setup_vreg(hba, false);
a508253d 7913 ufshcd_suspend_clkscaling(hba);
eebcc196 7914 if (ufshcd_is_clkscaling_supported(hba))
0701e49d 7915 if (hba->devfreq)
7916 ufshcd_suspend_clkscaling(hba);
1d337ec2
SRT
7917 ufshcd_setup_clocks(hba, false);
7918 ufshcd_setup_hba_vreg(hba, false);
7919 hba->is_powered = false;
09750066 7920 ufs_put_device_desc(hba);
1d337ec2 7921 }
aa497613
SRT
7922}
7923
57d104c1
SJ
7924static int
7925ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7926{
7927 unsigned char cmd[6] = {REQUEST_SENSE,
7928 0,
7929 0,
7930 0,
09a5a24f 7931 UFS_SENSE_SIZE,
57d104c1
SJ
7932 0};
7933 char *buffer;
7934 int ret;
7935
09a5a24f 7936 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
57d104c1
SJ
7937 if (!buffer) {
7938 ret = -ENOMEM;
7939 goto out;
7940 }
7941
fcbfffe2 7942 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
09a5a24f 7943 UFS_SENSE_SIZE, NULL, NULL,
fcbfffe2 7944 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
57d104c1
SJ
7945 if (ret)
7946 pr_err("%s: failed with err %d\n", __func__, ret);
7947
7948 kfree(buffer);
7949out:
7950 return ret;
7951}
7952
7953/**
7954 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7955 * power mode
7956 * @hba: per adapter instance
7957 * @pwr_mode: device power mode to set
7958 *
7959 * Returns 0 if requested power mode is set successfully
7960 * Returns non-zero if failed to set the requested power mode
7961 */
7962static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7963 enum ufs_dev_pwr_mode pwr_mode)
7964{
7965 unsigned char cmd[6] = { START_STOP };
7966 struct scsi_sense_hdr sshdr;
7c48bfd0
AM
7967 struct scsi_device *sdp;
7968 unsigned long flags;
57d104c1
SJ
7969 int ret;
7970
7c48bfd0
AM
7971 spin_lock_irqsave(hba->host->host_lock, flags);
7972 sdp = hba->sdev_ufs_device;
7973 if (sdp) {
7974 ret = scsi_device_get(sdp);
7975 if (!ret && !scsi_device_online(sdp)) {
7976 ret = -ENODEV;
7977 scsi_device_put(sdp);
7978 }
7979 } else {
7980 ret = -ENODEV;
7981 }
7982 spin_unlock_irqrestore(hba->host->host_lock, flags);
7983
7984 if (ret)
7985 return ret;
57d104c1
SJ
7986
7987 /*
7988 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7989 * handling, which would wait for host to be resumed. Since we know
7990 * we are functional while we are here, skip host resume in error
7991 * handling context.
7992 */
7993 hba->host->eh_noresume = 1;
7994 if (hba->wlun_dev_clr_ua) {
7995 ret = ufshcd_send_request_sense(hba, sdp);
7996 if (ret)
7997 goto out;
7998 /* Unit attention condition is cleared now */
7999 hba->wlun_dev_clr_ua = false;
8000 }
8001
8002 cmd[4] = pwr_mode << 4;
8003
8004 /*
8005 * This function is generally called from the power management
e8064021 8006 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
57d104c1
SJ
8007 * already suspended children.
8008 */
fcbfffe2
CH
8009 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8010 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
57d104c1
SJ
8011 if (ret) {
8012 sdev_printk(KERN_WARNING, sdp,
ef61329d
HR
8013 "START_STOP failed for power mode: %d, result %x\n",
8014 pwr_mode, ret);
c65be1a6 8015 if (driver_byte(ret) == DRIVER_SENSE)
21045519 8016 scsi_print_sense_hdr(sdp, NULL, &sshdr);
57d104c1
SJ
8017 }
8018
8019 if (!ret)
8020 hba->curr_dev_pwr_mode = pwr_mode;
8021out:
7c48bfd0 8022 scsi_device_put(sdp);
57d104c1
SJ
8023 hba->host->eh_noresume = 0;
8024 return ret;
8025}
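/*
 * Example of the CDB built above: the POWER CONDITION field lives in the
 * upper nibble of byte 4, hence cmd[4] = pwr_mode << 4. Treating the enum
 * values as illustrative, a sleep request (mode value 2) would yield
 * cmd[4] = 0x20.
 */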
8026
8027static int ufshcd_link_state_transition(struct ufs_hba *hba,
8028 enum uic_link_state req_link_state,
8029 int check_for_bkops)
8030{
8031 int ret = 0;
8032
8033 if (req_link_state == hba->uic_link_state)
8034 return 0;
8035
8036 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8037 ret = ufshcd_uic_hibern8_enter(hba);
8038 if (!ret)
8039 ufshcd_set_link_hibern8(hba);
8040 else
8041 goto out;
8042 }
8043 /*
8044 * If autobkops is enabled, link can't be turned off because
8045 * turning off the link would also turn off the device.
8046 */
8047 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
dc30c9e6 8048 (!check_for_bkops || !hba->auto_bkops_enabled)) {
f3099fbd
YG
8049 /*
8050 * Let's make sure that the link is in low power mode; we currently do
8051 * this by putting the link in Hibern8. Another way to put the link in
8052 * low power mode is to send a DME end point reset to the device and
8053 * then send the DME reset command to the local UniPro. But putting the
8054 * link in Hibern8 is much faster.
8055 */
8056 ret = ufshcd_uic_hibern8_enter(hba);
8057 if (ret)
8058 goto out;
57d104c1
SJ
8059 /*
8060 * Change controller state to "reset state" which
8061 * should also put the link in off/reset state
8062 */
5cac1095 8063 ufshcd_hba_stop(hba);
57d104c1
SJ
8064 /*
8065 * TODO: Check if we need any delay to make sure that
8066 * controller is reset
8067 */
8068 ufshcd_set_link_off(hba);
8069 }
8070
8071out:
8072 return ret;
8073}
8074
8075static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8076{
b799fdf7
YG
8077 /*
8078 * It seems some UFS devices may keep drawing more than sleep current
8079 * (at least for 500us) from UFS rails (especially from the VCCQ rail).
8080 * To avoid this situation, add 2ms delay before putting these UFS
8081 * rails in LPM mode.
8082 */
8083 if (!ufshcd_is_link_active(hba) &&
8084 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8085 usleep_range(2000, 2100);
8086
57d104c1
SJ
8087 /*
8088 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to save some
8089 * power.
8090 *
8091 * If UFS device and link is in OFF state, all power supplies (VCC,
8092 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8093 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8094 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8095 *
8096 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8097 * in low power state which would save some power.
3d17b9b5
AD
8098 *
8099 * If Write Booster is enabled and the device needs to flush the WB
8100 * buffer OR if bkops status is urgent for WB, keep Vcc on.
57d104c1
SJ
8101 */
8102 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8103 !hba->dev_info.is_lu_power_on_wp) {
8104 ufshcd_setup_vreg(hba, false);
8105 } else if (!ufshcd_is_ufs_dev_active(hba)) {
51dd905b 8106 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
57d104c1
SJ
8107 if (!ufshcd_is_link_active(hba)) {
8108 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8109 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8110 }
8111 }
8112}
8113
8114static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8115{
8116 int ret = 0;
8117
8118 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8119 !hba->dev_info.is_lu_power_on_wp) {
8120 ret = ufshcd_setup_vreg(hba, true);
8121 } else if (!ufshcd_is_ufs_dev_active(hba)) {
57d104c1
SJ
8122 if (!ret && !ufshcd_is_link_active(hba)) {
8123 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8124 if (ret)
8125 goto vcc_disable;
8126 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8127 if (ret)
8128 goto vccq_lpm;
8129 }
69d72ac8 8130 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
57d104c1
SJ
8131 }
8132 goto out;
8133
8134vccq_lpm:
8135 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8136vcc_disable:
8137 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8138out:
8139 return ret;
8140}
8141
8142static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8143{
8144 if (ufshcd_is_link_off(hba))
8145 ufshcd_setup_hba_vreg(hba, false);
8146}
8147
8148static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8149{
8150 if (ufshcd_is_link_off(hba))
8151 ufshcd_setup_hba_vreg(hba, true);
8152}
8153
7a3e97b0 8154/**
57d104c1 8155 * ufshcd_suspend - helper function for suspend operations
3b1d0580 8156 * @hba: per adapter instance
57d104c1
SJ
8157 * @pm_op: desired low power operation type
8158 *
8159 * This function will try to put the UFS device and link into low power
8160 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8161 * (System PM level).
8162 *
8163 * If this function is called during shutdown, it will make sure that
8164 * both the UFS device and the UFS link are powered off.
7a3e97b0 8165 *
57d104c1
SJ
8166 * NOTE: UFS device & link must be active before we enter in this function.
8167 *
8168 * Returns 0 for success and non-zero for failure
7a3e97b0 8169 */
57d104c1 8170static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 8171{
57d104c1
SJ
8172 int ret = 0;
8173 enum ufs_pm_level pm_lvl;
8174 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8175 enum uic_link_state req_link_state;
8176
8177 hba->pm_op_in_progress = 1;
8178 if (!ufshcd_is_shutdown_pm(pm_op)) {
8179 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8180 hba->rpm_lvl : hba->spm_lvl;
8181 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8182 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8183 } else {
8184 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8185 req_link_state = UIC_LINK_OFF_STATE;
8186 }
8187
7a3e97b0 8188 /*
57d104c1
SJ
8189 * If we can't transition into any of the low power modes
8190 * just gate the clocks.
7a3e97b0 8191 */
1ab27c9c
ST
8192 ufshcd_hold(hba, false);
8193 hba->clk_gating.is_suspended = true;
8194
401f1e44 8195 if (hba->clk_scaling.is_allowed) {
8196 cancel_work_sync(&hba->clk_scaling.suspend_work);
8197 cancel_work_sync(&hba->clk_scaling.resume_work);
8198 ufshcd_suspend_clkscaling(hba);
8199 }
d6fcf81a 8200
57d104c1
SJ
8201 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8202 req_link_state == UIC_LINK_ACTIVE_STATE) {
8203 goto disable_clks;
8204 }
7a3e97b0 8205
57d104c1
SJ
8206 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8207 (req_link_state == hba->uic_link_state))
d6fcf81a 8208 goto enable_gating;
57d104c1
SJ
8209
8210 /* UFS device & link must be active before we enter in this function */
8211 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8212 ret = -EINVAL;
d6fcf81a 8213 goto enable_gating;
57d104c1
SJ
8214 }
8215
8216 if (ufshcd_is_runtime_pm(pm_op)) {
374a246e
SJ
8217 if (ufshcd_can_autobkops_during_suspend(hba)) {
8218 /*
8219 * The device is idle with no requests in the queue,
8220 * allow background operations if bkops status shows
8221 * that performance might be impacted.
8222 */
8223 ret = ufshcd_urgent_bkops(hba);
8224 if (ret)
8225 goto enable_gating;
8226 } else {
8227 /* make sure that auto bkops is disabled */
8228 ufshcd_disable_auto_bkops(hba);
8229 }
3d17b9b5 8230 /*
51dd905b
SC
8231 * If device needs to do BKOP or WB buffer flush during
8232 * Hibern8, keep device power mode as "active power mode"
8233 * and VCC supply.
3d17b9b5 8234 */
51dd905b
SC
8235 hba->dev_info.b_rpm_dev_flush_capable =
8236 hba->auto_bkops_enabled ||
8237 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8238 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8239 ufshcd_is_auto_hibern8_enabled(hba))) &&
8240 ufshcd_wb_need_flush(hba));
8241 }
8242
8243 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8244 if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8245 !ufshcd_is_runtime_pm(pm_op)) {
8246 /* ensure that bkops is disabled */
8247 ufshcd_disable_auto_bkops(hba);
8248 }
57d104c1 8249
51dd905b
SC
8250 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8251 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8252 if (ret)
8253 goto enable_gating;
8254 }
57d104c1
SJ
8255 }
8256
2824ec9f 8257 flush_work(&hba->eeh_work);
57d104c1
SJ
8258 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8259 if (ret)
8260 goto set_dev_active;
8261
8262 ufshcd_vreg_set_lpm(hba);
8263
8264disable_clks:
8265 /*
8266 * Call vendor specific suspend callback. As these callbacks may access
8267 * vendor specific host controller register space call them before the
8268 * host clocks are ON.
8269 */
0263bcd0
YG
8270 ret = ufshcd_vops_suspend(hba, pm_op);
8271 if (ret)
8272 goto set_link_active;
dcb6cec5
SC
8273 /*
8274 * Disable the host irq as there won't be any host controller
8275 * transaction expected till resume.
8276 */
8277 ufshcd_disable_irq(hba);
57d104c1 8278
57d104c1
SJ
8279 if (!ufshcd_is_link_active(hba))
8280 ufshcd_setup_clocks(hba, false);
8281 else
8282 /* If link is active, device ref_clk can't be switched off */
8283 __ufshcd_setup_clocks(hba, false, true);
8284
1ab27c9c 8285 hba->clk_gating.state = CLKS_OFF;
7ff5ab47 8286 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
dcb6cec5 8287
57d104c1
SJ
8288 /* Put the host controller in low power mode if possible */
8289 ufshcd_hba_vreg_set_lpm(hba);
8290 goto out;
8291
57d104c1 8292set_link_active:
401f1e44 8293 if (hba->clk_scaling.is_allowed)
8294 ufshcd_resume_clkscaling(hba);
57d104c1
SJ
8295 ufshcd_vreg_set_hpm(hba);
8296 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8297 ufshcd_set_link_active(hba);
8298 else if (ufshcd_is_link_off(hba))
8299 ufshcd_host_reset_and_restore(hba);
8300set_dev_active:
8301 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8302 ufshcd_disable_auto_bkops(hba);
1ab27c9c 8303enable_gating:
401f1e44 8304 if (hba->clk_scaling.is_allowed)
8305 ufshcd_resume_clkscaling(hba);
1ab27c9c 8306 hba->clk_gating.is_suspended = false;
51dd905b 8307 hba->dev_info.b_rpm_dev_flush_capable = false;
1ab27c9c 8308 ufshcd_release(hba);
57d104c1 8309out:
51dd905b
SC
8310 if (hba->dev_info.b_rpm_dev_flush_capable) {
8311 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8312 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8313 }
8314
57d104c1 8315 hba->pm_op_in_progress = 0;
51dd905b 8316
8808b4e9
SC
8317 if (ret)
8318 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
57d104c1 8319 return ret;
7a3e97b0
SY
8320}
8321
8322/**
57d104c1 8323 * ufshcd_resume - helper function for resume operations
3b1d0580 8324 * @hba: per adapter instance
57d104c1 8325 * @pm_op: runtime PM or system PM
7a3e97b0 8326 *
57d104c1
SJ
8327 * This function basically brings the UFS device, UniPro link and controller
8328 * to active state.
8329 *
8330 * Returns 0 for success and non-zero for failure
7a3e97b0 8331 */
57d104c1 8332static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 8333{
57d104c1
SJ
8334 int ret;
8335 enum uic_link_state old_link_state;
8336
8337 hba->pm_op_in_progress = 1;
8338 old_link_state = hba->uic_link_state;
8339
8340 ufshcd_hba_vreg_set_hpm(hba);
8341 /* Make sure clocks are enabled before accessing controller */
8342 ret = ufshcd_setup_clocks(hba, true);
8343 if (ret)
8344 goto out;
8345
57d104c1 8346 /* enable the host irq as host controller would be active soon */
5231d38c 8347 ufshcd_enable_irq(hba);
57d104c1
SJ
8348
8349 ret = ufshcd_vreg_set_hpm(hba);
8350 if (ret)
8351 goto disable_irq_and_vops_clks;
8352
7a3e97b0 8353 /*
57d104c1
SJ
8354 * Call vendor specific resume callback. As these callbacks may access
8355 * vendor specific host controller register space call them when the
8356 * host clocks are ON.
7a3e97b0 8357 */
0263bcd0
YG
8358 ret = ufshcd_vops_resume(hba, pm_op);
8359 if (ret)
8360 goto disable_vreg;
57d104c1
SJ
8361
8362 if (ufshcd_is_link_hibern8(hba)) {
8363 ret = ufshcd_uic_hibern8_exit(hba);
8364 if (!ret)
8365 ufshcd_set_link_active(hba);
8366 else
8367 goto vendor_suspend;
8368 } else if (ufshcd_is_link_off(hba)) {
57d104c1 8369 /*
089f5b64
AD
8370 * A full initialization of the host and the device is
8371 * required since the link was put to off during suspend.
8372 */
8373 ret = ufshcd_reset_and_restore(hba);
8374 /*
8375 * ufshcd_reset_and_restore() should have already
57d104c1
SJ
8376 * set the link state as active
8377 */
8378 if (ret || !ufshcd_is_link_active(hba))
8379 goto vendor_suspend;
8380 }
8381
8382 if (!ufshcd_is_ufs_dev_active(hba)) {
8383 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8384 if (ret)
8385 goto set_old_link_state;
8386 }
8387
4e768e76 8388 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8389 ufshcd_enable_auto_bkops(hba);
8390 else
8391 /*
8392 * If BKOPs operations are urgently needed at this moment then
8393 * keep auto-bkops enabled or else disable it.
8394 */
8395 ufshcd_urgent_bkops(hba);
8396
1ab27c9c
ST
8397 hba->clk_gating.is_suspended = false;
8398
fcb0c4b0
ST
8399 if (hba->clk_scaling.is_allowed)
8400 ufshcd_resume_clkscaling(hba);
856b3483 8401
ad448378
AH
8402 /* Enable Auto-Hibernate if configured */
8403 ufshcd_auto_hibern8_enable(hba);
8404
51dd905b
SC
8405 if (hba->dev_info.b_rpm_dev_flush_capable) {
8406 hba->dev_info.b_rpm_dev_flush_capable = false;
8407 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8408 }
8409
71d848b8
CG
8410 /* Schedule clock gating in case of no access to UFS device yet */
8411 ufshcd_release(hba);
8412
57d104c1
SJ
8413 goto out;
8414
8415set_old_link_state:
8416 ufshcd_link_state_transition(hba, old_link_state, 0);
8417vendor_suspend:
0263bcd0 8418 ufshcd_vops_suspend(hba, pm_op);
57d104c1
SJ
8419disable_vreg:
8420 ufshcd_vreg_set_lpm(hba);
8421disable_irq_and_vops_clks:
8422 ufshcd_disable_irq(hba);
401f1e44 8423 if (hba->clk_scaling.is_allowed)
8424 ufshcd_suspend_clkscaling(hba);
57d104c1
SJ
8425 ufshcd_setup_clocks(hba, false);
8426out:
8427 hba->pm_op_in_progress = 0;
8808b4e9
SC
8428 if (ret)
8429 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
57d104c1
SJ
8430 return ret;
8431}
8432
8433/**
8434 * ufshcd_system_suspend - system suspend routine
8435 * @hba: per adapter instance
57d104c1
SJ
8436 *
8437 * Check the description of ufshcd_suspend() function for more details.
8438 *
8439 * Returns 0 for success and non-zero for failure
8440 */
8441int ufshcd_system_suspend(struct ufs_hba *hba)
8442{
8443 int ret = 0;
7ff5ab47 8444 ktime_t start = ktime_get();
57d104c1
SJ
8445
8446 if (!hba || !hba->is_powered)
233b594b 8447 return 0;
57d104c1 8448
0b257734 8449 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8450 hba->curr_dev_pwr_mode) &&
8451 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8452 hba->uic_link_state))
8453 goto out;
57d104c1 8454
0b257734 8455 if (pm_runtime_suspended(hba->dev)) {
57d104c1
SJ
8456 /*
8457 * UFS device and/or UFS link low power states during runtime
8458 * suspend seem to be different from what is expected during
8459 * system suspend. Hence runtime resume the device & link and
8460 * let the system suspend low power states take effect.
8461 * TODO: If resume takes longer, we might optimize it in the
8462 * future by not resuming everything if possible.
8463 */
8464 ret = ufshcd_runtime_resume(hba);
8465 if (ret)
8466 goto out;
8467 }
8468
8469 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8470out:
7ff5ab47 8471 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8472 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8473 hba->curr_dev_pwr_mode, hba->uic_link_state);
e785060e
DR
8474 if (!ret)
8475 hba->is_sys_suspended = true;
57d104c1
SJ
8476 return ret;
8477}
8478EXPORT_SYMBOL(ufshcd_system_suspend);
8479
8480/**
8481 * ufshcd_system_resume - system resume routine
8482 * @hba: per adapter instance
8483 *
8484 * Returns 0 for success and non-zero for failure
8485 */
7a3e97b0 8486
57d104c1
SJ
8487int ufshcd_system_resume(struct ufs_hba *hba)
8488{
7ff5ab47 8489 int ret = 0;
8490 ktime_t start = ktime_get();
8491
e3ce73d6
YG
8492 if (!hba)
8493 return -EINVAL;
8494
8495 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
57d104c1
SJ
8496 /*
8497 * Let the runtime resume take care of resuming
8498 * if runtime suspended.
8499 */
7ff5ab47 8500 goto out;
8501 else
8502 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8503out:
8504 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8505 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8506 hba->curr_dev_pwr_mode, hba->uic_link_state);
ce9e7bce
SC
8507 if (!ret)
8508 hba->is_sys_suspended = false;
7ff5ab47 8509 return ret;
7a3e97b0 8510}
57d104c1 8511EXPORT_SYMBOL(ufshcd_system_resume);
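/*
 * Minimal sketch (not part of this file) of how a bus glue driver typically
 * wires the two exported helpers above into its dev_pm_ops. The wrapper
 * names are illustrative only and assume the ufs_hba pointer was stored
 * with dev_set_drvdata() at probe time.
 */
static int __maybe_unused example_ufs_glue_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int __maybe_unused example_ufs_glue_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}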
3b1d0580 8512
57d104c1
SJ
8513/**
8514 * ufshcd_runtime_suspend - runtime suspend routine
8515 * @hba: per adapter instance
8516 *
8517 * Check the description of ufshcd_suspend() function for more details.
8518 *
8519 * Returns 0 for success and non-zero for failure
8520 */
66ec6d59
SRT
8521int ufshcd_runtime_suspend(struct ufs_hba *hba)
8522{
7ff5ab47 8523 int ret = 0;
8524 ktime_t start = ktime_get();
8525
e3ce73d6
YG
8526 if (!hba)
8527 return -EINVAL;
8528
8529 if (!hba->is_powered)
7ff5ab47 8530 goto out;
8531 else
8532 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8533out:
8534 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8535 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8536 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 8537 return ret;
66ec6d59
SRT
8538}
8539EXPORT_SYMBOL(ufshcd_runtime_suspend);
8540
57d104c1
SJ
8541/**
8542 * ufshcd_runtime_resume - runtime resume routine
8543 * @hba: per adapter instance
8544 *
8545 * This function basically brings the UFS device, UniPro link and controller
8546 * to active state. Following operations are done in this function:
8547 *
8548 * 1. Turn on all the controller related clocks
8549 * 2. Bring the UniPro link out of Hibernate state
8550 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8551 * to active state.
8552 * 4. If auto-bkops is enabled on the device, disable it.
8553 *
8554 * So the following would be the possible power state after this function returns
8555 * successfully:
8556 * S1: UFS device in Active state with VCC rail ON
8557 * UniPro link in Active state
8558 * All the UFS/UniPro controller clocks are ON
8559 *
8560 * Returns 0 for success and non-zero for failure
8561 */
66ec6d59
SRT
8562int ufshcd_runtime_resume(struct ufs_hba *hba)
8563{
7ff5ab47 8564 int ret = 0;
8565 ktime_t start = ktime_get();
8566
e3ce73d6
YG
8567 if (!hba)
8568 return -EINVAL;
8569
8570 if (!hba->is_powered)
7ff5ab47 8571 goto out;
8572 else
8573 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8574out:
8575 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8576 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8577 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 8578 return ret;
66ec6d59
SRT
8579}
8580EXPORT_SYMBOL(ufshcd_runtime_resume);
8581
8582int ufshcd_runtime_idle(struct ufs_hba *hba)
8583{
8584 return 0;
8585}
8586EXPORT_SYMBOL(ufshcd_runtime_idle);
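/*
 * Continuing the hypothetical glue-driver sketch above: the runtime PM hooks
 * are thin wrappers as well, and both sets are typically tied together in a
 * dev_pm_ops table. Names are illustrative only; real glue drivers follow the
 * same pattern with their own prefixes.
 */
static int my_ufs_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int my_ufs_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static int my_ufs_runtime_idle(struct device *dev)
{
	return ufshcd_runtime_idle(dev_get_drvdata(dev));
}

static const struct dev_pm_ops my_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_ufs_suspend, my_ufs_resume)
	SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend, my_ufs_runtime_resume,
			   my_ufs_runtime_idle)
};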
8587
57d104c1
SJ
8588/**
8589 * ufshcd_shutdown - shutdown routine
8590 * @hba: per adapter instance
8591 *
 8592 * This function powers off both the UFS device and the UFS link.
8593 *
8594 * Returns 0 always to allow force shutdown even in case of errors.
8595 */
8596int ufshcd_shutdown(struct ufs_hba *hba)
8597{
8598 int ret = 0;
8599
f51913ee
SC
8600 if (!hba->is_powered)
8601 goto out;
8602
57d104c1
SJ
8603 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8604 goto out;
8605
8606 if (pm_runtime_suspended(hba->dev)) {
8607 ret = ufshcd_runtime_resume(hba);
8608 if (ret)
8609 goto out;
8610 }
8611
8612 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8613out:
8614 if (ret)
8615 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8616 /* allow force shutdown even in case of errors */
8617 return 0;
8618}
8619EXPORT_SYMBOL(ufshcd_shutdown);
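/*
 * Sketch of a hypothetical glue driver's .shutdown handler (platform-bus
 * variant shown): it simply forwards to ufshcd_shutdown(), which always
 * returns 0 so that a forced power-off can proceed even if the suspend step
 * fails.
 */
static void my_ufs_shutdown(struct platform_device *pdev)
{
	ufshcd_shutdown(platform_get_drvdata(pdev));
}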
8620
7a3e97b0 8621/**
3b1d0580 8622 * ufshcd_remove - de-allocate SCSI host and host memory space
7a3e97b0 8623 * data structures
8aa29f19 8624 * @hba: per adapter instance
7a3e97b0 8625 */
3b1d0580 8626void ufshcd_remove(struct ufs_hba *hba)
7a3e97b0 8627{
df032bf2 8628 ufs_bsg_remove(hba);
cbb6813e 8629 ufs_sysfs_remove_nodes(hba->dev);
69a6c269
BVA
8630 blk_cleanup_queue(hba->tmf_queue);
8631 blk_mq_free_tag_set(&hba->tmf_tag_set);
7252a360 8632 blk_cleanup_queue(hba->cmd_queue);
cfdf9c91 8633 scsi_remove_host(hba->host);
7a3e97b0 8634 /* disable interrupts */
2fbd009b 8635 ufshcd_disable_intr(hba, hba->intr_mask);
5cac1095 8636 ufshcd_hba_stop(hba);
7a3e97b0 8637
eebcc196 8638 ufshcd_exit_clk_scaling(hba);
1ab27c9c 8639 ufshcd_exit_clk_gating(hba);
fcb0c4b0
ST
8640 if (ufshcd_is_clkscaling_supported(hba))
8641 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
aa497613 8642 ufshcd_hba_exit(hba);
3b1d0580
VH
8643}
8644EXPORT_SYMBOL_GPL(ufshcd_remove);
8645
47555a5c
YG
8646/**
8647 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8648 * @hba: pointer to Host Bus Adapter (HBA)
8649 */
8650void ufshcd_dealloc_host(struct ufs_hba *hba)
8651{
8652 scsi_host_put(hba->host);
8653}
8654EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
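/*
 * One reasonable teardown order in a hypothetical glue driver's .remove path:
 * wake the device, tear down the core with ufshcd_remove(), then drop the
 * SCSI host reference with ufshcd_dealloc_host(). Names are illustrative.
 */
static int my_ufs_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);	/* keep the device powered for teardown */
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
	return 0;
}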
8655
ca3d7bf9
AM
8656/**
8657 * ufshcd_set_dma_mask - Set dma mask based on the controller
8658 * addressing capability
8659 * @hba: per adapter instance
8660 *
8661 * Returns 0 for success, non-zero for failure
8662 */
8663static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8664{
8665 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8666 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8667 return 0;
8668 }
8669 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8670}
8671
7a3e97b0 8672/**
5c0c28a8 8673 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3b1d0580
VH
8674 * @dev: pointer to device handle
8675 * @hba_handle: driver private handle
7a3e97b0
SY
8676 * Returns 0 on success, non-zero value on failure
8677 */
5c0c28a8 8678int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7a3e97b0
SY
8679{
8680 struct Scsi_Host *host;
8681 struct ufs_hba *hba;
5c0c28a8 8682 int err = 0;
7a3e97b0 8683
3b1d0580
VH
8684 if (!dev) {
 8685 		dev_err(dev,
 8686 		"Invalid device reference: dev is NULL\n");
8687 err = -ENODEV;
7a3e97b0
SY
8688 goto out_error;
8689 }
8690
7a3e97b0
SY
8691 host = scsi_host_alloc(&ufshcd_driver_template,
8692 sizeof(struct ufs_hba));
8693 if (!host) {
3b1d0580 8694 dev_err(dev, "scsi_host_alloc failed\n");
7a3e97b0 8695 err = -ENOMEM;
3b1d0580 8696 goto out_error;
7a3e97b0
SY
8697 }
8698 hba = shost_priv(host);
7a3e97b0 8699 hba->host = host;
3b1d0580 8700 hba->dev = dev;
5c0c28a8 8701 *hba_handle = hba;
9e1e8a75 8702 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
5c0c28a8 8703
566ec9ad
SM
8704 INIT_LIST_HEAD(&hba->clk_list_head);
8705
5c0c28a8
SRT
8706out_error:
8707 return err;
8708}
8709EXPORT_SYMBOL(ufshcd_alloc_host);
8710
69a6c269
BVA
 8711/* This function exists only because blk_mq_alloc_tag_set() requires a queue_rq callback. */
8712static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8713 const struct blk_mq_queue_data *qd)
8714{
8715 WARN_ON_ONCE(true);
8716 return BLK_STS_NOTSUPP;
8717}
8718
8719static const struct blk_mq_ops ufshcd_tmf_ops = {
8720 .queue_rq = ufshcd_queue_tmf,
8721};
8722
5c0c28a8
SRT
8723/**
8724 * ufshcd_init - Driver initialization routine
8725 * @hba: per-adapter instance
8726 * @mmio_base: base register address
8727 * @irq: Interrupt line of device
8728 * Returns 0 on success, non-zero value on failure
8729 */
8730int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8731{
8732 int err;
8733 struct Scsi_Host *host = hba->host;
8734 struct device *dev = hba->dev;
8735
8736 if (!mmio_base) {
 8737 		dev_err(hba->dev,
 8738 		"Invalid memory reference: mmio_base is NULL\n");
8739 err = -ENODEV;
8740 goto out_error;
8741 }
8742
3b1d0580
VH
8743 hba->mmio_base = mmio_base;
8744 hba->irq = irq;
90b8491c 8745 hba->vps = &ufs_hba_vps;
7a3e97b0 8746
aa497613 8747 err = ufshcd_hba_init(hba);
5c0c28a8
SRT
8748 if (err)
8749 goto out_error;
8750
7a3e97b0
SY
8751 /* Read capabilities registers */
8752 ufshcd_hba_capabilities(hba);
8753
8754 /* Get UFS version supported by the controller */
8755 hba->ufs_version = ufshcd_get_ufs_version(hba);
8756
c01848c6
YG
8757 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8758 (hba->ufs_version != UFSHCI_VERSION_11) &&
8759 (hba->ufs_version != UFSHCI_VERSION_20) &&
8760 (hba->ufs_version != UFSHCI_VERSION_21))
8761 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8762 hba->ufs_version);
8763
2fbd009b
SJ
8764 /* Get Interrupt bit mask per version */
8765 hba->intr_mask = ufshcd_get_intr_mask(hba);
8766
ca3d7bf9
AM
8767 err = ufshcd_set_dma_mask(hba);
8768 if (err) {
8769 dev_err(hba->dev, "set dma mask failed\n");
8770 goto out_disable;
8771 }
8772
7a3e97b0
SY
8773 /* Allocate memory for host memory space */
8774 err = ufshcd_memory_alloc(hba);
8775 if (err) {
3b1d0580
VH
8776 dev_err(hba->dev, "Memory allocation failed\n");
8777 goto out_disable;
7a3e97b0
SY
8778 }
8779
8780 /* Configure LRB */
8781 ufshcd_host_memory_configure(hba);
8782
8783 host->can_queue = hba->nutrs;
8784 host->cmd_per_lun = hba->nutrs;
8785 host->max_id = UFSHCD_MAX_ID;
0ce147d4 8786 host->max_lun = UFS_MAX_LUNS;
7a3e97b0
SY
8787 host->max_channel = UFSHCD_MAX_CHANNEL;
8788 host->unique_id = host->host_no;
a851b2bd 8789 host->max_cmd_len = UFS_CDB_SIZE;
7a3e97b0 8790
7eb584db
DR
8791 hba->max_pwr_info.is_valid = false;
8792
7a3e97b0 8793 /* Initialize work queues */
e8e7f271 8794 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
66ec6d59 8795 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7a3e97b0 8796
6ccf44fe
SJ
8797 /* Initialize UIC command mutex */
8798 mutex_init(&hba->uic_cmd_mutex);
8799
5a0b0cb9
SRT
8800 /* Initialize mutex for device management commands */
8801 mutex_init(&hba->dev_cmd.lock);
8802
a3cd5ec5 8803 init_rwsem(&hba->clk_scaling_lock);
8804
1ab27c9c 8805 ufshcd_init_clk_gating(hba);
199ef13c 8806
eebcc196
VG
8807 ufshcd_init_clk_scaling(hba);
8808
199ef13c
YG
8809 /*
8810 * In order to avoid any spurious interrupt immediately after
8811 * registering UFS controller interrupt handler, clear any pending UFS
8812 * interrupt status and disable all the UFS interrupts.
8813 */
8814 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8815 REG_INTERRUPT_STATUS);
8816 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8817 /*
8818 * Make sure that UFS interrupts are disabled and any pending interrupt
8819 * status is cleared before registering UFS interrupt handler.
8820 */
8821 mb();
8822
7a3e97b0 8823 /* IRQ registration */
2953f850 8824 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7a3e97b0 8825 if (err) {
3b1d0580 8826 dev_err(hba->dev, "request irq failed\n");
1ab27c9c 8827 goto exit_gating;
57d104c1
SJ
8828 } else {
8829 hba->is_irq_enabled = true;
7a3e97b0
SY
8830 }
8831
3b1d0580 8832 err = scsi_add_host(host, hba->dev);
7a3e97b0 8833 if (err) {
3b1d0580 8834 dev_err(hba->dev, "scsi_add_host failed\n");
1ab27c9c 8835 goto exit_gating;
7a3e97b0
SY
8836 }
8837
7252a360
BVA
8838 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8839 if (IS_ERR(hba->cmd_queue)) {
8840 err = PTR_ERR(hba->cmd_queue);
8841 goto out_remove_scsi_host;
8842 }
8843
69a6c269
BVA
8844 hba->tmf_tag_set = (struct blk_mq_tag_set) {
8845 .nr_hw_queues = 1,
8846 .queue_depth = hba->nutmrs,
8847 .ops = &ufshcd_tmf_ops,
8848 .flags = BLK_MQ_F_NO_SCHED,
8849 };
8850 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8851 if (err < 0)
8852 goto free_cmd_queue;
8853 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8854 if (IS_ERR(hba->tmf_queue)) {
8855 err = PTR_ERR(hba->tmf_queue);
8856 goto free_tmf_tag_set;
8857 }
8858
d8d9f793
BA
8859 /* Reset the attached device */
8860 ufshcd_vops_device_reset(hba);
8861
6ccf44fe
SJ
8862 /* Host controller enable */
8863 err = ufshcd_hba_enable(hba);
7a3e97b0 8864 if (err) {
6ccf44fe 8865 dev_err(hba->dev, "Host controller enable failed\n");
66cc820f 8866 ufshcd_print_host_regs(hba);
6ba65588 8867 ufshcd_print_host_state(hba);
69a6c269 8868 goto free_tmf_queue;
7a3e97b0 8869 }
6ccf44fe 8870
0c8f7586 8871 /*
8872 * Set the default power management level for runtime and system PM.
8873 * Default power saving mode is to keep UFS link in Hibern8 state
8874 * and UFS device in sleep state.
8875 */
8876 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8877 UFS_SLEEP_PWR_MODE,
8878 UIC_LINK_HIBERN8_STATE);
8879 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8880 UFS_SLEEP_PWR_MODE,
8881 UIC_LINK_HIBERN8_STATE);
8882
51dd905b
SC
8883 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
8884 ufshcd_rpm_dev_flush_recheck_work);
8885
ad448378 8886	/* Set the default auto-hibernate idle timer value to 150 ms */
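	/* AHIT packs a timer value plus a scale field; scale 3 selects 1 ms units, so 150 * 1 ms = 150 ms */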
f571b377 8887 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
ad448378
AH
8888 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8889 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8890 }
8891
62694735
SRT
8892 /* Hold auto suspend until async scan completes */
8893 pm_runtime_get_sync(dev);
38135535 8894 atomic_set(&hba->scsi_block_reqs_cnt, 0);
57d104c1 8895 /*
7caf489b 8896	 * We are assuming that the device was not left in a sleep/power-down
 8897	 * state by the boot stage before the kernel started.
8898 * This assumption helps avoid doing link startup twice during
8899 * ufshcd_probe_hba().
57d104c1 8900 */
7caf489b 8901 ufshcd_set_ufs_dev_active(hba);
57d104c1 8902
6ccf44fe 8903 async_schedule(ufshcd_async_scan, hba);
cbb6813e 8904 ufs_sysfs_add_nodes(hba->dev);
6ccf44fe 8905
7a3e97b0
SY
8906 return 0;
8907
69a6c269
BVA
8908free_tmf_queue:
8909 blk_cleanup_queue(hba->tmf_queue);
8910free_tmf_tag_set:
8911 blk_mq_free_tag_set(&hba->tmf_tag_set);
7252a360
BVA
8912free_cmd_queue:
8913 blk_cleanup_queue(hba->cmd_queue);
3b1d0580
VH
8914out_remove_scsi_host:
8915 scsi_remove_host(hba->host);
1ab27c9c 8916exit_gating:
eebcc196 8917 ufshcd_exit_clk_scaling(hba);
1ab27c9c 8918 ufshcd_exit_clk_gating(hba);
3b1d0580 8919out_disable:
57d104c1 8920 hba->is_irq_enabled = false;
aa497613 8921 ufshcd_hba_exit(hba);
3b1d0580
VH
8922out_error:
8923 return err;
8924}
8925EXPORT_SYMBOL_GPL(ufshcd_init);
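/*
 * Probe-path sketch (hypothetical glue code, not part of this file): a bus
 * glue driver allocates the HBA with ufshcd_alloc_host(), maps the register
 * space, and then hands control to ufshcd_init(). Error handling is trimmed
 * to the essentials; my_ufs_probe and the resource layout are assumptions, and
 * a real driver would also include <linux/platform_device.h> and "ufshcd.h".
 */
static int my_ufs_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int irq, err;

	err = ufshcd_alloc_host(dev, &hba);
	if (err)
		return err;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto out_dealloc;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto out_dealloc;
	}

	platform_set_drvdata(pdev, hba);	/* so the PM callbacks above can find hba */
	err = ufshcd_init(hba, mmio_base, irq);
	if (err)
		goto out_dealloc;

	return 0;

out_dealloc:
	ufshcd_dealloc_host(hba);
	return err;
}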
8926
3b1d0580
VH
8927MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8928MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
e0eca63e 8929MODULE_DESCRIPTION("Generic UFS host controller driver Core");
7a3e97b0
SY
8930MODULE_LICENSE("GPL");
8931MODULE_VERSION(UFSHCD_DRIVER_VERSION);