scsi: ufs: core: mcq: Enable multi-circular queue
drivers/ufs/core/ufshcd.c (linux-block.git)
67351119 1// SPDX-License-Identifier: GPL-2.0-or-later
7a3e97b0 2/*
e0eca63e 3 * Universal Flash Storage Host controller driver Core
3b1d0580 4 * Copyright (C) 2011-2013 Samsung India Software Operations
52ac95fe 5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7a3e97b0 6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 */
11
6ccf44fe 12#include <linux/async.h>
856b3483 13#include <linux/devfreq.h>
b573d484 14#include <linux/nls.h>
54b879b7 15#include <linux/of.h>
ad448378 16#include <linux/bitfield.h>
fb276f77 17#include <linux/blk-pm.h>
c72e79c0 18#include <linux/blkdev.h>
19#include <linux/clk.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/module.h>
3f06f780 23#include <linux/regulator/consumer.h>
0f85e747 24#include <linux/sched/clock.h>
25#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_dbg.h>
b294ff3e 27#include <scsi/scsi_driver.h>
3f06f780 28#include <scsi/scsi_eh.h>
4bc26113 29#include "ufshcd-priv.h"
30#include <ufs/ufs_quirks.h>
31#include <ufs/unipro.h>
cbb6813e 32#include "ufs-sysfs.h"
b6cacaf2 33#include "ufs-debugfs.h"
c11a1ae9 34#include "ufs-fault-injection.h"
df032bf2 35#include "ufs_bsg.h"
df043c74 36#include "ufshcd-crypto.h"
f02bc975 37#include "ufshpb.h"
3d17b9b5 38#include <asm/unaligned.h>
7a3e97b0 39
7ff5ab47 40#define CREATE_TRACE_POINTS
41#include <trace/events/ufs.h>
42
43#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
44 UTP_TASK_REQ_COMPL |\
45 UFSHCD_ERROR_MASK)
46
47#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
48 UFSHCD_ERROR_MASK |\
49 MCQ_CQ_EVENT_STATUS)
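/*
 * Note the contrast with UFSHCD_ENABLE_INTRS above: in MCQ mode the legacy
 * doorbell completion interrupt (UTP_TRANSFER_REQ_COMPL) is not enabled;
 * completions are signalled through MCQ_CQ_EVENT_STATUS instead.
 */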
50
51
52/* UIC command timeout, unit: ms */
53#define UIC_CMD_TIMEOUT 500
2fbd009b 54
55/* NOP OUT retries waiting for NOP IN response */
56#define NOP_OUT_RETRIES 10
57/* Timeout after 50 msecs if NOP OUT hangs without response */
58#define NOP_OUT_TIMEOUT 50 /* msecs */
5a0b0cb9 59
68078d5c 60/* Query request retries */
10fe5888 61#define QUERY_REQ_RETRIES 3
68078d5c 62/* Query request timeout */
10fe5888 63#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
68078d5c 64
65/* Advanced RPMB request timeout */
66#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
67
68/* Task management command timeout */
69#define TM_CMD_TIMEOUT 100 /* msecs */
70
71/* maximum number of retries for a general UIC command */
72#define UFS_UIC_COMMAND_RETRIES 3
73
74/* maximum number of link-startup retries */
75#define DME_LINKSTARTUP_RETRIES 3
76
77/* maximum number of reset retries before giving up */
78#define MAX_HOST_RESET_RETRIES 5
79
80/* Maximum number of error handler retries before giving up */
81#define MAX_ERR_HANDLER_RETRIES 5
82
83/* Expose the flag value from utp_upiu_query.value */
84#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
85
86/* Interrupt aggregation default timeout, unit: 40us */
87#define INT_AGGR_DEF_TO 0x02
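/* With the 40us unit noted above, the default of 0x02 is roughly an 80us timeout. */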
88
89/* default delay of autosuspend: 2000 ms */
90#define RPM_AUTOSUSPEND_DELAY_MS 2000
91
92/* Default delay of RPM device flush delayed work */
93#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
94
95/* Default value of wait time before gating device ref clock */
96#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
97
98/* Polling time to wait for fDeviceInit */
99#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
100
101/* UFSHC 4.0 compliant HCs support this mode; refer to param_set_mcq_mode() */
102static bool use_mcq_mode = true;
103
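/*
 * MCQ is used only when the controller advertises support for it
 * (hba->mcq_sup) and the use_mcq_mode module parameter is left enabled.
 */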
104static bool is_mcq_supported(struct ufs_hba *hba)
105{
106 return hba->mcq_sup && use_mcq_mode;
107}
108
109static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
110{
111 int ret;
112
113 ret = param_set_bool(val, kp);
114 if (ret)
115 return ret;
116
117 return 0;
118}
119
120static const struct kernel_param_ops mcq_mode_ops = {
121 .set = param_set_mcq_mode,
122 .get = param_get_bool,
123};
124
125module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
126MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
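/*
 * Illustrative usage, not part of this change: with the 0644 permissions
 * above the knob should be writable as a module parameter, e.g. via
 * "ufshcd_core.use_mcq_mode=0" on the kernel command line or under
 * /sys/module/ufshcd_core/parameters/use_mcq_mode; the exact "ufshcd_core"
 * prefix is an assumption that depends on how the UFS core is built.
 */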
127
128#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
129 ({ \
130 int _ret; \
131 if (_on) \
132 _ret = ufshcd_enable_vreg(_dev, _vreg); \
133 else \
134 _ret = ufshcd_disable_vreg(_dev, _vreg); \
135 _ret; \
136 })
137
138#define ufshcd_hex_dump(prefix_str, buf, len) do { \
139 size_t __len = (len); \
140 print_hex_dump(KERN_ERR, prefix_str, \
141 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
142 16, 4, buf, __len, false); \
143} while (0)
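/*
 * ufshcd_hex_dump() prints the buffer at KERN_ERR severity, 16 bytes per
 * row in 4-byte groups; offsets are only prefixed for dumps longer than
 * four bytes.
 */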
144
145int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
146 const char *prefix)
147{
148 u32 *regs;
149 size_t pos;
150
151 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
152 return -EINVAL;
ba80917d 153
cddaebaf 154 regs = kzalloc(len, GFP_ATOMIC);
155 if (!regs)
156 return -ENOMEM;
157
158 for (pos = 0; pos < len; pos += 4) {
159 if (offset == 0 &&
160 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
161 pos <= REG_UIC_ERROR_CODE_DME)
162 continue;
d6724756 163 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
ef600310 164 }
d6724756 165
166 ufshcd_hex_dump(prefix, regs, len);
167 kfree(regs);
168
169 return 0;
170}
171EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
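/*
 * Typical call, as used later in this file when dumping the whole register
 * space into the event history:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 *
 * Offset and length must be 4-byte aligned, and when dumping from offset 0
 * the UIC error-code registers are skipped, presumably because reading them
 * has side effects on the recorded error state.
 */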
66cc820f 172
173enum {
174 UFSHCD_MAX_CHANNEL = 0,
175 UFSHCD_MAX_ID = 1,
176 UFSHCD_NUM_RESERVED = 1,
177 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
178 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
179};
180
181static const char *const ufshcd_state_name[] = {
182 [UFSHCD_STATE_RESET] = "reset",
183 [UFSHCD_STATE_OPERATIONAL] = "operational",
184 [UFSHCD_STATE_ERROR] = "error",
185 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
186 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
187};
188
189/* UFSHCD error handling flags */
190enum {
191 UFSHCD_EH_IN_PROGRESS = (1 << 0),
192};
193
194/* UFSHCD UIC layer error flags */
195enum {
196 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
197 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
198 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
199 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
200 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
201 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
2355b66e 202 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
203};
204
3441da7d 205#define ufshcd_set_eh_in_progress(h) \
9c490d2d 206 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
3441da7d 207#define ufshcd_eh_in_progress(h) \
9c490d2d 208 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
3441da7d 209#define ufshcd_clear_eh_in_progress(h) \
9c490d2d 210 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
3441da7d 211
35d11ec2 212const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
213 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
214 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
215 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
216 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
217 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
218 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
219 /*
220 * For DeepSleep, the link is first put in hibern8 and then off.
221 * Leaving the link in hibern8 is not supported.
222 */
e2ac7ab2 223 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
224};
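/*
 * Each entry above maps a PM level to a (device power mode, UIC link state)
 * pair: for example UFS_PM_LVL_3 keeps the device in Sleep with the link in
 * hibern8, while UFS_PM_LVL_5 powers the device down and turns the link off.
 */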
225
226static inline enum ufs_dev_pwr_mode
227ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
228{
229 return ufs_pm_lvl_states[lvl].dev_state;
230}
231
232static inline enum uic_link_state
233ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
234{
235 return ufs_pm_lvl_states[lvl].link_state;
236}
237
0c8f7586 238static inline enum ufs_pm_level
239ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
240 enum uic_link_state link_state)
241{
242 enum ufs_pm_level lvl;
243
244 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
245 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
246 (ufs_pm_lvl_states[lvl].link_state == link_state))
247 return lvl;
248 }
249
250 /* if no match found, return the level 0 */
251 return UFS_PM_LVL_0;
252}
253
aead21f3 254static const struct ufs_dev_quirk ufs_fixups[] = {
56d4a186 255 /* UFS cards deviations table */
256 { .wmanufacturerid = UFS_VENDOR_MICRON,
257 .model = UFS_ANY_MODEL,
258 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
259 UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
260 { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
261 .model = UFS_ANY_MODEL,
262 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
263 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
264 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
265 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
266 .model = UFS_ANY_MODEL,
267 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
268 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
269 .model = "hB8aL1" /*H28U62301AMR*/,
270 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
271 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
272 .model = UFS_ANY_MODEL,
273 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
274 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
275 .model = "THGLF2G9C8KBADG",
276 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
277 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
278 .model = "THGLF2G9D8KBADG",
279 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
280 {}
281};
282
9333d775 283static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
3441da7d 284static void ufshcd_async_scan(void *data, async_cookie_t cookie);
e8e7f271 285static int ufshcd_reset_and_restore(struct ufs_hba *hba);
e7d38257 286static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
e8e7f271 287static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
1d337ec2 288static void ufshcd_hba_exit(struct ufs_hba *hba);
68444d73 289static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
1ab27c9c 290static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
cad2e03d 291static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
57d104c1 292static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
293static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
294static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
401f1e44 295static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
fcb0c4b0 296static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
57d104c1 297static irqreturn_t ufshcd_intr(int irq, void *__hba);
298static int ufshcd_change_power_mode(struct ufs_hba *hba,
299 struct ufs_pa_layer_attr *pwr_mode);
300static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
301static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
302static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
303 struct ufs_vreg *vreg);
307348f6 304static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
305static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
306 bool enable);
307static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
308static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
3d17b9b5 309
5231d38c 310static inline void ufshcd_enable_irq(struct ufs_hba *hba)
57d104c1 311{
57d104c1 312 if (!hba->is_irq_enabled) {
5231d38c 313 enable_irq(hba->irq);
314 hba->is_irq_enabled = true;
315 }
316}
317
318static inline void ufshcd_disable_irq(struct ufs_hba *hba)
319{
320 if (hba->is_irq_enabled) {
5231d38c 321 disable_irq(hba->irq);
322 hba->is_irq_enabled = false;
323 }
324}
3441da7d 325
4450a165 326static void ufshcd_configure_wb(struct ufs_hba *hba)
3d17b9b5 327{
79e3520f 328 if (!ufshcd_is_wb_allowed(hba))
329 return;
330
331 ufshcd_wb_toggle(hba, true);
332
4450a165 333 ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
334
335 if (ufshcd_is_wb_buf_flush_allowed(hba))
4450a165 336 ufshcd_wb_toggle_buf_flush(hba, true);
337}
338
339static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
340{
341 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
342 scsi_unblock_requests(hba->host);
343}
344
345static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
346{
347 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
348 scsi_block_requests(hba->host);
349}
350
6667e6d9 351static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
28fa68fc 352 enum ufs_trace_str_t str_t)
353{
354 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
89ac2c3b 355 struct utp_upiu_header *header;
6667e6d9 356
357 if (!trace_ufshcd_upiu_enabled())
358 return;
359
360 if (str_t == UFS_CMD_SEND)
361 header = &rq->header;
362 else
363 header = &hba->lrb[tag].ucd_rsp_ptr->header;
364
365 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
867fdc2d 366 UFS_TSF_CDB);
367}
368
369static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
370 enum ufs_trace_str_t str_t,
371 struct utp_upiu_req *rq_rsp)
6667e6d9 372{
373 if (!trace_ufshcd_upiu_enabled())
374 return;
6667e6d9 375
be20b51c 376 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
867fdc2d 377 &rq_rsp->qr, UFS_TSF_OSF);
378}
379
380static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
28fa68fc 381 enum ufs_trace_str_t str_t)
6667e6d9 382{
e8c2da7e 383 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
6667e6d9 384
385 if (!trace_ufshcd_upiu_enabled())
386 return;
387
0ed083e9 388 if (str_t == UFS_TM_SEND)
389 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
390 &descp->upiu_req.req_header,
391 &descp->upiu_req.input_param1,
392 UFS_TSF_TM_INPUT);
0ed083e9 393 else
394 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
395 &descp->upiu_rsp.rsp_header,
396 &descp->upiu_rsp.output_param1,
397 UFS_TSF_TM_OUTPUT);
398}
399
aa5c6979 400static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
35d11ec2 401 const struct uic_command *ucmd,
28fa68fc 402 enum ufs_trace_str_t str_t)
403{
404 u32 cmd;
405
406 if (!trace_ufshcd_uic_command_enabled())
407 return;
408
28fa68fc 409 if (str_t == UFS_CMD_SEND)
410 cmd = ucmd->command;
411 else
412 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
413
28fa68fc 414 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
415 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
416 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
417 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
418}
419
420static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
421 enum ufs_trace_str_t str_t)
1a07f2d9 422{
2bd3b6b7 423 u64 lba = 0;
69a314d6 424 u8 opcode = 0, group_id = 0;
1a07f2d9 425 u32 intr, doorbell;
e7c3b379 426 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
e4d2add7 427 struct scsi_cmnd *cmd = lrbp->cmd;
3f2c1002 428 struct request *rq = scsi_cmd_to_rq(cmd);
429 int transfer_len = -1;
430
431 if (!cmd)
432 return;
433
434 /* trace UPIU also */
435 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
436 if (!trace_ufshcd_command_enabled())
437 return;
438
44b5de36 439 opcode = cmd->cmnd[0];
440
441 if (opcode == READ_10 || opcode == WRITE_10) {
442 /*
443 * Currently we only fully trace read(10) and write(10) commands
444 */
445 transfer_len =
446 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
2bd3b6b7 447 lba = scsi_get_lba(cmd);
448 if (opcode == WRITE_10)
449 group_id = lrbp->cmd->cmnd[6];
450 } else if (opcode == UNMAP) {
451 /*
452 * The number of Bytes to be unmapped beginning with the lba.
453 */
3f2c1002 454 transfer_len = blk_rq_bytes(rq);
2bd3b6b7 455 lba = scsi_get_lba(cmd);
456 }
457
458 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
459 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
28fa68fc 460 trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
69a314d6 461 doorbell, transfer_len, intr, lba, opcode, group_id);
462}
463
464static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
465{
466 struct ufs_clk_info *clki;
467 struct list_head *head = &hba->clk_list_head;
468
566ec9ad 469 if (list_empty(head))
470 return;
471
472 list_for_each_entry(clki, head, list) {
473 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
474 clki->max_freq)
475 dev_err(hba->dev, "clk: %s, rate: %u\n",
476 clki->name, clki->curr_freq);
477 }
478}
479
e965e5e0 480static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
35d11ec2 481 const char *err_name)
482{
483 int i;
27752647 484 bool found = false;
35d11ec2 485 const struct ufs_event_hist *e;
ff8e20c6 486
487 if (id >= UFS_EVT_CNT)
488 return;
ff8e20c6 489
e965e5e0 490 e = &hba->ufs_stats.event[id];
ff8e20c6 491
492 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
493 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
494
495 if (e->tstamp[p] == 0)
ff8e20c6 496 continue;
c5397f13 497 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
0f85e747 498 e->val[p], div_u64(e->tstamp[p], 1000));
27752647 499 found = true;
ff8e20c6 500 }
501
502 if (!found)
fd1fb4d5 503 dev_err(hba->dev, "No record of %s\n", err_name);
504 else
505 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
506}
507
e965e5e0 508static void ufshcd_print_evt_hist(struct ufs_hba *hba)
66cc820f 509{
ba80917d 510 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
ff8e20c6 511
512 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
513 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
514 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
515 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
516 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
517 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
518 "auto_hibern8_err");
519 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
520 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
521 "link_startup_fail");
522 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
523 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
524 "suspend_fail");
525 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
526 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
527 "wlun suspend_fail");
528 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
529 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
530 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
ff8e20c6 531
7c486d91 532 ufshcd_vops_dbg_register_dump(hba);
533}
534
535static
536void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
537{
35d11ec2 538 const struct ufshcd_lrb *lrbp;
7fabb77b 539 int prdt_length;
540 int tag;
541
542 for_each_set_bit(tag, &bitmap, hba->nutrs) {
543 lrbp = &hba->lrb[tag];
544
ff8e20c6 545 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
0f85e747 546 tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
09017188 547 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
0f85e747 548 tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
549 dev_err(hba->dev,
550 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
551 tag, (u64)lrbp->utrd_dma_addr);
552
553 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
554 sizeof(struct utp_transfer_req_desc));
555 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
556 (u64)lrbp->ucd_req_dma_addr);
557 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
558 sizeof(struct utp_upiu_req));
559 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
560 (u64)lrbp->ucd_rsp_dma_addr);
561 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
562 sizeof(struct utp_upiu_rsp));
66cc820f 563
564 prdt_length = le16_to_cpu(
565 lrbp->utr_descriptor_ptr->prd_table_length);
cc770ce3 566 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
ada1e653 567 prdt_length /= ufshcd_sg_entry_size(hba);
cc770ce3 568
569 dev_err(hba->dev,
570 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
571 tag, prdt_length,
572 (u64)lrbp->ucd_prdt_dma_addr);
573
574 if (pr_prdt)
66cc820f 575 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
ada1e653 576 ufshcd_sg_entry_size(hba) * prdt_length);
577 }
578}
579
580static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
581{
582 int tag;
583
584 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
585 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
586
66cc820f 587 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
391e388f 588 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
589 }
590}
591
592static void ufshcd_print_host_state(struct ufs_hba *hba)
593{
35d11ec2 594 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
3f8af604 595
6ba65588 596 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
597 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
598 hba->outstanding_reqs, hba->outstanding_tasks);
599 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
600 hba->saved_err, hba->saved_uic_err);
601 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
602 hba->curr_dev_pwr_mode, hba->uic_link_state);
603 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
604 hba->pm_op_in_progress, hba->is_sys_suspended);
605 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
606 hba->auto_bkops_enabled, hba->host->host_self_blocked);
607 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
608 dev_err(hba->dev,
609 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
0f85e747 610 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
611 hba->ufs_stats.hibern8_exit_cnt);
612 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
0f85e747 613 div_u64(hba->ufs_stats.last_intr_ts, 1000),
3f8af604 614 hba->ufs_stats.last_intr_status);
615 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
616 hba->eh_flags, hba->req_abort_count);
617 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
618 hba->ufs_version, hba->capabilities, hba->caps);
619 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
620 hba->dev_quirks);
621 if (sdev_ufs)
622 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
623 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
624
625 ufshcd_print_clk_freqs(hba);
626}
627
628/**
629 * ufshcd_print_pwr_info - print power params as saved in hba
630 * power info
631 * @hba: per-adapter instance
632 */
633static void ufshcd_print_pwr_info(struct ufs_hba *hba)
634{
635 static const char * const names[] = {
636 "INVALID MODE",
637 "FAST MODE",
638 "SLOW_MODE",
639 "INVALID MODE",
640 "FASTAUTO_MODE",
641 "SLOWAUTO_MODE",
642 "INVALID MODE",
643 };
644
645 /*
646 * Using dev_dbg to avoid messages during runtime PM to avoid
647 * never-ending cycles of messages written back to storage by user space
648 * causing runtime resume, causing more messages and so on.
649 */
650 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
651 __func__,
652 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
653 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
654 names[hba->pwr_info.pwr_rx],
655 names[hba->pwr_info.pwr_tx],
656 hba->pwr_info.hs_rate);
657}
658
659static void ufshcd_device_reset(struct ufs_hba *hba)
660{
661 int err;
662
663 err = ufshcd_vops_device_reset(hba);
664
665 if (!err) {
666 ufshcd_set_ufs_dev_active(hba);
667 if (ufshcd_is_wb_allowed(hba)) {
668 hba->dev_info.wb_enabled = false;
669 hba->dev_info.wb_buf_flush_enabled = false;
670 }
671 }
672 if (err != -EOPNOTSUPP)
673 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
674}
675
676void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
677{
678 if (!us)
679 return;
680
681 if (us < 10)
682 udelay(us);
683 else
684 usleep_range(us, us + tolerance);
685}
686EXPORT_SYMBOL_GPL(ufshcd_delay_us);
687
5cac1095 688/**
5a0b0cb9 689 * ufshcd_wait_for_register - wait for register value to change
690 * @hba: per-adapter interface
691 * @reg: mmio register offset
692 * @mask: mask to apply to the read register value
693 * @val: value to wait for
694 * @interval_us: polling interval in microseconds
695 * @timeout_ms: timeout in milliseconds
5a0b0cb9 696 *
697 * Return:
698 * -ETIMEDOUT on error, zero on success.
5a0b0cb9 699 */
59a57bb7 700static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
596585a2 701 u32 val, unsigned long interval_us,
5cac1095 702 unsigned long timeout_ms)
703{
704 int err = 0;
705 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
706
707 /* ignore bits that we don't intend to wait on */
708 val = val & mask;
709
710 while ((ufshcd_readl(hba, reg) & mask) != val) {
5cac1095 711 usleep_range(interval_us, interval_us + 50);
712 if (time_after(jiffies, timeout)) {
713 if ((ufshcd_readl(hba, reg) & mask) != val)
714 err = -ETIMEDOUT;
715 break;
716 }
717 }
718
719 return err;
720}
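/*
 * Hypothetical caller, shown for illustration only: poll until the
 * controller-enable bit clears, checking every 10us for up to 1ms.
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *				       CONTROLLER_ENABLE, 0, 10, 1);
 */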
721
722/**
723 * ufshcd_get_intr_mask - Get the interrupt bit mask
8aa29f19 724 * @hba: Pointer to adapter instance
2fbd009b
SJ
725 *
726 * Returns interrupt bit mask per version
727 */
728static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
729{
730 if (hba->ufs_version == ufshci_version(1, 0))
731 return INTERRUPT_MASK_ALL_VER_10;
732 if (hba->ufs_version <= ufshci_version(2, 0))
733 return INTERRUPT_MASK_ALL_VER_11;
c01848c6 734
51428818 735 return INTERRUPT_MASK_ALL_VER_21;
736}
737
738/**
739 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
8aa29f19 740 * @hba: Pointer to adapter instance
741 *
742 * Returns UFSHCI version supported by the controller
743 */
744static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
745{
746 u32 ufshci_ver;
747
0263bcd0 748 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
749 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
750 else
751 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
9949e702 752
753 /*
754 * UFSHCI v1.x uses a different version scheme, in order
755 * to allow the use of comparisons with the ufshci_version
756 * function, we convert it to the same scheme as ufs 2.0+.
757 */
758 if (ufshci_ver & 0x00010000)
759 return ufshci_version(1, ufshci_ver & 0x00000100);
760
761 return ufshci_ver;
762}
763
764/**
765 * ufshcd_is_device_present - Check if any device is connected to
766 * the host controller
5c0c28a8 767 * @hba: pointer to adapter instance
7a3e97b0 768 *
c9e6010b 769 * Returns true if device present, false if no device detected
7a3e97b0 770 */
c9e6010b 771static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
7a3e97b0 772{
51d1628f 773 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
774}
775
776/**
777 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
8aa29f19 778 * @lrbp: pointer to local command reference block
c30d8d01 779 * @cqe: pointer to the completion queue entry
780 *
781 * This function is used to get the OCS field from UTRD
782 * Returns the OCS field in the UTRD
783 */
784static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
785 struct cq_entry *cqe)
7a3e97b0 786{
787 if (cqe)
788 return le32_to_cpu(cqe->status) & MASK_OCS;
789
e8c8e82a 790 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
791}
792
7a3e97b0 793/**
d1a76446 794 * ufshcd_utrl_clear() - Clear requests from the controller request list.
7a3e97b0 795 * @hba: per adapter instance
d1a76446 796 * @mask: mask with one bit set for each request to be cleared
7a3e97b0 797 */
d1a76446 798static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
7a3e97b0 799{
87183841 800 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
801 mask = ~mask;
802 /*
803 * From the UFSHCI specification: "UTP Transfer Request List CLear
804 * Register (UTRLCLR): This field is bit significant. Each bit
805 * corresponds to a slot in the UTP Transfer Request List, where bit 0
806 * corresponds to request slot 0. A bit in this field is set to ‘0’
807 * by host software to indicate to the host controller that a transfer
808 * request slot is cleared. The host controller
809 * shall free up any resources associated to the request slot
810 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
811 * host software indicates no change to request slots by setting the
812 * associated bits in this field to ‘1’. Bits in this field shall only
813 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
814 */
815 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
816}
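/*
 * Worked example: to clear request slot 3 the caller passes mask = BIT(3),
 * so ~mask is written with every bit set ("no change") except bit 3, which
 * is 0 ("clear this slot"). Controllers with UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR
 * expect the opposite polarity, hence the extra inversion above.
 */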
817
818/**
4652b58f 819 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
820 * @hba: per adapter instance
821 * @pos: position of the bit to be cleared
822 */
823static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
824{
825 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
826 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
827 else
828 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
829}
830
831/**
832 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
833 * @reg: Register value of host controller status
834 *
835 * Returns integer, 0 on Success and positive value if failed
836 */
837static inline int ufshcd_get_lists_status(u32 reg)
838{
6cf16115 839 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
840}
841
842/**
843 * ufshcd_get_uic_cmd_result - Get the UIC command result
844 * @hba: Pointer to adapter instance
845 *
846 * This function gets the result of UIC command completion
847 * Returns 0 on success, non zero value on error
848 */
849static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
850{
b873a275 851 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
852 MASK_UIC_COMMAND_RESULT;
853}
854
855/**
856 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
857 * @hba: Pointer to adapter instance
858 *
859 * This function gets UIC command argument3
860 * Returns 0 on success, non zero value on error
861 */
862static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
863{
864 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
865}
866
7a3e97b0 867/**
5a0b0cb9 868 * ufshcd_get_req_rsp - returns the TR response transaction type
7a3e97b0 869 * @ucd_rsp_ptr: pointer to response UPIU
870 */
871static inline int
5a0b0cb9 872ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
7a3e97b0 873{
5a0b0cb9 874 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
875}
876
877/**
878 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
879 * @ucd_rsp_ptr: pointer to response UPIU
880 *
881 * This function gets the response status and scsi_status from response UPIU
882 * Returns the response result code.
883 */
884static inline int
885ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
886{
887 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
888}
889
890/*
891 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
892 * from response UPIU
893 * @ucd_rsp_ptr: pointer to response UPIU
894 *
895 * Return the data segment length.
896 */
897static inline unsigned int
898ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
899{
900 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
901 MASK_RSP_UPIU_DATA_SEG_LEN;
902}
903
904/**
905 * ufshcd_is_exception_event - Check if the device raised an exception event
906 * @ucd_rsp_ptr: pointer to response UPIU
907 *
908 * The function checks if the device raised an exception event indicated in
909 * the Device Information field of response UPIU.
910 *
911 * Returns true if exception is raised, false otherwise.
912 */
913static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
914{
915 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
51d1628f 916 MASK_RSP_EXCEPTION_EVENT;
917}
918
7a3e97b0 919/**
7d568652 920 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
7a3e97b0 921 * @hba: per adapter instance
922 */
923static inline void
7d568652 924ufshcd_reset_intr_aggr(struct ufs_hba *hba)
7a3e97b0 925{
926 ufshcd_writel(hba, INT_AGGR_ENABLE |
927 INT_AGGR_COUNTER_AND_TIMER_RESET,
928 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
929}
930
931/**
932 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
933 * @hba: per adapter instance
934 * @cnt: Interrupt aggregation counter threshold
935 * @tmout: Interrupt aggregation timeout value
936 */
937static inline void
938ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
939{
940 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
941 INT_AGGR_COUNTER_THLD_VAL(cnt) |
942 INT_AGGR_TIMEOUT_VAL(tmout),
943 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
944}
945
946/**
947 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
948 * @hba: per adapter instance
949 */
950static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
951{
952 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
953}
954
955/**
956 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
957 * When the run-stop registers are set to 1, they indicate to the
958 * host controller that it can process requests.
959 * @hba: per adapter instance
960 */
961static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
962{
963 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
964 REG_UTP_TASK_REQ_LIST_RUN_STOP);
965 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
966 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
967}
968
969/**
970 * ufshcd_hba_start - Start controller initialization sequence
971 * @hba: per adapter instance
972 */
973static inline void ufshcd_hba_start(struct ufs_hba *hba)
974{
975 u32 val = CONTROLLER_ENABLE;
976
977 if (ufshcd_crypto_enable(hba))
978 val |= CRYPTO_GENERAL_ENABLE;
979
980 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
981}
982
983/**
984 * ufshcd_is_hba_active - Get controller state
985 * @hba: per adapter instance
986 *
acbbfe48 987 * Returns true if and only if the controller is active.
7a3e97b0 988 */
c9e6010b 989static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
7a3e97b0 990{
acbbfe48 991 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
992}
993
994u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
995{
996 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
51428818 997 if (hba->ufs_version <= ufshci_version(1, 1))
998 return UFS_UNIPRO_VER_1_41;
999 else
1000 return UFS_UNIPRO_VER_1_6;
1001}
1002EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1003
1004static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1005{
1006 /*
1007 * If both host and device support UniPro ver1.6 or later, PA layer
1008 * parameters tuning happens during link startup itself.
1009 *
1010 * We can manually tune PA layer parameters if either host or device
1011 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1012 * logic simple, we will only do manual tuning if local unipro version
1013 * doesn't support ver1.6 or later.
1014 */
a858af9a 1015 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
1016}
1017
1018/**
1019 * ufshcd_set_clk_freq - set UFS controller clock frequencies
1020 * @hba: per adapter instance
1021 * @scale_up: If True, set max possible frequency otherwise set low frequency
1022 *
1023 * Returns 0 if successful
1024 * Returns < 0 for any other errors
1025 */
1026static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
a3cd5ec5 1027{
1028 int ret = 0;
1029 struct ufs_clk_info *clki;
1030 struct list_head *head = &hba->clk_list_head;
a3cd5ec5 1031
566ec9ad 1032 if (list_empty(head))
a3cd5ec5 1033 goto out;
1034
a3cd5ec5 1035 list_for_each_entry(clki, head, list) {
1036 if (!IS_ERR_OR_NULL(clki->clk)) {
1037 if (scale_up && clki->max_freq) {
1038 if (clki->curr_freq == clki->max_freq)
1039 continue;
1040
a3cd5ec5 1041 ret = clk_set_rate(clki->clk, clki->max_freq);
1042 if (ret) {
1043 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1044 __func__, clki->name,
1045 clki->max_freq, ret);
1046 break;
1047 }
1048 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1049 "scaled up", clki->name,
1050 clki->curr_freq,
1051 clki->max_freq);
1052
1053 clki->curr_freq = clki->max_freq;
1054
1055 } else if (!scale_up && clki->min_freq) {
1056 if (clki->curr_freq == clki->min_freq)
1057 continue;
1058
a3cd5ec5 1059 ret = clk_set_rate(clki->clk, clki->min_freq);
1060 if (ret) {
1061 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1062 __func__, clki->name,
1063 clki->min_freq, ret);
1064 break;
1065 }
1066 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1067 "scaled down", clki->name,
1068 clki->curr_freq,
1069 clki->min_freq);
1070 clki->curr_freq = clki->min_freq;
1071 }
1072 }
1073 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1074 clki->name, clk_get_rate(clki->clk));
1075 }
1076
1077out:
1078 return ret;
1079}
1080
1081/**
1082 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1083 * @hba: per adapter instance
1084 * @scale_up: True if scaling up and false if scaling down
1085 *
1086 * Returns 0 if successful
1087 * Returns < 0 for any other errors
1088 */
1089static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1090{
1091 int ret = 0;
1092 ktime_t start = ktime_get();
1093
1094 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1095 if (ret)
1096 goto out;
1097
1098 ret = ufshcd_set_clk_freq(hba, scale_up);
1099 if (ret)
1100 goto out;
1101
a3cd5ec5 1102 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1103 if (ret)
1104 ufshcd_set_clk_freq(hba, !scale_up);
a3cd5ec5 1105
1106out:
394b949f 1107 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
a3cd5ec5 1108 (scale_up ? "up" : "down"),
1109 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1110 return ret;
1111}
1112
1113/**
1114 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1115 * @hba: per adapter instance
1116 * @scale_up: True if scaling up and false if scaling down
1117 *
1118 * Returns true if scaling is required, false otherwise.
1119 */
1120static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1121 bool scale_up)
1122{
1123 struct ufs_clk_info *clki;
1124 struct list_head *head = &hba->clk_list_head;
1125
566ec9ad 1126 if (list_empty(head))
a3cd5ec5 1127 return false;
1128
1129 list_for_each_entry(clki, head, list) {
1130 if (!IS_ERR_OR_NULL(clki->clk)) {
1131 if (scale_up && clki->max_freq) {
1132 if (clki->curr_freq == clki->max_freq)
1133 continue;
1134 return true;
1135 } else if (!scale_up && clki->min_freq) {
1136 if (clki->curr_freq == clki->min_freq)
1137 continue;
1138 return true;
1139 }
1140 }
1141 }
1142
1143 return false;
1144}
1145
1146/*
1147 * Determine the number of pending commands by counting the bits in the SCSI
1148 * device budget maps. This approach has been selected because a bit is set in
1149 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1150 * flag. The host_self_blocked flag can be modified by calling
1151 * scsi_block_requests() or scsi_unblock_requests().
1152 */
1153static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1154{
35d11ec2 1155 const struct scsi_device *sdev;
8d077ede
BVA
1156 u32 pending = 0;
1157
1158 lockdep_assert_held(hba->host->host_lock);
1159 __shost_for_each_device(sdev, hba->host)
1160 pending += sbitmap_weight(&sdev->budget_map);
1161
1162 return pending;
1163}
1164
1165/*
1166 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1167 * has expired.
1168 *
1169 * Return: 0 upon success; -EBUSY upon timeout.
1170 */
a3cd5ec5 1171static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1172 u64 wait_timeout_us)
1173{
1174 unsigned long flags;
1175 int ret = 0;
1176 u32 tm_doorbell;
8d077ede 1177 u32 tr_pending;
a3cd5ec5 1178 bool timeout = false, do_last_check = false;
1179 ktime_t start;
1180
1181 ufshcd_hold(hba, false);
1182 spin_lock_irqsave(hba->host->host_lock, flags);
1183 /*
1184 * Wait for all the outstanding tasks/transfer requests.
1185 * Verify by checking the doorbell registers are clear.
1186 */
1187 start = ktime_get();
1188 do {
1189 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1190 ret = -EBUSY;
1191 goto out;
1192 }
1193
1194 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1195 tr_pending = ufshcd_pending_cmds(hba);
1196 if (!tm_doorbell && !tr_pending) {
a3cd5ec5 1197 timeout = false;
1198 break;
1199 } else if (do_last_check) {
1200 break;
1201 }
1202
1203 spin_unlock_irqrestore(hba->host->host_lock, flags);
2000bc30 1204 io_schedule_timeout(msecs_to_jiffies(20));
a3cd5ec5 1205 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1206 wait_timeout_us) {
1207 timeout = true;
1208 /*
1209 * We might have scheduled out for long time so make
1210 * sure to check if doorbells are cleared by this time
1211 * or not.
1212 */
1213 do_last_check = true;
1214 }
1215 spin_lock_irqsave(hba->host->host_lock, flags);
8d077ede 1216 } while (tm_doorbell || tr_pending);
a3cd5ec5 1217
1218 if (timeout) {
1219 dev_err(hba->dev,
1220 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
8d077ede 1221 __func__, tm_doorbell, tr_pending);
a3cd5ec5 1222 ret = -EBUSY;
1223 }
1224out:
1225 spin_unlock_irqrestore(hba->host->host_lock, flags);
1226 ufshcd_release(hba);
1227 return ret;
1228}
1229
1230/**
1231 * ufshcd_scale_gear - scale up/down UFS gear
1232 * @hba: per adapter instance
1233 * @scale_up: True for scaling up gear and false for scaling down
1234 *
1235 * Returns 0 for success,
1236 * Returns -EBUSY if scaling can't happen at this time
1237 * Returns non-zero for any other errors
1238 */
1239static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1240{
a3cd5ec5 1241 int ret = 0;
1242 struct ufs_pa_layer_attr new_pwr_info;
1243
1244 if (scale_up) {
1245 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1246 sizeof(struct ufs_pa_layer_attr));
1247 } else {
1248 memcpy(&new_pwr_info, &hba->pwr_info,
1249 sizeof(struct ufs_pa_layer_attr));
1250
1251 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1252 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
a3cd5ec5 1253 /* save the current power mode */
1254 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1255 &hba->pwr_info,
1256 sizeof(struct ufs_pa_layer_attr));
1257
1258 /* scale down gear */
1259 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1260 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
a3cd5ec5 1261 }
1262 }
1263
1264 /* check if the power mode needs to be changed or not? */
6a9df818 1265 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
a3cd5ec5 1266 if (ret)
1267 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1268 __func__, ret,
1269 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1270 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1271
1272 return ret;
1273}
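/*
 * In short: scaling down saves the current power mode and drops both TX and
 * RX to hba->clk_scaling.min_gear; scaling up restores the previously saved
 * power mode via ufshcd_config_pwr_mode().
 */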
1274
1275/*
1276 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1277 * has expired.
1278 *
1279 * Return: 0 upon success; -EBUSY upon timeout.
1280 */
1281static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
a3cd5ec5 1282{
a3cd5ec5 1283 int ret = 0;
1284 /*
1285 * make sure that there are no outstanding requests when
1286 * clock scaling is in progress
1287 */
38135535 1288 ufshcd_scsi_block_requests(hba);
a3cd5ec5 1289 down_write(&hba->clk_scaling_lock);
1290
1291 if (!hba->clk_scaling.is_allowed ||
b434ecfb 1292 ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
a3cd5ec5 1293 ret = -EBUSY;
1294 up_write(&hba->clk_scaling_lock);
38135535 1295 ufshcd_scsi_unblock_requests(hba);
0e9d4ca4 1296 goto out;
a3cd5ec5 1297 }
1298
1299 /* let's not get into low power until clock scaling is completed */
1300 ufshcd_hold(hba, false);
1301
1302out:
a3cd5ec5 1303 return ret;
1304}
1305
0e9d4ca4 1306static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
a3cd5ec5 1307{
1308 if (writelock)
1309 up_write(&hba->clk_scaling_lock);
1310 else
1311 up_read(&hba->clk_scaling_lock);
38135535 1312 ufshcd_scsi_unblock_requests(hba);
0e9d4ca4 1313 ufshcd_release(hba);
a3cd5ec5 1314}
1315
1316/**
1317 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1318 * @hba: per adapter instance
1319 * @scale_up: True for scaling up and false for scaling down
1320 *
1321 * Returns 0 for success,
1322 * Returns -EBUSY if scaling can't happen at this time
1323 * Returns non-zero for any other errors
1324 */
1325static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1326{
1327 int ret = 0;
0e9d4ca4 1328 bool is_writelock = true;
401f1e44 1329
b434ecfb 1330 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
a3cd5ec5 1331 if (ret)
0e9d4ca4 1332 return ret;
a3cd5ec5 1333
1334 /* scale down the gear before scaling down clocks */
1335 if (!scale_up) {
1336 ret = ufshcd_scale_gear(hba, false);
1337 if (ret)
394b949f 1338 goto out_unprepare;
a3cd5ec5 1339 }
1340
1341 ret = ufshcd_scale_clks(hba, scale_up);
1342 if (ret) {
1343 if (!scale_up)
1344 ufshcd_scale_gear(hba, true);
394b949f 1345 goto out_unprepare;
a3cd5ec5 1346 }
1347
1348 /* scale up the gear after scaling up clocks */
1349 if (scale_up) {
1350 ret = ufshcd_scale_gear(hba, true);
3d17b9b5 1351 if (ret) {
a3cd5ec5 1352 ufshcd_scale_clks(hba, false);
3d17b9b5
AD
1353 goto out_unprepare;
1354 }
a3cd5ec5 1355 }
1356
3d17b9b5 1357 /* Enable Write Booster if we have scaled up else disable it */
1358 if (ufshcd_enable_wb_if_scaling_up(hba)) {
1359 downgrade_write(&hba->clk_scaling_lock);
1360 is_writelock = false;
1361 ufshcd_wb_toggle(hba, scale_up);
1362 }
3d17b9b5 1363
394b949f 1364out_unprepare:
0e9d4ca4 1365 ufshcd_clock_scaling_unprepare(hba, is_writelock);
a3cd5ec5 1366 return ret;
1367}
1368
401f1e44 1369static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1370{
1371 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1372 clk_scaling.suspend_work);
1373 unsigned long irq_flags;
1374
1375 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1376 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1377 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1378 return;
1379 }
1380 hba->clk_scaling.is_suspended = true;
1381 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1382
1383 __ufshcd_suspend_clkscaling(hba);
1384}
1385
1386static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1387{
1388 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1389 clk_scaling.resume_work);
1390 unsigned long irq_flags;
1391
1392 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1393 if (!hba->clk_scaling.is_suspended) {
1394 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1395 return;
1396 }
1397 hba->clk_scaling.is_suspended = false;
1398 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1399
1400 devfreq_resume_device(hba->devfreq);
1401}
1402
a3cd5ec5 1403static int ufshcd_devfreq_target(struct device *dev,
1404 unsigned long *freq, u32 flags)
1405{
1406 int ret = 0;
1407 struct ufs_hba *hba = dev_get_drvdata(dev);
1408 ktime_t start;
401f1e44 1409 bool scale_up, sched_clk_scaling_suspend_work = false;
1410 struct list_head *clk_list = &hba->clk_list_head;
1411 struct ufs_clk_info *clki;
a3cd5ec5 1412 unsigned long irq_flags;
1413
1414 if (!ufshcd_is_clkscaling_supported(hba))
1415 return -EINVAL;
1416
1417 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1418 /* Override with the closest supported frequency */
1419 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
a3cd5ec5 1420 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1421 if (ufshcd_eh_in_progress(hba)) {
1422 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1423 return 0;
1424 }
1425
401f1e44 1426 if (!hba->clk_scaling.active_reqs)
1427 sched_clk_scaling_suspend_work = true;
1428
1429 if (list_empty(clk_list)) {
1430 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1431 goto out;
1432 }
1433
91831d33 1434 /* Decide based on the rounded-off frequency and update */
51d1628f 1435 scale_up = *freq == clki->max_freq;
1436 if (!scale_up)
1437 *freq = clki->min_freq;
1438 /* Update the frequency */
401f1e44 1439 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1440 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1441 ret = 0;
1442 goto out; /* no state change required */
a3cd5ec5 1443 }
1444 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1445
1446 start = ktime_get();
a3cd5ec5 1447 ret = ufshcd_devfreq_scale(hba, scale_up);
1448
a3cd5ec5 1449 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1450 (scale_up ? "up" : "down"),
1451 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1452
401f1e44 1453out:
1454 if (sched_clk_scaling_suspend_work)
1455 queue_work(hba->clk_scaling.workq,
1456 &hba->clk_scaling.suspend_work);
1457
a3cd5ec5 1458 return ret;
1459}
1460
a3cd5ec5 1461static int ufshcd_devfreq_get_dev_status(struct device *dev,
1462 struct devfreq_dev_status *stat)
1463{
1464 struct ufs_hba *hba = dev_get_drvdata(dev);
1465 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1466 unsigned long flags;
1467 struct list_head *clk_list = &hba->clk_list_head;
1468 struct ufs_clk_info *clki;
b1bf66d1 1469 ktime_t curr_t;
a3cd5ec5 1470
1471 if (!ufshcd_is_clkscaling_supported(hba))
1472 return -EINVAL;
1473
1474 memset(stat, 0, sizeof(*stat));
1475
1476 spin_lock_irqsave(hba->host->host_lock, flags);
b1bf66d1 1477 curr_t = ktime_get();
a3cd5ec5 1478 if (!scaling->window_start_t)
1479 goto start_window;
1480
1481 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1482 /*
1483 * If current frequency is 0, then the ondemand governor considers
1484 * there's no initial frequency set. And it always requests to set
1485 * to max. frequency.
1486 */
1487 stat->current_frequency = clki->curr_freq;
a3cd5ec5 1488 if (scaling->is_busy_started)
1489 scaling->tot_busy_t += ktime_us_delta(curr_t,
1490 scaling->busy_start_t);
a3cd5ec5 1491
b1bf66d1 1492 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
a3cd5ec5 1493 stat->busy_time = scaling->tot_busy_t;
1494start_window:
b1bf66d1 1495 scaling->window_start_t = curr_t;
a3cd5ec5 1496 scaling->tot_busy_t = 0;
1497
1498 if (hba->outstanding_reqs) {
b1bf66d1 1499 scaling->busy_start_t = curr_t;
a3cd5ec5 1500 scaling->is_busy_started = true;
1501 } else {
1502 scaling->busy_start_t = 0;
1503 scaling->is_busy_started = false;
1504 }
1505 spin_unlock_irqrestore(hba->host->host_lock, flags);
1506 return 0;
1507}
1508
1509static int ufshcd_devfreq_init(struct ufs_hba *hba)
1510{
1511 struct list_head *clk_list = &hba->clk_list_head;
1512 struct ufs_clk_info *clki;
1513 struct devfreq *devfreq;
1514 int ret;
1515
1516 /* Skip devfreq if we don't have any clocks in the list */
1517 if (list_empty(clk_list))
1518 return 0;
1519
1520 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1521 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1522 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1523
1524 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1525 &hba->vps->ondemand_data);
092b4558 1526 devfreq = devfreq_add_device(hba->dev,
90b8491c 1527 &hba->vps->devfreq_profile,
deac444f 1528 DEVFREQ_GOV_SIMPLE_ONDEMAND,
90b8491c 1529 &hba->vps->ondemand_data);
deac444f
BA
1530 if (IS_ERR(devfreq)) {
1531 ret = PTR_ERR(devfreq);
1532 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1533
1534 dev_pm_opp_remove(hba->dev, clki->min_freq);
1535 dev_pm_opp_remove(hba->dev, clki->max_freq);
1536 return ret;
1537 }
1538
1539 hba->devfreq = devfreq;
1540
1541 return 0;
1542}
1543
1544static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1545{
1546 struct list_head *clk_list = &hba->clk_list_head;
1547 struct ufs_clk_info *clki;
1548
1549 if (!hba->devfreq)
1550 return;
1551
1552 devfreq_remove_device(hba->devfreq);
1553 hba->devfreq = NULL;
1554
1555 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1556 dev_pm_opp_remove(hba->dev, clki->min_freq);
1557 dev_pm_opp_remove(hba->dev, clki->max_freq);
1558}
1559
401f1e44 1560static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1561{
1562 unsigned long flags;
1563
1564 devfreq_suspend_device(hba->devfreq);
1565 spin_lock_irqsave(hba->host->host_lock, flags);
1566 hba->clk_scaling.window_start_t = 0;
1567 spin_unlock_irqrestore(hba->host->host_lock, flags);
1568}
a3cd5ec5 1569
1570static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1571{
401f1e44 1572 unsigned long flags;
1573 bool suspend = false;
1574
1575 cancel_work_sync(&hba->clk_scaling.suspend_work);
1576 cancel_work_sync(&hba->clk_scaling.resume_work);
fcb0c4b0 1577
401f1e44 1578 spin_lock_irqsave(hba->host->host_lock, flags);
1579 if (!hba->clk_scaling.is_suspended) {
1580 suspend = true;
1581 hba->clk_scaling.is_suspended = true;
1582 }
1583 spin_unlock_irqrestore(hba->host->host_lock, flags);
1584
1585 if (suspend)
1586 __ufshcd_suspend_clkscaling(hba);
1587}
1588
1589static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1590{
401f1e44 1591 unsigned long flags;
1592 bool resume = false;
1593
401f1e44 1594 spin_lock_irqsave(hba->host->host_lock, flags);
1595 if (hba->clk_scaling.is_suspended) {
1596 resume = true;
1597 hba->clk_scaling.is_suspended = false;
1598 }
1599 spin_unlock_irqrestore(hba->host->host_lock, flags);
1600
1601 if (resume)
1602 devfreq_resume_device(hba->devfreq);
1603}
1604
1605static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1606 struct device_attribute *attr, char *buf)
1607{
1608 struct ufs_hba *hba = dev_get_drvdata(dev);
1609
1481b7fe 1610 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1611}
1612
1613static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1614 struct device_attribute *attr, const char *buf, size_t count)
1615{
1616 struct ufs_hba *hba = dev_get_drvdata(dev);
1617 u32 value;
9cd20d3f 1618 int err = 0;
1619
1620 if (kstrtou32(buf, 0, &value))
1621 return -EINVAL;
1622
1623 down(&hba->host_sem);
1624 if (!ufshcd_is_user_access_allowed(hba)) {
1625 err = -EBUSY;
1626 goto out;
1627 }
1628
fcb0c4b0 1629 value = !!value;
0e9d4ca4 1630 if (value == hba->clk_scaling.is_enabled)
1631 goto out;
1632
b294ff3e 1633 ufshcd_rpm_get_sync(hba);
1634 ufshcd_hold(hba, false);
1635
0e9d4ca4 1636 hba->clk_scaling.is_enabled = value;
401f1e44 1637
1638 if (value) {
1639 ufshcd_resume_clkscaling(hba);
1640 } else {
1641 ufshcd_suspend_clkscaling(hba);
a3cd5ec5 1642 err = ufshcd_devfreq_scale(hba, true);
1643 if (err)
1644 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1645 __func__, err);
1646 }
1647
1648 ufshcd_release(hba);
b294ff3e 1649 ufshcd_rpm_put_sync(hba);
fcb0c4b0 1650out:
1651 up(&hba->host_sem);
1652 return err ? err : count;
1653}
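/*
 * The store handler above backs the "clkscale_enable" sysfs attribute
 * registered by ufshcd_init_clk_scaling_sysfs() below: writing 1 resumes
 * devfreq-driven scaling, writing 0 suspends it and scales the clocks back
 * up to their maximum frequency.
 */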
1654
4543d9d7 1655static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
a3cd5ec5 1656{
1657 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1658 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1659 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1660 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1661 hba->clk_scaling.enable_attr.attr.mode = 0644;
1662 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1663 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1664}
1665
4543d9d7
CG
1666static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1667{
1668 if (hba->clk_scaling.enable_attr.attr.name)
1669 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1670}
1671
1672static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1673{
1674 char wq_name[sizeof("ufs_clkscaling_00")];
1675
1676 if (!ufshcd_is_clkscaling_supported(hba))
1677 return;
1678
80d892f4
CG
1679 if (!hba->clk_scaling.min_gear)
1680 hba->clk_scaling.min_gear = UFS_HS_G1;
1681
4543d9d7
CG
1682 INIT_WORK(&hba->clk_scaling.suspend_work,
1683 ufshcd_clk_scaling_suspend_work);
1684 INIT_WORK(&hba->clk_scaling.resume_work,
1685 ufshcd_clk_scaling_resume_work);
1686
1687 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1688 hba->host->host_no);
1689 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1690
1691 hba->clk_scaling.is_initialized = true;
1692}
1693
1694static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1695{
1696 if (!hba->clk_scaling.is_initialized)
1697 return;
1698
1699 ufshcd_remove_clk_scaling_sysfs(hba);
1700 destroy_workqueue(hba->clk_scaling.workq);
1701 ufshcd_devfreq_remove(hba);
1702 hba->clk_scaling.is_initialized = false;
1703}
1704
1ab27c9c
ST
1705static void ufshcd_ungate_work(struct work_struct *work)
1706{
1707 int ret;
1708 unsigned long flags;
1709 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1710 clk_gating.ungate_work);
1711
1712 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1713
1714 spin_lock_irqsave(hba->host->host_lock, flags);
1715 if (hba->clk_gating.state == CLKS_ON) {
1716 spin_unlock_irqrestore(hba->host->host_lock, flags);
1717 goto unblock_reqs;
1718 }
1719
1720 spin_unlock_irqrestore(hba->host->host_lock, flags);
dd7143e2 1721 ufshcd_hba_vreg_set_hpm(hba);
1ab27c9c
ST
1722 ufshcd_setup_clocks(hba, true);
1723
8b0bbf00
SC
1724 ufshcd_enable_irq(hba);
1725
1ab27c9c
ST
1726 /* Exit from hibern8 */
1727 if (ufshcd_can_hibern8_during_gating(hba)) {
1728 /* Prevent gating in this path */
1729 hba->clk_gating.is_suspended = true;
1730 if (ufshcd_is_link_hibern8(hba)) {
1731 ret = ufshcd_uic_hibern8_exit(hba);
1732 if (ret)
1733 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1734 __func__, ret);
1735 else
1736 ufshcd_set_link_active(hba);
1737 }
1738 hba->clk_gating.is_suspended = false;
1739 }
1740unblock_reqs:
38135535 1741 ufshcd_scsi_unblock_requests(hba);
1ab27c9c
ST
1742}
1743
1744/**
1745 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1746 * Also, exit from hibern8 mode and set the link as active.
1747 * @hba: per adapter instance
1748 * @async: This indicates whether caller should ungate clocks asynchronously.
1749 */
1750int ufshcd_hold(struct ufs_hba *hba, bool async)
1751{
1752 int rc = 0;
93b6c5db 1753 bool flush_result;
1ab27c9c
ST
1754 unsigned long flags;
1755
3489c34b
BVA
1756 if (!ufshcd_is_clkgating_allowed(hba) ||
1757 !hba->clk_gating.is_initialized)
1ab27c9c 1758 goto out;
1ab27c9c
ST
1759 spin_lock_irqsave(hba->host->host_lock, flags);
1760 hba->clk_gating.active_reqs++;
1761
856b3483 1762start:
1ab27c9c
ST
1763 switch (hba->clk_gating.state) {
1764 case CLKS_ON:
f2a785ac
VG
1765 /*
1766 * Wait for the ungate work to complete if in progress.
1767 * Though the clocks may be in the ON state, the link could
1768 * still be in hibern8 state if hibern8 is allowed
1769 * during clock gating.
1770 * Make sure we also exit hibern8 state, in addition to the
1771 * clocks being ON.
1772 */
1773 if (ufshcd_can_hibern8_during_gating(hba) &&
1774 ufshcd_is_link_hibern8(hba)) {
c63d6099
CG
1775 if (async) {
1776 rc = -EAGAIN;
1777 hba->clk_gating.active_reqs--;
1778 break;
1779 }
f2a785ac 1780 spin_unlock_irqrestore(hba->host->host_lock, flags);
93b6c5db
SC
1781 flush_result = flush_work(&hba->clk_gating.ungate_work);
1782 if (hba->clk_gating.is_suspended && !flush_result)
1783 goto out;
f2a785ac
VG
1784 spin_lock_irqsave(hba->host->host_lock, flags);
1785 goto start;
1786 }
1ab27c9c
ST
1787 break;
1788 case REQ_CLKS_OFF:
1789 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1790 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1791 trace_ufshcd_clk_gating(dev_name(hba->dev),
1792 hba->clk_gating.state);
1ab27c9c
ST
1793 break;
1794 }
1795 /*
9c490d2d 1796 * If we are here, it means gating work is either done or
1ab27c9c
ST
1797 * currently running. Hence, fall through to cancel gating
1798 * work and to enable clocks.
1799 */
df561f66 1800 fallthrough;
1ab27c9c 1801 case CLKS_OFF:
1ab27c9c 1802 hba->clk_gating.state = REQ_CLKS_ON;
7ff5ab47 1803 trace_ufshcd_clk_gating(dev_name(hba->dev),
1804 hba->clk_gating.state);
da3fecb0
CG
1805 if (queue_work(hba->clk_gating.clk_gating_workq,
1806 &hba->clk_gating.ungate_work))
1807 ufshcd_scsi_block_requests(hba);
1ab27c9c
ST
1808 /*
1809 * fall through to check if we should wait for this
1810 * work to be done or not.
1811 */
df561f66 1812 fallthrough;
1ab27c9c
ST
1813 case REQ_CLKS_ON:
1814 if (async) {
1815 rc = -EAGAIN;
1816 hba->clk_gating.active_reqs--;
1817 break;
1818 }
1819
1820 spin_unlock_irqrestore(hba->host->host_lock, flags);
1821 flush_work(&hba->clk_gating.ungate_work);
1822 /* Make sure state is CLKS_ON before returning */
856b3483 1823 spin_lock_irqsave(hba->host->host_lock, flags);
1ab27c9c
ST
1824 goto start;
1825 default:
1826 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1827 __func__, hba->clk_gating.state);
1828 break;
1829 }
1830 spin_unlock_irqrestore(hba->host->host_lock, flags);
1831out:
1832 return rc;
1833}
6e3fd44d 1834EXPORT_SYMBOL_GPL(ufshcd_hold);
1ab27c9c
ST
1835
1836static void ufshcd_gate_work(struct work_struct *work)
1837{
1838 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1839 clk_gating.gate_work.work);
1840 unsigned long flags;
4db7a236 1841 int ret;
1ab27c9c
ST
1842
1843 spin_lock_irqsave(hba->host->host_lock, flags);
3f0c06de
VG
1844 /*
1845 * If this work is being cancelled, the gating state will
1846 * already be marked as REQ_CLKS_ON. In that case save time by
1847 * skipping the gating work and exiting after changing the
1848 * clock state to CLKS_ON.
1849 */
1850 if (hba->clk_gating.is_suspended ||
18f01374 1851 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1ab27c9c 1852 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1853 trace_ufshcd_clk_gating(dev_name(hba->dev),
1854 hba->clk_gating.state);
1ab27c9c
ST
1855 goto rel_lock;
1856 }
1857
1858 if (hba->clk_gating.active_reqs
1859 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
bd0b3538 1860 || hba->outstanding_reqs || hba->outstanding_tasks
1ab27c9c
ST
1861 || hba->active_uic_cmd || hba->uic_async_done)
1862 goto rel_lock;
1863
1864 spin_unlock_irqrestore(hba->host->host_lock, flags);
1865
1866 /* put the link into hibern8 mode before turning off clocks */
1867 if (ufshcd_can_hibern8_during_gating(hba)) {
4db7a236
CG
1868 ret = ufshcd_uic_hibern8_enter(hba);
1869 if (ret) {
1ab27c9c 1870 hba->clk_gating.state = CLKS_ON;
4db7a236
CG
1871 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1872 __func__, ret);
7ff5ab47 1873 trace_ufshcd_clk_gating(dev_name(hba->dev),
1874 hba->clk_gating.state);
1ab27c9c
ST
1875 goto out;
1876 }
1877 ufshcd_set_link_hibern8(hba);
1878 }
1879
8b0bbf00
SC
1880 ufshcd_disable_irq(hba);
1881
81309c24 1882 ufshcd_setup_clocks(hba, false);
1ab27c9c 1883
dd7143e2
CG
1884 /* Put the host controller in low power mode if possible */
1885 ufshcd_hba_vreg_set_lpm(hba);
1ab27c9c
ST
1886 /*
1887 * If this work is being cancelled, the gating state will
1888 * already be marked as REQ_CLKS_ON. In that case keep the state
1889 * as REQ_CLKS_ON, which anyway implies that the clocks are off
1890 * and a request to turn them on is pending. This keeps the
1891 * state machine intact and ultimately prevents the cancel work
1892 * from running multiple times when new requests arrive before
1893 * the current cancel work is done.
1894 */
1895 spin_lock_irqsave(hba->host->host_lock, flags);
7ff5ab47 1896 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1ab27c9c 1897 hba->clk_gating.state = CLKS_OFF;
7ff5ab47 1898 trace_ufshcd_clk_gating(dev_name(hba->dev),
1899 hba->clk_gating.state);
1900 }
1ab27c9c
ST
1901rel_lock:
1902 spin_unlock_irqrestore(hba->host->host_lock, flags);
1903out:
1904 return;
1905}
1906
1907/* host lock must be held before calling this variant */
1908static void __ufshcd_release(struct ufs_hba *hba)
1909{
1910 if (!ufshcd_is_clkgating_allowed(hba))
1911 return;
1912
1913 hba->clk_gating.active_reqs--;
1914
4db7a236
CG
1915 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1916 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
3489c34b 1917 hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
fd62de11
JK
1918 hba->active_uic_cmd || hba->uic_async_done ||
1919 hba->clk_gating.state == CLKS_OFF)
1ab27c9c
ST
1920 return;
1921
1922 hba->clk_gating.state = REQ_CLKS_OFF;
7ff5ab47 1923 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
f4bb7704
EG
1924 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1925 &hba->clk_gating.gate_work,
1926 msecs_to_jiffies(hba->clk_gating.delay_ms));
1ab27c9c
ST
1927}
1928
1929void ufshcd_release(struct ufs_hba *hba)
1930{
1931 unsigned long flags;
1932
1933 spin_lock_irqsave(hba->host->host_lock, flags);
1934 __ufshcd_release(hba);
1935 spin_unlock_irqrestore(hba->host->host_lock, flags);
1936}
6e3fd44d 1937EXPORT_SYMBOL_GPL(ufshcd_release);
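/*
 * Illustrative sketch (not part of the driver; the helper name below is
 * purely hypothetical): callers pair ufshcd_hold() with ufshcd_release()
 * around host register accesses so that gate_work cannot turn the clocks
 * off in between.
 *
 *	static u32 example_read_hcs(struct ufs_hba *hba)
 *	{
 *		u32 val;
 *
 *		ufshcd_hold(hba, false);	// may sleep until clocks are ungated
 *		val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 *		ufshcd_release(hba);		// re-arms gate_work after delay_ms
 *		return val;
 *	}
 */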
1ab27c9c
ST
1938
1939static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1940 struct device_attribute *attr, char *buf)
1941{
1942 struct ufs_hba *hba = dev_get_drvdata(dev);
1943
bafd09f8 1944 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1ab27c9c
ST
1945}
1946
ad8a647e
BVA
1947void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
1948{
1949 struct ufs_hba *hba = dev_get_drvdata(dev);
1950 unsigned long flags;
1951
1952 spin_lock_irqsave(hba->host->host_lock, flags);
1953 hba->clk_gating.delay_ms = value;
1954 spin_unlock_irqrestore(hba->host->host_lock, flags);
1955}
1956EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
1957
1ab27c9c
ST
1958static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1959 struct device_attribute *attr, const char *buf, size_t count)
1960{
ad8a647e 1961 unsigned long value;
1ab27c9c
ST
1962
1963 if (kstrtoul(buf, 0, &value))
1964 return -EINVAL;
1965
ad8a647e 1966 ufshcd_clkgate_delay_set(dev, value);
1ab27c9c
ST
1967 return count;
1968}
1969
b427411a
ST
1970static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1971 struct device_attribute *attr, char *buf)
1972{
1973 struct ufs_hba *hba = dev_get_drvdata(dev);
1974
bafd09f8 1975 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
b427411a
ST
1976}
1977
1978static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1979 struct device_attribute *attr, const char *buf, size_t count)
1980{
1981 struct ufs_hba *hba = dev_get_drvdata(dev);
1982 unsigned long flags;
1983 u32 value;
1984
1985 if (kstrtou32(buf, 0, &value))
1986 return -EINVAL;
1987
1988 value = !!value;
b6645112
JK
1989
1990 spin_lock_irqsave(hba->host->host_lock, flags);
b427411a
ST
1991 if (value == hba->clk_gating.is_enabled)
1992 goto out;
1993
b6645112
JK
1994 if (value)
1995 __ufshcd_release(hba);
1996 else
b427411a 1997 hba->clk_gating.active_reqs++;
b427411a
ST
1998
1999 hba->clk_gating.is_enabled = value;
2000out:
b6645112 2001 spin_unlock_irqrestore(hba->host->host_lock, flags);
b427411a
ST
2002 return count;
2003}
2004
4543d9d7 2005static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
eebcc196 2006{
4543d9d7
CG
2007 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
2008 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
2009 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
2010 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
2011 hba->clk_gating.delay_attr.attr.mode = 0644;
2012 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
2013 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
eebcc196 2014
4543d9d7
CG
2015 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
2016 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
2017 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
2018 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
2019 hba->clk_gating.enable_attr.attr.mode = 0644;
2020 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
2021 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
eebcc196
VG
2022}
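/*
 * Illustrative usage (hedged; the sysfs path is platform dependent and only
 * an example): the two clock-gating attributes registered above can be tuned
 * from user space, e.g.
 *
 *	echo 50 > /sys/devices/.../clkgate_delay_ms   # gate clocks 50 ms after idle
 *	echo 0  > /sys/devices/.../clkgate_enable     # disable gating, keep clocks on
 */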
2023
4543d9d7 2024static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
eebcc196 2025{
4543d9d7
CG
2026 if (hba->clk_gating.delay_attr.attr.name)
2027 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2028 if (hba->clk_gating.enable_attr.attr.name)
2029 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
eebcc196
VG
2030}
2031
1ab27c9c
ST
2032static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2033{
10e5e375
VV
2034 char wq_name[sizeof("ufs_clk_gating_00")];
2035
1ab27c9c
ST
2036 if (!ufshcd_is_clkgating_allowed(hba))
2037 return;
2038
2dec9475
CG
2039 hba->clk_gating.state = CLKS_ON;
2040
1ab27c9c
ST
2041 hba->clk_gating.delay_ms = 150;
2042 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2043 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2044
10e5e375
VV
2045 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2046 hba->host->host_no);
2047 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
e93e6e49 2048 WQ_MEM_RECLAIM | WQ_HIGHPRI);
10e5e375 2049
4543d9d7 2050 ufshcd_init_clk_gating_sysfs(hba);
b427411a 2051
4543d9d7
CG
2052 hba->clk_gating.is_enabled = true;
2053 hba->clk_gating.is_initialized = true;
1ab27c9c
ST
2054}
2055
2056static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2057{
4543d9d7 2058 if (!hba->clk_gating.is_initialized)
1ab27c9c 2059 return;
3489c34b 2060
4543d9d7 2061 ufshcd_remove_clk_gating_sysfs(hba);
3489c34b
BVA
2062
2063 /* Ungate the clock if necessary. */
2064 ufshcd_hold(hba, false);
4543d9d7 2065 hba->clk_gating.is_initialized = false;
3489c34b
BVA
2066 ufshcd_release(hba);
2067
2068 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1ab27c9c
ST
2069}
2070
856b3483
ST
2071static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2072{
401f1e44 2073 bool queue_resume_work = false;
b1bf66d1 2074 ktime_t curr_t = ktime_get();
a45f9371 2075 unsigned long flags;
401f1e44 2076
fcb0c4b0 2077 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
2078 return;
2079
a45f9371 2080 spin_lock_irqsave(hba->host->host_lock, flags);
401f1e44 2081 if (!hba->clk_scaling.active_reqs++)
2082 queue_resume_work = true;
2083
a45f9371
CG
2084 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2085 spin_unlock_irqrestore(hba->host->host_lock, flags);
401f1e44 2086 return;
a45f9371 2087 }
401f1e44 2088
2089 if (queue_resume_work)
2090 queue_work(hba->clk_scaling.workq,
2091 &hba->clk_scaling.resume_work);
2092
2093 if (!hba->clk_scaling.window_start_t) {
b1bf66d1 2094 hba->clk_scaling.window_start_t = curr_t;
401f1e44 2095 hba->clk_scaling.tot_busy_t = 0;
2096 hba->clk_scaling.is_busy_started = false;
2097 }
2098
856b3483 2099 if (!hba->clk_scaling.is_busy_started) {
b1bf66d1 2100 hba->clk_scaling.busy_start_t = curr_t;
856b3483
ST
2101 hba->clk_scaling.is_busy_started = true;
2102 }
a45f9371 2103 spin_unlock_irqrestore(hba->host->host_lock, flags);
856b3483
ST
2104}
2105
2106static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2107{
2108 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
a45f9371 2109 unsigned long flags;
856b3483 2110
fcb0c4b0 2111 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
2112 return;
2113
a45f9371
CG
2114 spin_lock_irqsave(hba->host->host_lock, flags);
2115 hba->clk_scaling.active_reqs--;
856b3483
ST
2116 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2117 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2118 scaling->busy_start_t));
8b0e1953 2119 scaling->busy_start_t = 0;
856b3483
ST
2120 scaling->is_busy_started = false;
2121 }
a45f9371 2122 spin_unlock_irqrestore(hba->host->host_lock, flags);
856b3483 2123}
1d8613a2
CG
2124
2125static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2126{
2127 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2128 return READ;
2129 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2130 return WRITE;
2131 else
2132 return -EINVAL;
2133}
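/*
 * Worked example (opcode values quoted from scsi_proto.h as understood here,
 * treat them as an assumption): a READ(10) CDB starts with 0x28, so
 * ufshcd_monitor_opcode2dir(0x28) returns READ; SYNCHRONIZE CACHE (0x35) is
 * neither a read nor a write and yields -EINVAL, so the monitor ignores it.
 */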
2134
2135static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2136 struct ufshcd_lrb *lrbp)
2137{
35d11ec2 2138 const struct ufs_hba_monitor *m = &hba->monitor;
1d8613a2
CG
2139
2140 return (m->enabled && lrbp && lrbp->cmd &&
2141 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2142 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2143}
2144
35d11ec2
KK
2145static void ufshcd_start_monitor(struct ufs_hba *hba,
2146 const struct ufshcd_lrb *lrbp)
1d8613a2
CG
2147{
2148 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
a45f9371 2149 unsigned long flags;
1d8613a2 2150
a45f9371 2151 spin_lock_irqsave(hba->host->host_lock, flags);
1d8613a2
CG
2152 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2153 hba->monitor.busy_start_ts[dir] = ktime_get();
a45f9371 2154 spin_unlock_irqrestore(hba->host->host_lock, flags);
1d8613a2
CG
2155}
2156
35d11ec2 2157static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
1d8613a2
CG
2158{
2159 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
a45f9371 2160 unsigned long flags;
1d8613a2 2161
a45f9371 2162 spin_lock_irqsave(hba->host->host_lock, flags);
1d8613a2 2163 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
35d11ec2 2164 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
1d8613a2
CG
2165 struct ufs_hba_monitor *m = &hba->monitor;
2166 ktime_t now, inc, lat;
2167
2168 now = lrbp->compl_time_stamp;
2169 inc = ktime_sub(now, m->busy_start_ts[dir]);
2170 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2171 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2172
2173 /* Update latencies */
2174 m->nr_req[dir]++;
2175 lat = ktime_sub(now, lrbp->issue_time_stamp);
2176 m->lat_sum[dir] += lat;
2177 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2178 m->lat_max[dir] = lat;
2179 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2180 m->lat_min[dir] = lat;
2181
2182 m->nr_queued[dir]--;
2183 /* Push forward the busy start of monitor */
2184 m->busy_start_ts[dir] = now;
2185 }
a45f9371 2186 spin_unlock_irqrestore(hba->host->host_lock, flags);
856b3483 2187}
1d8613a2 2188
7a3e97b0
SY
2189/**
2190 * ufshcd_send_command - Send SCSI or device management commands
2191 * @hba: per adapter instance
2192 * @task_tag: Task tag of the command
22a2d563 2193 * @hwq: pointer to hardware queue instance
7a3e97b0
SY
2194 */
2195static inline
22a2d563
AD
2196void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2197 struct ufs_hw_queue *hwq)
7a3e97b0 2198{
6edfdcfe 2199 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
1f522c50 2200 unsigned long flags;
6edfdcfe
SC
2201
2202 lrbp->issue_time_stamp = ktime_get();
0f85e747 2203 lrbp->issue_time_stamp_local_clock = local_clock();
6edfdcfe 2204 lrbp->compl_time_stamp = ktime_set(0, 0);
0f85e747 2205 lrbp->compl_time_stamp_local_clock = 0;
28fa68fc 2206 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
856b3483 2207 ufshcd_clk_scaling_start_busy(hba);
1d8613a2
CG
2208 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2209 ufshcd_start_monitor(hba, lrbp);
169f5eb2 2210
22a2d563
AD
2211 if (is_mcq_enabled(hba)) {
2212 int utrd_size = sizeof(struct utp_transfer_req_desc);
2213
2214 spin_lock(&hwq->sq_lock);
2215 memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
2216 lrbp->utr_descriptor_ptr, utrd_size);
2217 ufshcd_inc_sq_tail(hwq);
2218 spin_unlock(&hwq->sq_lock);
2219 } else {
2220 spin_lock_irqsave(&hba->outstanding_lock, flags);
2221 if (hba->vops && hba->vops->setup_xfer_req)
2222 hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2223 !!lrbp->cmd);
2224 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2225 ufshcd_writel(hba, 1 << lrbp->task_tag,
2226 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2227 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2228 }
7a3e97b0
SY
2229}
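/*
 * Notes on the two submission paths above (illustrative only, no new
 * behaviour): in MCQ mode each submission queue slot holds one struct
 * utp_transfer_req_desc, so slot N lives at
 * hwq->sqe_base_addr + N * sizeof(struct utp_transfer_req_desc), and
 * ufshcd_inc_sq_tail() is expected to advance sq_tail_slot with wrap-around.
 * In the legacy path the doorbell write is a single bit, e.g. task_tag 5 is
 * issued by writing 1 << 5 == 0x20 to REG_UTP_TRANSFER_REQ_DOOR_BELL.
 */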
2230
2231/**
2232 * ufshcd_copy_sense_data - Copy sense data in case of check condition
8aa29f19 2233 * @lrbp: pointer to local reference block
7a3e97b0
SY
2234 */
2235static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2236{
1de4378f 2237 u8 *const sense_buffer = lrbp->cmd->sense_buffer;
7a3e97b0 2238 int len;
1de4378f
BVA
2239
2240 if (sense_buffer &&
1c2623c5 2241 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
e3ce73d6
YG
2242 int len_to_copy;
2243
5a0b0cb9 2244 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
09a5a24f 2245 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
e3ce73d6 2246
1de4378f 2247 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
09a5a24f 2248 len_to_copy);
7a3e97b0
SY
2249 }
2250}
2251
68078d5c
DR
2252/**
2253 * ufshcd_copy_query_response() - Copy the Query Response and the data
2254 * descriptor
2255 * @hba: per adapter instance
8aa29f19 2256 * @lrbp: pointer to local reference block
68078d5c
DR
2257 */
2258static
c6d4a831 2259int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
68078d5c
DR
2260{
2261 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2262
68078d5c 2263 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
68078d5c 2264
68078d5c 2265 /* Get the descriptor */
1c90836f
AA
2266 if (hba->dev_cmd.query.descriptor &&
2267 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
d44a5f98 2268 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
68078d5c 2269 GENERAL_UPIU_REQUEST_SIZE;
c6d4a831
DR
2270 u16 resp_len;
2271 u16 buf_len;
68078d5c
DR
2272
2273 /* data segment length */
c6d4a831 2274 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
68078d5c 2275 MASK_QUERY_DATA_SEG_LEN;
ea2aab24
SRT
2276 buf_len = be16_to_cpu(
2277 hba->dev_cmd.query.request.upiu_req.length);
c6d4a831
DR
2278 if (likely(buf_len >= resp_len)) {
2279 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2280 } else {
2281 dev_warn(hba->dev,
3d4881d1
BH
2282 "%s: rsp size %d is bigger than buffer size %d",
2283 __func__, resp_len, buf_len);
c6d4a831
DR
2284 return -EINVAL;
2285 }
68078d5c 2286 }
c6d4a831
DR
2287
2288 return 0;
68078d5c
DR
2289}
2290
7a3e97b0
SY
2291/**
2292 * ufshcd_hba_capabilities - Read controller capabilities
2293 * @hba: per adapter instance
df043c74
ST
2294 *
2295 * Return: 0 on success, negative on error.
7a3e97b0 2296 */
df043c74 2297static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
7a3e97b0 2298{
df043c74
ST
2299 int err;
2300
b873a275 2301 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
6554400d
YS
2302 if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
2303 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
7a3e97b0
SY
2304
2305 /* nutrs and nutmrs are 0 based values */
2306 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2307 hba->nutmrs =
2308 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
945c3cca 2309 hba->reserved_slot = hba->nutrs - 1;
df043c74
ST
2310
2311 /* Read crypto capabilities */
2312 err = ufshcd_hba_init_crypto_capabilities(hba);
2313 if (err)
2314 dev_err(hba->dev, "crypto setup failed\n");
2315
305a357d
AD
2316 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2317 if (!hba->mcq_sup)
2318 return err;
2319
6e1d850a
AD
2320 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2321 hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2322 hba->mcq_capabilities);
2323
df043c74 2324 return err;
7a3e97b0
SY
2325}
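/*
 * Worked example of the decode above: NUTRS and NUTMRS are 0-based fields,
 * so a raw NUTRS value of 31 means 32 transfer request slots, and the last
 * slot (tag 31 in that case) is set aside as hba->reserved_slot for device
 * management commands.
 */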
2326
2327/**
6ccf44fe
SJ
2328 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2329 * to accept UIC commands
7a3e97b0 2330 * @hba: per adapter instance
6ccf44fe
SJ
2331 * Return: true if the controller can accept UIC commands, false otherwise
2332 */
2333static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2334{
a858af9a 2335 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
6ccf44fe
SJ
2336}
2337
53b3d9c3
SJ
2338/**
2339 * ufshcd_get_upmcrs - Get the power mode change request status
2340 * @hba: Pointer to adapter instance
2341 *
2342 * This function gets the UPMCRS field of the HCS register.
2343 * Returns the value of the UPMCRS field.
2344 */
2345static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2346{
2347 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2348}
2349
6ccf44fe 2350/**
35c7d874 2351 * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
6ccf44fe
SJ
2352 * @hba: per adapter instance
2353 * @uic_cmd: UIC command
7a3e97b0
SY
2354 */
2355static inline void
6ccf44fe 2356ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
7a3e97b0 2357{
35c7d874
BVA
2358 lockdep_assert_held(&hba->uic_cmd_mutex);
2359
6ccf44fe
SJ
2360 WARN_ON(hba->active_uic_cmd);
2361
2362 hba->active_uic_cmd = uic_cmd;
2363
7a3e97b0 2364 /* Write Args */
6ccf44fe
SJ
2365 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2366 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2367 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
7a3e97b0 2368
28fa68fc 2369 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
aa5c6979 2370
7a3e97b0 2371 /* Write UIC Cmd */
6ccf44fe 2372 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
b873a275 2373 REG_UIC_COMMAND);
7a3e97b0
SY
2374}
2375
6ccf44fe 2376/**
35c7d874 2377 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
6ccf44fe 2378 * @hba: per adapter instance
8aa29f19 2379 * @uic_cmd: UIC command
6ccf44fe 2380 *
6ccf44fe
SJ
2381 * Returns 0 only if success.
2382 */
2383static int
2384ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2385{
2386 int ret;
2387 unsigned long flags;
2388
35c7d874
BVA
2389 lockdep_assert_held(&hba->uic_cmd_mutex);
2390
6ccf44fe 2391 if (wait_for_completion_timeout(&uic_cmd->done,
0f52fcb9 2392 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
6ccf44fe 2393 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
0f52fcb9 2394 } else {
6ccf44fe 2395 ret = -ETIMEDOUT;
0f52fcb9
CG
2396 dev_err(hba->dev,
2397 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2398 uic_cmd->command, uic_cmd->argument3);
2399
2400 if (!uic_cmd->cmd_active) {
2401 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2402 __func__);
2403 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2404 }
2405 }
6ccf44fe
SJ
2406
2407 spin_lock_irqsave(hba->host->host_lock, flags);
2408 hba->active_uic_cmd = NULL;
2409 spin_unlock_irqrestore(hba->host->host_lock, flags);
2410
2411 return ret;
2412}
2413
2414/**
2415 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2416 * @hba: per adapter instance
2417 * @uic_cmd: UIC command
d75f7fe4 2418 * @completion: initialize the completion only if this is set to true
6ccf44fe 2419 *
6ccf44fe
SJ
2420 * Returns 0 only if success.
2421 */
2422static int
d75f7fe4
YG
2423__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2424 bool completion)
6ccf44fe 2425{
35c7d874
BVA
2426 lockdep_assert_held(&hba->uic_cmd_mutex);
2427 lockdep_assert_held(hba->host->host_lock);
2428
6ccf44fe
SJ
2429 if (!ufshcd_ready_for_uic_cmd(hba)) {
2430 dev_err(hba->dev,
2431 "Controller not ready to accept UIC commands\n");
2432 return -EIO;
2433 }
2434
d75f7fe4
YG
2435 if (completion)
2436 init_completion(&uic_cmd->done);
6ccf44fe 2437
0f52fcb9 2438 uic_cmd->cmd_active = 1;
6ccf44fe 2439 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
6ccf44fe 2440
57d104c1 2441 return 0;
6ccf44fe
SJ
2442}
2443
2444/**
2445 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2446 * @hba: per adapter instance
2447 * @uic_cmd: UIC command
2448 *
2449 * Returns 0 only if success.
2450 */
e77044c5 2451int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
6ccf44fe
SJ
2452{
2453 int ret;
57d104c1 2454 unsigned long flags;
6ccf44fe 2455
a22bcfdb 2456 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2457 return 0;
2458
1ab27c9c 2459 ufshcd_hold(hba, false);
6ccf44fe 2460 mutex_lock(&hba->uic_cmd_mutex);
cad2e03d
YG
2461 ufshcd_add_delay_before_dme_cmd(hba);
2462
57d104c1 2463 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 2464 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
57d104c1
SJ
2465 spin_unlock_irqrestore(hba->host->host_lock, flags);
2466 if (!ret)
2467 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2468
6ccf44fe
SJ
2469 mutex_unlock(&hba->uic_cmd_mutex);
2470
1ab27c9c 2471 ufshcd_release(hba);
6ccf44fe
SJ
2472 return ret;
2473}
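/*
 * Illustrative sketch (an assumption about typical usage, not new driver
 * code; the driver normally goes through the ufshcd_dme_get()/ufshcd_dme_set()
 * wrappers rather than open-coding this): reading a UniPro attribute with a
 * DME_GET UIC command.
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_ACTIVETXDATALANES),
 *	};
 *
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		lanes = uic_cmd.argument3;	// GET result is returned in argument3
 */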
2474
7a3e97b0 2475/**
7a4df79d
BH
2476 * ufshcd_sgl_to_prdt - SG list to PRTD (Physical Region Description Table, 4DW format)
2477 * @hba: per-adapter instance
2478 * @lrbp: pointer to local reference block
2479 * @sg_entries: The number of sg lists actually used
2480 * @sg_list: Pointer to SG list
7a3e97b0 2481 */
7a4df79d
BH
2482static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2483 struct scatterlist *sg_list)
7a3e97b0 2484{
ada1e653 2485 struct ufshcd_sg_entry *prd;
7a3e97b0 2486 struct scatterlist *sg;
7a3e97b0
SY
2487 int i;
2488
7a4df79d 2489 if (sg_entries) {
26f968d7
AA
2490
2491 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2492 lrbp->utr_descriptor_ptr->prd_table_length =
4a5bd1a9 2493 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
26f968d7 2494 else
7a4df79d 2495 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
7a3e97b0 2496
ada1e653 2497 prd = lrbp->ucd_prdt_ptr;
7a3e97b0 2498
7a4df79d 2499 for_each_sg(sg_list, sg, sg_entries, i) {
1ea7d802
BVA
2500 const unsigned int len = sg_dma_len(sg);
2501
2502 /*
2503 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2504 * based value that indicates the length, in bytes, of
2505 * the data block. A maximum of length of 256KB may
2506 * exist for any entry. Bits 1:0 of this field shall be
2507 * 11b to indicate Dword granularity. A value of '3'
2508 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2509 */
2510 WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
ada1e653
EB
2511 prd->size = cpu_to_le32(len - 1);
2512 prd->addr = cpu_to_le64(sg->dma_address);
2513 prd->reserved = 0;
2514 prd = (void *)prd + ufshcd_sg_entry_size(hba);
7a3e97b0
SY
2515 }
2516 } else {
2517 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2518 }
7a4df79d
BH
2519}
2520
2521/**
2522 * ufshcd_map_sg - Map scatter-gather list to prdt
2523 * @hba: per adapter instance
2524 * @lrbp: pointer to local reference block
2525 *
2526 * Returns 0 in case of success, non-zero value in case of failure
2527 */
2528static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2529{
2530 struct scsi_cmnd *cmd = lrbp->cmd;
2531 int sg_segments = scsi_dma_map(cmd);
2532
2533 if (sg_segments < 0)
2534 return sg_segments;
2535
2536 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
7a3e97b0
SY
2537
2538 return 0;
2539}
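/*
 * Worked example of the PRDT encoding done in ufshcd_sgl_to_prdt(): the DBC
 * field is zero-based, so a 4 KiB DMA segment is stored as
 * prd->size == 4096 - 1 == 0xFFF, and a 256 KiB segment, the largest a
 * single entry may describe, as 0x3FFFF.
 */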
2540
2541/**
2fbd009b 2542 * ufshcd_enable_intr - enable interrupts
7a3e97b0 2543 * @hba: per adapter instance
2fbd009b 2544 * @intrs: interrupt bits
7a3e97b0 2545 */
2fbd009b 2546static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
7a3e97b0 2547{
2fbd009b
SJ
2548 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2549
51428818 2550 if (hba->ufs_version == ufshci_version(1, 0)) {
2fbd009b
SJ
2551 u32 rw;
2552 rw = set & INTERRUPT_MASK_RW_VER_10;
2553 set = rw | ((set ^ intrs) & intrs);
2554 } else {
2555 set |= intrs;
2556 }
2557
2558 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2559}
2560
2561/**
2562 * ufshcd_disable_intr - disable interrupts
2563 * @hba: per adapter instance
2564 * @intrs: interrupt bits
2565 */
2566static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2567{
2568 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2569
51428818 2570 if (hba->ufs_version == ufshci_version(1, 0)) {
2fbd009b
SJ
2571 u32 rw;
2572 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2573 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2574 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2575
2576 } else {
2577 set &= ~intrs;
7a3e97b0 2578 }
2fbd009b
SJ
2579
2580 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
7a3e97b0
SY
2581}
2582
5a0b0cb9 2583/**
a4b1c9b9 2584 * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
5a0b0cb9
SRT
2585 * header according to the request
2586 * @lrbp: pointer to local reference block
2587 * @upiu_flags: flags required in the header
2588 * @cmd_dir: requests data direction
a4b1c9b9 2589 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
5a0b0cb9 2590 */
a4b1c9b9
BH
2591static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2592 enum dma_data_direction cmd_dir, int ehs_length)
5a0b0cb9
SRT
2593{
2594 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2595 u32 data_direction;
2596 u32 dword_0;
df043c74
ST
2597 u32 dword_1 = 0;
2598 u32 dword_3 = 0;
5a0b0cb9
SRT
2599
2600 if (cmd_dir == DMA_FROM_DEVICE) {
2601 data_direction = UTP_DEVICE_TO_HOST;
2602 *upiu_flags = UPIU_CMD_FLAGS_READ;
2603 } else if (cmd_dir == DMA_TO_DEVICE) {
2604 data_direction = UTP_HOST_TO_DEVICE;
2605 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2606 } else {
2607 data_direction = UTP_NO_DATA_TRANSFER;
2608 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2609 }
2610
a4b1c9b9
BH
2611 dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
2612 ehs_length << 8;
5a0b0cb9
SRT
2613 if (lrbp->intr_cmd)
2614 dword_0 |= UTP_REQ_DESC_INT_CMD;
2615
df043c74
ST
2616 /* Prepare crypto related dwords */
2617 ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2618
5a0b0cb9
SRT
2619 /* Transfer request descriptor header fields */
2620 req_desc->header.dword_0 = cpu_to_le32(dword_0);
df043c74 2621 req_desc->header.dword_1 = cpu_to_le32(dword_1);
5a0b0cb9
SRT
2622 /*
2623 * assigning invalid value for command status. Controller
2624 * updates OCS on command completion, with the command
2625 * status
2626 */
2627 req_desc->header.dword_2 =
2628 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
df043c74 2629 req_desc->header.dword_3 = cpu_to_le32(dword_3);
51047266
YG
2630
2631 req_desc->prd_table_length = 0;
5a0b0cb9
SRT
2632}
2633
2634/**
2635 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2636 * for SCSI commands
8aa29f19
BVA
2637 * @lrbp: local reference block pointer
2638 * @upiu_flags: flags
5a0b0cb9
SRT
2639 */
2640static
a23064c4 2641void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
5a0b0cb9 2642{
1b21b8f0 2643 struct scsi_cmnd *cmd = lrbp->cmd;
5a0b0cb9 2644 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
52ac95fe 2645 unsigned short cdb_len;
5a0b0cb9
SRT
2646
2647 /* command descriptor fields */
2648 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2649 UPIU_TRANSACTION_COMMAND, upiu_flags,
2650 lrbp->lun, lrbp->task_tag);
2651 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2652 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2653
2654 /* Total EHS length and Data segment length will be zero */
2655 ucd_req_ptr->header.dword_2 = 0;
2656
1b21b8f0 2657 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
5a0b0cb9 2658
1b21b8f0 2659 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
a851b2bd 2660 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1b21b8f0 2661 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
52ac95fe
YG
2662
2663 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2664}
2665
68078d5c 2666/**
a4b1c9b9 2667 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
68078d5c
DR
2668 * @hba: UFS hba
2669 * @lrbp: local reference block pointer
2670 * @upiu_flags: flags
2671 */
2672static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
a23064c4 2673 struct ufshcd_lrb *lrbp, u8 upiu_flags)
68078d5c
DR
2674{
2675 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2676 struct ufs_query *query = &hba->dev_cmd.query;
e8c8e82a 2677 u16 len = be16_to_cpu(query->request.upiu_req.length);
68078d5c
DR
2678
2679 /* Query request header */
2680 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2681 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2682 lrbp->lun, lrbp->task_tag);
2683 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2684 0, query->request.query_func, 0, 0);
2685
6861285c
ZL
2686 /* Data segment length is only needed for WRITE_DESC */
2687 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2688 ucd_req_ptr->header.dword_2 =
2689 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2690 else
2691 ucd_req_ptr->header.dword_2 = 0;
68078d5c
DR
2692
2693 /* Copy the Query Request buffer as is */
2694 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2695 QUERY_OSF_SIZE);
68078d5c
DR
2696
2697 /* Copy the Descriptor */
c6d4a831 2698 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
220d17a6 2699 memcpy(ucd_req_ptr + 1, query->descriptor, len);
c6d4a831 2700
51047266 2701 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
68078d5c
DR
2702}
2703
5a0b0cb9
SRT
2704static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2705{
2706 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2707
2708 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2709
2710 /* command descriptor fields */
2711 ucd_req_ptr->header.dword_0 =
2712 UPIU_HEADER_DWORD(
2713 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
51047266
YG
2714 /* clear rest of the fields of basic header */
2715 ucd_req_ptr->header.dword_1 = 0;
2716 ucd_req_ptr->header.dword_2 = 0;
2717
2718 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2719}
2720
7a3e97b0 2721/**
f273c54b 2722 * ufshcd_compose_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
300bb13f 2723 * for Device Management Purposes
8aa29f19
BVA
2724 * @hba: per adapter instance
2725 * @lrbp: pointer to local reference block
7a3e97b0 2726 */
f273c54b
BH
2727static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2728 struct ufshcd_lrb *lrbp)
7a3e97b0 2729{
a23064c4 2730 u8 upiu_flags;
5a0b0cb9 2731 int ret = 0;
7a3e97b0 2732
51428818 2733 if (hba->ufs_version <= ufshci_version(1, 1))
300bb13f 2734 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
83dc7e3d 2735 else
2736 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f 2737
a4b1c9b9 2738 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
300bb13f
JP
2739 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2740 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2741 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2742 ufshcd_prepare_utp_nop_upiu(lrbp);
2743 else
2744 ret = -EINVAL;
2745
2746 return ret;
2747}
2748
2749/**
2750 * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
2751 * for SCSI Purposes
8aa29f19
BVA
2752 * @hba: per adapter instance
2753 * @lrbp: pointer to local reference block
300bb13f
JP
2754 */
2755static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2756{
a23064c4 2757 u8 upiu_flags;
300bb13f
JP
2758 int ret = 0;
2759
51428818 2760 if (hba->ufs_version <= ufshci_version(1, 1))
300bb13f 2761 lrbp->command_type = UTP_CMD_TYPE_SCSI;
83dc7e3d 2762 else
2763 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2764
2765 if (likely(lrbp->cmd)) {
a4b1c9b9 2766 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
300bb13f
JP
2767 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2768 } else {
2769 ret = -EINVAL;
2770 }
5a0b0cb9
SRT
2771
2772 return ret;
7a3e97b0
SY
2773}
2774
2a8fa600
SJ
2775/**
2776 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
8aa29f19 2777 * @upiu_wlun_id: UPIU W-LUN id
2a8fa600
SJ
2778 *
2779 * Returns SCSI W-LUN id
2780 */
2781static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2782{
2783 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2784}
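/*
 * Worked example (constant values are quoted from ufs.h/scsi headers as
 * understood here and should be treated as an assumption): for the device
 * well-known LUN, UFS_UPIU_UFS_DEVICE_WLUN == 0xD0, UFS_UPIU_WLUN_ID == BIT(7)
 * and SCSI_W_LUN_BASE == 0xc100, so this returns
 * (0xD0 & ~0x80) | 0xc100 == 0xc150, i.e. SCSI LUN 49488.
 */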
2785
b294ff3e
AD
2786static inline bool is_device_wlun(struct scsi_device *sdev)
2787{
2788 return sdev->lun ==
2789 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2790}
2791
eaab9b57
BVA
2792/*
2793 * Associate the UFS controller queue with the default and poll HCTX types.
2794 * Initialize the mq_map[] arrays.
2795 */
a4e1d0b7 2796static void ufshcd_map_queues(struct Scsi_Host *shost)
eaab9b57 2797{
0d33728f
AD
2798 struct ufs_hba *hba = shost_priv(shost);
2799 int i, queue_offset = 0;
2800
2801 if (!is_mcq_supported(hba)) {
2802 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2803 hba->nr_queues[HCTX_TYPE_READ] = 0;
2804 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2805 hba->nr_hw_queues = 1;
2806 }
eaab9b57
BVA
2807
2808 for (i = 0; i < shost->nr_maps; i++) {
2809 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2810
0d33728f
AD
2811 map->nr_queues = hba->nr_queues[i];
2812 if (!map->nr_queues)
10af1156 2813 continue;
0d33728f
AD
2814 map->queue_offset = queue_offset;
2815 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2816 map->queue_offset = 0;
2817
a4e1d0b7 2818 blk_mq_map_queues(map);
0d33728f 2819 queue_offset += map->nr_queues;
eaab9b57 2820 }
eaab9b57
BVA
2821}
2822
4d2b8d40
BVA
2823static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2824{
ada1e653
EB
2825 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2826 i * sizeof_utp_transfer_cmd_desc(hba);
4d2b8d40
BVA
2827 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2828 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
ada1e653 2829 i * sizeof_utp_transfer_cmd_desc(hba);
4d2b8d40
BVA
2830 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2831 response_upiu);
2832 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2833
2834 lrb->utr_descriptor_ptr = utrdlp + i;
2835 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2836 i * sizeof(struct utp_transfer_req_desc);
ada1e653 2837 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
4d2b8d40 2838 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
ada1e653 2839 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
4d2b8d40 2840 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
ada1e653 2841 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
4d2b8d40
BVA
2842 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2843}
2844
7a3e97b0
SY
2845/**
2846 * ufshcd_queuecommand - main entry point for SCSI requests
8aa29f19 2847 * @host: SCSI host pointer
7a3e97b0 2848 * @cmd: command from SCSI Midlayer
7a3e97b0
SY
2849 *
2850 * Returns 0 for success, non-zero in case of failure
2851 */
2852static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2853{
4728ab4a 2854 struct ufs_hba *hba = shost_priv(host);
3f2c1002 2855 int tag = scsi_cmd_to_rq(cmd)->tag;
7a3e97b0 2856 struct ufshcd_lrb *lrbp;
7a3e97b0 2857 int err = 0;
22a2d563 2858 struct ufs_hw_queue *hwq = NULL;
7a3e97b0 2859
eaab9b57 2860 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
7a3e97b0 2861
5675c381
BVA
2862 /*
2863 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
2864 * calls.
2865 */
2866 rcu_read_lock();
2867
a45f9371
CG
2868 switch (hba->ufshcd_state) {
2869 case UFSHCD_STATE_OPERATIONAL:
d489f18a 2870 break;
a45f9371 2871 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
d489f18a
AH
2872 /*
2873 * SCSI error handler can call ->queuecommand() while UFS error
2874 * handler is in progress. Error interrupts could change the
2875 * state from UFSHCD_STATE_RESET to
2876 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2877 * being issued in that case.
2878 */
2879 if (ufshcd_eh_in_progress(hba)) {
2880 err = SCSI_MLQUEUE_HOST_BUSY;
2881 goto out;
2882 }
a45f9371
CG
2883 break;
2884 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2885 /*
2886 * pm_runtime_get_sync() is used at error handling preparation
2887 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2888 * PM ops, it can never be finished if we let SCSI layer keep
2889 * retrying it, which gets err handler stuck forever. Neither
2890 * can we let the scsi cmd pass through, because UFS is in bad
2891 * state, the scsi cmd may eventually time out, which will get
2892 * err handler blocked for too long. So, just fail the scsi cmd
2893 * sent from PM ops, err handler can recover PM error anyways.
2894 */
2895 if (hba->pm_op_in_progress) {
2896 hba->force_reset = true;
2897 set_host_byte(cmd, DID_BAD_TARGET);
35c3730a 2898 scsi_done(cmd);
a45f9371
CG
2899 goto out;
2900 }
2901 fallthrough;
2902 case UFSHCD_STATE_RESET:
2903 err = SCSI_MLQUEUE_HOST_BUSY;
2904 goto out;
2905 case UFSHCD_STATE_ERROR:
2906 set_host_byte(cmd, DID_ERROR);
35c3730a 2907 scsi_done(cmd);
a45f9371 2908 goto out;
a45f9371
CG
2909 }
2910
7fabb77b
GB
2911 hba->req_abort_count = 0;
2912
1ab27c9c
ST
2913 err = ufshcd_hold(hba, true);
2914 if (err) {
2915 err = SCSI_MLQUEUE_HOST_BUSY;
1ab27c9c
ST
2916 goto out;
2917 }
2dec9475
CG
2918 WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2919 (hba->clk_gating.state != CLKS_ON));
1ab27c9c 2920
a45f9371 2921 lrbp = &hba->lrb[tag];
5a0b0cb9 2922 WARN_ON(lrbp->cmd);
7a3e97b0 2923 lrbp->cmd = cmd;
7a3e97b0 2924 lrbp->task_tag = tag;
0ce147d4 2925 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
51d1628f 2926 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
df043c74 2927
3f2c1002 2928 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
df043c74 2929
e0b299e3 2930 lrbp->req_abort_skip = false;
7a3e97b0 2931
09d9e4d0 2932 ufshpb_prep(hba, lrbp);
2fff76f8 2933
300bb13f
JP
2934 ufshcd_comp_scsi_upiu(hba, lrbp);
2935
75b1cc4a 2936 err = ufshcd_map_sg(hba, lrbp);
5a0b0cb9
SRT
2937 if (err) {
2938 lrbp->cmd = NULL;
17c7d35f 2939 ufshcd_release(hba);
7a3e97b0 2940 goto out;
5a0b0cb9 2941 }
7a3e97b0 2942
854f84e7
AD
2943 if (is_mcq_enabled(hba))
2944 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
2945
22a2d563 2946 ufshcd_send_command(hba, tag, hwq);
5675c381 2947
7a3e97b0 2948out:
5675c381
BVA
2949 rcu_read_unlock();
2950
88b09900
AH
2951 if (ufs_trigger_eh()) {
2952 unsigned long flags;
2953
2954 spin_lock_irqsave(hba->host->host_lock, flags);
2955 ufshcd_schedule_eh_work(hba);
2956 spin_unlock_irqrestore(hba->host->host_lock, flags);
2957 }
c11a1ae9 2958
7a3e97b0
SY
2959 return err;
2960}
2961
5a0b0cb9
SRT
2962static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2963 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2964{
2965 lrbp->cmd = NULL;
5a0b0cb9
SRT
2966 lrbp->task_tag = tag;
2967 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
5a0b0cb9 2968 lrbp->intr_cmd = true; /* No interrupt aggregation */
df043c74 2969 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
5a0b0cb9
SRT
2970 hba->dev_cmd.type = cmd_type;
2971
f273c54b 2972 return ufshcd_compose_devman_upiu(hba, lrbp);
5a0b0cb9
SRT
2973}
2974
d1a76446
BVA
2975/*
2976 * Clear all the requests from the controller for which a bit has been set in
2977 * @mask and wait until the controller confirms that these requests have been
2978 * cleared.
2979 */
2980static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
5a0b0cb9 2981{
5a0b0cb9 2982 unsigned long flags;
5a0b0cb9
SRT
2983
2984 /* clear outstanding transaction before retry */
2985 spin_lock_irqsave(hba->host->host_lock, flags);
d1a76446 2986 ufshcd_utrl_clear(hba, mask);
5a0b0cb9
SRT
2987 spin_unlock_irqrestore(hba->host->host_lock, flags);
2988
2989 /*
32424902 2990 * wait for h/w to clear corresponding bit in door-bell.
5a0b0cb9
SRT
2991 * max. wait is 1 sec.
2992 */
da8badd7
BVA
2993 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
2994 mask, ~mask, 1000, 1000);
5a0b0cb9
SRT
2995}
2996
c6d4a831
DR
2997static int
2998ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2999{
3000 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
3001
3002 /* Get the UPIU response */
3003 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
3004 UPIU_RSP_CODE_OFFSET;
3005 return query_res->response;
3006}
3007
5a0b0cb9
SRT
3008/**
3009 * ufshcd_dev_cmd_completion() - handles device management command responses
3010 * @hba: per adapter instance
3011 * @lrbp: pointer to local reference block
3012 */
3013static int
3014ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3015{
3016 int resp;
3017 int err = 0;
3018
ff8e20c6 3019 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
3020 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3021
3022 switch (resp) {
3023 case UPIU_TRANSACTION_NOP_IN:
3024 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3025 err = -EINVAL;
3026 dev_err(hba->dev, "%s: unexpected response %x\n",
3027 __func__, resp);
3028 }
3029 break;
68078d5c 3030 case UPIU_TRANSACTION_QUERY_RSP:
c6d4a831
DR
3031 err = ufshcd_check_query_response(hba, lrbp);
3032 if (!err)
3033 err = ufshcd_copy_query_response(hba, lrbp);
68078d5c 3034 break;
5a0b0cb9
SRT
3035 case UPIU_TRANSACTION_REJECT_UPIU:
3036 /* TODO: handle Reject UPIU Response */
3037 err = -EPERM;
3038 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3039 __func__);
3040 break;
6ff265fc
BH
3041 case UPIU_TRANSACTION_RESPONSE:
3042 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3043 err = -EINVAL;
3044 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3045 }
3046 break;
5a0b0cb9
SRT
3047 default:
3048 err = -EINVAL;
3049 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3050 __func__, resp);
3051 break;
3052 }
3053
3054 return err;
3055}
3056
3057static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3058 struct ufshcd_lrb *lrbp, int max_timeout)
3059{
f5c2976e 3060 unsigned long time_left = msecs_to_jiffies(max_timeout);
5a0b0cb9 3061 unsigned long flags;
f5c2976e
BVA
3062 bool pending;
3063 int err;
5a0b0cb9 3064
f5c2976e 3065retry:
5a0b0cb9 3066 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
f5c2976e 3067 time_left);
5a0b0cb9 3068
5a0b0cb9 3069 if (likely(time_left)) {
f5c2976e
BVA
3070 /*
3071 * The completion handler called complete() and the caller of
3072 * this function still owns the @lrbp tag so the code below does
3073 * not trigger any race conditions.
3074 */
3075 hba->dev_cmd.complete = NULL;
c30d8d01 3076 err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
5a0b0cb9
SRT
3077 if (!err)
3078 err = ufshcd_dev_cmd_completion(hba, lrbp);
f5c2976e 3079 } else {
5a0b0cb9 3080 err = -ETIMEDOUT;
a48353f6
YG
3081 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
3082 __func__, lrbp->task_tag);
f5c2976e 3083 if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) {
a48353f6 3084 /* successfully cleared the command, retry if needed */
5a0b0cb9 3085 err = -EAGAIN;
f5c2976e
BVA
3086 /*
3087 * Since clearing the command succeeded we also need to
3088 * clear the task tag bit from the outstanding_reqs
3089 * variable.
3090 */
3091 spin_lock_irqsave(&hba->outstanding_lock, flags);
3092 pending = test_bit(lrbp->task_tag,
3093 &hba->outstanding_reqs);
3094 if (pending) {
3095 hba->dev_cmd.complete = NULL;
3096 __clear_bit(lrbp->task_tag,
3097 &hba->outstanding_reqs);
3098 }
3099 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3100
3101 if (!pending) {
3102 /*
3103 * The completion handler ran while we tried to
3104 * clear the command.
3105 */
3106 time_left = 1;
3107 goto retry;
3108 }
3109 } else {
3110 dev_err(hba->dev, "%s: failed to clear tag %d\n",
3111 __func__, lrbp->task_tag);
3112 }
5a0b0cb9
SRT
3113 }
3114
3115 return err;
3116}
3117
5a0b0cb9
SRT
3118/**
3119 * ufshcd_exec_dev_cmd - API for sending device management requests
8aa29f19
BVA
3120 * @hba: UFS hba
3121 * @cmd_type: specifies the type (NOP, Query...)
d0b2b70e 3122 * @timeout: timeout in milliseconds
5a0b0cb9 3123 *
68078d5c
DR
3124 * NOTE: Since there is only one available tag for device management commands,
3125 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
5a0b0cb9
SRT
3126 */
3127static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3128 enum dev_cmd_type cmd_type, int timeout)
3129{
8a686f26 3130 DECLARE_COMPLETION_ONSTACK(wait);
945c3cca 3131 const u32 tag = hba->reserved_slot;
5a0b0cb9
SRT
3132 struct ufshcd_lrb *lrbp;
3133 int err;
5a0b0cb9 3134
945c3cca
BVA
3135 /* Protects use of hba->reserved_slot. */
3136 lockdep_assert_held(&hba->dev_cmd.lock);
a3cd5ec5 3137
945c3cca 3138 down_read(&hba->clk_scaling_lock);
5a0b0cb9 3139
a45f9371 3140 lrbp = &hba->lrb[tag];
5a0b0cb9
SRT
3141 WARN_ON(lrbp->cmd);
3142 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3143 if (unlikely(err))
eb783bb8 3144 goto out;
5a0b0cb9
SRT
3145
3146 hba->dev_cmd.complete = &wait;
22a2d563 3147 hba->dev_cmd.cqe = NULL;
5a0b0cb9 3148
fb475b74 3149 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
5a0b0cb9 3150
22a2d563 3151 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

out:
	up_read(&hba->clk_scaling_lock);
	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, u8 index, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

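/*
 * Illustrative sketch (not part of the driver): how a caller would read a
 * device flag through the retrying wrapper above. The helper name is
 * hypothetical; the opcode and flag idn constants come from ufs.h.
 */
static int __maybe_unused ufshcd_example_read_fdeviceinit(struct ufs_hba *hba)
{
	bool flag_res = false;
	int err;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
	if (err)
		return err;

	/* flag_res now holds the current fDeviceInit value */
	return flag_res ? 1 : 0;
}
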
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	ufshcd_hold(hba, false);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}

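/*
 * Illustrative sketch (not part of the driver): reading a single device
 * attribute with the retrying wrapper above. The helper name is hypothetical;
 * bActiveICCLevel is just a convenient example attribute from ufs.h.
 */
static int __maybe_unused ufshcd_example_read_icc_level(struct ufs_hba *hba,
							u32 *icc_level)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				       icc_level);
}
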
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		return -EINVAL;
	}

	ufshcd_hold(hba, false);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}

/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len = QUERY_DESC_MAX_SIZE;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kzalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0,
					    desc_buf, &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Update descriptor length */
	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];

	if (param_offset >= buff_len) {
		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
			__func__, param_offset, desc_id, buff_len);
		/* jump to out so that a temporary buffer is not leaked */
		ret = -EINVAL;
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	if (is_kmalloc) {
		/* Make sure we don't copy more data than available */
		if (param_offset >= buff_len)
			ret = -EINVAL;
		else
			memcpy(param_read_buf, &desc_buf[param_offset],
			       min_t(u32, param_size, buff_len - param_offset));
	}
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}

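/*
 * Illustrative sketch (not part of the driver): pulling one field out of the
 * device descriptor via ufshcd_read_desc_param(). The helper name is
 * hypothetical; DEVICE_DESC_PARAM_SPEC_VER comes from ufs.h and the field is
 * stored big-endian in the descriptor.
 */
static int __maybe_unused ufshcd_example_read_spec_version(struct ufs_hba *hba,
							   u16 *spec_version)
{
	__be16 raw;
	int err;

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				     DEVICE_DESC_PARAM_SPEC_VER,
				     (u8 *)&raw, sizeof(raw));
	if (!err)
		*spec_version = be16_to_cpu(raw);
	return err;
}
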
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}

/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		str[ret++] = '\0';

	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}

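/*
 * Illustrative sketch (not part of the driver): fetching a descriptor string
 * (e.g. the product name) as ASCII. The helper name is hypothetical and
 * @model_index stands for an index previously read from the device
 * descriptor; on success the returned buffer must be freed by the caller.
 */
static int __maybe_unused ufshcd_example_read_model(struct ufs_hba *hba,
						    u8 model_index, u8 **model)
{
	int ret = ufshcd_read_string_desc(hba, model_index, model, true);

	/* On success, *model points to a NUL-terminated kmalloc'ed string */
	return ret < 0 ? ret : 0;
}
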
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}

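/*
 * Illustrative sketch (not part of the driver): reading bLUQueueDepth for a
 * regular LU through the unit-descriptor helper above. The function name is
 * hypothetical; UNIT_DESC_PARAM_LU_Q_DEPTH comes from ufs.h.
 */
static int __maybe_unused ufshcd_example_read_lu_qdepth(struct ufs_hba *hba,
							int lun, u8 *qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
					   UNIT_DESC_PARAM_LU_Q_DEPTH,
					   qdepth, sizeof(*qdepth));
}
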
static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
	int err = 0;
	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;

	if (hba->dev_info.wspecversion >= 0x300) {
		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
				&gating_wait);
		if (err)
			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
					 err, gating_wait);

		if (gating_wait == 0) {
			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
					 gating_wait);
		}

		hba->dev_info.clk_gating_wait_us = gating_wait;
	}

	return err;
}

/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Skip utmrdl allocation; it may have been
	 * allocated during first pass and not released during
	 * MCQ memory allocation.
	 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;
	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

skip_utmrdl:
	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset >> 2);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset >> 2);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		ufshcd_init_lrb(hba, &hba->lrb[i], i);
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}
/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}

int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

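/*
 * Illustrative sketch (not part of the driver): a host driver would typically
 * call ufshcd_dme_configure_adapt() from its power-change notify path once
 * the gear has been agreed. The wrapper name is hypothetical;
 * PA_INITIAL_ADAPT comes from unipro.h.
 */
static int __maybe_unused ufshcd_example_configure_adapt(struct ufs_hba *hba,
							 int agreed_gear)
{
	return ufshcd_dme_configure_adapt(hba, agreed_gear, PA_INITIAL_ADAPT);
}
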
/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}

static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
			   orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);

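/*
 * Illustrative sketch (not part of the driver): reading a UniPro MIB
 * attribute through the ufshcd_dme_get() wrapper from ufshcd.h, which expands
 * to ufshcd_dme_get_attr() above. The helper name is hypothetical;
 * PA_ACTIVERXDATALANES comes from unipro.h.
 */
static int __maybe_unused ufshcd_example_dump_rx_lanes(struct ufs_hba *hba)
{
	u32 lanes = 0;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), &lanes);
	if (!ret)
		dev_dbg(hba->dev, "active RX data lanes: %u\n", lanes);
	return ret;
}
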
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 * state) and waits for them to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 * and device UniPro link, hence their final completion is indicated by
 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
 * in addition to the normal UIC command completion status (UCCS). This
 * function only returns after the relevant status bits indicate completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);

int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);

int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);

int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);

void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	unsigned long flags;
	bool update = false;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ahit != ahit) {
		hba->ahit = ahit;
		update = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (update &&
	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba, false);
		ufshcd_auto_hibern8_enable(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}

/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
				DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
				DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
				DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
				DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
				DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
				DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
				DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}

/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);

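/*
 * Illustrative sketch (not part of the driver): negotiating the fastest
 * supported power mode by combining ufshcd_get_max_pwr_mode() with
 * ufshcd_config_pwr_mode(). The helper name is hypothetical; the core does
 * essentially this from its probe path.
 */
static int __maybe_unused ufshcd_example_scale_to_max_gear(struct ufs_hba *hba)
{
	int ret = ufshcd_get_max_pwr_mode(hba);

	if (ret) {
		dev_err(hba->dev, "%s: Failed getting max supported power mode\n",
			__func__);
		return ret;
	}
	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}
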
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
				"%s: reading fDeviceInit flag failed with error %d\n",
				__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
				"%s: fDeviceInit was not cleared by the device\n",
				__func__);
		err = -EBUSY;
	}
out:
	return err;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);

/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);

/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry_outer = 3;
	int retry_inner;

start:
	if (ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

	/* wait for the host controller to complete initialization */
	retry_inner = 50;
	while (!ufshcd_is_hba_active(hba)) {
		if (retry_inner) {
			retry_inner--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			if (retry_outer) {
				retry_outer--;
				goto start;
			}
			return -EIO;
		}
		usleep_range(1000, 1100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);

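/*
 * Illustrative sketch (not part of the driver): a vendor driver can feed its
 * own events into the same history ring that ufshcd_print_evt_hist() dumps.
 * The helper name is hypothetical; UFS_EVT_DEV_RESET comes from ufshcd.h.
 */
static void __maybe_unused ufshcd_example_log_dev_reset(struct ufs_hba *hba,
							int err)
{
	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
}
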
7a3e97b0 4813/**
6ccf44fe 4814 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
4815 * @hba: per adapter instance
4816 *
6ccf44fe 4817 * Returns 0 for success, non-zero in case of failure
7a3e97b0 4818 */
6ccf44fe 4819static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 4820{
6ccf44fe 4821 int ret;
1d337ec2 4822 int retries = DME_LINKSTARTUP_RETRIES;
7caf489b 4823 bool link_startup_again = false;
7a3e97b0 4824
7caf489b 4825 /*
4826 * If UFS device isn't active then we will have to issue link startup
4827 * 2 times to make sure the device state move to active.
4828 */
4829 if (!ufshcd_is_ufs_dev_active(hba))
4830 link_startup_again = true;
7a3e97b0 4831
7caf489b 4832link_startup:
1d337ec2 4833 do {
0263bcd0 4834 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
6ccf44fe 4835
1d337ec2 4836 ret = ufshcd_dme_link_startup(hba);
5c0c28a8 4837
1d337ec2
SRT
4838 /* check if device is detected by inter-connect layer */
4839 if (!ret && !ufshcd_is_device_present(hba)) {
e965e5e0
SC
4840 ufshcd_update_evt_hist(hba,
4841 UFS_EVT_LINK_STARTUP_FAIL,
8808b4e9 4842 0);
1d337ec2
SRT
4843 dev_err(hba->dev, "%s: Device not present\n", __func__);
4844 ret = -ENXIO;
4845 goto out;
4846 }
6ccf44fe 4847
1d337ec2
SRT
4848 /*
4849 * DME link lost indication is only received when link is up,
4850 * but we can't be sure if the link is up until link startup
4851 * succeeds. So reset the local Uni-Pro and try again.
4852 */
174e909b 4853 if (ret && retries && ufshcd_hba_enable(hba)) {
e965e5e0
SC
4854 ufshcd_update_evt_hist(hba,
4855 UFS_EVT_LINK_STARTUP_FAIL,
8808b4e9 4856 (u32)ret);
1d337ec2 4857 goto out;
8808b4e9 4858 }
1d337ec2
SRT
4859 } while (ret && retries--);
4860
8808b4e9 4861 if (ret) {
1d337ec2 4862 /* failed to get the link up... retire */
e965e5e0
SC
4863 ufshcd_update_evt_hist(hba,
4864 UFS_EVT_LINK_STARTUP_FAIL,
8808b4e9 4865 (u32)ret);
5c0c28a8 4866 goto out;
8808b4e9 4867 }
5c0c28a8 4868
7caf489b 4869 if (link_startup_again) {
4870 link_startup_again = false;
4871 retries = DME_LINKSTARTUP_RETRIES;
4872 goto link_startup;
4873 }
4874
d2aebb9b 4875 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4876 ufshcd_init_pwr_info(hba);
4877 ufshcd_print_pwr_info(hba);
4878
7ca38cf3
YG
4879 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4880 ret = ufshcd_disable_device_tx_lcc(hba);
4881 if (ret)
4882 goto out;
4883 }
4884
5c0c28a8 4885 /* Include any host controller configuration via UIC commands */
0263bcd0
YG
4886 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4887 if (ret)
4888 goto out;
7a3e97b0 4889
2355b66e
CG
4890 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4891 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5c0c28a8 4892 ret = ufshcd_make_hba_operational(hba);
6ccf44fe 4893out:
7942f7b5 4894 if (ret) {
6ccf44fe 4895 dev_err(hba->dev, "link startup failed %d\n", ret);
7942f7b5
VG
4896 ufshcd_print_host_state(hba);
4897 ufshcd_print_pwr_info(hba);
e965e5e0 4898 ufshcd_print_evt_hist(hba);
7942f7b5 4899 }
6ccf44fe 4900 return ret;
7a3e97b0
SY
4901}
4902
5a0b0cb9
SRT
4903/**
4904 * ufshcd_verify_dev_init() - Verify device initialization
4905 * @hba: per-adapter instance
4906 *
4907 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4908 * device Transport Protocol (UTP) layer is ready after a reset.
4909 * If the UTP layer at the device side is not initialized, it may
4910 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4911 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4912 */
4913static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4914{
4915 int err = 0;
4916 int retries;
4917
1ab27c9c 4918 ufshcd_hold(hba, false);
5a0b0cb9
SRT
4919 mutex_lock(&hba->dev_cmd.lock);
4920 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4921 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1cbc9ad3 4922 hba->nop_out_timeout);
5a0b0cb9
SRT
4923
4924 if (!err || err == -ETIMEDOUT)
4925 break;
4926
4927 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4928 }
4929 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 4930 ufshcd_release(hba);
5a0b0cb9
SRT
4931
4932 if (err)
4933 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4934 return err;
4935}
4936
b294ff3e
AD
4937/**
4938 * ufshcd_setup_links - associate link b/w device wlun and other luns
4939 * @sdev: pointer to SCSI device
4940 * @hba: pointer to ufs hba
4941 */
4942static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4943{
4944 struct device_link *link;
4945
4946 /*
4947 * Device wlun is the supplier & rest of the luns are consumers.
4948 * This ensures that device wlun suspends after all other luns.
4949 */
e2106584 4950 if (hba->ufs_device_wlun) {
b294ff3e 4951 link = device_link_add(&sdev->sdev_gendev,
e2106584 4952 &hba->ufs_device_wlun->sdev_gendev,
b294ff3e
AD
4953 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4954 if (!link) {
4955 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
e2106584 4956 dev_name(&hba->ufs_device_wlun->sdev_gendev));
b294ff3e
AD
4957 return;
4958 }
4959 hba->luns_avail--;
4960 /* Ignore REPORT_LUN wlun probing */
4961 if (hba->luns_avail == 1) {
4962 ufshcd_rpm_put(hba);
4963 return;
4964 }
4965 } else {
4966 /*
4967 * Device wlun is probed. The assumption is that WLUNs are
4968 * scanned before other LUNs.
4969 */
4970 hba->luns_avail--;
4971 }
4972}
4973
dca899bc
BH
4974/**
4975 * ufshcd_lu_init - Initialize the relevant parameters of the LU
4976 * @hba: per-adapter instance
4977 * @sdev: pointer to SCSI device
4978 */
4979static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
4980{
f2a89b07 4981 int len = QUERY_DESC_MAX_SIZE;
dca899bc
BH
4982 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
4983 u8 lun_qdepth = hba->nutrs;
4984 u8 *desc_buf;
4985 int ret;
4986
4987 desc_buf = kzalloc(len, GFP_KERNEL);
4988 if (!desc_buf)
4989 goto set_qdepth;
4990
4991 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
4992 if (ret < 0) {
4993 if (ret == -EOPNOTSUPP)
4994 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
4995 lun_qdepth = 1;
4996 kfree(desc_buf);
4997 goto set_qdepth;
4998 }
4999
5000 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5001 /*
5002 * In the per-LU queueing architecture, bLUQueueDepth will not be 0, so we
5003 * use the smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth.
5004 */
5005 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5006 }
5007 /*
5008 * According to UFS device specification, the write protection mode is only supported by
5009 * normal LU, not supported by WLUN.
5010 */
5011 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5012 !hba->dev_info.is_lu_power_on_wp &&
5013 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5014 hba->dev_info.is_lu_power_on_wp = true;
5015
f6b9d0fe
BH
5016 /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5017 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5018 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5019 hba->dev_info.b_advanced_rpmb_en = true;
5020
5021
dca899bc
BH
5022 kfree(desc_buf);
5023set_qdepth:
5024 /*
5025 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
5026 * bLUQueueDepth == 0, the queue depth is set to the maximum value that the host can queue.
5027 */
5028 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5029 scsi_change_queue_depth(sdev, lun_qdepth);
5030}
5031
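
/*
 * Editorial illustration only (hypothetical helper, not in the driver): the
 * queue-depth selection in ufshcd_lu_init() above boils down to clamping the
 * LU's advertised bLUQueueDepth to the controller limit, with 0 meaning "no
 * per-LU limit advertised".
 */
static inline int ufshcd_example_clamp_qdepth(u8 blu_queue_depth, int nutrs)
{
	/* 0 means the LU did not advertise a per-LU queue depth */
	return blu_queue_depth ? min_t(int, blu_queue_depth, nutrs) : nutrs;
}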
7a3e97b0
SY
5032/**
5033 * ufshcd_slave_alloc - handle initial SCSI device configurations
5034 * @sdev: pointer to SCSI device
5035 *
5036 * Returns 0 (success).
5037 */
5038static int ufshcd_slave_alloc(struct scsi_device *sdev)
5039{
5040 struct ufs_hba *hba;
5041
5042 hba = shost_priv(sdev->host);
7a3e97b0
SY
5043
5044 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5045 sdev->use_10_for_ms = 1;
a3a76391
CG
5046
5047 /* DBD field should be set to 1 in mode sense(10) */
5048 sdev->set_dbd_for_ms = 1;
7a3e97b0 5049
e8e7f271
SRT
5050 /* allow SCSI layer to restart the device in case of errors */
5051 sdev->allow_restart = 1;
4264fd61 5052
b2a6c522
SRT
5053 /* REPORT SUPPORTED OPERATION CODES is not supported */
5054 sdev->no_report_opcodes = 1;
5055
84af7e8b
SRT
5056 /* WRITE_SAME command is not supported */
5057 sdev->no_write_same = 1;
e8e7f271 5058
dca899bc 5059 ufshcd_lu_init(hba, sdev);
57d104c1 5060
b294ff3e
AD
5061 ufshcd_setup_links(hba, sdev);
5062
7a3e97b0
SY
5063 return 0;
5064}
5065
4264fd61
SRT
5066/**
5067 * ufshcd_change_queue_depth - change queue depth
5068 * @sdev: pointer to SCSI device
5069 * @depth: required depth to set
4264fd61 5070 *
db5ed4df 5071 * Change queue depth and make sure the max. limits are not crossed.
4264fd61 5072 */
db5ed4df 5073static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4264fd61 5074{
fc21da8a 5075 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
4264fd61
SRT
5076}
5077
f02bc975
DP
5078static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
5079{
5080 /* skip well-known LU */
41d8a933
DP
5081 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
5082 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
f02bc975
DP
5083 return;
5084
5085 ufshpb_destroy_lu(hba, sdev);
5086}
5087
5088static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
5089{
5090 /* skip well-known LU */
5091 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
5092 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
5093 return;
5094
5095 ufshpb_init_hpb_lu(hba, sdev);
5096}
5097
eeda4749
AM
5098/**
5099 * ufshcd_slave_configure - adjust SCSI device configurations
5100 * @sdev: pointer to SCSI device
5101 */
5102static int ufshcd_slave_configure(struct scsi_device *sdev)
5103{
49615ba1 5104 struct ufs_hba *hba = shost_priv(sdev->host);
eeda4749
AM
5105 struct request_queue *q = sdev->request_queue;
5106
f02bc975
DP
5107 ufshcd_hpb_configure(hba, sdev);
5108
eeda4749 5109 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
2b2bfc8a
KK
5110 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
5111 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
b294ff3e
AD
5112 /*
5113 * Block runtime-pm until all consumers are added.
5114 * Refer to ufshcd_setup_links().
5115 */
5116 if (is_device_wlun(sdev))
5117 pm_runtime_get_noresume(&sdev->sdev_gendev);
5118 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
49615ba1 5119 sdev->rpm_autosuspend = 1;
71bb9ab6
AH
5120 /*
5121 * Do not print messages during runtime PM to avoid never-ending cycles
5122 * of messages written back to storage by user space, which causes a runtime
5123 * resume, more messages, and so on.
5124 */
5125 sdev->silence_suspend = 1;
49615ba1 5126
cb77cb5a 5127 ufshcd_crypto_register(hba, q);
df043c74 5128
eeda4749
AM
5129 return 0;
5130}
5131
7a3e97b0
SY
5132/**
5133 * ufshcd_slave_destroy - remove SCSI device configurations
5134 * @sdev: pointer to SCSI device
5135 */
5136static void ufshcd_slave_destroy(struct scsi_device *sdev)
5137{
5138 struct ufs_hba *hba;
bf25967a 5139 unsigned long flags;
7a3e97b0
SY
5140
5141 hba = shost_priv(sdev->host);
f02bc975
DP
5142
5143 ufshcd_hpb_destroy(hba, sdev);
5144
0ce147d4 5145 /* Drop the reference as it won't be needed anymore */
7c48bfd0 5146 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
7c48bfd0 5147 spin_lock_irqsave(hba->host->host_lock, flags);
e2106584 5148 hba->ufs_device_wlun = NULL;
7c48bfd0 5149 spin_unlock_irqrestore(hba->host->host_lock, flags);
e2106584 5150 } else if (hba->ufs_device_wlun) {
bf25967a
AH
5151 struct device *supplier = NULL;
5152
5153 /* Ensure UFS Device WLUN exists and does not disappear */
5154 spin_lock_irqsave(hba->host->host_lock, flags);
e2106584
BVA
5155 if (hba->ufs_device_wlun) {
5156 supplier = &hba->ufs_device_wlun->sdev_gendev;
bf25967a
AH
5157 get_device(supplier);
5158 }
5159 spin_unlock_irqrestore(hba->host->host_lock, flags);
5160
5161 if (supplier) {
5162 /*
5163 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5164 * device will not have been registered but can still
5165 * have a device link holding a reference to the device.
5166 */
5167 device_link_remove(&sdev->sdev_gendev, supplier);
5168 put_device(supplier);
5169 }
7c48bfd0 5170 }
7a3e97b0
SY
5171}
5172
7a3e97b0
SY
5173/**
5174 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
8aa29f19 5175 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
5176 * @scsi_status: SCSI command status
5177 *
5178 * Returns a value based on the SCSI command status
5179 */
5180static inline int
5181ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5182{
5183 int result = 0;
5184
5185 switch (scsi_status) {
7a3e97b0 5186 case SAM_STAT_CHECK_CONDITION:
1c2623c5 5187 ufshcd_copy_sense_data(lrbp);
df561f66 5188 fallthrough;
1c2623c5 5189 case SAM_STAT_GOOD:
db83d8a5 5190 result |= DID_OK << 16 | scsi_status;
7a3e97b0
SY
5191 break;
5192 case SAM_STAT_TASK_SET_FULL:
1c2623c5 5193 case SAM_STAT_BUSY:
7a3e97b0 5194 case SAM_STAT_TASK_ABORTED:
1c2623c5
SJ
5195 ufshcd_copy_sense_data(lrbp);
5196 result |= scsi_status;
7a3e97b0
SY
5197 break;
5198 default:
5199 result |= DID_ERROR << 16;
5200 break;
5201 } /* end of switch */
5202
5203 return result;
5204}
5205
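
/*
 * Editorial illustration only (hypothetical helper): the result word built in
 * ufshcd_scsi_cmd_status() above packs the SCSI status in the low byte and
 * the host byte in bits 23:16, e.g. DID_OK << 16 | SAM_STAT_GOOD.
 */
static inline int ufshcd_example_pack_result(int host_byte, int scsi_status)
{
	return (host_byte << 16) | scsi_status;
}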
5206/**
5207 * ufshcd_transfer_rsp_status - Get overall status of the response
5208 * @hba: per adapter instance
8aa29f19 5209 * @lrbp: pointer to local reference block of completed command
c30d8d01 5210 * @cqe: pointer to the completion queue entry
7a3e97b0
SY
5211 *
5212 * Returns result of the command to notify SCSI midlayer
5213 */
5214static inline int
c30d8d01
AD
5215ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5216 struct cq_entry *cqe)
7a3e97b0
SY
5217{
5218 int result = 0;
5219 int scsi_status;
957d63e7 5220 enum utp_ocs ocs;
7a3e97b0
SY
5221
5222 /* overall command status of utrd */
c30d8d01 5223 ocs = ufshcd_get_tr_ocs(lrbp, cqe);
7a3e97b0 5224
d779a6e9
KK
5225 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5226 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5227 MASK_RSP_UPIU_RESULT)
5228 ocs = OCS_SUCCESS;
5229 }
5230
7a3e97b0
SY
5231 switch (ocs) {
5232 case OCS_SUCCESS:
5a0b0cb9 5233 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
ff8e20c6 5234 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
5235 switch (result) {
5236 case UPIU_TRANSACTION_RESPONSE:
5237 /*
5238 * get the response UPIU result to extract
5239 * the SCSI command status
5240 */
5241 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5242
5243 /*
5244 * get the result based on SCSI status response
5245 * to notify the SCSI midlayer of the command status
5246 */
5247 scsi_status = result & MASK_SCSI_STATUS;
5248 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
66ec6d59 5249
f05ac2e5
YG
5250 /*
5251 * Currently we only support the BKOPs exception
5252 * event, hence we can ignore BKOPs exception events
5253 * during power management callbacks. A BKOPs exception
5254 * event is not expected to be raised in the runtime suspend
5255 * callback as it allows urgent bkops.
5256 * During system suspend we forcefully disable bkops
5257 * anyway, and if urgent bkops is needed
5258 * it will be enabled on system resume. A long-term
5259 * solution could be to abort the system suspend if the
5260 * UFS device needs urgent BKOPs.
5261 */
5262 if (!hba->pm_op_in_progress &&
aa53f580 5263 !ufshcd_eh_in_progress(hba) &&
b294ff3e
AD
5264 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5265 /* Flushed in suspend */
5266 schedule_work(&hba->eeh_work);
4b5f4907
DP
5267
5268 if (scsi_status == SAM_STAT_GOOD)
5269 ufshpb_rsp_upiu(hba, lrbp);
5a0b0cb9
SRT
5270 break;
5271 case UPIU_TRANSACTION_REJECT_UPIU:
5272 /* TODO: handle Reject UPIU Response */
5273 result = DID_ERROR << 16;
3b1d0580 5274 dev_err(hba->dev,
5a0b0cb9
SRT
5275 "Reject UPIU not fully implemented\n");
5276 break;
5277 default:
5a0b0cb9
SRT
5278 dev_err(hba->dev,
5279 "Unexpected request response code = %x\n",
5280 result);
e0347d89 5281 result = DID_ERROR << 16;
7a3e97b0
SY
5282 break;
5283 }
7a3e97b0
SY
5284 break;
5285 case OCS_ABORTED:
5286 result |= DID_ABORT << 16;
5287 break;
e8e7f271
SRT
5288 case OCS_INVALID_COMMAND_STATUS:
5289 result |= DID_REQUEUE << 16;
5290 break;
7a3e97b0
SY
5291 case OCS_INVALID_CMD_TABLE_ATTR:
5292 case OCS_INVALID_PRDT_ATTR:
5293 case OCS_MISMATCH_DATA_BUF_SIZE:
5294 case OCS_MISMATCH_RESP_UPIU_SIZE:
5295 case OCS_PEER_COMM_FAILURE:
5296 case OCS_FATAL_ERROR:
5e7341e1
ST
5297 case OCS_DEVICE_FATAL_ERROR:
5298 case OCS_INVALID_CRYPTO_CONFIG:
5299 case OCS_GENERAL_CRYPTO_ERROR:
7a3e97b0
SY
5300 default:
5301 result |= DID_ERROR << 16;
3b1d0580 5302 dev_err(hba->dev,
ff8e20c6
DR
5303 "OCS error from controller = %x for tag %d\n",
5304 ocs, lrbp->task_tag);
e965e5e0 5305 ufshcd_print_evt_hist(hba);
6ba65588 5306 ufshcd_print_host_state(hba);
7a3e97b0
SY
5307 break;
5308 } /* end of switch */
5309
eeb1b55b
JK
5310 if ((host_byte(result) != DID_OK) &&
5311 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
66cc820f 5312 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
7a3e97b0
SY
5313 return result;
5314}
5315
a45f9371
CG
5316static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5317 u32 intr_mask)
5318{
5319 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5320 !ufshcd_is_auto_hibern8_enabled(hba))
5321 return false;
5322
5323 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5324 return false;
5325
5326 if (hba->active_uic_cmd &&
5327 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5328 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5329 return false;
5330
5331 return true;
5332}
5333
6ccf44fe
SJ
5334/**
5335 * ufshcd_uic_cmd_compl - handle completion of uic command
5336 * @hba: per adapter instance
53b3d9c3 5337 * @intr_status: interrupt status generated by the controller
9333d775
VG
5338 *
5339 * Returns
5340 * IRQ_HANDLED - If interrupt is valid
5341 * IRQ_NONE - If invalid interrupt
6ccf44fe 5342 */
9333d775 5343static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 5344{
9333d775
VG
5345 irqreturn_t retval = IRQ_NONE;
5346
a45f9371
CG
5347 spin_lock(hba->host->host_lock);
5348 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5349 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5350
53b3d9c3 5351 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
5352 hba->active_uic_cmd->argument2 |=
5353 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
5354 hba->active_uic_cmd->argument3 =
5355 ufshcd_get_dme_attr_val(hba);
0f52fcb9
CG
5356 if (!hba->uic_async_done)
5357 hba->active_uic_cmd->cmd_active = 0;
6ccf44fe 5358 complete(&hba->active_uic_cmd->done);
9333d775 5359 retval = IRQ_HANDLED;
6ccf44fe 5360 }
53b3d9c3 5361
9333d775 5362 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
0f52fcb9 5363 hba->active_uic_cmd->cmd_active = 0;
57d104c1 5364 complete(hba->uic_async_done);
9333d775
VG
5365 retval = IRQ_HANDLED;
5366 }
aa5c6979
SC
5367
5368 if (retval == IRQ_HANDLED)
5369 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
28fa68fc 5370 UFS_CMD_COMP);
a45f9371 5371 spin_unlock(hba->host->host_lock);
9333d775 5372 return retval;
6ccf44fe
SJ
5373}
5374
6f8dafde
BVA
5375/* Release the resources allocated for processing a SCSI command. */
5376static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5377 struct ufshcd_lrb *lrbp)
5378{
5379 struct scsi_cmnd *cmd = lrbp->cmd;
5380
5381 scsi_dma_unmap(cmd);
5382 lrbp->cmd = NULL; /* Mark the command as completed. */
5383 ufshcd_release(hba);
5384 ufshcd_clk_scaling_update_busy(hba);
5385}
5386
7a3e97b0 5387/**
c30d8d01 5388 * ufshcd_compl_one_cqe - handle a completion queue entry
7a3e97b0 5389 * @hba: per adapter instance
c30d8d01
AD
5390 * @task_tag: the task tag of the request to be completed
5391 * @cqe: pointer to the completion queue entry
7a3e97b0 5392 */
c30d8d01
AD
5393void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5394 struct cq_entry *cqe)
7a3e97b0 5395{
5a0b0cb9
SRT
5396 struct ufshcd_lrb *lrbp;
5397 struct scsi_cmnd *cmd;
c30d8d01
AD
5398
5399 lrbp = &hba->lrb[task_tag];
5400 lrbp->compl_time_stamp = ktime_get();
5401 cmd = lrbp->cmd;
5402 if (cmd) {
5403 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5404 ufshcd_update_monitor(hba, lrbp);
5405 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5406 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5407 ufshcd_release_scsi_cmd(hba, lrbp);
5408 /* Do not touch lrbp after scsi done */
5409 scsi_done(cmd);
5410 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5411 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5412 if (hba->dev_cmd.complete) {
5413 hba->dev_cmd.cqe = cqe;
5414 ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
5415 complete(hba->dev_cmd.complete);
5416 ufshcd_clk_scaling_update_busy(hba);
e9d501b1
DR
5417 }
5418 }
7a3e97b0
SY
5419}
5420
c30d8d01
AD
5421/**
5422 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5423 * @hba: per adapter instance
5424 * @completed_reqs: bitmask that indicates which requests to complete
5425 */
5426static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5427 unsigned long completed_reqs)
5428{
5429 int tag;
5430
5431 for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5432 ufshcd_compl_one_cqe(hba, tag, NULL);
5433}
5434
ee8c88ca
BVA
5435/* Any value that is not an existing queue number is fine for this constant. */
5436enum {
5437 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5438};
5439
5440static void ufshcd_clear_polled(struct ufs_hba *hba,
5441 unsigned long *completed_reqs)
5442{
5443 int tag;
5444
5445 for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5446 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5447
5448 if (!cmd)
5449 continue;
5450 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5451 __clear_bit(tag, completed_reqs);
5452 }
5453}
5454
eaab9b57
BVA
5455/*
5456 * Returns > 0 if one or more commands have been completed or 0 if no
5457 * requests have been completed.
5458 */
5459static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5460{
5461 struct ufs_hba *hba = shost_priv(shost);
5462 unsigned long completed_reqs, flags;
5463 u32 tr_doorbell;
ed975065
AD
5464 struct ufs_hw_queue *hwq;
5465
5466 if (is_mcq_enabled(hba)) {
5467 hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
5468
5469 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5470 }
eaab9b57
BVA
5471
5472 spin_lock_irqsave(&hba->outstanding_lock, flags);
5473 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5474 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5475 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5476 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5477 hba->outstanding_reqs);
ee8c88ca
BVA
5478 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5479 /* Do not complete polled requests from interrupt context. */
5480 ufshcd_clear_polled(hba, &completed_reqs);
5481 }
eaab9b57
BVA
5482 hba->outstanding_reqs &= ~completed_reqs;
5483 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5484
5485 if (completed_reqs)
5486 __ufshcd_transfer_req_compl(hba, completed_reqs);
5487
ee8c88ca 5488 return completed_reqs != 0;
eaab9b57
BVA
5489}
5490
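
/*
 * Editorial illustration only (hypothetical helper): in the legacy (non-MCQ)
 * path of ufshcd_poll() above, completion detection is a pure bit operation.
 * A request is complete when the controller has cleared its doorbell bit
 * while the tag is still marked outstanding.
 */
static inline unsigned long
ufshcd_example_completed_mask(u32 tr_doorbell, unsigned long outstanding_reqs)
{
	return ~(unsigned long)tr_doorbell & outstanding_reqs;
}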
9a47ec7c 5491/**
1f522c50 5492 * ufshcd_transfer_req_compl - handle SCSI and query command completion
9a47ec7c 5493 * @hba: per adapter instance
9333d775
VG
5494 *
5495 * Returns
5496 * IRQ_HANDLED - If interrupt is valid
5497 * IRQ_NONE - If invalid interrupt
9a47ec7c 5498 */
11682523 5499static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
9a47ec7c 5500{
9a47ec7c
YG
5501 /* Resetting interrupt aggregation counters first and reading the
5502 * DOOR_BELL afterward allows us to handle all the completed requests.
5503 * In order to prevent starvation of other interrupts, the DB is read once
5504 * after reset. The downside of this solution is the possibility of
5505 * a false interrupt if the device completes another request after resetting
5506 * aggregation and before reading the DB.
5507 */
b638b5eb
AA
5508 if (ufshcd_is_intr_aggr_allowed(hba) &&
5509 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
9a47ec7c
YG
5510 ufshcd_reset_intr_aggr(hba);
5511
c11a1ae9
BVA
5512 if (ufs_fail_completion())
5513 return IRQ_HANDLED;
5514
eaab9b57
BVA
5515 /*
5516 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5517 * do not want polling to trigger spurious interrupt complaints.
5518 */
ee8c88ca 5519 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
9a47ec7c 5520
eaab9b57 5521 return IRQ_HANDLED;
9a47ec7c
YG
5522}
5523
7deedfda 5524int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
cd469475
AH
5525{
5526 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5527 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5528 &ee_ctrl_mask);
5529}
5530
7deedfda 5531int ufshcd_write_ee_control(struct ufs_hba *hba)
cd469475
AH
5532{
5533 int err;
5534
5535 mutex_lock(&hba->ee_ctrl_mutex);
5536 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5537 mutex_unlock(&hba->ee_ctrl_mutex);
5538 if (err)
5539 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5540 __func__, err);
5541 return err;
5542}
5543
35d11ec2
KK
5544int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5545 const u16 *other_mask, u16 set, u16 clr)
cd469475
AH
5546{
5547 u16 new_mask, ee_ctrl_mask;
5548 int err = 0;
5549
5550 mutex_lock(&hba->ee_ctrl_mutex);
5551 new_mask = (*mask & ~clr) | set;
5552 ee_ctrl_mask = new_mask | *other_mask;
5553 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5554 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5555 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5556 if (!err) {
5557 hba->ee_ctrl_mask = ee_ctrl_mask;
5558 *mask = new_mask;
5559 }
5560 mutex_unlock(&hba->ee_ctrl_mutex);
5561 return err;
5562}
5563
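
/*
 * Editorial illustration only -- an assumption, not the real ufshcd-priv.h
 * helper. A driver-owned wrapper such as ufshcd_update_ee_drv_mask() (used by
 * ufshcd_enable_ee()/ufshcd_disable_ee() below) would plausibly forward to
 * ufshcd_update_ee_control() with the driver mask as @mask and the remaining
 * (user) mask as @other_mask; the field name ee_usr_mask is assumed here.
 */
static inline int ufshcd_example_update_drv_mask(struct ufs_hba *hba,
						 u16 set, u16 clr)
{
	return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
					&hba->ee_usr_mask, set, clr);
}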
66ec6d59
SRT
5564/**
5565 * ufshcd_disable_ee - disable exception event
5566 * @hba: per-adapter instance
5567 * @mask: exception event to disable
5568 *
5569 * Disables exception event in the device so that the EVENT_ALERT
5570 * bit is not set.
5571 *
5572 * Returns zero on success, non-zero error value on failure.
5573 */
cd469475 5574static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
66ec6d59 5575{
cd469475 5576 return ufshcd_update_ee_drv_mask(hba, 0, mask);
66ec6d59
SRT
5577}
5578
5579/**
5580 * ufshcd_enable_ee - enable exception event
5581 * @hba: per-adapter instance
5582 * @mask: exception event to enable
5583 *
5584 * Enable corresponding exception event in the device to allow
5585 * device to alert host in critical scenarios.
5586 *
5587 * Returns zero on success, non-zero error value on failure.
5588 */
cd469475 5589static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
66ec6d59 5590{
cd469475 5591 return ufshcd_update_ee_drv_mask(hba, mask, 0);
66ec6d59
SRT
5592}
5593
5594/**
5595 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5596 * @hba: per-adapter instance
5597 *
5598 * Allow device to manage background operations on its own. Enabling
5599 * this might lead to inconsistent latencies during normal data transfers
5600 * as the device is allowed to manage its own way of handling background
5601 * operations.
5602 *
5603 * Returns zero on success, non-zero on failure.
5604 */
5605static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5606{
5607 int err = 0;
5608
5609 if (hba->auto_bkops_enabled)
5610 goto out;
5611
dc3c8d3a 5612 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1f34eedf 5613 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
66ec6d59
SRT
5614 if (err) {
5615 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5616 __func__, err);
5617 goto out;
5618 }
5619
5620 hba->auto_bkops_enabled = true;
7ff5ab47 5621 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
66ec6d59
SRT
5622
5623 /* No need of URGENT_BKOPS exception from the device */
5624 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5625 if (err)
5626 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5627 __func__, err);
5628out:
5629 return err;
5630}
5631
5632/**
5633 * ufshcd_disable_auto_bkops - block the device from doing background operations
5634 * @hba: per-adapter instance
5635 *
5636 * Disabling background operations improves command response latency but
5637 * has the drawback that the device may move into a critical state where it
5638 * is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5639 * host is idle so that BKOPS are managed effectively without any negative
5640 * impacts.
5641 *
5642 * Returns zero on success, non-zero on failure.
5643 */
5644static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5645{
5646 int err = 0;
5647
5648 if (!hba->auto_bkops_enabled)
5649 goto out;
5650
5651 /*
5652 * If host assisted BKOPs is to be enabled, make sure
5653 * urgent bkops exception is allowed.
5654 */
5655 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5656 if (err) {
5657 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5658 __func__, err);
5659 goto out;
5660 }
5661
dc3c8d3a 5662 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
1f34eedf 5663 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
66ec6d59
SRT
5664 if (err) {
5665 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5666 __func__, err);
5667 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5668 goto out;
5669 }
5670
5671 hba->auto_bkops_enabled = false;
7ff5ab47 5672 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
24366c2a 5673 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5674out:
5675 return err;
5676}
5677
5678/**
4e768e76 5679 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
66ec6d59
SRT
5680 * @hba: per adapter instance
5681 *
5682 * After a device reset, the device may toggle the BKOPS_EN flag
5683 * to its default value. The s/w tracking variables should be updated
5684 * as well. This function changes the auto-bkops state based on
5685 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
66ec6d59 5686 */
4e768e76 5687static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
66ec6d59 5688{
4e768e76 5689 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5690 hba->auto_bkops_enabled = false;
5691 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5692 ufshcd_enable_auto_bkops(hba);
5693 } else {
5694 hba->auto_bkops_enabled = true;
5695 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5696 ufshcd_disable_auto_bkops(hba);
5697 }
7b6668d8 5698 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
24366c2a 5699 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5700}
5701
5702static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5703{
5e86ae44 5704 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5705 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5706}
5707
5708/**
57d104c1 5709 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
66ec6d59 5710 * @hba: per-adapter instance
57d104c1 5711 * @status: bkops_status value
66ec6d59 5712 *
57d104c1
SJ
5713 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5714 * flag in the device to permit background operations if the device
5715 * bkops_status is greater than or equal to the "status" argument passed to
5716 * this function; disable it otherwise.
5717 *
5718 * Returns 0 for success, non-zero in case of failure.
5719 *
5720 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5721 * to know whether auto bkops is enabled or disabled after this function
5722 * returns control to it.
66ec6d59 5723 */
57d104c1
SJ
5724static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5725 enum bkops_status status)
66ec6d59
SRT
5726{
5727 int err;
57d104c1 5728 u32 curr_status = 0;
66ec6d59 5729
57d104c1 5730 err = ufshcd_get_bkops_status(hba, &curr_status);
66ec6d59
SRT
5731 if (err) {
5732 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5733 __func__, err);
5734 goto out;
57d104c1
SJ
5735 } else if (curr_status > BKOPS_STATUS_MAX) {
5736 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5737 __func__, curr_status);
5738 err = -EINVAL;
5739 goto out;
66ec6d59
SRT
5740 }
5741
57d104c1 5742 if (curr_status >= status)
66ec6d59 5743 err = ufshcd_enable_auto_bkops(hba);
57d104c1
SJ
5744 else
5745 err = ufshcd_disable_auto_bkops(hba);
66ec6d59
SRT
5746out:
5747 return err;
5748}
5749
57d104c1
SJ
5750/**
5751 * ufshcd_urgent_bkops - handle urgent bkops exception event
5752 * @hba: per-adapter instance
5753 *
5754 * Enable fBackgroundOpsEn flag in the device to permit background
5755 * operations.
5756 *
5757 * Returns 0 if BKOPs is enabled, 1 if BKOPs is not enabled, and a negative
5758 * error value for any other failure.
5759 */
5760static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5761{
afdfff59 5762 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
57d104c1
SJ
5763}
5764
66ec6d59
SRT
5765static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5766{
5e86ae44 5767 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5768 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5769}
5770
afdfff59
YG
5771static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5772{
5773 int err;
5774 u32 curr_status = 0;
5775
5776 if (hba->is_urgent_bkops_lvl_checked)
5777 goto enable_auto_bkops;
5778
5779 err = ufshcd_get_bkops_status(hba, &curr_status);
5780 if (err) {
5781 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5782 __func__, err);
5783 goto out;
5784 }
5785
5786 /*
5787 * We are seeing that some devices raise the urgent bkops
5788 * exception event even when the BKOPS status doesn't indicate performance
5789 * impacted or critical. Handle these devices by determining their urgent
5790 * bkops status at runtime.
5791 */
5792 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5793 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5794 __func__, curr_status);
5795 /* update the current status as the urgent bkops level */
5796 hba->urgent_bkops_lvl = curr_status;
5797 hba->is_urgent_bkops_lvl_checked = true;
5798 }
5799
5800enable_auto_bkops:
5801 err = ufshcd_enable_auto_bkops(hba);
5802out:
5803 if (err < 0)
5804 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5805 __func__, err);
5806}
5807
322c4b29
AA
5808static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5809{
5810 u32 value;
5811
5812 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5813 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5814 return;
5815
5816 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5817
5818 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5819
5820 /*
5821 * A placeholder for the platform vendors to add whatever additional
5822 * steps required
5823 */
5824}
5825
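
/*
 * Editorial illustration only (hypothetical helper): the rough-temperature
 * attribute read in ufshcd_temp_exception_event_handler() above is reported
 * with a +80 offset, so the conversion to degrees Celsius used for the
 * dev_info() message is a plain subtraction.
 */
static inline int ufshcd_example_tcase_to_celsius(u32 attr_val)
{
	return (int)attr_val - 80;
}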
3b5f3c0d 5826static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
3d17b9b5 5827{
6f8d5a6a 5828 u8 index;
3b5f3c0d
YH
5829 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5830 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5831
5832 index = ufshcd_wb_get_query_index(hba);
5833 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5834}
5835
5836int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5837{
5838 int ret;
3d17b9b5 5839
f8dc7a31
JC
5840 if (!ufshcd_is_wb_allowed(hba) ||
5841 hba->dev_info.wb_enabled == enable)
3d17b9b5 5842 return 0;
3d17b9b5 5843
3b5f3c0d 5844 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
3d17b9b5 5845 if (ret) {
4f6b69f3
JC
5846 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
5847 __func__, enable ? "enabling" : "disabling", ret);
3d17b9b5
AD
5848 return ret;
5849 }
5850
4cd48995 5851 hba->dev_info.wb_enabled = enable;
4f6b69f3 5852 dev_dbg(hba->dev, "%s: Write Booster %s\n",
3b5f3c0d 5853 __func__, enable ? "enabled" : "disabled");
3d17b9b5
AD
5854
5855 return ret;
5856}
5857
4450a165
JC
5858static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
5859 bool enable)
3d17b9b5 5860{
3b5f3c0d 5861 int ret;
3d17b9b5 5862
4450a165 5863 ret = __ufshcd_wb_toggle(hba, enable,
3b5f3c0d
YH
5864 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5865 if (ret) {
4f6b69f3
JC
5866 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
5867 __func__, enable ? "enabling" : "disabling", ret);
3b5f3c0d
YH
5868 return;
5869 }
4f6b69f3 5870 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
4450a165 5871 __func__, enable ? "enabled" : "disabled");
3d17b9b5
AD
5872}
5873
6c4148ce 5874int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
3d17b9b5
AD
5875{
5876 int ret;
5877
d3ba622d
BH
5878 if (!ufshcd_is_wb_allowed(hba) ||
5879 hba->dev_info.wb_buf_flush_enabled == enable)
6c4148ce 5880 return 0;
3d17b9b5 5881
3b5f3c0d 5882 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
3d17b9b5 5883 if (ret) {
4f6b69f3
JC
5884 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
5885 __func__, enable ? "enabling" : "disabling", ret);
6c4148ce 5886 return ret;
3d17b9b5
AD
5887 }
5888
d3ba622d 5889 hba->dev_info.wb_buf_flush_enabled = enable;
4f6b69f3 5890 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
3b5f3c0d 5891 __func__, enable ? "enabled" : "disabled");
6c4148ce
JC
5892
5893 return ret;
3d17b9b5
AD
5894}
5895
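
/*
 * Editorial illustration only (hypothetical caller, e.g. a sysfs store hook):
 * WriteBooster and its buffer flush can be toggled together with the two
 * exported helpers above.
 */
static int __maybe_unused ufshcd_example_set_wb(struct ufs_hba *hba,
						bool enable)
{
	int ret = ufshcd_wb_toggle(hba, enable);

	if (ret)
		return ret;
	return ufshcd_wb_toggle_buf_flush(hba, enable);
}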
5896static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5897 u32 avail_buf)
5898{
5899 u32 cur_buf;
5900 int ret;
e31011ab 5901 u8 index;
3d17b9b5 5902
e31011ab 5903 index = ufshcd_wb_get_query_index(hba);
3d17b9b5
AD
5904 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5905 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
e31011ab 5906 index, 0, &cur_buf);
3d17b9b5 5907 if (ret) {
4f6b69f3 5908 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
3d17b9b5
AD
5909 __func__, ret);
5910 return false;
5911 }
5912
5913 if (!cur_buf) {
5914 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5915 cur_buf);
5916 return false;
5917 }
d14734ae 5918 /* Let it continue to flush when available buffer exceeds threshold */
a858af9a 5919 return avail_buf < hba->vps->wb_flush_threshold;
3d17b9b5
AD
5920}
5921
f681d107
JC
5922static void ufshcd_wb_force_disable(struct ufs_hba *hba)
5923{
42f8c5cd 5924 if (ufshcd_is_wb_buf_flush_allowed(hba))
4450a165 5925 ufshcd_wb_toggle_buf_flush(hba, false);
f681d107 5926
4450a165 5927 ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
f681d107
JC
5928 ufshcd_wb_toggle(hba, false);
5929 hba->caps &= ~UFSHCD_CAP_WB_EN;
5930
5931 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
5932}
5933
5934static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
5935{
5936 u32 lifetime;
5937 int ret;
5938 u8 index;
5939
5940 index = ufshcd_wb_get_query_index(hba);
5941 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5942 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
5943 index, 0, &lifetime);
5944 if (ret) {
5945 dev_err(hba->dev,
5946 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
5947 __func__, ret);
5948 return false;
5949 }
5950
5951 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
5952 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
5953 __func__, lifetime);
5954 return false;
5955 }
5956
5957 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
5958 __func__, lifetime);
5959
5960 return true;
5961}
5962
51dd905b 5963static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
3d17b9b5
AD
5964{
5965 int ret;
5966 u32 avail_buf;
e31011ab 5967 u8 index;
3d17b9b5 5968
79e3520f 5969 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5 5970 return false;
f681d107
JC
5971
5972 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
5973 ufshcd_wb_force_disable(hba);
5974 return false;
5975 }
5976
3d17b9b5
AD
5977 /*
5978 * The ufs device needs the vcc to be ON to flush.
5979 * With user-space reduction enabled, it's enough to enable flush
5980 * by checking only the available buffer. The threshold
5981 * defined here is > 90% full.
5982 * With preserve-user-space enabled, the current buffer
5983 * should be checked too because the WB buffer size can shrink
5984 * as the disk fills up. This info is provided by the current
5985 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5986 * keeping VCC on when the current buffer is empty.
5987 */
e31011ab 5988 index = ufshcd_wb_get_query_index(hba);
3d17b9b5
AD
5989 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5990 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
e31011ab 5991 index, 0, &avail_buf);
3d17b9b5 5992 if (ret) {
4f6b69f3 5993 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
3d17b9b5
AD
5994 __func__, ret);
5995 return false;
5996 }
5997
a858af9a
BVA
5998 if (!hba->dev_info.b_presrv_uspc_en)
5999 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
3d17b9b5
AD
6000
6001 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
6002}
6003
51dd905b
SC
6004static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6005{
6006 struct ufs_hba *hba = container_of(to_delayed_work(work),
6007 struct ufs_hba,
6008 rpm_dev_flush_recheck_work);
6009 /*
6010 * To prevent unnecessary VCC power drain after the device finishes
6011 * a WriteBooster buffer flush or Auto BKOPs, force runtime resume
6012 * after a certain delay so that the threshold is rechecked at the
6013 * next runtime suspend.
6014 */
b294ff3e
AD
6015 ufshcd_rpm_get_sync(hba);
6016 ufshcd_rpm_put_sync(hba);
51dd905b
SC
6017}
6018
66ec6d59
SRT
6019/**
6020 * ufshcd_exception_event_handler - handle exceptions raised by device
6021 * @work: pointer to work data
6022 *
6023 * Read bExceptionEventStatus attribute from the device and handle the
6024 * exception event accordingly.
6025 */
6026static void ufshcd_exception_event_handler(struct work_struct *work)
6027{
6028 struct ufs_hba *hba;
6029 int err;
6030 u32 status = 0;
6031 hba = container_of(work, struct ufs_hba, eeh_work);
6032
03e1d28e 6033 ufshcd_scsi_block_requests(hba);
66ec6d59
SRT
6034 err = ufshcd_get_ee_status(hba, &status);
6035 if (err) {
6036 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6037 __func__, err);
6038 goto out;
6039 }
6040
f7733625
AH
6041 trace_ufshcd_exception_event(dev_name(hba->dev), status);
6042
cd469475 6043 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
afdfff59
YG
6044 ufshcd_bkops_exception_event_handler(hba);
6045
322c4b29
AA
6046 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6047 ufshcd_temp_exception_event_handler(hba, status);
6048
7deedfda 6049 ufs_debugfs_exception_event(hba, status);
66ec6d59 6050out:
03e1d28e 6051 ufshcd_scsi_unblock_requests(hba);
66ec6d59
SRT
6052}
6053
9a47ec7c
YG
6054/* Complete requests that have door-bell cleared */
6055static void ufshcd_complete_requests(struct ufs_hba *hba)
6056{
11682523 6057 ufshcd_transfer_req_compl(hba);
9a47ec7c
YG
6058 ufshcd_tmc_handler(hba);
6059}
6060
583fa62d
YG
6061/**
6062 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6063 * to recover from the DL NAC errors.
6064 * @hba: per-adapter instance
6065 *
6066 * Returns true if error handling is required, false otherwise
6067 */
6068static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6069{
6070 unsigned long flags;
6071 bool err_handling = true;
6072
6073 spin_lock_irqsave(hba->host->host_lock, flags);
6074 /*
6075 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
6076 * device fatal errors and/or DL NAC & REPLAY timeout errors.
6077 */
6078 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6079 goto out;
6080
6081 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6082 ((hba->saved_err & UIC_ERROR) &&
6083 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6084 goto out;
6085
6086 if ((hba->saved_err & UIC_ERROR) &&
6087 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6088 int err;
6089 /*
6090 * Wait for 50ms to see whether any other errors show up.
6091 */
6092 spin_unlock_irqrestore(hba->host->host_lock, flags);
6093 msleep(50);
6094 spin_lock_irqsave(hba->host->host_lock, flags);
6095
6096 /*
6097 * Now check whether we have got any severe errors other than
6098 * the DL NAC error.
6099 */
6100 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6101 ((hba->saved_err & UIC_ERROR) &&
6102 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6103 goto out;
6104
6105 /*
6106 * As DL NAC is the only error received so far, send out NOP
6107 * command to confirm if link is still active or not.
6108 * - If we don't get any response then do error recovery.
6109 * - If we get response then clear the DL NAC error bit.
6110 */
6111
6112 spin_unlock_irqrestore(hba->host->host_lock, flags);
6113 err = ufshcd_verify_dev_init(hba);
6114 spin_lock_irqsave(hba->host->host_lock, flags);
6115
6116 if (err)
6117 goto out;
6118
6119 /* Link seems to be alive hence ignore the DL NAC errors */
6120 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6121 hba->saved_err &= ~UIC_ERROR;
6122 /* clear NAC error */
6123 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
b0008625 6124 if (!hba->saved_uic_err)
583fa62d 6125 err_handling = false;
583fa62d
YG
6126 }
6127out:
6128 spin_unlock_irqrestore(hba->host->host_lock, flags);
6129 return err_handling;
6130}
6131
88b09900
AH
6132/* host lock must be held before calling this func */
6133static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6134{
6135 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6136 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6137}
6138
267a59f6 6139void ufshcd_schedule_eh_work(struct ufs_hba *hba)
88b09900 6140{
267a59f6
BVA
6141 lockdep_assert_held(hba->host->host_lock);
6142
88b09900
AH
6143 /* handle fatal errors only when link is not in error state */
6144 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6145 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6146 ufshcd_is_saved_err_fatal(hba))
6147 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6148 else
6149 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6150 queue_work(hba->eh_wq, &hba->eh_work);
6151 }
6152}
6153
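
/*
 * Editorial illustration only (hypothetical caller): ufshcd_schedule_eh_work()
 * asserts that the host lock is held, so a caller that wants to force error
 * recovery would take the lock, set the trigger condition and then schedule.
 */
static void __maybe_unused ufshcd_example_force_eh(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}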
348e1bc5
SC
6154static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6155{
6156 down_write(&hba->clk_scaling_lock);
6157 hba->clk_scaling.is_allowed = allow;
6158 up_write(&hba->clk_scaling_lock);
6159}
6160
6161static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6162{
6163 if (suspend) {
6164 if (hba->clk_scaling.is_enabled)
6165 ufshcd_suspend_clkscaling(hba);
6166 ufshcd_clk_scaling_allow(hba, false);
6167 } else {
6168 ufshcd_clk_scaling_allow(hba, true);
6169 if (hba->clk_scaling.is_enabled)
6170 ufshcd_resume_clkscaling(hba);
6171 }
6172}
6173
c72e79c0
CG
6174static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6175{
b294ff3e 6176 ufshcd_rpm_get_sync(hba);
e2106584 6177 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
b294ff3e 6178 hba->is_sys_suspended) {
88a92d6a
CG
6179 enum ufs_pm_op pm_op;
6180
c72e79c0 6181 /*
b294ff3e 6182 * Don't assume anything about resume; if
c72e79c0
CG
6183 * resume fails, irq and clocks can be OFF, and powers
6184 * can be OFF or in LPM.
6185 */
6186 ufshcd_setup_hba_vreg(hba, true);
6187 ufshcd_enable_irq(hba);
6188 ufshcd_setup_vreg(hba, true);
6189 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6190 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6191 ufshcd_hold(hba, false);
6192 if (!ufshcd_is_clkgating_allowed(hba))
6193 ufshcd_setup_clocks(hba, true);
6194 ufshcd_release(hba);
88a92d6a
CG
6195 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6196 ufshcd_vops_resume(hba, pm_op);
c72e79c0
CG
6197 } else {
6198 ufshcd_hold(hba, false);
348e1bc5
SC
6199 if (ufshcd_is_clkscaling_supported(hba) &&
6200 hba->clk_scaling.is_enabled)
c72e79c0 6201 ufshcd_suspend_clkscaling(hba);
348e1bc5 6202 ufshcd_clk_scaling_allow(hba, false);
c72e79c0 6203 }
aa53f580
CG
6204 ufshcd_scsi_block_requests(hba);
6205 /* Drain ufshcd_queuecommand() */
5675c381 6206 synchronize_rcu();
aa53f580 6207 cancel_work_sync(&hba->eeh_work);
c72e79c0
CG
6208}
6209
6210static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6211{
aa53f580 6212 ufshcd_scsi_unblock_requests(hba);
c72e79c0 6213 ufshcd_release(hba);
348e1bc5
SC
6214 if (ufshcd_is_clkscaling_supported(hba))
6215 ufshcd_clk_scaling_suspend(hba, false);
b294ff3e 6216 ufshcd_rpm_put(hba);
c72e79c0
CG
6217}
6218
6219static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6220{
9cd20d3f 6221 return (!hba->is_powered || hba->shutting_down ||
e2106584 6222 !hba->ufs_device_wlun ||
9cd20d3f 6223 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
c72e79c0 6224 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
9cd20d3f 6225 ufshcd_is_link_broken(hba))));
c72e79c0
CG
6226}
6227
6228#ifdef CONFIG_PM
6229static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6230{
6231 struct Scsi_Host *shost = hba->host;
6232 struct scsi_device *sdev;
6233 struct request_queue *q;
6234 int ret;
6235
88a92d6a 6236 hba->is_sys_suspended = false;
c72e79c0 6237 /*
b294ff3e 6238 * Set RPM status of wlun device to RPM_ACTIVE,
c72e79c0
CG
6239 * this also clears its runtime error.
6240 */
e2106584 6241 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
b294ff3e
AD
6242
6243 /* hba device might have a runtime error otherwise */
6244 if (ret)
6245 ret = pm_runtime_set_active(hba->dev);
c72e79c0 6246 /*
b294ff3e
AD
6247 * If wlun device had runtime error, we also need to resume those
6248 * consumer scsi devices in case any of them has failed to be
6249 * resumed due to supplier runtime resume failure. This is to unblock
c72e79c0
CG
6250 * blk_queue_enter in case there are bios waiting inside it.
6251 */
6252 if (!ret) {
6253 shost_for_each_device(sdev, shost) {
6254 q = sdev->request_queue;
6255 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6256 q->rpm_status == RPM_SUSPENDING))
6257 pm_request_resume(q->dev);
6258 }
6259 }
6260}
6261#else
6262static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6263{
6264}
6265#endif
6266
2355b66e
CG
6267static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6268{
6269 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6270 u32 mode;
6271
6272 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6273
6274 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6275 return true;
6276
6277 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6278 return true;
6279
6280 return false;
6281}
6282
b817e6ff
BVA
6283static bool ufshcd_abort_all(struct ufs_hba *hba)
6284{
6285 bool needs_reset = false;
6286 int tag, ret;
6287
6288 /* Clear pending transfer requests */
6289 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
6290 ret = ufshcd_try_to_abort_task(hba, tag);
6291 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6292 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6293 ret ? "failed" : "succeeded");
6294 if (ret) {
6295 needs_reset = true;
6296 goto out;
6297 }
6298 }
6299
6300 /* Clear pending task management requests */
6301 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6302 if (ufshcd_clear_tm_cmd(hba, tag)) {
6303 needs_reset = true;
6304 goto out;
6305 }
6306 }
6307
6308out:
6309 /* Complete the requests that are cleared by s/w */
6310 ufshcd_complete_requests(hba);
6311
6312 return needs_reset;
6313}
6314
7a3e97b0 6315/**
e8e7f271 6316 * ufshcd_err_handler - handle UFS errors that require s/w attention
88b09900 6317 * @work: pointer to work structure
7a3e97b0 6318 */
88b09900 6319static void ufshcd_err_handler(struct work_struct *work)
7a3e97b0 6320{
87bf6a6b 6321 int retries = MAX_ERR_HANDLER_RETRIES;
88b09900 6322 struct ufs_hba *hba;
e8e7f271 6323 unsigned long flags;
87bf6a6b
AH
6324 bool needs_restore;
6325 bool needs_reset;
87bf6a6b 6326 int pmc_err;
e8e7f271 6327
88b09900
AH
6328 hba = container_of(work, struct ufs_hba, eh_work);
6329
4693fad7
BVA
6330 dev_info(hba->dev,
6331 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6332 __func__, ufshcd_state_name[hba->ufshcd_state],
6333 hba->is_powered, hba->shutting_down, hba->saved_err,
6334 hba->saved_uic_err, hba->force_reset,
6335 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6336
9cd20d3f 6337 down(&hba->host_sem);
e8e7f271 6338 spin_lock_irqsave(hba->host->host_lock, flags);
c72e79c0 6339 if (ufshcd_err_handling_should_stop(hba)) {
4db7a236
CG
6340 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6341 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6342 spin_unlock_irqrestore(hba->host->host_lock, flags);
9cd20d3f 6343 up(&hba->host_sem);
4db7a236
CG
6344 return;
6345 }
6346 ufshcd_set_eh_in_progress(hba);
6347 spin_unlock_irqrestore(hba->host->host_lock, flags);
c72e79c0 6348 ufshcd_err_handling_prepare(hba);
a45f9371
CG
6349 /* Complete requests that have door-bell cleared by h/w */
6350 ufshcd_complete_requests(hba);
e8e7f271 6351 spin_lock_irqsave(hba->host->host_lock, flags);
87bf6a6b
AH
6352again:
6353 needs_restore = false;
6354 needs_reset = false;
87bf6a6b 6355
aa53f580
CG
6356 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6357 hba->ufshcd_state = UFSHCD_STATE_RESET;
88a92d6a
CG
6358 /*
6359 * A full reset and restore might have happened after preparation
6360 * is finished, so double-check whether we should stop.
6361 */
6362 if (ufshcd_err_handling_should_stop(hba))
6363 goto skip_err_handling;
6364
583fa62d
YG
6365 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6366 bool ret;
6367
6368 spin_unlock_irqrestore(hba->host->host_lock, flags);
6369 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6370 ret = ufshcd_quirk_dl_nac_errors(hba);
6371 spin_lock_irqsave(hba->host->host_lock, flags);
88a92d6a 6372 if (!ret && ufshcd_err_handling_should_stop(hba))
583fa62d
YG
6373 goto skip_err_handling;
6374 }
4db7a236 6375
2355b66e
CG
6376 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6377 (hba->saved_uic_err &&
6378 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
c3be8d1e
CG
6379 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6380
6381 spin_unlock_irqrestore(hba->host->host_lock, flags);
6382 ufshcd_print_host_state(hba);
6383 ufshcd_print_pwr_info(hba);
e965e5e0 6384 ufshcd_print_evt_hist(hba);
c3be8d1e
CG
6385 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6386 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6387 spin_lock_irqsave(hba->host->host_lock, flags);
6388 }
6389
9a47ec7c
YG
6390 /*
6391 * if host reset is required then skip clearing the pending
2df74b69
CG
6392 * transfers forcefully because they will get cleared during
6393 * host reset and restore
9a47ec7c 6394 */
88a92d6a
CG
6395 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6396 ufshcd_is_saved_err_fatal(hba) ||
6397 ((hba->saved_err & UIC_ERROR) &&
6398 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6399 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6400 needs_reset = true;
2355b66e 6401 goto do_reset;
88a92d6a 6402 }
9a47ec7c 6403
2355b66e
CG
6404 /*
6405 * If LINERESET was caught, UFS might have been put to PWM mode,
6406 * check if power mode restore is needed.
6407 */
6408 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6409 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6410 if (!hba->saved_uic_err)
6411 hba->saved_err &= ~UIC_ERROR;
6412 spin_unlock_irqrestore(hba->host->host_lock, flags);
6413 if (ufshcd_is_pwr_mode_restore_needed(hba))
6414 needs_restore = true;
6415 spin_lock_irqsave(hba->host->host_lock, flags);
6416 if (!hba->saved_err && !needs_restore)
6417 goto skip_err_handling;
6418 }
9a47ec7c 6419
2355b66e 6420 hba->silence_err_logs = true;
9a47ec7c
YG
6421 /* release lock as clear command might sleep */
6422 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 6423
b817e6ff 6424 needs_reset = ufshcd_abort_all(hba);
9a47ec7c 6425
a45f9371
CG
6426 spin_lock_irqsave(hba->host->host_lock, flags);
6427 hba->silence_err_logs = false;
b817e6ff 6428 if (needs_reset)
2355b66e 6429 goto do_reset;
9a47ec7c 6430
2355b66e
CG
6431 /*
6432 * After all reqs and tasks are cleared from the doorbell,
6433 * it is now safe to restore the power mode.
6434 */
6435 if (needs_restore) {
6436 spin_unlock_irqrestore(hba->host->host_lock, flags);
6437 /*
6438 * Hold the scaling lock just in case dev cmds
6439 * are sent via bsg and/or sysfs.
6440 */
6441 down_write(&hba->clk_scaling_lock);
6442 hba->force_pmc = true;
6443 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6444 if (pmc_err) {
6445 needs_reset = true;
6446 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6447 __func__, pmc_err);
6448 }
6449 hba->force_pmc = false;
6450 ufshcd_print_pwr_info(hba);
6451 up_write(&hba->clk_scaling_lock);
6452 spin_lock_irqsave(hba->host->host_lock, flags);
6453 }
9a47ec7c 6454
2355b66e 6455do_reset:
e8e7f271 6456 /* Fatal errors need reset */
9a47ec7c 6457 if (needs_reset) {
87bf6a6b
AH
6458 int err;
6459
4db7a236 6460 hba->force_reset = false;
9a47ec7c 6461 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 6462 err = ufshcd_reset_and_restore(hba);
4db7a236
CG
6463 if (err)
6464 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6465 __func__, err);
c72e79c0
CG
6466 else
6467 ufshcd_recover_pm_error(hba);
9a47ec7c 6468 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 6469 }
9a47ec7c 6470
583fa62d 6471skip_err_handling:
9a47ec7c 6472 if (!needs_reset) {
4db7a236
CG
6473 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6474 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
9a47ec7c
YG
6475 if (hba->saved_err || hba->saved_uic_err)
6476 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6477 __func__, hba->saved_err, hba->saved_uic_err);
6478 }
87bf6a6b
AH
6479 /* Exit in an operational state or dead */
6480 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6481 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6482 if (--retries)
6483 goto again;
6484 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6485 }
e8e7f271 6486 ufshcd_clear_eh_in_progress(hba);
9a47ec7c 6487 spin_unlock_irqrestore(hba->host->host_lock, flags);
c72e79c0 6488 ufshcd_err_handling_unprepare(hba);
9cd20d3f 6489 up(&hba->host_sem);
4693fad7
BVA
6490
6491 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6492 ufshcd_state_name[hba->ufshcd_state]);
7a3e97b0
SY
6493}
6494
6495/**
e8e7f271
SRT
6496 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6497 * @hba: per-adapter instance
9333d775
VG
6498 *
6499 * Returns
6500 * IRQ_HANDLED - If interrupt is valid
6501 * IRQ_NONE - If invalid interrupt
7a3e97b0 6502 */
9333d775 6503static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
7a3e97b0
SY
6504{
6505 u32 reg;
9333d775 6506 irqreturn_t retval = IRQ_NONE;
7a3e97b0 6507
2355b66e 6508 /* PHY layer error */
fb7b45f0 6509 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
fb7b45f0 6510 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
2355b66e 6511 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6512 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
fb7b45f0
DR
6513 /*
6514 * To know whether this error is fatal or not, DB timeout
6515 * must be checked but this error is handled separately.
6516 */
2355b66e
CG
6517 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6518 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6519 __func__);
6520
6521 /* Got a LINERESET indication. */
6522 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6523 struct uic_command *cmd = NULL;
6524
6525 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6526 if (hba->uic_async_done && hba->active_uic_cmd)
6527 cmd = hba->active_uic_cmd;
6528 /*
6529 * Ignore the LINERESET during power mode change
6530 * operation via DME_SET command.
6531 */
6532 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6533 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6534 }
9333d775 6535 retval |= IRQ_HANDLED;
ff8e20c6 6536 }
fb7b45f0 6537
e8e7f271
SRT
6538 /* PA_INIT_ERROR is fatal and needs UIC reset */
6539 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
9333d775
VG
6540 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6541 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6542 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
ff8e20c6 6543
9333d775
VG
6544 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6545 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6546 else if (hba->dev_quirks &
6547 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6548 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6549 hba->uic_error |=
6550 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6551 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6552 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6553 }
6554 retval |= IRQ_HANDLED;
583fa62d 6555 }
e8e7f271
SRT
6556
6557 /* UIC NL/TL/DME errors needs software retry */
6558 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
9333d775
VG
6559 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6560 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6561 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
e8e7f271 6562 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
9333d775 6563 retval |= IRQ_HANDLED;
ff8e20c6 6564 }
e8e7f271
SRT
6565
6566 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
9333d775
VG
6567 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6568 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6569 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
e8e7f271 6570 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
9333d775 6571 retval |= IRQ_HANDLED;
ff8e20c6 6572 }
6573
6574 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6575 if ((reg & UIC_DME_ERROR) &&
6576 (reg & UIC_DME_ERROR_CODE_MASK)) {
e965e5e0 6577 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
e8e7f271 6578 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
9333d775 6579 retval |= IRQ_HANDLED;
ff8e20c6 6580 }
6581
6582 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6583 __func__, hba->uic_error);
9333d775 6584 return retval;
6585}
6586
6587/**
6588 * ufshcd_check_errors - Check for errors that need s/w attention
6589 * @hba: per-adapter instance
a45f9371 6590 * @intr_status: interrupt status generated by the controller
6591 *
6592 * Returns
6593 * IRQ_HANDLED - If interrupt is valid
6594 * IRQ_NONE - If invalid interrupt
e8e7f271 6595 */
a45f9371 6596static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6597{
6598 bool queue_eh_work = false;
9333d775 6599 irqreturn_t retval = IRQ_NONE;
e8e7f271 6600
6601 spin_lock(hba->host->host_lock);
6602 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6603
d3c615bf 6604 if (hba->errors & INT_FATAL_ERRORS) {
6605 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6606 hba->errors);
e8e7f271 6607 queue_eh_work = true;
d3c615bf 6608 }
6609
6610 if (hba->errors & UIC_ERROR) {
e8e7f271 6611 hba->uic_error = 0;
9333d775 6612 retval = ufshcd_update_uic_error(hba);
6613 if (hba->uic_error)
6614 queue_eh_work = true;
7a3e97b0 6615 }
e8e7f271 6616
6617 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6618 dev_err(hba->dev,
6619 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6620 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6621 "Enter" : "Exit",
6622 hba->errors, ufshcd_get_upmcrs(hba));
e965e5e0 6623 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
d3c615bf 6624 hba->errors);
4db7a236 6625 ufshcd_set_link_broken(hba);
6626 queue_eh_work = true;
6627 }
6628
e8e7f271 6629 if (queue_eh_work) {
6630 /*
6631 * update the transfer error masks to sticky bits, let's do this
6632 * irrespective of current ufshcd_state.
6633 */
6634 hba->saved_err |= hba->errors;
6635 hba->saved_uic_err |= hba->uic_error;
6636
4db7a236 6637 /* dump controller state before resetting */
6638 if ((hba->saved_err &
6639 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6640 (hba->saved_uic_err &&
6641 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
4db7a236 6642 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6643 __func__, hba->saved_err,
6644 hba->saved_uic_err);
6645 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6646 "host_regs: ");
4db7a236 6647 ufshcd_print_pwr_info(hba);
e8e7f271 6648 }
88b09900 6649 ufshcd_schedule_eh_work(hba);
9333d775 6650 retval |= IRQ_HANDLED;
3441da7d 6651 }
6652 /*
6653 * if (!queue_eh_work) -
6654 * Other errors are either non-fatal where host recovers
6655 * itself without s/w intervention or errors that will be
6656 * handled by the SCSI core layer.
6657 */
6658 hba->errors = 0;
6659 hba->uic_error = 0;
6660 spin_unlock(hba->host->host_lock);
9333d775 6661 return retval;
6662}
6663
6664/**
6665 * ufshcd_tmc_handler - handle task management function completion
6666 * @hba: per adapter instance
6667 *
6668 * Returns
6669 * IRQ_HANDLED - If interrupt is valid
6670 * IRQ_NONE - If invalid interrupt
7a3e97b0 6671 */
9333d775 6672static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
7a3e97b0 6673{
6674 unsigned long flags, pending, issued;
6675 irqreturn_t ret = IRQ_NONE;
6676 int tag;
6677
a45f9371 6678 spin_lock_irqsave(hba->host->host_lock, flags);
5cb37a26 6679 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6680 issued = hba->outstanding_tasks & ~pending;
6681 for_each_set_bit(tag, &issued, hba->nutmrs) {
6682 struct request *req = hba->tmf_rqs[tag];
6683 struct completion *c = req->end_io_data;
6684
6685 complete(c);
6686 ret = IRQ_HANDLED;
6687 }
6688 spin_unlock_irqrestore(hba->host->host_lock, flags);
6689
f5ef336f 6690 return ret;
6691}
6692
6693/**
6694 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6695 * @hba: per adapter instance
6696 *
6697 * Returns IRQ_HANDLED if interrupt is handled
6698 */
6699static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6700{
6701 struct ufs_hw_queue *hwq;
6702 unsigned long outstanding_cqs;
6703 unsigned int nr_queues;
6704 int i, ret;
6705 u32 events;
6706
6707 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6708 if (ret)
6709 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6710
6711 /* Exclude the poll queues */
6712 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6713 for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6714 hwq = &hba->uhq[i];
6715
6716 events = ufshcd_mcq_read_cqis(hba, i);
6717 if (events)
6718 ufshcd_mcq_write_cqis(hba, events, i);
6719
6720 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6721 ufshcd_mcq_poll_cqe_nolock(hba, hwq);
6722 }
6723
6724 return IRQ_HANDLED;
6725}
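/*
 * Worked example (illustrative, not part of the driver): if the vendor
 * get_outstanding_cqs hook is not implemented and hba->nr_hw_queues is 8,
 * the fallback above sets outstanding_cqs = (1U << 8) - 1 = 0xFF, i.e.
 * every non-poll hardware queue is scanned for pending completion events.
 */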
6726
6727/**
6728 * ufshcd_sl_intr - Interrupt service routine
6729 * @hba: per adapter instance
6730 * @intr_status: contains interrupts generated by the controller
6731 *
6732 * Returns
6733 * IRQ_HANDLED - If interrupt is valid
6734 * IRQ_NONE - If invalid interrupt
7a3e97b0 6735 */
9333d775 6736static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
7a3e97b0 6737{
6738 irqreturn_t retval = IRQ_NONE;
6739
53b3d9c3 6740 if (intr_status & UFSHCD_UIC_MASK)
9333d775 6741 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
7a3e97b0 6742
6743 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6744 retval |= ufshcd_check_errors(hba, intr_status);
6745
7a3e97b0 6746 if (intr_status & UTP_TASK_REQ_COMPL)
9333d775 6747 retval |= ufshcd_tmc_handler(hba);
6748
6749 if (intr_status & UTP_TRANSFER_REQ_COMPL)
11682523 6750 retval |= ufshcd_transfer_req_compl(hba);
9333d775 6751
6752 if (intr_status & MCQ_CQ_EVENT_STATUS)
6753 retval |= ufshcd_handle_mcq_cq_events(hba);
6754
9333d775 6755 return retval;
6756}
6757
6758/**
6759 * ufshcd_intr - Main interrupt service routine
6760 * @irq: irq number
6761 * @__hba: pointer to adapter instance
6762 *
6763 * Returns
6764 * IRQ_HANDLED - If interrupt is valid
6765 * IRQ_NONE - If invalid interrupt
6766 */
6767static irqreturn_t ufshcd_intr(int irq, void *__hba)
6768{
127d5f7c 6769 u32 intr_status, enabled_intr_status = 0;
6770 irqreturn_t retval = IRQ_NONE;
6771 struct ufs_hba *hba = __hba;
7f6ba4f1 6772 int retries = hba->nutrs;
7a3e97b0 6773
b873a275 6774 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
3f8af604 6775 hba->ufs_stats.last_intr_status = intr_status;
0f85e747 6776 hba->ufs_stats.last_intr_ts = local_clock();
7a3e97b0 6777
6778 /*
6779 * There could be max of hba->nutrs reqs in flight and in worst case
6780 * if the reqs get finished 1 by 1 after the interrupt status is
6781 * read, make sure we handle them by checking the interrupt status
6782 * again in a loop until we process all of the reqs before returning.
6783 */
127d5f7c 6784 while (intr_status && retries--) {
6785 enabled_intr_status =
6786 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
60ec3755 6787 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6788 if (enabled_intr_status)
6789 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6790
6791 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
127d5f7c 6792 }
d75f7fe4 6793
eeb1b55b 6794 if (enabled_intr_status && retval == IRQ_NONE &&
6795 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6796 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6797 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6798 __func__,
6799 intr_status,
6800 hba->ufs_stats.last_intr_status,
6801 enabled_intr_status);
6802 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6803 }
6804
6805 return retval;
6806}
6807
6808static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6809{
6810 int err = 0;
6811 u32 mask = 1 << tag;
6812 unsigned long flags;
6813
6814 if (!test_bit(tag, &hba->outstanding_tasks))
6815 goto out;
6816
6817 spin_lock_irqsave(hba->host->host_lock, flags);
1399c5b0 6818 ufshcd_utmrl_clear(hba, tag);
6819 spin_unlock_irqrestore(hba->host->host_lock, flags);
6820
6821 /* poll for max. 1 sec to clear door bell register by h/w */
6822 err = ufshcd_wait_for_register(hba,
6823 REG_UTP_TASK_REQ_DOOR_BELL,
5cac1095 6824 mask, 0, 1000, 1000);
6825
6826 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6827 tag, err ? "failed" : "succeeded");
6828
6829out:
6830 return err;
6831}
6832
6833static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6834 struct utp_task_req_desc *treq, u8 tm_function)
7a3e97b0 6835{
69a6c269 6836 struct request_queue *q = hba->tmf_queue;
c6049cd9 6837 struct Scsi_Host *host = hba->host;
6838 DECLARE_COMPLETION_ONSTACK(wait);
6839 struct request *req;
7a3e97b0 6840 unsigned long flags;
4b42d557 6841 int task_tag, err;
7a3e97b0 6842
e2933132 6843 /*
0bf6d96c 6844 * blk_mq_alloc_request() is used here only to get a free tag.
e2933132 6845 */
0bf6d96c 6846 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
6847 if (IS_ERR(req))
6848 return PTR_ERR(req);
6849
69a6c269 6850 req->end_io_data = &wait;
1ab27c9c 6851 ufshcd_hold(hba, false);
7a3e97b0 6852
e2933132 6853 spin_lock_irqsave(host->host_lock, flags);
7a3e97b0 6854
4b42d557 6855 task_tag = req->tag;
6856 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
6857 task_tag);
f5ef336f 6858 hba->tmf_rqs[req->tag] = req;
1352eec8 6859 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
c6049cd9 6860
6861 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6862 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
d2877be4 6863
7a3e97b0 6864 /* send command to the controller */
4b42d557 6865 __set_bit(task_tag, &hba->outstanding_tasks);
897efe62 6866
4b42d557 6867 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6868 /* Make sure that doorbell is committed immediately */
6869 wmb();
6870
6871 spin_unlock_irqrestore(host->host_lock, flags);
6872
28fa68fc 6873 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6667e6d9 6874
7a3e97b0 6875 /* wait until the task management command is completed */
69a6c269 6876 err = wait_for_completion_io_timeout(&wait,
e2933132 6877 msecs_to_jiffies(TM_CMD_TIMEOUT));
7a3e97b0 6878 if (!err) {
28fa68fc 6879 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
6880 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6881 __func__, tm_function);
6882 if (ufshcd_clear_tm_cmd(hba, task_tag))
6883 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6884 __func__, task_tag);
6885 err = -ETIMEDOUT;
6886 } else {
c6049cd9 6887 err = 0;
4b42d557 6888 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
c6049cd9 6889
28fa68fc 6890 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7a3e97b0 6891 }
e2933132 6892
b557217c 6893 spin_lock_irqsave(hba->host->host_lock, flags);
f5ef336f 6894 hba->tmf_rqs[req->tag] = NULL;
4b42d557 6895 __clear_bit(task_tag, &hba->outstanding_tasks);
6896 spin_unlock_irqrestore(hba->host->host_lock, flags);
6897
4b42d557 6898 ufshcd_release(hba);
0bf6d96c 6899 blk_mq_free_request(req);
e2933132 6900
6901 return err;
6902}
6903
6904/**
6905 * ufshcd_issue_tm_cmd - issues task management commands to controller
6906 * @hba: per adapter instance
6907 * @lun_id: LUN ID to which TM command is sent
6908 * @task_id: task ID to which the TM command is applicable
6909 * @tm_function: task management function opcode
6910 * @tm_response: task management service response return value
6911 *
6912 * Returns non-zero value on error, zero on success.
6913 */
6914static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6915 u8 tm_function, u8 *tm_response)
6916{
6917 struct utp_task_req_desc treq = { { 0 }, };
6918 enum utp_ocs ocs_value;
6919 int err;
6920
6921 /* Configure task request descriptor */
6922 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6923 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6924
6925 /* Configure task request UPIU */
1352eec8 6926 treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
c6049cd9 6927 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
1352eec8 6928 treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6929
6930 /*
6931 * The host shall provide the same value for LUN field in the basic
6932 * header and for Input Parameter.
6933 */
6934 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6935 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
6936
6937 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6938 if (err == -ETIMEDOUT)
6939 return err;
6940
6941 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6942 if (ocs_value != OCS_SUCCESS)
6943 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6944 __func__, ocs_value);
6945 else if (tm_response)
1352eec8 6946 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
6947 MASK_TM_SERVICE_RESP;
6948 return err;
6949}
6950
6951/**
6952 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6953 * @hba: per-adapter instance
6954 * @req_upiu: upiu request
6955 * @rsp_upiu: upiu reply
6956 * @desc_buff: pointer to descriptor buffer, NULL if NA
6957 * @buff_len: descriptor size, 0 if NA
d0e9760d 6958 * @cmd_type: specifies the type (NOP, Query...)
6959 * @desc_op: descriptor operation
6960 *
6961 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6962 * Therefore, they "ride" the device management infrastructure: they use its
6963 * tag and task work queues.
6964 *
6965 * Since there is only one available tag for device management commands,
6966 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6967 */
6968static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6969 struct utp_upiu_req *req_upiu,
6970 struct utp_upiu_req *rsp_upiu,
6971 u8 *desc_buff, int *buff_len,
7f674c38 6972 enum dev_cmd_type cmd_type,
6973 enum query_opcode desc_op)
6974{
8a686f26 6975 DECLARE_COMPLETION_ONSTACK(wait);
945c3cca 6976 const u32 tag = hba->reserved_slot;
6977 struct ufshcd_lrb *lrbp;
6978 int err = 0;
a23064c4 6979 u8 upiu_flags;
5e0a86ee 6980
6981 /* Protects use of hba->reserved_slot. */
6982 lockdep_assert_held(&hba->dev_cmd.lock);
5e0a86ee 6983
945c3cca 6984 down_read(&hba->clk_scaling_lock);
5e0a86ee 6985
a45f9371 6986 lrbp = &hba->lrb[tag];
7a7e66c6 6987 WARN_ON(lrbp->cmd);
5e0a86ee 6988 lrbp->cmd = NULL;
6989 lrbp->task_tag = tag;
6990 lrbp->lun = 0;
6991 lrbp->intr_cmd = true;
df043c74 6992 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6993 hba->dev_cmd.type = cmd_type;
6994
51428818 6995 if (hba->ufs_version <= ufshci_version(1, 1))
5e0a86ee 6996 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
51428818 6997 else
5e0a86ee 6998 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6999
7000 /* update the task tag in the request upiu */
7001 req_upiu->header.dword_0 |= cpu_to_be32(tag);
7002
a4b1c9b9 7003 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7004
7005 /* just copy the upiu request as it is */
7006 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7007 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7008 /* The Data Segment Area is optional depending upon the query
7009 * function value. For WRITE DESCRIPTOR, the data segment
7010 * follows right after the tsf.
7011 */
7012 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7013 *buff_len = 0;
7014 }
7015
7016 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7017
7018 hba->dev_cmd.complete = &wait;
7019
10542489 7020 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
5e0a86ee 7021
22a2d563 7022 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7023 /*
7024 * ignore the returning value here - ufshcd_check_query_response is
7025 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7026 * read the response directly ignoring all errors.
7027 */
7028 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7029
7030 /* just copy the upiu response as it is */
7031 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7032 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7033 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7034 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
7035 MASK_QUERY_DATA_SEG_LEN;
7036
7037 if (*buff_len >= resp_len) {
7038 memcpy(desc_buff, descp, resp_len);
7039 *buff_len = resp_len;
7040 } else {
7041 dev_warn(hba->dev,
7042 "%s: rsp size %d is bigger than buffer size %d",
7043 __func__, resp_len, *buff_len);
7044 *buff_len = 0;
7045 err = -EINVAL;
7046 }
7047 }
7048 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7049 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
5e0a86ee 7050
7051 up_read(&hba->clk_scaling_lock);
7052 return err;
7053}
7054
7055/**
7056 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7057 * @hba: per-adapter instance
7058 * @req_upiu: upiu request
7059 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
7060 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
7061 * @desc_buff: pointer to descriptor buffer, NULL if NA
7062 * @buff_len: descriptor size, 0 if NA
7063 * @desc_op: descriptor operation
7064 *
7065 * Supports UTP Transfer requests (nop and query), and UTP Task
7066 * Management requests.
7067 * It is up to the caller to fill the upiu content properly, as it will
7068 * be copied without any further input validation.
7069 */
7070int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7071 struct utp_upiu_req *req_upiu,
7072 struct utp_upiu_req *rsp_upiu,
7073 int msgcode,
7074 u8 *desc_buff, int *buff_len,
7075 enum query_opcode desc_op)
7076{
7077 int err;
7f674c38 7078 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
5e0a86ee 7079 struct utp_task_req_desc treq = { { 0 }, };
957d63e7 7080 enum utp_ocs ocs_value;
7081 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
7082
7083 switch (msgcode) {
7084 case UPIU_TRANSACTION_NOP_OUT:
7085 cmd_type = DEV_CMD_TYPE_NOP;
df561f66 7086 fallthrough;
7087 case UPIU_TRANSACTION_QUERY_REQ:
7088 ufshcd_hold(hba, false);
7089 mutex_lock(&hba->dev_cmd.lock);
7090 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7091 desc_buff, buff_len,
7092 cmd_type, desc_op);
7093 mutex_unlock(&hba->dev_cmd.lock);
7094 ufshcd_release(hba);
7095
7096 break;
7097 case UPIU_TRANSACTION_TASK_REQ:
7098 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
7099 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
7100
1352eec8 7101 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7102
7103 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7104 if (err == -ETIMEDOUT)
7105 break;
7106
7107 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
7108 if (ocs_value != OCS_SUCCESS) {
7109 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7110 ocs_value);
7111 break;
7112 }
7113
1352eec8 7114 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7115
7116 break;
7117 default:
7118 err = -EINVAL;
7119
7120 break;
7121 }
7122
7123 return err;
7124}
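/*
 * Usage sketch (illustrative only; my_send_nop_out() is a hypothetical
 * helper, not part of this file). A kernel caller such as a BSG provider
 * could hand-craft a NOP OUT UPIU and submit it through this API:
 *
 *	static int my_send_nop_out(struct ufs_hba *hba)
 *	{
 *		struct utp_upiu_req req = {}, rsp = {};
 *		int desc_len = 0;
 *
 *		req.header.dword_0 =
 *			cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
 *		return ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				UPIU_TRANSACTION_NOP_OUT, NULL, &desc_len,
 *				UPIU_QUERY_OPCODE_NOP);
 *	}
 */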
7125
7126/**
7127 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7128 * @hba: per adapter instance
7129 * @req_upiu: upiu request
7130 * @rsp_upiu: upiu reply
7131 * @req_ehs: EHS field which contains Advanced RPMB Request Message
7132 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
7133 * @sg_cnt: The number of sg lists actually used
7134 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7135 * @dir: DMA direction
7136 *
7137 * Returns zero on success, non-zero on failure
7138 */
7139int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7140 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7141 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7142 enum dma_data_direction dir)
7143{
7144 DECLARE_COMPLETION_ONSTACK(wait);
7145 const u32 tag = hba->reserved_slot;
7146 struct ufshcd_lrb *lrbp;
7147 int err = 0;
7148 int result;
7149 u8 upiu_flags;
7150 u8 *ehs_data;
7151 u16 ehs_len;
7152
7153 /* Protects use of hba->reserved_slot. */
7154 ufshcd_hold(hba, false);
7155 mutex_lock(&hba->dev_cmd.lock);
7156 down_read(&hba->clk_scaling_lock);
7157
7158 lrbp = &hba->lrb[tag];
7159 WARN_ON(lrbp->cmd);
7160 lrbp->cmd = NULL;
7161 lrbp->task_tag = tag;
7162 lrbp->lun = UFS_UPIU_RPMB_WLUN;
7163
7164 lrbp->intr_cmd = true;
7165 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7166 hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7167
7168 /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7169 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7170
7171 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7172
7173 /* update the task tag and LUN in the request upiu */
7174 req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
7175
7176 /* copy the UPIU(contains CDB) request as it is */
7177 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7178 /* Copy EHS, starting with byte32, immediately after the CDB package */
7179 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7180
7181 if (dir != DMA_NONE && sg_list)
7182 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7183
7184 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7185
7186 hba->dev_cmd.complete = &wait;
7187
22a2d563 7188 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7189
7190 err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7191
7192 if (!err) {
7193 /* Just copy the upiu response as it is */
7194 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7195 /* Get the response UPIU result */
7196 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
7197
7198 ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
7199 /*
7200 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7201 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7202 * Message is 02h
7203 */
7204 if (ehs_len == 2 && rsp_ehs) {
7205 /*
7206 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7207 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7208 */
7209 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7210 memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7211 }
7212 }
7213
7214 up_read(&hba->clk_scaling_lock);
7215 mutex_unlock(&hba->dev_cmd.lock);
7216 ufshcd_release(hba);
7217 return err ? : result;
7218}
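/*
 * Worked example (illustrative): the EHS length in the response header is
 * expressed in 32-byte units, so ehs_len == 2 corresponds to the 64-byte
 * Advanced RPMB Response Message copied above.  It is taken starting at
 * EHS_OFFSET_IN_RESPONSE, i.e. right after the basic response UPIU header
 * (byte 32), within the 512-byte ucd_rsp_ptr buffer.
 */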
7219
7a3e97b0 7220/**
2acd76e7 7221 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7222 * @cmd: SCSI command pointer
7223 *
7224 * Returns SUCCESS/FAILED
7225 */
3441da7d 7226static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7a3e97b0 7227{
2acd76e7 7228 unsigned long flags, pending_reqs = 0, not_cleared = 0;
7229 struct Scsi_Host *host;
7230 struct ufs_hba *hba;
7231 u32 pos;
7232 int err;
35fc4cd3 7233 u8 resp = 0xF, lun;
7234
7235 host = cmd->device->host;
7236 hba = shost_priv(host);
7a3e97b0 7237
7238 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7239 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
e2933132 7240 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7241 if (!err)
7242 err = resp;
7a3e97b0 7243 goto out;
e2933132 7244 }
7a3e97b0 7245
3441da7d 7246 /* clear the commands that were pending for corresponding LUN */
7247 spin_lock_irqsave(&hba->outstanding_lock, flags);
7248 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7249 if (hba->lrb[pos].lun == lun)
7250 __set_bit(pos, &pending_reqs);
7251 hba->outstanding_reqs &= ~pending_reqs;
7252 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7253
7254 if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
7255 spin_lock_irqsave(&hba->outstanding_lock, flags);
7256 not_cleared = pending_reqs &
7257 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7258 hba->outstanding_reqs |= not_cleared;
7259 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7260
7261 dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
7262 __func__, not_cleared);
3441da7d 7263 }
2acd76e7 7264 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
7fabb77b 7265
7a3e97b0 7266out:
7fabb77b 7267 hba->req_abort_count = 0;
e965e5e0 7268 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7269 if (!err) {
7270 err = SUCCESS;
7271 } else {
7272 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7273 err = FAILED;
7274 }
7275 return err;
7276}
7277
7278static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7279{
7280 struct ufshcd_lrb *lrbp;
7281 int tag;
7282
7283 for_each_set_bit(tag, &bitmap, hba->nutrs) {
7284 lrbp = &hba->lrb[tag];
7285 lrbp->req_abort_skip = true;
7286 }
7287}
7288
7a3e97b0 7289/**
307348f6 7290 * ufshcd_try_to_abort_task - abort a specific task
7291 * @hba: Pointer to adapter instance
7292 * @tag: Task tag/index to be aborted
7a3e97b0 7293 *
7294 * Abort the pending command in device by sending UFS_ABORT_TASK task management
7295 * command, and in the host controller by clearing the door-bell register. There
7296 * can be a race between the controller sending the command to the device while the abort is
7297 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
7298 * really issued and then try to abort it.
7299 *
7300 * Returns zero on success, non-zero on failure
7301 */
7302static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7303{
7304 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7305 int err = 0;
7306 int poll_cnt;
7307 u8 resp = 0xF;
7308 u32 reg;
7309
7310 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7311 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7312 UFS_QUERY_TASK, &resp);
7313 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7314 /* cmd pending in the device */
7315 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7316 __func__, tag);
7317 break;
7318 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7319 /*
7320 * cmd not pending in the device, check if it is
7321 * in transition.
7322 */
7323 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7324 __func__, tag);
7325 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7326 if (reg & (1 << tag)) {
7327 /* sleep for max. 200us to stabilize */
7328 usleep_range(100, 200);
7329 continue;
7330 }
7331 /* command completed already */
7332 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7333 __func__, tag);
7334 goto out;
7335 } else {
7336 dev_err(hba->dev,
7337 "%s: no response from device. tag = %d, err %d\n",
7338 __func__, tag, err);
7339 if (!err)
7340 err = resp; /* service response error */
7341 goto out;
7342 }
7343 }
7344
7345 if (!poll_cnt) {
7346 err = -EBUSY;
7347 goto out;
7348 }
7349
7350 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7351 UFS_ABORT_TASK, &resp);
7352 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7353 if (!err) {
7354 err = resp; /* service response error */
7355 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7356 __func__, tag, err);
7357 }
7358 goto out;
7359 }
7360
d1a76446 7361 err = ufshcd_clear_cmds(hba, 1U << tag);
7362 if (err)
7363 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7364 __func__, tag, err);
7365
7366out:
7367 return err;
7368}
7369
7370/**
7371 * ufshcd_abort - scsi host template eh_abort_handler callback
7372 * @cmd: SCSI command pointer
7373 *
7374 * Returns SUCCESS/FAILED
7375 */
7376static int ufshcd_abort(struct scsi_cmnd *cmd)
7377{
7378 struct Scsi_Host *host = cmd->device->host;
7379 struct ufs_hba *hba = shost_priv(host);
3f2c1002 7380 int tag = scsi_cmd_to_rq(cmd)->tag;
4728ab4a 7381 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7a3e97b0 7382 unsigned long flags;
64180742 7383 int err = FAILED;
1fbaa02d 7384 bool outstanding;
e9d501b1 7385 u32 reg;
7a3e97b0 7386
4728ab4a 7387 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7a3e97b0 7388
1ab27c9c 7389 ufshcd_hold(hba, false);
14497328 7390 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
64180742 7391 /* If command is already aborted/completed, return FAILED. */
7392 if (!(test_bit(tag, &hba->outstanding_reqs))) {
7393 dev_err(hba->dev,
7394 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7395 __func__, tag, hba->outstanding_reqs, reg);
64180742 7396 goto release;
14497328 7397 }
7a3e97b0 7398
66cc820f 7399 /* Print Transfer Request of aborted task */
d87a1f6d 7400 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
66cc820f 7401
7402 /*
7403 * Print detailed info about aborted request.
7404 * As more than one request might get aborted at the same time,
7405 * print full information only for the first aborted request in order
7406 * to reduce repeated printouts. For other aborted requests only print
7407 * basic details.
7408 */
7a7e66c6 7409 scsi_print_command(cmd);
7fabb77b 7410 if (!hba->req_abort_count) {
7411 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7412 ufshcd_print_evt_hist(hba);
6ba65588 7413 ufshcd_print_host_state(hba);
7414 ufshcd_print_pwr_info(hba);
7415 ufshcd_print_trs(hba, 1 << tag, true);
7416 } else {
7417 ufshcd_print_trs(hba, 1 << tag, false);
7418 }
7419 hba->req_abort_count++;
e0b299e3 7420
7421 if (!(reg & (1 << tag))) {
7422 dev_err(hba->dev,
7423 "%s: cmd was completed, but without a notifying intr, tag = %d",
7424 __func__, tag);
11682523 7425 __ufshcd_transfer_req_compl(hba, 1UL << tag);
64180742 7426 goto release;
7427 }
7428
7429 /*
7430 * A task abort sent to the device W-LUN is illegal. When such an abort
7431 * fails due to that spec violation, the next SCSI error handling step
7432 * would be to send a LU reset which, again, is a spec violation.
7433 * To avoid these unnecessary/illegal steps, first we clean up
a45f9371 7434 * the lrb taken by this cmd and re-set it in outstanding_reqs,
88b09900 7435 * then queue the eh_work and bail.
7436 */
7437 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7438 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
64180742 7439
7a7e66c6 7440 spin_lock_irqsave(host->host_lock, flags);
a45f9371 7441 hba->force_reset = true;
88b09900 7442 ufshcd_schedule_eh_work(hba);
7a7e66c6 7443 spin_unlock_irqrestore(host->host_lock, flags);
64180742 7444 goto release;
7445 }
7446
e0b299e3 7447 /* Skip task abort in case previous aborts failed and report failure */
7448 if (lrbp->req_abort_skip) {
7449 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7450 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7451 goto release;
7452 }
f20810d8 7453
7454 err = ufshcd_try_to_abort_task(hba, tag);
7455 if (err) {
f20810d8 7456 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
e0b299e3 7457 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
f20810d8 7458 err = FAILED;
64180742 7459 goto release;
7460 }
7461
7462 /*
7463 * Clear the corresponding bit from outstanding_reqs since the command
7464 * has been aborted successfully.
7465 */
7466 spin_lock_irqsave(&hba->outstanding_lock, flags);
7467 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7468 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7469
7470 if (outstanding)
7471 ufshcd_release_scsi_cmd(hba, lrbp);
7472
7473 err = SUCCESS;
7474
7475release:
7476 /* Matches the ufshcd_hold() call at the start of this function. */
1ab27c9c 7477 ufshcd_release(hba);
7478 return err;
7479}
7480
7481/**
7482 * ufshcd_host_reset_and_restore - reset and restore host controller
7483 * @hba: per-adapter instance
7484 *
7485 * Note that host controller reset may issue DME_RESET to
7486 * local and remote (device) Uni-Pro stack and the attributes
7487 * are reset to default state.
7488 *
7489 * Returns zero on success, non-zero on failure
7490 */
7491static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7492{
7493 int err;
3441da7d 7494
7495 /*
7496 * Stop the host controller and complete the requests
7497 * cleared by h/w
7498 */
facc239c 7499 ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
5cac1095 7500 ufshcd_hba_stop(hba);
2df74b69 7501 hba->silence_err_logs = true;
11682523 7502 ufshcd_complete_requests(hba);
2df74b69 7503 hba->silence_err_logs = false;
3441da7d 7504
a3cd5ec5 7505 /* scale up clocks to max frequency before full reinitialization */
52a51801 7506 ufshcd_scale_clks(hba, true);
a3cd5ec5 7507
3441da7d 7508 err = ufshcd_hba_enable(hba);
7509
7510 /* Establish the link again and restore the device */
1918651f 7511 if (!err)
7512 err = ufshcd_probe_hba(hba, false);
7513
7514 if (err)
7515 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
e965e5e0 7516 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7517 return err;
7518}
7519
7520/**
7521 * ufshcd_reset_and_restore - reset and re-initialize host/device
7522 * @hba: per-adapter instance
7523 *
7524 * Reset and recover device, host and re-establish link. This
7525 * is helpful to recover the communication in fatal error conditions.
7526 *
7527 * Returns zero on success, non-zero on failure
7528 */
7529static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7530{
7531 u32 saved_err = 0;
7532 u32 saved_uic_err = 0;
3441da7d 7533 int err = 0;
4db7a236 7534 unsigned long flags;
1d337ec2 7535 int retries = MAX_HOST_RESET_RETRIES;
3441da7d 7536
4db7a236 7537 spin_lock_irqsave(hba->host->host_lock, flags);
1d337ec2 7538 do {
7539 /*
7540 * This is a fresh start, cache and clear saved error first,
7541 * in case new error generated during reset and restore.
7542 */
7543 saved_err |= hba->saved_err;
7544 saved_uic_err |= hba->saved_uic_err;
7545 hba->saved_err = 0;
7546 hba->saved_uic_err = 0;
7547 hba->force_reset = false;
7548 hba->ufshcd_state = UFSHCD_STATE_RESET;
7549 spin_unlock_irqrestore(hba->host->host_lock, flags);
7550
d8d9f793 7551 /* Reset the attached device */
31a5d9ca 7552 ufshcd_device_reset(hba);
d8d9f793 7553
1d337ec2 7554 err = ufshcd_host_reset_and_restore(hba);
7555
7556 spin_lock_irqsave(hba->host->host_lock, flags);
7557 if (err)
7558 continue;
7559 /* Do not exit unless operational or dead */
7560 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7561 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7562 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7563 err = -EAGAIN;
1d337ec2 7564 } while (err && --retries);
3441da7d 7565
7566 /*
7567 * Inform the SCSI mid-layer that we did a reset and allow it to handle
7568 * Unit Attention properly.
7569 */
7570 scsi_report_bus_reset(hba->host, 0);
7571 if (err) {
88a92d6a 7572 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7573 hba->saved_err |= saved_err;
7574 hba->saved_uic_err |= saved_uic_err;
7575 }
7576 spin_unlock_irqrestore(hba->host->host_lock, flags);
7577
7578 return err;
7579}
7580
7581/**
7582 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
8aa29f19 7583 * @cmd: SCSI command pointer
7584 *
7585 * Returns SUCCESS/FAILED
7586 */
7587static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7588{
4db7a236 7589 int err = SUCCESS;
7590 unsigned long flags;
7591 struct ufs_hba *hba;
7592
7593 hba = shost_priv(cmd->device->host);
7594
7595 spin_lock_irqsave(hba->host->host_lock, flags);
7596 hba->force_reset = true;
88b09900 7597 ufshcd_schedule_eh_work(hba);
4db7a236 7598 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7599 spin_unlock_irqrestore(hba->host->host_lock, flags);
7600
88b09900 7601 flush_work(&hba->eh_work);
7602
7603 spin_lock_irqsave(hba->host->host_lock, flags);
4db7a236 7604 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
3441da7d 7605 err = FAILED;
7606 spin_unlock_irqrestore(hba->host->host_lock, flags);
7607
7608 return err;
7609}
7610
7611/**
7612 * ufshcd_get_max_icc_level - calculate the ICC level
7613 * @sup_curr_uA: max. current supported by the regulator
7614 * @start_scan: row at the desc table to start scan from
7615 * @buff: power descriptor buffer
7616 *
7617 * Returns calculated max ICC level for specific regulator
7618 */
7619static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7620 const char *buff)
7621{
7622 int i;
7623 int curr_uA;
7624 u16 data;
7625 u16 unit;
7626
7627 for (i = start_scan; i >= 0; i--) {
9d3ab17e 7628 data = get_unaligned_be16(&buff[2 * i]);
7629 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7630 ATTR_ICC_LVL_UNIT_OFFSET;
7631 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7632 switch (unit) {
7633 case UFSHCD_NANO_AMP:
7634 curr_uA = curr_uA / 1000;
7635 break;
7636 case UFSHCD_MILI_AMP:
7637 curr_uA = curr_uA * 1000;
7638 break;
7639 case UFSHCD_AMP:
7640 curr_uA = curr_uA * 1000 * 1000;
7641 break;
7642 case UFSHCD_MICRO_AMP:
7643 default:
7644 break;
7645 }
7646 if (sup_curr_uA >= curr_uA)
7647 break;
7648 }
7649 if (i < 0) {
7650 i = 0;
7651 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7652 }
7653
7654 return (u32)i;
7655}
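/*
 * Worked example (illustrative): an entry whose unit field decodes to
 * UFSHCD_MILI_AMP with a value of 100 is normalized to 100 * 1000 =
 * 100000 uA.  Scanning from the highest level downwards, the first entry
 * whose normalized current fits within the regulator's sup_curr_uA is
 * returned as the supported ICC level.
 */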
7656
7657/**
11eea9b3 7658 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7659 * In case regulators are not initialized we'll return 0
7660 * @hba: per-adapter instance
7661 * @desc_buf: power descriptor buffer to extract ICC levels from.
7662 *
7663 * Returns calculated ICC level
7664 */
7665static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
01a0d515 7666 const u8 *desc_buf)
7667{
7668 u32 icc_level = 0;
7669
7670 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7671 !hba->vreg_info.vccq2) {
7672 /*
7673 * Using dev_dbg to avoid messages during runtime PM to avoid
7674 * never-ending cycles of messages written back to storage by
7675 * user space causing runtime resume, causing more messages and
7676 * so on.
7677 */
7678 dev_dbg(hba->dev,
7679 "%s: Regulator capability was not set, actvIccLevel=%d",
7680 __func__, icc_level);
7681 goto out;
7682 }
7683
0873045f 7684 if (hba->vreg_info.vcc->max_uA)
7685 icc_level = ufshcd_get_max_icc_level(
7686 hba->vreg_info.vcc->max_uA,
7687 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7688 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7689
0873045f 7690 if (hba->vreg_info.vccq->max_uA)
7691 icc_level = ufshcd_get_max_icc_level(
7692 hba->vreg_info.vccq->max_uA,
7693 icc_level,
7694 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7695
0873045f 7696 if (hba->vreg_info.vccq2->max_uA)
7697 icc_level = ufshcd_get_max_icc_level(
7698 hba->vreg_info.vccq2->max_uA,
7699 icc_level,
7700 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7701out:
7702 return icc_level;
7703}
7704
e89860f1 7705static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7706{
7707 int ret;
bbe21d7a 7708 u8 *desc_buf;
e89860f1 7709 u32 icc_level;
bbe21d7a 7710
f2a89b07 7711 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7712 if (!desc_buf)
7713 return;
3a4bf06d 7714
c4607a09 7715 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
f2a89b07 7716 desc_buf, QUERY_DESC_MAX_SIZE);
7717 if (ret) {
7718 dev_err(hba->dev,
7719 "%s: Failed reading power descriptor ret = %d",
7720 __func__, ret);
bbe21d7a 7721 goto out;
7722 }
7723
01a0d515 7724 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
e89860f1 7725 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
3a4bf06d 7726
dbd34a61 7727 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
e89860f1 7728 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7729
7730 if (ret)
7731 dev_err(hba->dev,
7732 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
e89860f1 7733 __func__, icc_level, ret);
3a4bf06d 7734
7735out:
7736 kfree(desc_buf);
7737}
7738
7739static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7740{
7741 scsi_autopm_get_device(sdev);
7742 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7743 if (sdev->rpm_autosuspend)
7744 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7745 RPM_AUTOSUSPEND_DELAY_MS);
7746 scsi_autopm_put_device(sdev);
7747}
7748
7749/**
7750 * ufshcd_scsi_add_wlus - Adds required W-LUs
7751 * @hba: per-adapter instance
7752 *
7753 * UFS device specification requires the UFS devices to support 4 well known
7754 * logical units:
7755 * "REPORT_LUNS" (address: 01h)
7756 * "UFS Device" (address: 50h)
7757 * "RPMB" (address: 44h)
7758 * "BOOT" (address: 30h)
7759 * UFS device's power management needs to be controlled by "POWER CONDITION"
7760 * field of SSU (START STOP UNIT) command. But this "power condition" field
7761 * will take effect only when it is sent to the "UFS Device" well-known logical
7762 * unit, hence we require the scsi_device instance to represent this logical unit in
7763 * order for the UFS host driver to send the SSU command for power management.
8aa29f19 7764 *
7765 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7766 * Block) LU so user space process can control this LU. User space may also
7767 * want to have access to BOOT LU.
8aa29f19 7768 *
7769 * This function adds scsi device instances for each of the well-known LUs
7770 * (except "REPORT LUNS" LU).
7771 *
7772 * Returns zero on success (all required W-LUs are added successfully),
7773 * non-zero error value on failure (if failed to add any of the required W-LU).
7774 */
7775static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7776{
7777 int ret = 0;
59830c09 7778 struct scsi_device *sdev_boot, *sdev_rpmb;
2a8fa600 7779
e2106584 7780 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
2a8fa600 7781 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7782 if (IS_ERR(hba->ufs_device_wlun)) {
7783 ret = PTR_ERR(hba->ufs_device_wlun);
7784 hba->ufs_device_wlun = NULL;
7785 goto out;
7786 }
e2106584 7787 scsi_device_put(hba->ufs_device_wlun);
2a8fa600 7788
59830c09 7789 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
2a8fa600 7790 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7791 if (IS_ERR(sdev_rpmb)) {
7792 ret = PTR_ERR(sdev_rpmb);
e2106584 7793 goto remove_ufs_device_wlun;
2a8fa600 7794 }
7795 ufshcd_blk_pm_runtime_init(sdev_rpmb);
7796 scsi_device_put(sdev_rpmb);
7797
7798 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7799 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
fb276f77 7800 if (IS_ERR(sdev_boot)) {
3d21fbde 7801 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7802 } else {
7803 ufshcd_blk_pm_runtime_init(sdev_boot);
3d21fbde 7804 scsi_device_put(sdev_boot);
fb276f77 7805 }
7806 goto out;
7807
7808remove_ufs_device_wlun:
7809 scsi_remove_device(hba->ufs_device_wlun);
7810out:
7811 return ret;
7812}
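/*
 * Worked example (illustrative): ufshcd_upiu_wlun_to_scsi_wlun() tags the
 * UPIU W-LUN address with the well-known-LUN bit (UFS_UPIU_WLUN_ID), so the
 * "UFS Device" W-LUN 50h is registered with the SCSI mid-layer as LUN 0xD0,
 * the RPMB W-LUN 44h as 0xC4 and the BOOT W-LUN 30h as 0xB0.
 */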
7813
35d11ec2 7814static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
3d17b9b5 7815{
a7f1e69d 7816 struct ufs_dev_info *dev_info = &hba->dev_info;
7817 u8 lun;
7818 u32 d_lu_wb_buf_alloc;
e8d03813 7819 u32 ext_ufs_feature;
6f8d5a6a 7820
7821 if (!ufshcd_is_wb_allowed(hba))
7822 return;
f681d107 7823
7824 /*
7825 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7826 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7827 * enabled
7828 */
7829 if (!(dev_info->wspecversion >= 0x310 ||
7830 dev_info->wspecversion == 0x220 ||
7831 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7832 goto wb_disabled;
817d7e14 7833
7834 ext_ufs_feature = get_unaligned_be32(desc_buf +
7835 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
817d7e14 7836
e8d03813 7837 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
7838 goto wb_disabled;
7839
3d17b9b5 7840 /*
7841 * WB may be supported by the device but left unconfigured during
7842 * provisioning. The spec says that, in dedicated WB buffer mode, at most
7843 * one LUN may have a WB buffer configured.
3d17b9b5 7844 */
4cd48995 7845 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
3d17b9b5 7846
a7f1e69d 7847 dev_info->b_presrv_uspc_en =
7848 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7849
4cd48995 7850 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
7851 if (!get_unaligned_be32(desc_buf +
7852 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
7853 goto wb_disabled;
7854 } else {
7855 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7856 d_lu_wb_buf_alloc = 0;
7857 ufshcd_read_unit_desc_param(hba,
7858 lun,
7859 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7860 (u8 *)&d_lu_wb_buf_alloc,
7861 sizeof(d_lu_wb_buf_alloc));
7862 if (d_lu_wb_buf_alloc) {
a7f1e69d 7863 dev_info->wb_dedicated_lu = lun;
7864 break;
7865 }
7866 }
817d7e14 7867
7868 if (!d_lu_wb_buf_alloc)
7869 goto wb_disabled;
7870 }
7871
7872 if (!ufshcd_is_wb_buf_lifetime_available(hba))
7873 goto wb_disabled;
7874
7875 return;
7876
7877wb_disabled:
7878 hba->caps &= ~UFSHCD_CAP_WB_EN;
7879}
7880
35d11ec2 7881static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
7882{
7883 struct ufs_dev_info *dev_info = &hba->dev_info;
7884 u32 ext_ufs_feature;
7885 u8 mask = 0;
7886
7887 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
7888 return;
7889
7890 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7891
7892 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
7893 mask |= MASK_EE_TOO_LOW_TEMP;
7894
7895 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
7896 mask |= MASK_EE_TOO_HIGH_TEMP;
7897
7898 if (mask) {
7899 ufshcd_enable_ee(hba, mask);
7900 ufs_hwmon_probe(hba, mask);
7901 }
7902}
7903
7904static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
7905{
7906 struct ufs_dev_info *dev_info = &hba->dev_info;
7907 u32 ext_ufs_feature;
7908 u32 ext_iid_en = 0;
7909 int err;
7910
7911 /* Only UFS-4.0 and above may support EXT_IID */
7912 if (dev_info->wspecversion < 0x400)
7913 goto out;
7914
7915 ext_ufs_feature = get_unaligned_be32(desc_buf +
7916 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7917 if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
7918 goto out;
7919
7920 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7921 QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
7922 if (err)
7923 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
7924
7925out:
7926 dev_info->b_ext_iid_en = ext_iid_en;
7927}
7928
7929void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
7930 const struct ufs_dev_quirk *fixups)
817d7e14 7931{
aead21f3 7932 const struct ufs_dev_quirk *f;
7933 struct ufs_dev_info *dev_info = &hba->dev_info;
7934
7935 if (!fixups)
7936 return;
7937
7938 for (f = fixups; f->quirk; f++) {
7939 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7940 f->wmanufacturerid == UFS_ANY_VENDOR) &&
7941 ((dev_info->model &&
7942 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7943 !strcmp(f->model, UFS_ANY_MODEL)))
7944 hba->dev_quirks |= f->quirk;
7945 }
3d17b9b5 7946}
8db269a5 7947EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
3d17b9b5 7948
7949static void ufs_fixup_device_setup(struct ufs_hba *hba)
7950{
7951 /* fix by general quirk table */
8db269a5 7952 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7953
7954 /* allow vendors to fix quirks */
7955 ufshcd_vops_fixup_dev_quirks(hba);
7956}
7957
09750066 7958static int ufs_get_device_desc(struct ufs_hba *hba)
7959{
7960 int err;
7961 u8 model_index;
f02bc975 7962 u8 b_ufs_feature_sup;
bbe21d7a 7963 u8 *desc_buf;
09750066 7964 struct ufs_dev_info *dev_info = &hba->dev_info;
4b828fe1 7965
f2a89b07 7966 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7967 if (!desc_buf) {
7968 err = -ENOMEM;
7969 goto out;
7970 }
c58ab7aa 7971
c4607a09 7972 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
f2a89b07 7973 QUERY_DESC_MAX_SIZE);
7974 if (err) {
7975 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7976 __func__, err);
7977 goto out;
7978 }
7979
7980 /*
7981 * getting vendor (manufacturerID) and Bank Index in big endian
7982 * format
7983 */
09750066 7984 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7985 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7986
7987 /* getting Specification Version in big endian format */
7988 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7989 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7224c806 7990 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
f02bc975 7991 b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
09f17791 7992
c58ab7aa 7993 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
3d17b9b5 7994
7995 if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7996 (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
7997 bool hpb_en = false;
7998
f02bc975 7999 ufshpb_get_dev_info(hba, desc_buf);
8000
8001 if (!ufshpb_is_legacy(hba))
8002 err = ufshcd_query_flag_retry(hba,
8003 UPIU_QUERY_OPCODE_READ_FLAG,
8004 QUERY_FLAG_IDN_HPB_EN, 0,
8005 &hpb_en);
8006
8007 if (ufshpb_is_legacy(hba) || (!err && hpb_en))
8008 dev_info->hpb_enabled = true;
8009 }
8010
4b828fe1 8011 err = ufshcd_read_string_desc(hba, model_index,
09750066 8012 &dev_info->model, SD_ASCII_STD);
4b828fe1 8013 if (err < 0) {
8014 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8015 __func__, err);
8016 goto out;
8017 }
8018
8019 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8020 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8021
8022 ufs_fixup_device_setup(hba);
8023
a7f1e69d 8024 ufshcd_wb_probe(hba, desc_buf);
817d7e14 8025
8026 ufshcd_temp_notif_probe(hba, desc_buf);
8027
8028 if (hba->ext_iid_sup)
8029 ufshcd_ext_iid_probe(hba, desc_buf);
8030
8031 /*
8032 * ufshcd_read_string_desc() returns the size of the string on success;
8033 * reset the error value here.
8034 */
8035 err = 0;
8036
8037out:
bbe21d7a 8038 kfree(desc_buf);
8039 return err;
8040}
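/*
 * Worked example (illustrative): wspecversion is assembled big-endian from
 * two descriptor bytes, so a device reporting bytes 03h 10h ends up with
 * dev_info->wspecversion == 0x0310, i.e. a UFS 3.1 device; 0x0400 denotes
 * UFS 4.0 and is what gates the EXT_IID probe above.
 */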
8041
09750066 8042static void ufs_put_device_desc(struct ufs_hba *hba)
4b828fe1 8043{
8044 struct ufs_dev_info *dev_info = &hba->dev_info;
8045
8046 kfree(dev_info->model);
8047 dev_info->model = NULL;
8048}
8049
8050/**
8051 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8052 * @hba: per-adapter instance
8053 *
8054 * PA_TActivate parameter can be tuned manually if UniPro version is less than
8055 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
8056 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8057 * the hibern8 exit latency.
8058 *
8059 * Returns zero on success, non-zero error value on failure.
8060 */
8061static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8062{
8063 int ret = 0;
8064 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8065
8066 ret = ufshcd_dme_peer_get(hba,
8067 UIC_ARG_MIB_SEL(
8068 RX_MIN_ACTIVATETIME_CAPABILITY,
8069 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8070 &peer_rx_min_activatetime);
8071 if (ret)
8072 goto out;
8073
8074 /* make sure proper unit conversion is applied */
8075 tuned_pa_tactivate =
8076 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8077 / PA_TACTIVATE_TIME_UNIT_US);
8078 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8079 tuned_pa_tactivate);
8080
8081out:
8082 return ret;
8083}
8084
8085/**
8086 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8087 * @hba: per-adapter instance
8088 *
8089 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
8090 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8091 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8092 * This optimal value can help reduce the hibern8 exit latency.
8093 *
8094 * Returns zero on success, non-zero error value on failure.
8095 */
8096static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8097{
8098 int ret = 0;
8099 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8100 u32 max_hibern8_time, tuned_pa_hibern8time;
8101
8102 ret = ufshcd_dme_get(hba,
8103 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8104 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8105 &local_tx_hibern8_time_cap);
8106 if (ret)
8107 goto out;
8108
8109 ret = ufshcd_dme_peer_get(hba,
8110 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8111 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8112 &peer_rx_hibern8_time_cap);
8113 if (ret)
8114 goto out;
8115
8116 max_hibern8_time = max(local_tx_hibern8_time_cap,
8117 peer_rx_hibern8_time_cap);
8118 /* make sure proper unit conversion is applied */
8119 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8120 / PA_HIBERN8_TIME_UNIT_US);
8121 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8122 tuned_pa_hibern8time);
8123out:
8124 return ret;
8125}
8126
c6a6db43 8127/**
8128 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8129 * less than device PA_TACTIVATE time.
8130 * @hba: per-adapter instance
8131 *
8132 * Some UFS devices require host PA_TACTIVATE to be lower than device
8133 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
8134 * for such devices.
8135 *
8136 * Returns zero on success, non-zero error value on failure.
8137 */
8138static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8139{
8140 int ret = 0;
8141 u32 granularity, peer_granularity;
8142 u32 pa_tactivate, peer_pa_tactivate;
8143 u32 pa_tactivate_us, peer_pa_tactivate_us;
35d11ec2 8144 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
c6a6db43 8145
8146 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8147 &granularity);
8148 if (ret)
8149 goto out;
8150
8151 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8152 &peer_granularity);
8153 if (ret)
8154 goto out;
8155
8156 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8157 (granularity > PA_GRANULARITY_MAX_VAL)) {
8158 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8159 __func__, granularity);
8160 return -EINVAL;
8161 }
8162
8163 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8164 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8165 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8166 __func__, peer_granularity);
8167 return -EINVAL;
8168 }
8169
8170 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8171 if (ret)
8172 goto out;
8173
8174 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8175 &peer_pa_tactivate);
8176 if (ret)
8177 goto out;
8178
8179 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8180 peer_pa_tactivate_us = peer_pa_tactivate *
8181 gran_to_us_table[peer_granularity - 1];
8182
9008661e 8183 if (pa_tactivate_us >= peer_pa_tactivate_us) {
c6a6db43 8184 u32 new_peer_pa_tactivate;
8185
8186 new_peer_pa_tactivate = pa_tactivate_us /
8187 gran_to_us_table[peer_granularity - 1];
8188 new_peer_pa_tactivate++;
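/*
 * Added note (not in the original source): the increment rounds the
 * converted value up, so that the device's PA_TACTIVATE expressed in
 * microseconds ends up strictly greater than the host's, which is what
 * the quirk described above requires.
 */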
8189 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8190 new_peer_pa_tactivate);
8191 }
8192
8193out:
8194 return ret;
8195}
8196
09750066 8197static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
37113106
YG
8198{
8199 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8200 ufshcd_tune_pa_tactivate(hba);
8201 ufshcd_tune_pa_hibern8time(hba);
8202 }
8203
e91ed9e0
CG
8204 ufshcd_vops_apply_dev_quirks(hba);
8205
37113106
YG
8206 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8207 /* set 1ms timeout for PA_TACTIVATE */
8208 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
c6a6db43 8209
8210 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8211 ufshcd_quirk_tune_host_pa_tactivate(hba);
37113106
YG
8212}
8213
ff8e20c6
DR
8214static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8215{
ff8e20c6
DR
8216 hba->ufs_stats.hibern8_exit_cnt = 0;
8217 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7fabb77b 8218 hba->req_abort_count = 0;
ff8e20c6
DR
8219}
8220
731f0621
BH
8221static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8222{
8223 int err;
731f0621
BH
8224 u8 *desc_buf;
8225
f2a89b07 8226 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
731f0621
BH
8227 if (!desc_buf) {
8228 err = -ENOMEM;
8229 goto out;
8230 }
8231
c4607a09 8232 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
f2a89b07 8233 desc_buf, QUERY_DESC_MAX_SIZE);
731f0621
BH
8234 if (err) {
8235 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8236 __func__, err);
8237 goto out;
8238 }
8239
8240 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8241 hba->dev_info.max_lu_supported = 32;
8242 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8243 hba->dev_info.max_lu_supported = 8;
8244
f2a89b07 8245 if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
f02bc975
DP
8246 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
8247 ufshpb_get_geo_info(hba, desc_buf);
8248
731f0621
BH
8249out:
8250 kfree(desc_buf);
8251 return err;
8252}
8253
743b09d8
BVA
8254struct ufs_ref_clk {
8255 unsigned long freq_hz;
8256 enum ufs_ref_clk_freq val;
8257};
8258
35d11ec2 8259static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
9e1e8a75
SJ
8260 {19200000, REF_CLK_FREQ_19_2_MHZ},
8261 {26000000, REF_CLK_FREQ_26_MHZ},
8262 {38400000, REF_CLK_FREQ_38_4_MHZ},
8263 {52000000, REF_CLK_FREQ_52_MHZ},
8264 {0, REF_CLK_FREQ_INVAL},
8265};
8266
8267static enum ufs_ref_clk_freq
8268ufs_get_bref_clk_from_hz(unsigned long freq)
8269{
8270 int i;
8271
8272 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8273 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8274 return ufs_ref_clk_freqs[i].val;
8275
8276 return REF_CLK_FREQ_INVAL;
8277}
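/*
 * Added note (not in the original source): ufs_get_bref_clk_from_hz() only
 * accepts an exact match against ufs_ref_clk_freqs[]; for example a
 * 26000000 Hz reference clock maps to REF_CLK_FREQ_26_MHZ, while any rate
 * not listed in the table yields REF_CLK_FREQ_INVAL.
 */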
8278
8279void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8280{
8281 unsigned long freq;
8282
8283 freq = clk_get_rate(refclk);
8284
8285 hba->dev_ref_clk_freq =
8286 ufs_get_bref_clk_from_hz(freq);
8287
8288 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8289 dev_err(hba->dev,
8290 "invalid ref_clk setting = %ld\n", freq);
8291}
8292
8293static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8294{
8295 int err;
8296 u32 ref_clk;
8297 u32 freq = hba->dev_ref_clk_freq;
8298
8299 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8300 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8301
8302 if (err) {
8303 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8304 err);
8305 goto out;
8306 }
8307
8308 if (ref_clk == freq)
8309 goto out; /* nothing to update */
8310
8311 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8312 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8313
8314 if (err) {
8315 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8316 ufs_ref_clk_freqs[freq].freq_hz);
8317 goto out;
8318 }
8319
8320 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8321 ufs_ref_clk_freqs[freq].freq_hz);
8322
8323out:
8324 return err;
8325}
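/*
 * Added note (not in the original source): ufshcd_set_dev_ref_clk() uses
 * hba->dev_ref_clk_freq both as the bRefClkFreq attribute value written to
 * the device and as an index into ufs_ref_clk_freqs[] for logging, which
 * relies on the enum values matching the ordering of the table above.
 */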
8326
1b9e2141
BH
8327static int ufshcd_device_params_init(struct ufs_hba *hba)
8328{
8329 bool flag;
f2a89b07 8330 int ret;
1b9e2141 8331
731f0621
BH
8332 /* Init UFS geometry descriptor related parameters */
8333 ret = ufshcd_device_geo_params_init(hba);
8334 if (ret)
8335 goto out;
8336
1b9e2141
BH
8337 /* Check and apply UFS device quirks */
8338 ret = ufs_get_device_desc(hba);
8339 if (ret) {
8340 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8341 __func__, ret);
8342 goto out;
8343 }
8344
09f17791
CG
8345 ufshcd_get_ref_clk_gating_wait(hba);
8346
1b9e2141 8347 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1f34eedf 8348 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
1b9e2141
BH
8349 hba->dev_info.f_power_on_wp_en = flag;
8350
2b35b2ad
BH
8351 /* Probe maximum power mode co-supported by both UFS host and device */
8352 if (ufshcd_get_max_pwr_mode(hba))
8353 dev_err(hba->dev,
8354 "%s: Failed getting max supported power mode\n",
8355 __func__);
1b9e2141
BH
8356out:
8357 return ret;
8358}
8359
8360/**
8361 * ufshcd_add_lus - probe and add UFS logical units
8362 * @hba: per-adapter instance
8363 */
8364static int ufshcd_add_lus(struct ufs_hba *hba)
8365{
8366 int ret;
8367
1b9e2141
BH
8368 /* Add required well known logical units to scsi mid layer */
8369 ret = ufshcd_scsi_add_wlus(hba);
8370 if (ret)
8371 goto out;
8372
8373 /* Initialize devfreq after UFS device is detected */
8374 if (ufshcd_is_clkscaling_supported(hba)) {
8375 memcpy(&hba->clk_scaling.saved_pwr_info.info,
8376 &hba->pwr_info,
8377 sizeof(struct ufs_pa_layer_attr));
8378 hba->clk_scaling.saved_pwr_info.is_valid = true;
1b9e2141 8379 hba->clk_scaling.is_allowed = true;
1b9e2141 8380
b058fa86
SC
8381 ret = ufshcd_devfreq_init(hba);
8382 if (ret)
8383 goto out;
8384
8385 hba->clk_scaling.is_enabled = true;
8386 ufshcd_init_clk_scaling_sysfs(hba);
1b9e2141
BH
8387 }
8388
8389 ufs_bsg_probe(hba);
f02bc975 8390 ufshpb_init(hba);
1b9e2141
BH
8391 scsi_scan_host(hba->host);
8392 pm_runtime_put_sync(hba->dev);
8393
1b9e2141
BH
8394out:
8395 return ret;
8396}
8397
4682abfa
AD
8398/* SDB - Single Doorbell */
8399static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8400{
8401 size_t ucdl_size, utrdl_size;
8402
8403 ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
8404 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8405 hba->ucdl_dma_addr);
8406
8407 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8408 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8409 hba->utrdl_dma_addr);
8410
8411 devm_kfree(hba->dev, hba->lrb);
8412}
8413
57b1c0ef
AD
8414static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8415{
7224c806
AD
8416 int ret;
8417 int old_nutrs = hba->nutrs;
8418
8419 ret = ufshcd_mcq_decide_queue_depth(hba);
8420 if (ret < 0)
8421 return ret;
8422
8423 hba->nutrs = ret;
8424 ret = ufshcd_mcq_init(hba);
4682abfa
AD
8425 if (ret)
8426 goto err;
8427
8428 /*
8429 * Previously allocated memory for nutrs may not be enough in MCQ mode.
8430 * The number of supported tags in MCQ mode may be larger than in SDB mode.
8431 */
8432 if (hba->nutrs != old_nutrs) {
8433 ufshcd_release_sdb_queue(hba, old_nutrs);
8434 ret = ufshcd_memory_alloc(hba);
8435 if (ret)
8436 goto err;
8437 ufshcd_host_memory_configure(hba);
7224c806
AD
8438 }
8439
4682abfa
AD
8440 ret = ufshcd_mcq_memory_alloc(hba);
8441 if (ret)
8442 goto err;
8443
7224c806 8444 return 0;
4682abfa
AD
8445err:
8446 hba->nutrs = old_nutrs;
8447 return ret;
57b1c0ef
AD
8448}
8449
2468da61
AD
8450static void ufshcd_config_mcq(struct ufs_hba *hba)
8451{
8452 ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
8453 ufshcd_mcq_make_queues_operational(hba);
8454 ufshcd_mcq_config_mac(hba, hba->nutrs);
8455
8456 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8457 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
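/*
 * Added note (not in the original source): the last UFSHCD_NUM_RESERVED
 * tag(s) are excluded from can_queue so the SCSI midlayer never allocates
 * them; reserved_slot is the tag the driver keeps for its own use,
 * presumably for internal device management commands.
 */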
eacb139b
AD
8458
8459 /* Select MCQ mode */
8460 ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
8461 REG_UFS_MEM_CFG);
8462 hba->mcq_enabled = true;
8463
2468da61
AD
8464 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8465 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8466 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8467 hba->nutrs);
8468}
8469
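/*
 * Added overview (not in the original source): ufshcd_device_init()
 * performs link startup, verifies the device with a NOP OUT UPIU, completes
 * device initialization via ufshcd_complete_dev_init(), optionally
 * (re)reads device parameters and sets up MCQ resources, tunes UniPro
 * parameters and finally switches to the maximum supported power mode.
 */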
96a7141d 8470static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
6ccf44fe 8471{
6ccf44fe 8472 int ret;
0cab4023 8473 struct Scsi_Host *host = hba->host;
6ccf44fe 8474
aa53f580
CG
8475 hba->ufshcd_state = UFSHCD_STATE_RESET;
8476
6ccf44fe 8477 ret = ufshcd_link_startup(hba);
5a0b0cb9 8478 if (ret)
96a7141d 8479 return ret;
5a0b0cb9 8480
10fb4f87 8481 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
96a7141d 8482 return ret;
10fb4f87 8483
ff8e20c6
DR
8484 /* Debug counters initialization */
8485 ufshcd_clear_dbg_ufs_stats(hba);
8486
57d104c1
SJ
8487 /* UniPro link is active now */
8488 ufshcd_set_link_active(hba);
d3e89bac 8489
2468da61
AD
8490 /* Reconfigure MCQ upon reset */
8491 if (is_mcq_enabled(hba) && !init_dev_params)
8492 ufshcd_config_mcq(hba);
8493
1b9e2141 8494 /* Verify device initialization by sending NOP OUT UPIU */
5a0b0cb9
SRT
8495 ret = ufshcd_verify_dev_init(hba);
8496 if (ret)
96a7141d 8497 return ret;
68078d5c 8498
1b9e2141 8499 /* Initiate UFS initialization and wait until it completes */
68078d5c
DR
8500 ret = ufshcd_complete_dev_init(hba);
8501 if (ret)
96a7141d 8502 return ret;
5a0b0cb9 8503
1b9e2141
BH
8504 /*
8505 * Initialize the UFS device parameters used by the driver; these
8506 * parameters are associated with UFS descriptors.
8507 */
568dd995 8508 if (init_dev_params) {
1b9e2141
BH
8509 ret = ufshcd_device_params_init(hba);
8510 if (ret)
96a7141d 8511 return ret;
0cab4023 8512 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
57b1c0ef
AD
8513 ret = ufshcd_alloc_mcq(hba);
8514 if (ret) {
8515 /* Continue with SDB mode */
8516 use_mcq_mode = false;
8517 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8518 ret);
8519 }
0cab4023
AD
8520 ret = scsi_add_host(host, hba->dev);
8521 if (ret) {
8522 dev_err(hba->dev, "scsi_add_host failed\n");
8523 return ret;
8524 }
8525 hba->scsi_host_added = true;
8526 }
2468da61
AD
8527 /* MCQ may be disabled if ufshcd_alloc_mcq() fails */
8528 if (is_mcq_supported(hba) && use_mcq_mode)
8529 ufshcd_config_mcq(hba);
93fdd5ac
TW
8530 }
8531
09750066 8532 ufshcd_tune_unipro_params(hba);
4b828fe1 8533
57d104c1
SJ
8534 /* UFS device is also active now */
8535 ufshcd_set_ufs_dev_active(hba);
66ec6d59 8536 ufshcd_force_reset_auto_bkops(hba);
57d104c1 8537
2b35b2ad
BH
8538 /* Gear up to HS gear if supported */
8539 if (hba->max_pwr_info.is_valid) {
9e1e8a75
SJ
8540 /*
8541 * Set the right value to bRefClkFreq before attempting to
8542 * switch to HS gears.
8543 */
8544 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8545 ufshcd_set_dev_ref_clk(hba);
7eb584db 8546 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8643ae66 8547 if (ret) {
7eb584db
DR
8548 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8549 __func__, ret);
96a7141d
MS
8550 return ret;
8551 }
8552 }
8553
8554 return 0;
8555}
8556
8557/**
8558 * ufshcd_probe_hba - probe hba to detect device and initialize it
8559 * @hba: per-adapter instance
8560 * @init_dev_params: whether or not to call ufshcd_device_params_init().
8561 *
8562 * Execute link-startup and verify device initialization
8563 */
8564static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8565{
8566 ktime_t start = ktime_get();
8567 unsigned long flags;
8568 int ret;
8569
8570 ret = ufshcd_device_init(hba, init_dev_params);
8571 if (ret)
8572 goto out;
8573
8574 if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
8575 /* Reset the device and controller before doing reinit */
8576 ufshcd_device_reset(hba);
8577 ufshcd_hba_stop(hba);
8578 ufshcd_vops_reinit_notify(hba);
8579 ret = ufshcd_hba_enable(hba);
8580 if (ret) {
8581 dev_err(hba->dev, "Host controller enable failed\n");
8582 ufshcd_print_evt_hist(hba);
8583 ufshcd_print_host_state(hba);
8643ae66
DL
8584 goto out;
8585 }
96a7141d
MS
8586
8587 /* Reinit the device */
8588 ret = ufshcd_device_init(hba, init_dev_params);
8589 if (ret)
8590 goto out;
7eb584db 8591 }
57d104c1 8592
96a7141d
MS
8593 ufshcd_print_pwr_info(hba);
8594
e89860f1
CG
8595 /*
8596 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
8597 * and for removable UFS card as well, hence always set the parameter.
8598 * Note: the error handler may issue a device reset, which resets
8599 * bActiveICCLevel as well, so it is always safe to set this here.
8600 */
8601 ufshcd_set_active_icc_lvl(hba);
8602
4450a165
JC
8603 /* Enable UFS Write Booster if supported */
8604 ufshcd_configure_wb(hba);
8605
cd469475
AH
8606 if (hba->ee_usr_mask)
8607 ufshcd_write_ee_control(hba);
71d848b8
CG
8608 /* Enable Auto-Hibernate if configured */
8609 ufshcd_auto_hibern8_enable(hba);
8610
facc239c 8611 ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
5a0b0cb9 8612out:
4db7a236
CG
8613 spin_lock_irqsave(hba->host->host_lock, flags);
8614 if (ret)
8615 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8616 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8617 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8618 spin_unlock_irqrestore(hba->host->host_lock, flags);
1d337ec2 8619
7ff5ab47 8620 trace_ufshcd_init(dev_name(hba->dev), ret,
8621 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8622 hba->curr_dev_pwr_mode, hba->uic_link_state);
1d337ec2
SRT
8623 return ret;
8624}
8625
8626/**
8627 * ufshcd_async_scan - asynchronous execution for probing hba
8628 * @data: data pointer to pass to this function
8629 * @cookie: cookie data
8630 */
8631static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8632{
8633 struct ufs_hba *hba = (struct ufs_hba *)data;
1b9e2141 8634 int ret;
1d337ec2 8635
9cd20d3f 8636 down(&hba->host_sem);
1b9e2141
BH
8637 /* Initialize hba, detect and initialize UFS device */
8638 ret = ufshcd_probe_hba(hba, true);
9cd20d3f 8639 up(&hba->host_sem);
1b9e2141
BH
8640 if (ret)
8641 goto out;
8642
8643 /* Probe and add UFS logical units */
8644 ret = ufshcd_add_lus(hba);
8645out:
8646 /*
8647 * If we failed to initialize the device or the device is not
8648 * present, turn off the power/clocks etc.
8649 */
8650 if (ret) {
8651 pm_runtime_put_sync(hba->dev);
1b9e2141
BH
8652 ufshcd_hba_exit(hba);
8653 }
6ccf44fe
SJ
8654}
8655
7029e215
BVA
8656static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
8657{
8658 struct ufs_hba *hba = shost_priv(scmd->device->host);
8659
8660 if (!hba->system_suspending) {
8661 /* Activate the error handler in the SCSI core. */
8662 return SCSI_EH_NOT_HANDLED;
8663 }
8664
8665 /*
8666 * If we get here we know that no TMFs are outstanding and also that
8667 * the only pending command is a START STOP UNIT command. Handle the
8668 * timeout of that command directly to prevent a deadlock between
8669 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
8670 */
8671 ufshcd_link_recovery(hba);
8672 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
8673 __func__, hba->outstanding_tasks);
8674
8675 return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
8676}
8677
d829fc8a
SN
8678static const struct attribute_group *ufshcd_driver_groups[] = {
8679 &ufs_sysfs_unit_descriptor_group,
ec92b59c 8680 &ufs_sysfs_lun_attributes_group,
f02bc975
DP
8681#ifdef CONFIG_SCSI_UFS_HPB
8682 &ufs_sysfs_hpb_stat_group,
41d8a933 8683 &ufs_sysfs_hpb_param_group,
f02bc975 8684#endif
d829fc8a
SN
8685 NULL,
8686};
8687
90b8491c
SC
8688static struct ufs_hba_variant_params ufs_hba_vps = {
8689 .hba_enable_delay_us = 1000,
d14734ae 8690 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
90b8491c
SC
8691 .devfreq_profile.polling_ms = 100,
8692 .devfreq_profile.target = ufshcd_devfreq_target,
8693 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8694 .ondemand_data.upthreshold = 70,
8695 .ondemand_data.downdifferential = 5,
8696};
8697
7a3e97b0
SY
8698static struct scsi_host_template ufshcd_driver_template = {
8699 .module = THIS_MODULE,
8700 .name = UFSHCD,
8701 .proc_name = UFSHCD,
eaab9b57 8702 .map_queues = ufshcd_map_queues,
7a3e97b0 8703 .queuecommand = ufshcd_queuecommand,
eaab9b57 8704 .mq_poll = ufshcd_poll,
7a3e97b0 8705 .slave_alloc = ufshcd_slave_alloc,
eeda4749 8706 .slave_configure = ufshcd_slave_configure,
7a3e97b0 8707 .slave_destroy = ufshcd_slave_destroy,
4264fd61 8708 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 8709 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
8710 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8711 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7029e215 8712 .eh_timed_out = ufshcd_eh_timed_out,
7a3e97b0
SY
8713 .this_id = -1,
8714 .sg_tablesize = SG_ALL,
8715 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8716 .can_queue = UFSHCD_CAN_QUEUE,
552a990c 8717 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
86a44f04 8718 .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
1ab27c9c 8719 .max_host_blocked = 1,
c40ecc12 8720 .track_queue_depth = 1,
d829fc8a 8721 .sdev_groups = ufshcd_driver_groups,
4af14d11 8722 .dma_boundary = PAGE_SIZE - 1,
49615ba1 8723 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
7a3e97b0
SY
8724};
8725
57d104c1
SJ
8726static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8727 int ua)
8728{
7b16a07c 8729 int ret;
57d104c1 8730
7b16a07c
BA
8731 if (!vreg)
8732 return 0;
57d104c1 8733
0487fff7
SC
8734 /*
8735 * "set_load" operation shall be required on those regulators
8736 * which specifically configured current limitation. Otherwise
8737 * zero max_uA may cause unexpected behavior when regulator is
8738 * enabled or set as high power mode.
8739 */
8740 if (!vreg->max_uA)
8741 return 0;
8742
7b16a07c
BA
8743 ret = regulator_set_load(vreg->reg, ua);
8744 if (ret < 0) {
8745 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8746 __func__, vreg->name, ua, ret);
57d104c1
SJ
8747 }
8748
8749 return ret;
8750}
8751
8752static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8753 struct ufs_vreg *vreg)
8754{
73067981 8755 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
57d104c1
SJ
8756}
8757
8758static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8759 struct ufs_vreg *vreg)
8760{
7c7cfdcf
AH
8761 if (!vreg)
8762 return 0;
8763
73067981 8764 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
57d104c1
SJ
8765}
8766
aa497613
SRT
8767static int ufshcd_config_vreg(struct device *dev,
8768 struct ufs_vreg *vreg, bool on)
8769{
9474c64e
BVA
8770 if (regulator_count_voltages(vreg->reg) <= 0)
8771 return 0;
90d88f47 8772
9474c64e 8773 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
aa497613
SRT
8774}
8775
8776static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8777{
8778 int ret = 0;
8779
73067981 8780 if (!vreg || vreg->enabled)
aa497613
SRT
8781 goto out;
8782
8783 ret = ufshcd_config_vreg(dev, vreg, true);
8784 if (!ret)
8785 ret = regulator_enable(vreg->reg);
8786
8787 if (!ret)
8788 vreg->enabled = true;
8789 else
8790 dev_err(dev, "%s: %s enable failed, err=%d\n",
8791 __func__, vreg->name, ret);
8792out:
8793 return ret;
8794}
8795
8796static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8797{
8798 int ret = 0;
8799
f8162ac7 8800 if (!vreg || !vreg->enabled || vreg->always_on)
aa497613
SRT
8801 goto out;
8802
8803 ret = regulator_disable(vreg->reg);
8804
8805 if (!ret) {
8806 /* ignore errors on applying disable config */
8807 ufshcd_config_vreg(dev, vreg, false);
8808 vreg->enabled = false;
8809 } else {
8810 dev_err(dev, "%s: %s disable failed, err=%d\n",
8811 __func__, vreg->name, ret);
8812 }
8813out:
8814 return ret;
8815}
8816
8817static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8818{
8819 int ret = 0;
8820 struct device *dev = hba->dev;
8821 struct ufs_vreg_info *info = &hba->vreg_info;
8822
aa497613
SRT
8823 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8824 if (ret)
8825 goto out;
8826
8827 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8828 if (ret)
8829 goto out;
8830
8831 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
aa497613
SRT
8832
8833out:
8834 if (ret) {
8835 ufshcd_toggle_vreg(dev, info->vccq2, false);
8836 ufshcd_toggle_vreg(dev, info->vccq, false);
8837 ufshcd_toggle_vreg(dev, info->vcc, false);
8838 }
8839 return ret;
8840}
8841
6a771a65
RS
8842static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8843{
8844 struct ufs_vreg_info *info = &hba->vreg_info;
8845
60b7b823 8846 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6a771a65
RS
8847}
8848
1d6f9dec 8849int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
aa497613
SRT
8850{
8851 int ret = 0;
8852
8853 if (!vreg)
8854 goto out;
8855
8856 vreg->reg = devm_regulator_get(dev, vreg->name);
8857 if (IS_ERR(vreg->reg)) {
8858 ret = PTR_ERR(vreg->reg);
8859 dev_err(dev, "%s: %s get failed, err=%d\n",
8860 __func__, vreg->name, ret);
8861 }
8862out:
8863 return ret;
8864}
1d6f9dec 8865EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
aa497613
SRT
8866
8867static int ufshcd_init_vreg(struct ufs_hba *hba)
8868{
8869 int ret = 0;
8870 struct device *dev = hba->dev;
8871 struct ufs_vreg_info *info = &hba->vreg_info;
8872
aa497613
SRT
8873 ret = ufshcd_get_vreg(dev, info->vcc);
8874 if (ret)
8875 goto out;
8876
8877 ret = ufshcd_get_vreg(dev, info->vccq);
b0008625
BH
8878 if (!ret)
8879 ret = ufshcd_get_vreg(dev, info->vccq2);
aa497613
SRT
8880out:
8881 return ret;
8882}
8883
6a771a65
RS
8884static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8885{
8886 struct ufs_vreg_info *info = &hba->vreg_info;
8887
476e4592 8888 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6a771a65
RS
8889}
8890
81309c24 8891static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
c6e79dac
SRT
8892{
8893 int ret = 0;
8894 struct ufs_clk_info *clki;
8895 struct list_head *head = &hba->clk_list_head;
1ab27c9c 8896 unsigned long flags;
911a0771 8897 ktime_t start = ktime_get();
8898 bool clk_state_changed = false;
c6e79dac 8899
566ec9ad 8900 if (list_empty(head))
c6e79dac
SRT
8901 goto out;
8902
38f3242e
CG
8903 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8904 if (ret)
8905 return ret;
1e879e8f 8906
c6e79dac
SRT
8907 list_for_each_entry(clki, head, list) {
8908 if (!IS_ERR_OR_NULL(clki->clk)) {
81309c24
CG
8909 /*
8910 * Don't disable clocks which are needed
8911 * to keep the link active.
8912 */
8913 if (ufshcd_is_link_active(hba) &&
8914 clki->keep_link_active)
57d104c1
SJ
8915 continue;
8916
911a0771 8917 clk_state_changed = on ^ clki->enabled;
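/*
 * Added note (not in the original source): the XOR records whether this
 * call actually toggles the clock state, which is used at the end of the
 * function to decide whether to emit the clk gating profiling trace event.
 */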
c6e79dac
SRT
8918 if (on && !clki->enabled) {
8919 ret = clk_prepare_enable(clki->clk);
8920 if (ret) {
8921 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8922 __func__, clki->name, ret);
8923 goto out;
8924 }
8925 } else if (!on && clki->enabled) {
8926 clk_disable_unprepare(clki->clk);
8927 }
8928 clki->enabled = on;
8929 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8930 clki->name, on ? "en" : "dis");
8931 }
8932 }
1ab27c9c 8933
38f3242e
CG
8934 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8935 if (ret)
8936 return ret;
1e879e8f 8937
c6e79dac
SRT
8938out:
8939 if (ret) {
8940 list_for_each_entry(clki, head, list) {
8941 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8942 clk_disable_unprepare(clki->clk);
8943 }
7ff5ab47 8944 } else if (!ret && on) {
1ab27c9c
ST
8945 spin_lock_irqsave(hba->host->host_lock, flags);
8946 hba->clk_gating.state = CLKS_ON;
7ff5ab47 8947 trace_ufshcd_clk_gating(dev_name(hba->dev),
8948 hba->clk_gating.state);
1ab27c9c 8949 spin_unlock_irqrestore(hba->host->host_lock, flags);
c6e79dac 8950 }
7ff5ab47 8951
911a0771 8952 if (clk_state_changed)
8953 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8954 (on ? "on" : "off"),
8955 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
c6e79dac
SRT
8956 return ret;
8957}
8958
ca452621
DL
8959static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
8960{
8961 u32 freq;
8962 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
8963
8964 if (ret) {
aaa26e38 8965 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
ca452621
DL
8966 return REF_CLK_FREQ_INVAL;
8967 }
8968
8969 return ufs_get_bref_clk_from_hz(freq);
8970}
8971
c6e79dac
SRT
8972static int ufshcd_init_clocks(struct ufs_hba *hba)
8973{
8974 int ret = 0;
8975 struct ufs_clk_info *clki;
8976 struct device *dev = hba->dev;
8977 struct list_head *head = &hba->clk_list_head;
8978
566ec9ad 8979 if (list_empty(head))
c6e79dac
SRT
8980 goto out;
8981
8982 list_for_each_entry(clki, head, list) {
8983 if (!clki->name)
8984 continue;
8985
8986 clki->clk = devm_clk_get(dev, clki->name);
8987 if (IS_ERR(clki->clk)) {
8988 ret = PTR_ERR(clki->clk);
8989 dev_err(dev, "%s: %s clk get failed, %d\n",
8990 __func__, clki->name, ret);
8991 goto out;
8992 }
8993
9e1e8a75
SJ
8994 /*
8995 * Parse device ref clk freq as per device tree "ref_clk".
8996 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8997 * in ufshcd_alloc_host().
8998 */
8999 if (!strcmp(clki->name, "ref_clk"))
9000 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9001
c6e79dac
SRT
9002 if (clki->max_freq) {
9003 ret = clk_set_rate(clki->clk, clki->max_freq);
9004 if (ret) {
9005 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9006 __func__, clki->name,
9007 clki->max_freq, ret);
9008 goto out;
9009 }
856b3483 9010 clki->curr_freq = clki->max_freq;
c6e79dac
SRT
9011 }
9012 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9013 clki->name, clk_get_rate(clki->clk));
9014 }
9015out:
9016 return ret;
9017}
9018
5c0c28a8
SRT
9019static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9020{
9021 int err = 0;
9022
9023 if (!hba->vops)
9024 goto out;
9025
0263bcd0 9026 err = ufshcd_vops_init(hba);
5c0c28a8
SRT
9027 if (err)
9028 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
0263bcd0 9029 __func__, ufshcd_get_var_name(hba), err);
ade921a8 9030out:
5c0c28a8
SRT
9031 return err;
9032}
9033
9034static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9035{
9036 if (!hba->vops)
9037 return;
9038
0263bcd0 9039 ufshcd_vops_exit(hba);
5c0c28a8
SRT
9040}
9041
aa497613
SRT
9042static int ufshcd_hba_init(struct ufs_hba *hba)
9043{
9044 int err;
9045
6a771a65
RS
9046 /*
9047 * Handle host controller power separately from the UFS device power
9048 * rails, as that makes it easier to control host controller power
9049 * collapse, which is different from UFS device power collapse.
9050 * Also, enable the host controller power before we go ahead with the
9051 * rest of the initialization here.
9052 */
9053 err = ufshcd_init_hba_vreg(hba);
aa497613
SRT
9054 if (err)
9055 goto out;
9056
6a771a65 9057 err = ufshcd_setup_hba_vreg(hba, true);
aa497613
SRT
9058 if (err)
9059 goto out;
9060
6a771a65
RS
9061 err = ufshcd_init_clocks(hba);
9062 if (err)
9063 goto out_disable_hba_vreg;
9064
ca452621
DL
9065 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9066 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9067
6a771a65
RS
9068 err = ufshcd_setup_clocks(hba, true);
9069 if (err)
9070 goto out_disable_hba_vreg;
9071
c6e79dac
SRT
9072 err = ufshcd_init_vreg(hba);
9073 if (err)
9074 goto out_disable_clks;
9075
9076 err = ufshcd_setup_vreg(hba, true);
9077 if (err)
9078 goto out_disable_clks;
9079
aa497613
SRT
9080 err = ufshcd_variant_hba_init(hba);
9081 if (err)
9082 goto out_disable_vreg;
9083
b6cacaf2
AH
9084 ufs_debugfs_hba_init(hba);
9085
1d337ec2 9086 hba->is_powered = true;
aa497613
SRT
9087 goto out;
9088
9089out_disable_vreg:
9090 ufshcd_setup_vreg(hba, false);
c6e79dac
SRT
9091out_disable_clks:
9092 ufshcd_setup_clocks(hba, false);
6a771a65
RS
9093out_disable_hba_vreg:
9094 ufshcd_setup_hba_vreg(hba, false);
aa497613
SRT
9095out:
9096 return err;
9097}
9098
9099static void ufshcd_hba_exit(struct ufs_hba *hba)
9100{
1d337ec2 9101 if (hba->is_powered) {
4543d9d7
CG
9102 ufshcd_exit_clk_scaling(hba);
9103 ufshcd_exit_clk_gating(hba);
88b09900
AH
9104 if (hba->eh_wq)
9105 destroy_workqueue(hba->eh_wq);
b6cacaf2 9106 ufs_debugfs_hba_exit(hba);
1d337ec2
SRT
9107 ufshcd_variant_hba_exit(hba);
9108 ufshcd_setup_vreg(hba, false);
9109 ufshcd_setup_clocks(hba, false);
9110 ufshcd_setup_hba_vreg(hba, false);
9111 hba->is_powered = false;
09750066 9112 ufs_put_device_desc(hba);
1d337ec2 9113 }
aa497613
SRT
9114}
9115
6a354a7e
BVA
9116static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9117 enum ufs_dev_pwr_mode pwr_mode,
9118 struct scsi_sense_hdr *sshdr)
9119{
9120 unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
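/*
 * Added note (not in the original source): byte 4 of the START STOP UNIT
 * CDB carries the power condition in its upper nibble, hence pwr_mode << 4
 * above. For illustration only, assuming UFS_POWERDOWN_PWR_MODE is 3,
 * byte 4 would be 0x30.
 */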
9121 struct request *req;
9122 struct scsi_cmnd *scmd;
9123 int ret;
9124
9125 req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
9126 BLK_MQ_REQ_PM);
9127 if (IS_ERR(req))
9128 return PTR_ERR(req);
9129
9130 scmd = blk_mq_rq_to_pdu(req);
9131 scmd->cmd_len = COMMAND_SIZE(cdb[0]);
9132 memcpy(scmd->cmnd, cdb, scmd->cmd_len);
9133 scmd->allowed = 0/*retries*/;
7029e215 9134 scmd->flags |= SCMD_FAIL_IF_RECOVERING;
6a354a7e
BVA
9135 req->timeout = 1 * HZ;
9136 req->rq_flags |= RQF_PM | RQF_QUIET;
9137
9138 blk_execute_rq(req, /*at_head=*/true);
9139
9140 if (sshdr)
9141 scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
9142 sshdr);
9143 ret = scmd->result;
9144
9145 blk_mq_free_request(req);
9146
9147 return ret;
9148}
9149
57d104c1
SJ
9150/**
9151 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9152 * power mode
9153 * @hba: per adapter instance
9154 * @pwr_mode: device power mode to set
9155 *
9156 * Returns 0 if requested power mode is set successfully
ad6c8a42 9157 * Returns < 0 if failed to set the requested power mode
57d104c1
SJ
9158 */
9159static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9160 enum ufs_dev_pwr_mode pwr_mode)
9161{
57d104c1 9162 struct scsi_sense_hdr sshdr;
7c48bfd0
AM
9163 struct scsi_device *sdp;
9164 unsigned long flags;
af21c3fd 9165 int ret, retries;
57d104c1 9166
7c48bfd0 9167 spin_lock_irqsave(hba->host->host_lock, flags);
e2106584 9168 sdp = hba->ufs_device_wlun;
6d1aa3b0 9169 if (sdp && scsi_device_online(sdp))
7c48bfd0 9170 ret = scsi_device_get(sdp);
6d1aa3b0 9171 else
7c48bfd0 9172 ret = -ENODEV;
7c48bfd0
AM
9173 spin_unlock_irqrestore(hba->host->host_lock, flags);
9174
9175 if (ret)
9176 return ret;
57d104c1
SJ
9177
9178 /*
9179 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9180 * handling, which would wait for host to be resumed. Since we know
9181 * we are functional while we are here, skip host resume in error
9182 * handling context.
9183 */
9184 hba->host->eh_noresume = 1;
57d104c1 9185
57d104c1
SJ
9186 /*
9187 * Current function would be generally called from the power management
e8064021 9188 * callbacks hence set the RQF_PM flag so that it doesn't resume the
57d104c1
SJ
9189 * already suspended children.
9190 */
af21c3fd 9191 for (retries = 3; retries > 0; --retries) {
6a354a7e 9192 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
579a4e9d
BVA
9193 /*
9194 * scsi_execute() only returns a negative value if the request
9195 * queue is dying.
9196 */
9197 if (ret <= 0)
af21c3fd
JK
9198 break;
9199 }
57d104c1
SJ
9200 if (ret) {
9201 sdev_printk(KERN_WARNING, sdp,
ef61329d
HR
9202 "START_STOP failed for power mode: %d, result %x\n",
9203 pwr_mode, ret);
ad6c8a42
KK
9204 if (ret > 0) {
9205 if (scsi_sense_valid(&sshdr))
9206 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9207 ret = -EIO;
9208 }
836d322d 9209 } else {
57d104c1 9210 hba->curr_dev_pwr_mode = pwr_mode;
836d322d 9211 }
1918651f 9212
7c48bfd0 9213 scsi_device_put(sdp);
57d104c1
SJ
9214 hba->host->eh_noresume = 0;
9215 return ret;
9216}
9217
9218static int ufshcd_link_state_transition(struct ufs_hba *hba,
9219 enum uic_link_state req_link_state,
5277326d 9220 bool check_for_bkops)
57d104c1
SJ
9221{
9222 int ret = 0;
9223
9224 if (req_link_state == hba->uic_link_state)
9225 return 0;
9226
9227 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9228 ret = ufshcd_uic_hibern8_enter(hba);
4db7a236 9229 if (!ret) {
57d104c1 9230 ufshcd_set_link_hibern8(hba);
4db7a236
CG
9231 } else {
9232 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9233 __func__, ret);
57d104c1 9234 goto out;
4db7a236 9235 }
57d104c1
SJ
9236 }
9237 /*
9238 * If autobkops is enabled, link can't be turned off because
fe1d4c2e
AH
9239 * turning off the link would also turn off the device, except in the
9240 * case of DeepSleep where the device is expected to remain powered.
57d104c1
SJ
9241 */
9242 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
dc30c9e6 9243 (!check_for_bkops || !hba->auto_bkops_enabled)) {
f3099fbd
YG
9244 /*
9245 * Let's make sure that the link is in low power mode; we currently do
9246 * this by putting the link in Hibern8. Another way to
9247 * put the link in low power mode is to send the DME end point
9248 * to the device and then send the DME reset command to the local
9249 * UniPro. But putting the link in Hibern8 is much faster.
fe1d4c2e
AH
9250 *
9251 * Note also that putting the link in Hibern8 is a requirement
9252 * for entering DeepSleep.
f3099fbd
YG
9253 */
9254 ret = ufshcd_uic_hibern8_enter(hba);
4db7a236
CG
9255 if (ret) {
9256 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9257 __func__, ret);
f3099fbd 9258 goto out;
4db7a236 9259 }
57d104c1
SJ
9260 /*
9261 * Change controller state to "reset state" which
9262 * should also put the link in off/reset state
9263 */
5cac1095 9264 ufshcd_hba_stop(hba);
57d104c1
SJ
9265 /*
9266 * TODO: Check if we need any delay to make sure that
9267 * controller is reset
9268 */
9269 ufshcd_set_link_off(hba);
9270 }
9271
9272out:
9273 return ret;
9274}
9275
9276static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9277{
c4df6eed
SC
9278 bool vcc_off = false;
9279
b799fdf7
YG
9280 /*
9281 * It seems some UFS devices may keep drawing more than sleep current
9282 * (at least for 500us) from UFS rails (especially from VCCQ rail).
9283 * To avoid this situation, add 2ms delay before putting these UFS
9284 * rails in LPM mode.
9285 */
9286 if (!ufshcd_is_link_active(hba) &&
9287 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9288 usleep_range(2000, 2100);
9289
57d104c1
SJ
9290 /*
9291 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save some
9292 * power.
9293 *
9294 * If UFS device and link is in OFF state, all power supplies (VCC,
9295 * VCCQ, VCCQ2) can be turned off if power on write protect is not
9296 * required. If UFS link is inactive (Hibern8 or OFF state) and device
9297 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9298 *
9299 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9300 * in low power state which would save some power.
3d17b9b5
AD
9301 *
9302 * If Write Booster is enabled and the device needs to flush the WB
9303 * buffer OR if bkops status is urgent for WB, keep Vcc on.
57d104c1
SJ
9304 */
9305 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9306 !hba->dev_info.is_lu_power_on_wp) {
9307 ufshcd_setup_vreg(hba, false);
c4df6eed 9308 vcc_off = true;
57d104c1 9309 } else if (!ufshcd_is_ufs_dev_active(hba)) {
51dd905b 9310 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
c4df6eed 9311 vcc_off = true;
23043dd8 9312 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
57d104c1
SJ
9313 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9314 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9315 }
9316 }
c4df6eed
SC
9317
9318 /*
9319 * Some UFS devices require delay after VCC power rail is turned-off.
9320 */
9321 if (vcc_off && hba->vreg_info.vcc &&
9322 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9323 usleep_range(5000, 5100);
57d104c1
SJ
9324}
9325
9bb25e5d 9326#ifdef CONFIG_PM
57d104c1
SJ
9327static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9328{
9329 int ret = 0;
9330
9331 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9332 !hba->dev_info.is_lu_power_on_wp) {
9333 ret = ufshcd_setup_vreg(hba, true);
9334 } else if (!ufshcd_is_ufs_dev_active(hba)) {
23043dd8 9335 if (!ufshcd_is_link_active(hba)) {
57d104c1
SJ
9336 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9337 if (ret)
9338 goto vcc_disable;
9339 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9340 if (ret)
9341 goto vccq_lpm;
9342 }
69d72ac8 9343 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
57d104c1
SJ
9344 }
9345 goto out;
9346
9347vccq_lpm:
9348 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9349vcc_disable:
9350 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9351out:
9352 return ret;
9353}
9bb25e5d 9354#endif /* CONFIG_PM */
57d104c1
SJ
9355
9356static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9357{
dd7143e2 9358 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
57d104c1
SJ
9359 ufshcd_setup_hba_vreg(hba, false);
9360}
9361
9362static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9363{
dd7143e2 9364 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
57d104c1
SJ
9365 ufshcd_setup_hba_vreg(hba, true);
9366}
9367
b294ff3e 9368static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 9369{
57d104c1 9370 int ret = 0;
5277326d 9371 bool check_for_bkops;
57d104c1
SJ
9372 enum ufs_pm_level pm_lvl;
9373 enum ufs_dev_pwr_mode req_dev_pwr_mode;
9374 enum uic_link_state req_link_state;
9375
b294ff3e 9376 hba->pm_op_in_progress = true;
4c6cb9ed
BVA
9377 if (pm_op != UFS_SHUTDOWN_PM) {
9378 pm_lvl = pm_op == UFS_RUNTIME_PM ?
57d104c1
SJ
9379 hba->rpm_lvl : hba->spm_lvl;
9380 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9381 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9382 } else {
9383 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9384 req_link_state = UIC_LINK_OFF_STATE;
9385 }
9386
f02bc975
DP
9387 ufshpb_suspend(hba);
9388
7a3e97b0 9389 /*
57d104c1
SJ
9390 * If we can't transition into any of the low power modes
9391 * just gate the clocks.
7a3e97b0 9392 */
1ab27c9c
ST
9393 ufshcd_hold(hba, false);
9394 hba->clk_gating.is_suspended = true;
9395
348e1bc5
SC
9396 if (ufshcd_is_clkscaling_supported(hba))
9397 ufshcd_clk_scaling_suspend(hba, true);
d6fcf81a 9398
57d104c1
SJ
9399 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9400 req_link_state == UIC_LINK_ACTIVE_STATE) {
b294ff3e 9401 goto vops_suspend;
57d104c1 9402 }
7a3e97b0 9403
57d104c1
SJ
9404 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9405 (req_link_state == hba->uic_link_state))
b294ff3e 9406 goto enable_scaling;
57d104c1
SJ
9407
9408 /* UFS device & link must be active before we enter in this function */
9409 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9410 ret = -EINVAL;
b294ff3e 9411 goto enable_scaling;
57d104c1
SJ
9412 }
9413
4c6cb9ed 9414 if (pm_op == UFS_RUNTIME_PM) {
374a246e
SJ
9415 if (ufshcd_can_autobkops_during_suspend(hba)) {
9416 /*
9417 * The device is idle with no requests in the queue,
9418 * allow background operations if bkops status shows
9419 * that performance might be impacted.
9420 */
9421 ret = ufshcd_urgent_bkops(hba);
9422 if (ret)
b294ff3e 9423 goto enable_scaling;
374a246e
SJ
9424 } else {
9425 /* make sure that auto bkops is disabled */
9426 ufshcd_disable_auto_bkops(hba);
9427 }
3d17b9b5 9428 /*
51dd905b
SC
9429 * If device needs to do BKOP or WB buffer flush during
9430 * Hibern8, keep device power mode as "active power mode"
9431 * and VCC supply.
3d17b9b5 9432 */
51dd905b
SC
9433 hba->dev_info.b_rpm_dev_flush_capable =
9434 hba->auto_bkops_enabled ||
9435 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9436 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9437 ufshcd_is_auto_hibern8_enabled(hba))) &&
9438 ufshcd_wb_need_flush(hba));
9439 }
9440
6948a96a
KK
9441 flush_work(&hba->eeh_work);
9442
9561f584
PW
9443 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9444 if (ret)
9445 goto enable_scaling;
9446
51dd905b 9447 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
4c6cb9ed 9448 if (pm_op != UFS_RUNTIME_PM)
51dd905b
SC
9449 /* ensure that bkops is disabled */
9450 ufshcd_disable_auto_bkops(hba);
57d104c1 9451
51dd905b
SC
9452 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9453 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9454 if (ret)
b294ff3e 9455 goto enable_scaling;
51dd905b 9456 }
57d104c1
SJ
9457 }
9458
fe1d4c2e
AH
9459 /*
9460 * In the case of DeepSleep, the device is expected to remain powered
9461 * with the link off, so do not check for bkops.
9462 */
9463 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9464 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
57d104c1
SJ
9465 if (ret)
9466 goto set_dev_active;
9467
b294ff3e 9468vops_suspend:
57d104c1
SJ
9469 /*
9470 * Call vendor specific suspend callback. As these callbacks may access
9471 * vendor specific host controller register space call them before the
9472 * host clocks are ON.
9473 */
9561f584 9474 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
0263bcd0
YG
9475 if (ret)
9476 goto set_link_active;
57d104c1
SJ
9477 goto out;
9478
57d104c1 9479set_link_active:
fe1d4c2e
AH
9480 /*
9481 * Device hardware reset is required to exit DeepSleep. Also, for
9482 * DeepSleep, the link is off so host reset and restore will be done
9483 * further below.
9484 */
9485 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
31a5d9ca 9486 ufshcd_device_reset(hba);
fe1d4c2e
AH
9487 WARN_ON(!ufshcd_is_link_off(hba));
9488 }
57d104c1
SJ
9489 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9490 ufshcd_set_link_active(hba);
9491 else if (ufshcd_is_link_off(hba))
9492 ufshcd_host_reset_and_restore(hba);
9493set_dev_active:
fe1d4c2e
AH
9494 /* Can also get here needing to exit DeepSleep */
9495 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
31a5d9ca 9496 ufshcd_device_reset(hba);
fe1d4c2e
AH
9497 ufshcd_host_reset_and_restore(hba);
9498 }
57d104c1
SJ
9499 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9500 ufshcd_disable_auto_bkops(hba);
b294ff3e 9501enable_scaling:
348e1bc5
SC
9502 if (ufshcd_is_clkscaling_supported(hba))
9503 ufshcd_clk_scaling_suspend(hba, false);
9504
51dd905b 9505 hba->dev_info.b_rpm_dev_flush_capable = false;
57d104c1 9506out:
51dd905b
SC
9507 if (hba->dev_info.b_rpm_dev_flush_capable) {
9508 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9509 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9510 }
9511
b294ff3e
AD
9512 if (ret) {
9513 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9514 hba->clk_gating.is_suspended = false;
9515 ufshcd_release(hba);
f02bc975 9516 ufshpb_resume(hba);
b294ff3e
AD
9517 }
9518 hba->pm_op_in_progress = false;
57d104c1 9519 return ret;
7a3e97b0
SY
9520}
9521
75d645a6 9522#ifdef CONFIG_PM
b294ff3e 9523static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 9524{
57d104c1 9525 int ret;
b294ff3e 9526 enum uic_link_state old_link_state = hba->uic_link_state;
57d104c1 9527
b294ff3e 9528 hba->pm_op_in_progress = true;
57d104c1 9529
7a3e97b0 9530 /*
57d104c1
SJ
9531 * Call vendor specific resume callback. As these callbacks may access
9532 * vendor specific host controller register space call them when the
9533 * host clocks are ON.
7a3e97b0 9534 */
0263bcd0
YG
9535 ret = ufshcd_vops_resume(hba, pm_op);
9536 if (ret)
b294ff3e 9537 goto out;
57d104c1 9538
fe1d4c2e
AH
9539 /* For DeepSleep, the only supported option is to have the link off */
9540 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9541
57d104c1
SJ
9542 if (ufshcd_is_link_hibern8(hba)) {
9543 ret = ufshcd_uic_hibern8_exit(hba);
4db7a236 9544 if (!ret) {
57d104c1 9545 ufshcd_set_link_active(hba);
4db7a236
CG
9546 } else {
9547 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9548 __func__, ret);
57d104c1 9549 goto vendor_suspend;
4db7a236 9550 }
57d104c1 9551 } else if (ufshcd_is_link_off(hba)) {
57d104c1 9552 /*
089f5b64
AD
9553 * A full initialization of the host and the device is
9554 * required since the link was put to off during suspend.
fe1d4c2e
AH
9555 * Note, in the case of DeepSleep, the device will exit
9556 * DeepSleep due to device reset.
089f5b64
AD
9557 */
9558 ret = ufshcd_reset_and_restore(hba);
9559 /*
9560 * ufshcd_reset_and_restore() should have already
57d104c1
SJ
9561 * set the link state as active
9562 */
9563 if (ret || !ufshcd_is_link_active(hba))
9564 goto vendor_suspend;
9565 }
9566
9567 if (!ufshcd_is_ufs_dev_active(hba)) {
9568 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9569 if (ret)
9570 goto set_old_link_state;
9571 }
9572
4e768e76 9573 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9574 ufshcd_enable_auto_bkops(hba);
9575 else
9576 /*
9577 * If BKOPs operations are urgently needed at this moment then
9578 * keep auto-bkops enabled or else disable it.
9579 */
9580 ufshcd_urgent_bkops(hba);
9581
cd469475
AH
9582 if (hba->ee_usr_mask)
9583 ufshcd_write_ee_control(hba);
9584
348e1bc5
SC
9585 if (ufshcd_is_clkscaling_supported(hba))
9586 ufshcd_clk_scaling_suspend(hba, false);
856b3483 9587
51dd905b
SC
9588 if (hba->dev_info.b_rpm_dev_flush_capable) {
9589 hba->dev_info.b_rpm_dev_flush_capable = false;
9590 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9591 }
9592
b294ff3e
AD
9593 /* Enable Auto-Hibernate if configured */
9594 ufshcd_auto_hibern8_enable(hba);
f02bc975
DP
9595
9596 ufshpb_resume(hba);
57d104c1
SJ
9597 goto out;
9598
9599set_old_link_state:
9600 ufshcd_link_state_transition(hba, old_link_state, 0);
9601vendor_suspend:
9561f584
PW
9602 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9603 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
b294ff3e
AD
9604out:
9605 if (ret)
9606 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9607 hba->clk_gating.is_suspended = false;
9608 ufshcd_release(hba);
9609 hba->pm_op_in_progress = false;
9610 return ret;
9611}
9612
9613static int ufshcd_wl_runtime_suspend(struct device *dev)
9614{
9615 struct scsi_device *sdev = to_scsi_device(dev);
9616 struct ufs_hba *hba;
9617 int ret;
9618 ktime_t start = ktime_get();
9619
9620 hba = shost_priv(sdev->host);
9621
9622 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9623 if (ret)
9624 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9625
9626 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9627 ktime_to_us(ktime_sub(ktime_get(), start)),
9628 hba->curr_dev_pwr_mode, hba->uic_link_state);
9629
9630 return ret;
9631}
9632
9633static int ufshcd_wl_runtime_resume(struct device *dev)
9634{
9635 struct scsi_device *sdev = to_scsi_device(dev);
9636 struct ufs_hba *hba;
9637 int ret = 0;
9638 ktime_t start = ktime_get();
9639
9640 hba = shost_priv(sdev->host);
9641
9642 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9643 if (ret)
9644 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9645
9646 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9647 ktime_to_us(ktime_sub(ktime_get(), start)),
9648 hba->curr_dev_pwr_mode, hba->uic_link_state);
9649
9650 return ret;
9651}
75d645a6 9652#endif
b294ff3e
AD
9653
9654#ifdef CONFIG_PM_SLEEP
9655static int ufshcd_wl_suspend(struct device *dev)
9656{
9657 struct scsi_device *sdev = to_scsi_device(dev);
9658 struct ufs_hba *hba;
9659 int ret = 0;
9660 ktime_t start = ktime_get();
9661
9662 hba = shost_priv(sdev->host);
9663 down(&hba->host_sem);
1a547cbc 9664 hba->system_suspending = true;
b294ff3e
AD
9665
9666 if (pm_runtime_suspended(dev))
9667 goto out;
9668
9669 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9670 if (ret) {
9671 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9672 up(&hba->host_sem);
9673 }
9674
9675out:
9676 if (!ret)
9677 hba->is_sys_suspended = true;
9678 trace_ufshcd_wl_suspend(dev_name(dev), ret,
9679 ktime_to_us(ktime_sub(ktime_get(), start)),
9680 hba->curr_dev_pwr_mode, hba->uic_link_state);
9681
9682 return ret;
9683}
9684
9685static int ufshcd_wl_resume(struct device *dev)
9686{
9687 struct scsi_device *sdev = to_scsi_device(dev);
9688 struct ufs_hba *hba;
9689 int ret = 0;
9690 ktime_t start = ktime_get();
9691
9692 hba = shost_priv(sdev->host);
9693
9694 if (pm_runtime_suspended(dev))
9695 goto out;
9696
9697 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9698 if (ret)
9699 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9700out:
9701 trace_ufshcd_wl_resume(dev_name(dev), ret,
9702 ktime_to_us(ktime_sub(ktime_get(), start)),
9703 hba->curr_dev_pwr_mode, hba->uic_link_state);
9704 if (!ret)
9705 hba->is_sys_suspended = false;
1a547cbc 9706 hba->system_suspending = false;
b294ff3e
AD
9707 up(&hba->host_sem);
9708 return ret;
9709}
9710#endif
9711
9712static void ufshcd_wl_shutdown(struct device *dev)
9713{
9714 struct scsi_device *sdev = to_scsi_device(dev);
9715 struct ufs_hba *hba;
9716
9717 hba = shost_priv(sdev->host);
9718
9719 down(&hba->host_sem);
9720 hba->shutting_down = true;
9721 up(&hba->host_sem);
9722
9723 /* Turn on everything while shutting down */
9724 ufshcd_rpm_get_sync(hba);
9725 scsi_device_quiesce(sdev);
9726 shost_for_each_device(sdev, hba->host) {
e2106584 9727 if (sdev == hba->ufs_device_wlun)
b294ff3e
AD
9728 continue;
9729 scsi_device_quiesce(sdev);
9730 }
9731 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9732}
9733
9734/**
9735 * ufshcd_suspend - helper function for suspend operations
9736 * @hba: per adapter instance
9737 *
9738 * This function disables irqs, turns off clocks
9739 * and puts vreg and hba-vreg in LPM mode.
b294ff3e
AD
9740 */
9741static int ufshcd_suspend(struct ufs_hba *hba)
9742{
9743 int ret;
9744
9745 if (!hba->is_powered)
9746 return 0;
9747 /*
9748 * Disable the host irq as there won't be any
9749 * host controller transactions expected till resume.
9750 */
57d104c1 9751 ufshcd_disable_irq(hba);
b294ff3e
AD
9752 ret = ufshcd_setup_clocks(hba, false);
9753 if (ret) {
9754 ufshcd_enable_irq(hba);
9755 return ret;
9756 }
2dec9475
CG
9757 if (ufshcd_is_clkgating_allowed(hba)) {
9758 hba->clk_gating.state = CLKS_OFF;
9759 trace_ufshcd_clk_gating(dev_name(hba->dev),
9760 hba->clk_gating.state);
9761 }
b294ff3e
AD
9762
9763 ufshcd_vreg_set_lpm(hba);
9764 /* Put the host controller in low power mode if possible */
9765 ufshcd_hba_vreg_set_lpm(hba);
9766 return ret;
9767}
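/*
 * Added note (not in the original source): ufshcd_suspend() above powers
 * things down in the order irq -> clocks -> device rails -> host controller
 * rail; ufshcd_resume() below brings them back up in roughly the reverse
 * order.
 */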
9768
9bb25e5d 9769#ifdef CONFIG_PM
b294ff3e
AD
9770/**
9771 * ufshcd_resume - helper function for resume operations
9772 * @hba: per adapter instance
9773 *
9774 * This function basically turns on the regulators, clocks and
9775 * irqs of the hba.
b294ff3e
AD
9776 *
9777 * Returns 0 for success and non-zero for failure
9778 */
9779static int ufshcd_resume(struct ufs_hba *hba)
9780{
9781 int ret;
9782
9783 if (!hba->is_powered)
9784 return 0;
9785
9786 ufshcd_hba_vreg_set_hpm(hba);
9787 ret = ufshcd_vreg_set_hpm(hba);
9788 if (ret)
9789 goto out;
9790
9791 /* Make sure clocks are enabled before accessing controller */
9792 ret = ufshcd_setup_clocks(hba, true);
9793 if (ret)
9794 goto disable_vreg;
9795
9796 /* enable the host irq as host controller would be active soon */
9797 ufshcd_enable_irq(hba);
9798 goto out;
9799
528db9e5
ZC
9800disable_vreg:
9801 ufshcd_vreg_set_lpm(hba);
57d104c1 9802out:
8808b4e9 9803 if (ret)
e965e5e0 9804 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
57d104c1
SJ
9805 return ret;
9806}
9bb25e5d 9807#endif /* CONFIG_PM */
57d104c1 9808
9bb25e5d 9809#ifdef CONFIG_PM_SLEEP
57d104c1 9810/**
f1ecbe1e
BVA
9811 * ufshcd_system_suspend - system suspend callback
9812 * @dev: Device associated with the UFS controller.
57d104c1 9813 *
f1ecbe1e
BVA
9814 * Executed before putting the system into a sleep state in which the contents
9815 * of main memory are preserved.
57d104c1
SJ
9816 *
9817 * Returns 0 for success and non-zero for failure
9818 */
f1ecbe1e 9819int ufshcd_system_suspend(struct device *dev)
57d104c1 9820{
f1ecbe1e 9821 struct ufs_hba *hba = dev_get_drvdata(dev);
57d104c1 9822 int ret = 0;
7ff5ab47 9823 ktime_t start = ktime_get();
57d104c1 9824
b294ff3e 9825 if (pm_runtime_suspended(hba->dev))
0b257734 9826 goto out;
57d104c1 9827
b294ff3e 9828 ret = ufshcd_suspend(hba);
57d104c1 9829out:
7ff5ab47 9830 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9831 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9832 hba->curr_dev_pwr_mode, hba->uic_link_state);
57d104c1
SJ
9833 return ret;
9834}
9835EXPORT_SYMBOL(ufshcd_system_suspend);
9836
9837/**
f1ecbe1e
BVA
9838 * ufshcd_system_resume - system resume callback
9839 * @dev: Device associated with the UFS controller.
9840 *
9841 * Executed after waking the system up from a sleep state in which the contents
9842 * of main memory were preserved.
57d104c1
SJ
9843 *
9844 * Returns 0 for success and non-zero for failure
9845 */
f1ecbe1e 9846int ufshcd_system_resume(struct device *dev)
57d104c1 9847{
f1ecbe1e 9848 struct ufs_hba *hba = dev_get_drvdata(dev);
7ff5ab47 9849 ktime_t start = ktime_get();
f1ecbe1e 9850 int ret = 0;
7ff5ab47 9851
b294ff3e 9852 if (pm_runtime_suspended(hba->dev))
7ff5ab47 9853 goto out;
b294ff3e
AD
9854
9855 ret = ufshcd_resume(hba);
9856
7ff5ab47 9857out:
9858 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9859 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9860 hba->curr_dev_pwr_mode, hba->uic_link_state);
b294ff3e 9861
7ff5ab47 9862 return ret;
7a3e97b0 9863}
57d104c1 9864EXPORT_SYMBOL(ufshcd_system_resume);
9bb25e5d 9865#endif /* CONFIG_PM_SLEEP */
3b1d0580 9866
9bb25e5d 9867#ifdef CONFIG_PM
57d104c1 9868/**
f1ecbe1e
BVA
9869 * ufshcd_runtime_suspend - runtime suspend callback
9870 * @dev: Device associated with the UFS controller.
57d104c1
SJ
9871 *
9872 * Check the description of ufshcd_suspend() function for more details.
9873 *
9874 * Returns 0 for success and non-zero for failure
9875 */
f1ecbe1e 9876int ufshcd_runtime_suspend(struct device *dev)
66ec6d59 9877{
f1ecbe1e 9878 struct ufs_hba *hba = dev_get_drvdata(dev);
b294ff3e 9879 int ret;
7ff5ab47 9880 ktime_t start = ktime_get();
9881
b294ff3e
AD
9882 ret = ufshcd_suspend(hba);
9883
7ff5ab47 9884 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9885 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9886 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 9887 return ret;
66ec6d59
SRT
9888}
9889EXPORT_SYMBOL(ufshcd_runtime_suspend);
9890
57d104c1
SJ
9891/**
9892 * ufshcd_runtime_resume - runtime resume routine
f1ecbe1e 9893 * @dev: Device associated with the UFS controller.
57d104c1 9894 *
b294ff3e 9895 * This function basically brings controller
57d104c1
SJ
9896 * to active state. Following operations are done in this function:
9897 *
9898 * 1. Turn on all the controller related clocks
b294ff3e 9899 * 2. Turn ON VCC rail
57d104c1 9900 */
f1ecbe1e 9901int ufshcd_runtime_resume(struct device *dev)
66ec6d59 9902{
f1ecbe1e 9903 struct ufs_hba *hba = dev_get_drvdata(dev);
b294ff3e 9904 int ret;
7ff5ab47 9905 ktime_t start = ktime_get();
9906
b294ff3e
AD
9907 ret = ufshcd_resume(hba);
9908
7ff5ab47 9909 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9910 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9911 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 9912 return ret;
66ec6d59
SRT
9913}
9914EXPORT_SYMBOL(ufshcd_runtime_resume);
9bb25e5d 9915#endif /* CONFIG_PM */
66ec6d59 9916
57d104c1
SJ
9917/**
9918 * ufshcd_shutdown - shutdown routine
9919 * @hba: per adapter instance
9920 *
b294ff3e
AD
9921 * This function would turn off both UFS device and UFS hba
9922 * regulators. It would also disable clocks.
57d104c1
SJ
9923 *
9924 * Returns 0 always to allow force shutdown even in case of errors.
9925 */
9926int ufshcd_shutdown(struct ufs_hba *hba)
9927{
57d104c1 9928 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
00511d2a 9929 ufshcd_suspend(hba);
57d104c1 9930
88a92d6a 9931 hba->is_powered = false;
57d104c1
SJ
9932 /* allow force shutdown even in case of errors */
9933 return 0;
9934}
9935EXPORT_SYMBOL(ufshcd_shutdown);
9936
7a3e97b0 9937/**
3b1d0580 9938 * ufshcd_remove - de-allocate SCSI host and host memory space
7a3e97b0 9939 * data structures
8aa29f19 9940 * @hba: per adapter instance
7a3e97b0 9941 */
3b1d0580 9942void ufshcd_remove(struct ufs_hba *hba)
7a3e97b0 9943{
e2106584 9944 if (hba->ufs_device_wlun)
b294ff3e 9945 ufshcd_rpm_get_sync(hba);
e88e2d32 9946 ufs_hwmon_remove(hba);
df032bf2 9947 ufs_bsg_remove(hba);
4b5f4907 9948 ufshpb_remove(hba);
cbb6813e 9949 ufs_sysfs_remove_nodes(hba->dev);
6f8191fd 9950 blk_mq_destroy_queue(hba->tmf_queue);
2b3f056f 9951 blk_put_queue(hba->tmf_queue);
69a6c269 9952 blk_mq_free_tag_set(&hba->tmf_tag_set);
cfdf9c91 9953 scsi_remove_host(hba->host);
7a3e97b0 9954 /* disable interrupts */
2fbd009b 9955 ufshcd_disable_intr(hba, hba->intr_mask);
5cac1095 9956 ufshcd_hba_stop(hba);
aa497613 9957 ufshcd_hba_exit(hba);
3b1d0580
VH
9958}
9959EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
        scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *                       addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
        if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
                if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
                        return 0;
        }
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
        struct Scsi_Host *host;
        struct ufs_hba *hba;
        int err = 0;

        if (!dev) {
                dev_err(dev,
                "Invalid memory reference for dev is NULL\n");
                err = -ENODEV;
                goto out_error;
        }

        host = scsi_host_alloc(&ufshcd_driver_template,
                                sizeof(struct ufs_hba));
        if (!host) {
                dev_err(dev, "scsi_host_alloc failed\n");
                err = -ENOMEM;
                goto out_error;
        }
        host->nr_maps = HCTX_TYPE_POLL + 1;
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
        hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
        hba->nop_out_timeout = NOP_OUT_TIMEOUT;
        ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
        INIT_LIST_HEAD(&hba->clk_list_head);
        spin_lock_init(&hba->outstanding_lock);

        *hba_handle = hba;

out_error:
        return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

/* This function exists because blk_mq_alloc_tag_set() requires a ->queue_rq() callback. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *qd)
{
        WARN_ON_ONCE(true);
        return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
        .queue_rq = ufshcd_queue_tmf,
};

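/*
 * Sketch of how the queue created from this tag set (hba->tmf_queue, set up
 * in ufshcd_init() below) is expected to be used by the task-management
 * path: no block I/O is ever dispatched through it; a request is allocated
 * only so that req->tag identifies a free UTP task management slot, e.g.:
 *
 *	struct request *req;
 *
 *	req = blk_mq_alloc_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
 *	...
 *	hba->tmf_rqs[req->tag] = req;	// map the slot to the request
 *	...
 *	blk_mq_free_request(req);
 */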

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
        char eh_wq_name[sizeof("ufs_eh_wq_00")];

        /*
         * dev_set_drvdata() must be called before any callbacks are registered
         * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
         * sysfs).
         */
        dev_set_drvdata(dev, hba);

        if (!mmio_base) {
                dev_err(hba->dev,
                "Invalid memory reference for mmio_base is NULL\n");
                err = -ENODEV;
                goto out_error;
        }

        hba->mmio_base = mmio_base;
        hba->irq = irq;
        hba->vps = &ufs_hba_vps;

        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;

        /* Read capabilities registers */
        err = ufshcd_hba_capabilities(hba);
        if (err)
                goto out_disable;

        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);

        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);

        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
                goto out_disable;
        }

        /* Allocate memory for host memory space */
        err = ufshcd_memory_alloc(hba);
        if (err) {
                dev_err(hba->dev, "Memory allocation failed\n");
                goto out_disable;
        }

        /* Configure LRB */
        ufshcd_host_memory_configure(hba);

        host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
        host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
        host->max_id = UFSHCD_MAX_ID;
        host->max_lun = UFS_MAX_LUNS;
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = UFS_CDB_SIZE;

        hba->max_pwr_info.is_valid = false;

        /* Initialize work queues */
        snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
                 hba->host->host_no);
        hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
        if (!hba->eh_wq) {
                dev_err(hba->dev, "%s: failed to create eh workqueue\n",
                        __func__);
                err = -ENOMEM;
                goto out_disable;
        }
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

        sema_init(&hba->host_sem, 1);

        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);

        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);

        /* Initialize mutex for exception event control */
        mutex_init(&hba->ee_ctrl_mutex);

        init_rwsem(&hba->clk_scaling_lock);

        ufshcd_init_clk_gating(hba);

        ufshcd_init_clk_scaling(hba);

        /*
         * In order to avoid any spurious interrupt immediately after
         * registering UFS controller interrupt handler, clear any pending UFS
         * interrupt status and disable all the UFS interrupts.
         */
        ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
                      REG_INTERRUPT_STATUS);
        ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
        /*
         * Make sure that UFS interrupts are disabled and any pending interrupt
         * status is cleared before registering UFS interrupt handler.
         */
        mb();

        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                dev_err(hba->dev, "request irq failed\n");
                goto out_disable;
        } else {
                hba->is_irq_enabled = true;
        }

        if (!is_mcq_supported(hba)) {
                err = scsi_add_host(host, hba->dev);
                if (err) {
                        dev_err(hba->dev, "scsi_add_host failed\n");
                        goto out_disable;
                }
        }

        hba->tmf_tag_set = (struct blk_mq_tag_set) {
                .nr_hw_queues   = 1,
                .queue_depth    = hba->nutmrs,
                .ops            = &ufshcd_tmf_ops,
                .flags          = BLK_MQ_F_NO_SCHED,
        };
        err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
        if (err < 0)
                goto out_remove_scsi_host;
        hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
        if (IS_ERR(hba->tmf_queue)) {
                err = PTR_ERR(hba->tmf_queue);
                goto free_tmf_tag_set;
        }
        hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
                                    sizeof(*hba->tmf_rqs), GFP_KERNEL);
        if (!hba->tmf_rqs) {
                err = -ENOMEM;
                goto free_tmf_queue;
        }

        /* Reset the attached device */
        ufshcd_device_reset(hba);

        ufshcd_init_crypto(hba);

        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
                ufshcd_print_evt_hist(hba);
                ufshcd_print_host_state(hba);
                goto free_tmf_queue;
        }

        /*
         * Set the default power management level for runtime and system PM.
         * Default power saving mode is to keep UFS link in Hibern8 state
         * and UFS device in sleep state.
         */
        hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);
        hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);

        INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
                          ufshcd_rpm_dev_flush_recheck_work);

        /* Set the default auto-hibernate idle timer value to 150 ms */
        if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
                            FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
        }
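        /*
         * Note on the encoding above (per the UFSHCI auto-hibernate idle
         * timer layout): the scale field selects the unit of the timer
         * field, and a scale of 3 selects 1 ms units, so a timer value of
         * 150 gives 150 * 1 ms = 150 ms. A glue driver that wants a
         * different default can pre-set hba->ahit before this point, e.g.
         * (hypothetical snippet for a 10 ms timeout):
         *
         *	hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
         *		    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
         */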

        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
        atomic_set(&hba->scsi_block_reqs_cnt, 0);
        /*
         * We assume the device was not left in a sleep/power-down state by
         * the boot stage that ran before the kernel. This assumption helps
         * avoid doing link startup twice during ufshcd_probe_hba().
         */
        ufshcd_set_ufs_dev_active(hba);

        async_schedule(ufshcd_async_scan, hba);
        ufs_sysfs_add_nodes(hba->dev);

        device_enable_async_suspend(dev);
        return 0;

free_tmf_queue:
        blk_mq_destroy_queue(hba->tmf_queue);
        blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
        blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
        scsi_remove_host(hba->host);
out_disable:
        hba->is_irq_enabled = false;
        ufshcd_hba_exit(hba);
out_error:
        return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

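/*
 * Illustrative sketch (hypothetical, not part of the original file): a bus
 * glue driver's probe path is expected to pair ufshcd_alloc_host() with
 * ufshcd_init() roughly as follows. The function name "my_ufs_probe" is made
 * up, and error handling is reduced to the essentials.
 *
 *	static int my_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		struct ufs_hba *hba;
 *		void __iomem *mmio_base;
 *		int irq, err;
 *
 *		mmio_base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(mmio_base))
 *			return PTR_ERR(mmio_base);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		err = ufshcd_alloc_host(dev, &hba);
 *		if (err)
 *			return err;
 *
 *		err = ufshcd_init(hba, mmio_base, irq);
 *		if (err)
 *			ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 */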
void ufshcd_resume_complete(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        if (hba->complete_put) {
                ufshcd_rpm_put(hba);
                hba->complete_put = false;
        }
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);

static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
        struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
        enum ufs_dev_pwr_mode dev_pwr_mode;
        enum uic_link_state link_state;
        unsigned long flags;
        bool res;

        spin_lock_irqsave(&dev->power.lock, flags);
        dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
        link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
        res = pm_runtime_suspended(dev) &&
              hba->curr_dev_pwr_mode == dev_pwr_mode &&
              hba->uic_link_state == link_state &&
              !hba->dev_info.b_rpm_dev_flush_capable;
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return res;
}

int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;

        /*
         * SCSI assumes that runtime PM and system PM for SCSI drivers are
         * the same, and it does not wake up the device for system suspend
         * if it is already runtime suspended. UFS does not follow that;
         * refer to ufshcd_resume_complete().
         */
        if (hba->ufs_device_wlun) {
                /* Prevent runtime suspend */
                ufshcd_rpm_get_noresume(hba);
                /*
                 * Check if already runtime suspended in the same state as
                 * system suspend would be.
                 */
                if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
                        /* RPM state is not ok for SPM, so runtime resume */
                        ret = ufshcd_rpm_resume(hba);
                        if (ret < 0 && ret != -EACCES) {
                                ufshcd_rpm_put(hba);
                                return ret;
                        }
                }
                hba->complete_put = true;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
        return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);

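/*
 * Illustrative note (sketch): a glue driver whose runtime-suspend state can
 * never satisfy the system-suspend requirements may provide its own .prepare
 * callback and skip the RPM-state reuse check, e.g. ("my_ufs_suspend_prepare"
 * is a hypothetical name):
 *
 *	static int my_ufs_suspend_prepare(struct device *dev)
 *	{
 *		return __ufshcd_suspend_prepare(dev, false);
 *	}
 */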
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba = shost_priv(sdev->host);

        __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
        return 0;
}
#endif

static int ufshcd_wl_probe(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        if (!is_device_wlun(sdev))
                return -ENODEV;

        blk_pm_runtime_init(sdev->request_queue, dev);
        pm_runtime_set_autosuspend_delay(dev, 0);
        pm_runtime_allow(dev);

        return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
        pm_runtime_forbid(dev);
        return 0;
}

static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        .suspend = ufshcd_wl_suspend,
        .resume = ufshcd_wl_resume,
        .freeze = ufshcd_wl_suspend,
        .thaw = ufshcd_wl_resume,
        .poweroff = ufshcd_wl_poweroff,
        .restore = ufshcd_wl_resume,
#endif
        SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};

/*
 * ufs_dev_wlun_template - describes the UFS device WLUN
 *
 * The UFS device WLUN is used to send PM commands; all other LUNs are
 * consumers of it. Currently no sd driver is bound to WLUNs, so no
 * LUN-specific PM operations are performed for them. Per the UFS design,
 * START STOP UNIT (SSU) must be sent to the device WLUN, hence a SCSI
 * driver is registered for UFS WLUNs only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
        .gendrv = {
                .name = "ufs_device_wlun",
                .owner = THIS_MODULE,
                .probe = ufshcd_wl_probe,
                .remove = ufshcd_wl_remove,
                .pm = &ufshcd_wl_pm_ops,
                .shutdown = ufshcd_wl_shutdown,
        },
};

static int __init ufshcd_core_init(void)
{
        int ret;

        ufs_debugfs_init();

        ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
        if (ret)
                ufs_debugfs_exit();
        return ret;
}

static void __exit ufshcd_core_exit(void)
{
        ufs_debugfs_exit();
        scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");