scsi: ufs: Remove ufshcd_lrb.sense_bufflen
[linux-block.git] / drivers / scsi / ufs / ufshcd.c
CommitLineData
67351119 1// SPDX-License-Identifier: GPL-2.0-or-later
7a3e97b0 2/*
e0eca63e 3 * Universal Flash Storage Host controller driver Core
3b1d0580 4 * Copyright (C) 2011-2013 Samsung India Software Operations
52ac95fe 5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7a3e97b0 6 *
3b1d0580
VH
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
7a3e97b0
SY
10 */
11
6ccf44fe 12#include <linux/async.h>
856b3483 13#include <linux/devfreq.h>
b573d484 14#include <linux/nls.h>
54b879b7 15#include <linux/of.h>
ad448378 16#include <linux/bitfield.h>
fb276f77 17#include <linux/blk-pm.h>
c72e79c0 18#include <linux/blkdev.h>
b294ff3e 19#include <scsi/scsi_driver.h>
e0eca63e 20#include "ufshcd.h"
c58ab7aa 21#include "ufs_quirks.h"
53b3d9c3 22#include "unipro.h"
cbb6813e 23#include "ufs-sysfs.h"
b6cacaf2 24#include "ufs-debugfs.h"
c11a1ae9 25#include "ufs-fault-injection.h"
df032bf2 26#include "ufs_bsg.h"
df043c74 27#include "ufshcd-crypto.h"
f02bc975 28#include "ufshpb.h"
3d17b9b5 29#include <asm/unaligned.h>
7a3e97b0 30
7ff5ab47 31#define CREATE_TRACE_POINTS
32#include <trace/events/ufs.h>
33
2fbd009b
SJ
34#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
35 UTP_TASK_REQ_COMPL |\
36 UFSHCD_ERROR_MASK)
6ccf44fe
SJ
37/* UIC command timeout, unit: ms */
38#define UIC_CMD_TIMEOUT 500
2fbd009b 39
5a0b0cb9
SRT
40/* NOP OUT retries waiting for NOP IN response */
41#define NOP_OUT_RETRIES 10
782e2efb
DP
42/* Timeout after 50 msecs if NOP OUT hangs without response */
43#define NOP_OUT_TIMEOUT 50 /* msecs */
5a0b0cb9 44
68078d5c 45/* Query request retries */
10fe5888 46#define QUERY_REQ_RETRIES 3
68078d5c 47/* Query request timeout */
10fe5888 48#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
68078d5c 49
e2933132
SRT
50/* Task management command timeout */
51#define TM_CMD_TIMEOUT 100 /* msecs */
52
64238fbd
YG
53/* maximum number of retries for a general UIC command */
54#define UFS_UIC_COMMAND_RETRIES 3
55
1d337ec2
SRT
56/* maximum number of link-startup retries */
57#define DME_LINKSTARTUP_RETRIES 3
58
87d0b4a6
YG
59/* Maximum retries for Hibern8 enter */
60#define UIC_HIBERN8_ENTER_RETRIES 3
61
1d337ec2
SRT
62/* maximum number of reset retries before giving up */
63#define MAX_HOST_RESET_RETRIES 5
64
87bf6a6b
AH
65/* Maximum number of error handler retries before giving up */
66#define MAX_ERR_HANDLER_RETRIES 5
67
68078d5c
DR
68/* Expose the flag value from utp_upiu_query.value */
69#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
70
7d568652
SJ
71/* Interrupt aggregation default timeout, unit: 40us */
72#define INT_AGGR_DEF_TO 0x02
73
49615ba1
SC
74/* default delay of autosuspend: 2000 ms */
75#define RPM_AUTOSUSPEND_DELAY_MS 2000
76
51dd905b
SC
77/* Default delay of RPM device flush delayed work */
78#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
79
09f17791
CG
80/* Default value of wait time before gating device ref clock */
81#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
82
29707fab
KK
83/* Polling time to wait for fDeviceInit */
84#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
85
aa497613
SRT
86#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
87 ({ \
88 int _ret; \
89 if (_on) \
90 _ret = ufshcd_enable_vreg(_dev, _vreg); \
91 else \
92 _ret = ufshcd_disable_vreg(_dev, _vreg); \
93 _ret; \
94 })
95
ba80917d
TW
96#define ufshcd_hex_dump(prefix_str, buf, len) do { \
97 size_t __len = (len); \
98 print_hex_dump(KERN_ERR, prefix_str, \
99 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
100 16, 4, buf, __len, false); \
101} while (0)
102
103int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
104 const char *prefix)
105{
d6724756
MG
106 u32 *regs;
107 size_t pos;
108
109 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
110 return -EINVAL;
ba80917d 111
cddaebaf 112 regs = kzalloc(len, GFP_ATOMIC);
ba80917d
TW
113 if (!regs)
114 return -ENOMEM;
115
d6724756
MG
116 for (pos = 0; pos < len; pos += 4)
117 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
118
ba80917d
TW
119 ufshcd_hex_dump(prefix, regs, len);
120 kfree(regs);
121
122 return 0;
123}
124EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
66cc820f 125
7a3e97b0
SY
126enum {
127 UFSHCD_MAX_CHANNEL = 0,
128 UFSHCD_MAX_ID = 1,
945c3cca
BVA
129 UFSHCD_NUM_RESERVED = 1,
130 UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
131 UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
7a3e97b0
SY
132};
133
4693fad7
BVA
134static const char *const ufshcd_state_name[] = {
135 [UFSHCD_STATE_RESET] = "reset",
136 [UFSHCD_STATE_OPERATIONAL] = "operational",
137 [UFSHCD_STATE_ERROR] = "error",
138 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
139 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
140};
141
3441da7d
SRT
142/* UFSHCD error handling flags */
143enum {
144 UFSHCD_EH_IN_PROGRESS = (1 << 0),
7a3e97b0
SY
145};
146
e8e7f271
SRT
147/* UFSHCD UIC layer error flags */
148enum {
149 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
9a47ec7c
YG
150 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
151 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
152 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
153 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
154 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
2355b66e 155 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
e8e7f271
SRT
156};
157
3441da7d 158#define ufshcd_set_eh_in_progress(h) \
9c490d2d 159 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
3441da7d 160#define ufshcd_eh_in_progress(h) \
9c490d2d 161 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
3441da7d 162#define ufshcd_clear_eh_in_progress(h) \
9c490d2d 163 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
3441da7d 164
cbb6813e 165struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
e2ac7ab2
BVA
166 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
167 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
168 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
169 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
170 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
171 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
fe1d4c2e
AH
172 /*
173 * For DeepSleep, the link is first put in hibern8 and then off.
174 * Leaving the link in hibern8 is not supported.
175 */
e2ac7ab2 176 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
57d104c1
SJ
177};
178
179static inline enum ufs_dev_pwr_mode
180ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
181{
182 return ufs_pm_lvl_states[lvl].dev_state;
183}
184
185static inline enum uic_link_state
186ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
187{
188 return ufs_pm_lvl_states[lvl].link_state;
189}
190
0c8f7586 191static inline enum ufs_pm_level
192ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
193 enum uic_link_state link_state)
194{
195 enum ufs_pm_level lvl;
196
197 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
198 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
199 (ufs_pm_lvl_states[lvl].link_state == link_state))
200 return lvl;
201 }
202
203 /* if no match found, return the level 0 */
204 return UFS_PM_LVL_0;
205}
206
56d4a186
SJ
207static struct ufs_dev_fix ufs_fixups[] = {
208 /* UFS cards deviations table */
c0a18ee0 209 UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
63522bf3
BH
210 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
211 UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
56d4a186 212 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
ed0b40ff
SC
213 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
214 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
56d4a186 215 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
ed0b40ff
SC
216 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
217 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
218 UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
219 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
56d4a186
SJ
220 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
221 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
222 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
223 UFS_DEVICE_QUIRK_PA_TACTIVATE),
224 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
225 UFS_DEVICE_QUIRK_PA_TACTIVATE),
56d4a186
SJ
226 END_FIX
227};
228
9333d775 229static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
3441da7d 230static void ufshcd_async_scan(void *data, async_cookie_t cookie);
e8e7f271 231static int ufshcd_reset_and_restore(struct ufs_hba *hba);
e7d38257 232static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
e8e7f271 233static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
1d337ec2 234static void ufshcd_hba_exit(struct ufs_hba *hba);
68444d73 235static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
1ab27c9c 236static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
cad2e03d 237static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
57d104c1 238static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
fcb0c4b0
ST
239static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
240static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
401f1e44 241static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
fcb0c4b0 242static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
57d104c1 243static irqreturn_t ufshcd_intr(int irq, void *__hba);
874237f7
YG
244static int ufshcd_change_power_mode(struct ufs_hba *hba,
245 struct ufs_pa_layer_attr *pwr_mode);
c72e79c0
CG
246static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
247static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
248static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
249 struct ufs_vreg *vreg);
307348f6 250static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
3b5f3c0d
YH
251static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
252static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
dd7143e2
CG
253static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
254static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
3d17b9b5 255
5231d38c 256static inline void ufshcd_enable_irq(struct ufs_hba *hba)
57d104c1 257{
57d104c1 258 if (!hba->is_irq_enabled) {
5231d38c 259 enable_irq(hba->irq);
57d104c1
SJ
260 hba->is_irq_enabled = true;
261 }
57d104c1
SJ
262}
263
264static inline void ufshcd_disable_irq(struct ufs_hba *hba)
265{
266 if (hba->is_irq_enabled) {
5231d38c 267 disable_irq(hba->irq);
57d104c1
SJ
268 hba->is_irq_enabled = false;
269 }
270}
3441da7d 271
3d17b9b5
AD
272static inline void ufshcd_wb_config(struct ufs_hba *hba)
273{
79e3520f 274 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5
AD
275 return;
276
3b5f3c0d
YH
277 ufshcd_wb_toggle(hba, true);
278
279 ufshcd_wb_toggle_flush_during_h8(hba, true);
21acf460
SC
280 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
281 ufshcd_wb_toggle_flush(hba, true);
3d17b9b5
AD
282}
283
38135535
SJ
284static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
285{
286 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
287 scsi_unblock_requests(hba->host);
288}
289
290static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
291{
292 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
293 scsi_block_requests(hba->host);
294}
295
6667e6d9 296static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
28fa68fc 297 enum ufs_trace_str_t str_t)
6667e6d9
OS
298{
299 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
89ac2c3b 300 struct utp_upiu_header *header;
6667e6d9 301
9d5095e7
BH
302 if (!trace_ufshcd_upiu_enabled())
303 return;
304
89ac2c3b
BH
305 if (str_t == UFS_CMD_SEND)
306 header = &rq->header;
307 else
308 header = &hba->lrb[tag].ucd_rsp_ptr->header;
309
310 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
867fdc2d 311 UFS_TSF_CDB);
6667e6d9
OS
312}
313
fb475b74
AA
314static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
315 enum ufs_trace_str_t str_t,
316 struct utp_upiu_req *rq_rsp)
6667e6d9 317{
9d5095e7
BH
318 if (!trace_ufshcd_upiu_enabled())
319 return;
6667e6d9 320
be20b51c 321 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
867fdc2d 322 &rq_rsp->qr, UFS_TSF_OSF);
6667e6d9
OS
323}
324
325static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
28fa68fc 326 enum ufs_trace_str_t str_t)
6667e6d9 327{
e8c2da7e 328 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
6667e6d9 329
9d5095e7
BH
330 if (!trace_ufshcd_upiu_enabled())
331 return;
332
0ed083e9 333 if (str_t == UFS_TM_SEND)
1352eec8
GS
334 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
335 &descp->upiu_req.req_header,
336 &descp->upiu_req.input_param1,
337 UFS_TSF_TM_INPUT);
0ed083e9 338 else
1352eec8
GS
339 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
340 &descp->upiu_rsp.rsp_header,
341 &descp->upiu_rsp.output_param1,
342 UFS_TSF_TM_OUTPUT);
6667e6d9
OS
343}
344
aa5c6979
SC
345static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
346 struct uic_command *ucmd,
28fa68fc 347 enum ufs_trace_str_t str_t)
aa5c6979
SC
348{
349 u32 cmd;
350
351 if (!trace_ufshcd_uic_command_enabled())
352 return;
353
28fa68fc 354 if (str_t == UFS_CMD_SEND)
aa5c6979
SC
355 cmd = ucmd->command;
356 else
357 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
358
28fa68fc 359 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
aa5c6979
SC
360 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
361 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
362 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
363}
364
28fa68fc
BH
365static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
366 enum ufs_trace_str_t str_t)
1a07f2d9 367{
2bd3b6b7 368 u64 lba = 0;
69a314d6 369 u8 opcode = 0, group_id = 0;
1a07f2d9 370 u32 intr, doorbell;
e7c3b379 371 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
e4d2add7 372 struct scsi_cmnd *cmd = lrbp->cmd;
3f2c1002 373 struct request *rq = scsi_cmd_to_rq(cmd);
1a07f2d9
LS
374 int transfer_len = -1;
375
44b5de36
BH
376 if (!cmd)
377 return;
378
44b5de36
BH
379 /* trace UPIU also */
380 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
f0101af4
BH
381 if (!trace_ufshcd_command_enabled())
382 return;
383
44b5de36 384 opcode = cmd->cmnd[0];
44b5de36
BH
385
386 if (opcode == READ_10 || opcode == WRITE_10) {
387 /*
388 * Currently we only fully trace read(10) and write(10) commands
389 */
390 transfer_len =
391 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
2bd3b6b7 392 lba = scsi_get_lba(cmd);
44b5de36
BH
393 if (opcode == WRITE_10)
394 group_id = lrbp->cmd->cmnd[6];
395 } else if (opcode == UNMAP) {
396 /*
397 * The number of Bytes to be unmapped beginning with the lba.
398 */
3f2c1002 399 transfer_len = blk_rq_bytes(rq);
2bd3b6b7 400 lba = scsi_get_lba(cmd);
1a07f2d9
LS
401 }
402
403 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
404 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
28fa68fc 405 trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
69a314d6 406 doorbell, transfer_len, intr, lba, opcode, group_id);
1a07f2d9
LS
407}
408
ff8e20c6
DR
409static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
410{
411 struct ufs_clk_info *clki;
412 struct list_head *head = &hba->clk_list_head;
413
566ec9ad 414 if (list_empty(head))
ff8e20c6
DR
415 return;
416
417 list_for_each_entry(clki, head, list) {
418 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
419 clki->max_freq)
420 dev_err(hba->dev, "clk: %s, rate: %u\n",
421 clki->name, clki->curr_freq);
422 }
423}
424
e965e5e0
SC
425static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
426 char *err_name)
ff8e20c6
DR
427{
428 int i;
27752647 429 bool found = false;
e965e5e0 430 struct ufs_event_hist *e;
ff8e20c6 431
e965e5e0
SC
432 if (id >= UFS_EVT_CNT)
433 return;
ff8e20c6 434
e965e5e0 435 e = &hba->ufs_stats.event[id];
ff8e20c6 436
e965e5e0
SC
437 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
438 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
439
440 if (e->tstamp[p] == 0)
ff8e20c6 441 continue;
c5397f13 442 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
e965e5e0 443 e->val[p], ktime_to_us(e->tstamp[p]));
27752647 444 found = true;
ff8e20c6 445 }
27752647
SC
446
447 if (!found)
fd1fb4d5 448 dev_err(hba->dev, "No record of %s\n", err_name);
bafd09f8
DH
449 else
450 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
ff8e20c6
DR
451}
452
e965e5e0 453static void ufshcd_print_evt_hist(struct ufs_hba *hba)
66cc820f 454{
ba80917d 455 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
ff8e20c6 456
e965e5e0
SC
457 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
458 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
459 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
460 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
461 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
462 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
463 "auto_hibern8_err");
464 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
465 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
466 "link_startup_fail");
467 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
468 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
469 "suspend_fail");
470 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
471 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
472 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
ff8e20c6 473
7c486d91 474 ufshcd_vops_dbg_register_dump(hba);
66cc820f
DR
475}
476
477static
478void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
479{
480 struct ufshcd_lrb *lrbp;
7fabb77b 481 int prdt_length;
66cc820f
DR
482 int tag;
483
484 for_each_set_bit(tag, &bitmap, hba->nutrs) {
485 lrbp = &hba->lrb[tag];
486
ff8e20c6
DR
487 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
488 tag, ktime_to_us(lrbp->issue_time_stamp));
09017188
ZL
489 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
490 tag, ktime_to_us(lrbp->compl_time_stamp));
ff8e20c6
DR
491 dev_err(hba->dev,
492 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
493 tag, (u64)lrbp->utrd_dma_addr);
494
66cc820f
DR
495 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
496 sizeof(struct utp_transfer_req_desc));
ff8e20c6
DR
497 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
498 (u64)lrbp->ucd_req_dma_addr);
66cc820f
DR
499 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
500 sizeof(struct utp_upiu_req));
ff8e20c6
DR
501 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
502 (u64)lrbp->ucd_rsp_dma_addr);
66cc820f
DR
503 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
504 sizeof(struct utp_upiu_rsp));
66cc820f 505
7fabb77b
GB
506 prdt_length = le16_to_cpu(
507 lrbp->utr_descriptor_ptr->prd_table_length);
cc770ce3
EB
508 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
509 prdt_length /= sizeof(struct ufshcd_sg_entry);
510
7fabb77b
GB
511 dev_err(hba->dev,
512 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
513 tag, prdt_length,
514 (u64)lrbp->ucd_prdt_dma_addr);
515
516 if (pr_prdt)
66cc820f 517 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
7fabb77b 518 sizeof(struct ufshcd_sg_entry) * prdt_length);
66cc820f
DR
519 }
520}
521
522static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
523{
66cc820f
DR
524 int tag;
525
526 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
391e388f
CH
527 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
528
66cc820f 529 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
391e388f 530 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
66cc820f
DR
531 }
532}
533
6ba65588
GB
534static void ufshcd_print_host_state(struct ufs_hba *hba)
535{
3f8af604
CG
536 struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
537
6ba65588 538 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
7252a360
BVA
539 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
540 hba->outstanding_reqs, hba->outstanding_tasks);
6ba65588
GB
541 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
542 hba->saved_err, hba->saved_uic_err);
543 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
544 hba->curr_dev_pwr_mode, hba->uic_link_state);
545 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
546 hba->pm_op_in_progress, hba->is_sys_suspended);
547 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
548 hba->auto_bkops_enabled, hba->host->host_self_blocked);
549 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
3f8af604
CG
550 dev_err(hba->dev,
551 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
552 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
553 hba->ufs_stats.hibern8_exit_cnt);
554 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
555 ktime_to_us(hba->ufs_stats.last_intr_ts),
556 hba->ufs_stats.last_intr_status);
6ba65588
GB
557 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
558 hba->eh_flags, hba->req_abort_count);
3f8af604
CG
559 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
560 hba->ufs_version, hba->capabilities, hba->caps);
6ba65588
GB
561 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
562 hba->dev_quirks);
3f8af604
CG
563 if (sdev_ufs)
564 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
565 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
566
567 ufshcd_print_clk_freqs(hba);
6ba65588
GB
568}
569
ff8e20c6
DR
570/**
571 * ufshcd_print_pwr_info - print power params as saved in hba
572 * power info
573 * @hba: per-adapter instance
574 */
575static void ufshcd_print_pwr_info(struct ufs_hba *hba)
576{
577 static const char * const names[] = {
578 "INVALID MODE",
579 "FAST MODE",
580 "SLOW_MODE",
581 "INVALID MODE",
582 "FASTAUTO_MODE",
583 "SLOWAUTO_MODE",
584 "INVALID MODE",
585 };
586
71bb9ab6
AH
587 /*
588 * Using dev_dbg to avoid messages during runtime PM to avoid
589 * never-ending cycles of messages written back to storage by user space
590 * causing runtime resume, causing more messages and so on.
591 */
592 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
ff8e20c6
DR
593 __func__,
594 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
595 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
596 names[hba->pwr_info.pwr_rx],
597 names[hba->pwr_info.pwr_tx],
598 hba->pwr_info.hs_rate);
599}
600
31a5d9ca
SC
601static void ufshcd_device_reset(struct ufs_hba *hba)
602{
603 int err;
604
605 err = ufshcd_vops_device_reset(hba);
606
607 if (!err) {
608 ufshcd_set_ufs_dev_active(hba);
609 if (ufshcd_is_wb_allowed(hba)) {
4cd48995
BH
610 hba->dev_info.wb_enabled = false;
611 hba->dev_info.wb_buf_flush_enabled = false;
31a5d9ca
SC
612 }
613 }
614 if (err != -EOPNOTSUPP)
615 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
616}
617
5c955c10
SC
618void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
619{
620 if (!us)
621 return;
622
623 if (us < 10)
624 udelay(us);
625 else
626 usleep_range(us, us + tolerance);
627}
628EXPORT_SYMBOL_GPL(ufshcd_delay_us);
629
5cac1095 630/**
5a0b0cb9 631 * ufshcd_wait_for_register - wait for register value to change
5cac1095
BVA
632 * @hba: per-adapter interface
633 * @reg: mmio register offset
634 * @mask: mask to apply to the read register value
635 * @val: value to wait for
636 * @interval_us: polling interval in microseconds
637 * @timeout_ms: timeout in milliseconds
5a0b0cb9 638 *
5cac1095
BVA
639 * Return:
640 * -ETIMEDOUT on error, zero on success.
5a0b0cb9 641 */
59a57bb7 642static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
596585a2 643 u32 val, unsigned long interval_us,
5cac1095 644 unsigned long timeout_ms)
5a0b0cb9
SRT
645{
646 int err = 0;
647 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
648
649 /* ignore bits that we don't intend to wait on */
650 val = val & mask;
651
652 while ((ufshcd_readl(hba, reg) & mask) != val) {
5cac1095 653 usleep_range(interval_us, interval_us + 50);
5a0b0cb9
SRT
654 if (time_after(jiffies, timeout)) {
655 if ((ufshcd_readl(hba, reg) & mask) != val)
656 err = -ETIMEDOUT;
657 break;
658 }
659 }
660
661 return err;
662}
663
2fbd009b
SJ
664/**
665 * ufshcd_get_intr_mask - Get the interrupt bit mask
8aa29f19 666 * @hba: Pointer to adapter instance
2fbd009b
SJ
667 *
668 * Returns interrupt bit mask per version
669 */
670static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
671{
51428818
CC
672 if (hba->ufs_version == ufshci_version(1, 0))
673 return INTERRUPT_MASK_ALL_VER_10;
674 if (hba->ufs_version <= ufshci_version(2, 0))
675 return INTERRUPT_MASK_ALL_VER_11;
c01848c6 676
51428818 677 return INTERRUPT_MASK_ALL_VER_21;
2fbd009b
SJ
678}
679
7a3e97b0
SY
680/**
681 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
8aa29f19 682 * @hba: Pointer to adapter instance
7a3e97b0
SY
683 *
684 * Returns UFSHCI version supported by the controller
685 */
686static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
687{
51428818
CC
688 u32 ufshci_ver;
689
0263bcd0 690 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
51428818
CC
691 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
692 else
693 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
9949e702 694
51428818
CC
695 /*
696 * UFSHCI v1.x uses a different version scheme, in order
697 * to allow the use of comparisons with the ufshci_version
698 * function, we convert it to the same scheme as ufs 2.0+.
699 */
700 if (ufshci_ver & 0x00010000)
701 return ufshci_version(1, ufshci_ver & 0x00000100);
702
703 return ufshci_ver;
7a3e97b0
SY
704}
705
706/**
707 * ufshcd_is_device_present - Check if any device connected to
708 * the host controller
5c0c28a8 709 * @hba: pointer to adapter instance
7a3e97b0 710 *
c9e6010b 711 * Returns true if device present, false if no device detected
7a3e97b0 712 */
c9e6010b 713static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
7a3e97b0 714{
51d1628f 715 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
7a3e97b0
SY
716}
717
718/**
719 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
8aa29f19 720 * @lrbp: pointer to local command reference block
7a3e97b0
SY
721 *
722 * This function is used to get the OCS field from UTRD
723 * Returns the OCS field in the UTRD
724 */
957d63e7 725static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
7a3e97b0 726{
e8c8e82a 727 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
7a3e97b0
SY
728}
729
7a3e97b0
SY
730/**
731 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
732 * @hba: per adapter instance
733 * @pos: position of the bit to be cleared
734 */
735static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
736{
87183841
AA
737 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
738 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
739 else
740 ufshcd_writel(hba, ~(1 << pos),
741 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
1399c5b0
AA
742}
743
744/**
745 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
746 * @hba: per adapter instance
747 * @pos: position of the bit to be cleared
748 */
749static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
750{
87183841
AA
751 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
752 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
753 else
754 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
7a3e97b0
SY
755}
756
757/**
758 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
759 * @reg: Register value of host controller status
760 *
761 * Returns integer, 0 on Success and positive value if failed
762 */
763static inline int ufshcd_get_lists_status(u32 reg)
764{
6cf16115 765 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
7a3e97b0
SY
766}
767
768/**
769 * ufshcd_get_uic_cmd_result - Get the UIC command result
770 * @hba: Pointer to adapter instance
771 *
772 * This function gets the result of UIC command completion
773 * Returns 0 on success, non zero value on error
774 */
775static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
776{
b873a275 777 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
7a3e97b0
SY
778 MASK_UIC_COMMAND_RESULT;
779}
780
12b4fdb4
SJ
781/**
782 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
783 * @hba: Pointer to adapter instance
784 *
785 * This function gets UIC command argument3
786 * Returns 0 on success, non zero value on error
787 */
788static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
789{
790 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
791}
792
7a3e97b0 793/**
5a0b0cb9 794 * ufshcd_get_req_rsp - returns the TR response transaction type
7a3e97b0 795 * @ucd_rsp_ptr: pointer to response UPIU
7a3e97b0
SY
796 */
797static inline int
5a0b0cb9 798ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
7a3e97b0 799{
5a0b0cb9 800 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
7a3e97b0
SY
801}
802
803/**
804 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
805 * @ucd_rsp_ptr: pointer to response UPIU
806 *
807 * This function gets the response status and scsi_status from response UPIU
808 * Returns the response result code.
809 */
810static inline int
811ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
812{
813 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
814}
815
1c2623c5
SJ
816/*
817 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
818 * from response UPIU
819 * @ucd_rsp_ptr: pointer to response UPIU
820 *
821 * Return the data segment length.
822 */
823static inline unsigned int
824ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
825{
826 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
827 MASK_RSP_UPIU_DATA_SEG_LEN;
828}
829
66ec6d59
SRT
830/**
831 * ufshcd_is_exception_event - Check if the device raised an exception event
832 * @ucd_rsp_ptr: pointer to response UPIU
833 *
834 * The function checks if the device raised an exception event indicated in
835 * the Device Information field of response UPIU.
836 *
837 * Returns true if exception is raised, false otherwise.
838 */
839static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
840{
841 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
51d1628f 842 MASK_RSP_EXCEPTION_EVENT;
66ec6d59
SRT
843}
844
7a3e97b0 845/**
7d568652 846 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
7a3e97b0 847 * @hba: per adapter instance
7a3e97b0
SY
848 */
849static inline void
7d568652 850ufshcd_reset_intr_aggr(struct ufs_hba *hba)
7a3e97b0 851{
7d568652
SJ
852 ufshcd_writel(hba, INT_AGGR_ENABLE |
853 INT_AGGR_COUNTER_AND_TIMER_RESET,
854 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
855}
856
857/**
858 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
859 * @hba: per adapter instance
860 * @cnt: Interrupt aggregation counter threshold
861 * @tmout: Interrupt aggregation timeout value
862 */
863static inline void
864ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
865{
866 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
867 INT_AGGR_COUNTER_THLD_VAL(cnt) |
868 INT_AGGR_TIMEOUT_VAL(tmout),
869 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
7a3e97b0
SY
870}
871
b852190e
YG
872/**
873 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
874 * @hba: per adapter instance
875 */
876static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
877{
878 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
879}
880
7a3e97b0
SY
881/**
882 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
883 * When run-stop registers are set to 1, it indicates the
884 * host controller that it can process the requests
885 * @hba: per adapter instance
886 */
887static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
888{
b873a275
SJ
889 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
890 REG_UTP_TASK_REQ_LIST_RUN_STOP);
891 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
892 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
7a3e97b0
SY
893}
894
7a3e97b0
SY
895/**
896 * ufshcd_hba_start - Start controller initialization sequence
897 * @hba: per adapter instance
898 */
899static inline void ufshcd_hba_start(struct ufs_hba *hba)
900{
df043c74
ST
901 u32 val = CONTROLLER_ENABLE;
902
903 if (ufshcd_crypto_enable(hba))
904 val |= CRYPTO_GENERAL_ENABLE;
905
906 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
7a3e97b0
SY
907}
908
909/**
910 * ufshcd_is_hba_active - Get controller state
911 * @hba: per adapter instance
912 *
c9e6010b 913 * Returns false if controller is active, true otherwise
7a3e97b0 914 */
c9e6010b 915static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
7a3e97b0 916{
4a8eec2b
TK
917 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
918 ? false : true;
7a3e97b0
SY
919}
920
37113106
YG
921u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
922{
923 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
51428818 924 if (hba->ufs_version <= ufshci_version(1, 1))
37113106
YG
925 return UFS_UNIPRO_VER_1_41;
926 else
927 return UFS_UNIPRO_VER_1_6;
928}
929EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
930
931static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
932{
933 /*
934 * If both host and device support UniPro ver1.6 or later, PA layer
935 * parameters tuning happens during link startup itself.
936 *
937 * We can manually tune PA layer parameters if either host or device
938 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
939 * logic simple, we will only do manual tuning if local unipro version
940 * doesn't support ver1.6 or later.
941 */
a858af9a 942 return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
37113106
YG
943}
944
394b949f
SJ
945/**
946 * ufshcd_set_clk_freq - set UFS controller clock frequencies
947 * @hba: per adapter instance
948 * @scale_up: If True, set max possible frequency othewise set low frequency
949 *
950 * Returns 0 if successful
951 * Returns < 0 for any other errors
952 */
953static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
a3cd5ec5 954{
955 int ret = 0;
956 struct ufs_clk_info *clki;
957 struct list_head *head = &hba->clk_list_head;
a3cd5ec5 958
566ec9ad 959 if (list_empty(head))
a3cd5ec5 960 goto out;
961
a3cd5ec5 962 list_for_each_entry(clki, head, list) {
963 if (!IS_ERR_OR_NULL(clki->clk)) {
964 if (scale_up && clki->max_freq) {
965 if (clki->curr_freq == clki->max_freq)
966 continue;
967
a3cd5ec5 968 ret = clk_set_rate(clki->clk, clki->max_freq);
969 if (ret) {
970 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
971 __func__, clki->name,
972 clki->max_freq, ret);
973 break;
974 }
975 trace_ufshcd_clk_scaling(dev_name(hba->dev),
976 "scaled up", clki->name,
977 clki->curr_freq,
978 clki->max_freq);
979
980 clki->curr_freq = clki->max_freq;
981
982 } else if (!scale_up && clki->min_freq) {
983 if (clki->curr_freq == clki->min_freq)
984 continue;
985
a3cd5ec5 986 ret = clk_set_rate(clki->clk, clki->min_freq);
987 if (ret) {
988 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
989 __func__, clki->name,
990 clki->min_freq, ret);
991 break;
992 }
993 trace_ufshcd_clk_scaling(dev_name(hba->dev),
994 "scaled down", clki->name,
995 clki->curr_freq,
996 clki->min_freq);
997 clki->curr_freq = clki->min_freq;
998 }
999 }
1000 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1001 clki->name, clk_get_rate(clki->clk));
1002 }
1003
394b949f
SJ
1004out:
1005 return ret;
1006}
1007
1008/**
1009 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1010 * @hba: per adapter instance
1011 * @scale_up: True if scaling up and false if scaling down
1012 *
1013 * Returns 0 if successful
1014 * Returns < 0 for any other errors
1015 */
1016static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1017{
1018 int ret = 0;
1019 ktime_t start = ktime_get();
1020
1021 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1022 if (ret)
1023 goto out;
1024
1025 ret = ufshcd_set_clk_freq(hba, scale_up);
1026 if (ret)
1027 goto out;
1028
a3cd5ec5 1029 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
394b949f
SJ
1030 if (ret)
1031 ufshcd_set_clk_freq(hba, !scale_up);
a3cd5ec5 1032
1033out:
394b949f 1034 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
a3cd5ec5 1035 (scale_up ? "up" : "down"),
1036 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1037 return ret;
1038}
1039
1040/**
1041 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1042 * @hba: per adapter instance
1043 * @scale_up: True if scaling up and false if scaling down
1044 *
1045 * Returns true if scaling is required, false otherwise.
1046 */
1047static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1048 bool scale_up)
1049{
1050 struct ufs_clk_info *clki;
1051 struct list_head *head = &hba->clk_list_head;
1052
566ec9ad 1053 if (list_empty(head))
a3cd5ec5 1054 return false;
1055
1056 list_for_each_entry(clki, head, list) {
1057 if (!IS_ERR_OR_NULL(clki->clk)) {
1058 if (scale_up && clki->max_freq) {
1059 if (clki->curr_freq == clki->max_freq)
1060 continue;
1061 return true;
1062 } else if (!scale_up && clki->min_freq) {
1063 if (clki->curr_freq == clki->min_freq)
1064 continue;
1065 return true;
1066 }
1067 }
1068 }
1069
1070 return false;
1071}
1072
8d077ede
BVA
1073/*
1074 * Determine the number of pending commands by counting the bits in the SCSI
1075 * device budget maps. This approach has been selected because a bit is set in
1076 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1077 * flag. The host_self_blocked flag can be modified by calling
1078 * scsi_block_requests() or scsi_unblock_requests().
1079 */
1080static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1081{
1082 struct scsi_device *sdev;
1083 u32 pending = 0;
1084
99c66a88
BH
1085 lockdep_assert_held(hba->host->host_lock);
1086 __shost_for_each_device(sdev, hba->host)
8d077ede
BVA
1087 pending += sbitmap_weight(&sdev->budget_map);
1088
1089 return pending;
1090}
1091
a3cd5ec5 1092static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1093 u64 wait_timeout_us)
1094{
1095 unsigned long flags;
1096 int ret = 0;
1097 u32 tm_doorbell;
8d077ede 1098 u32 tr_pending;
a3cd5ec5 1099 bool timeout = false, do_last_check = false;
1100 ktime_t start;
1101
1102 ufshcd_hold(hba, false);
1103 spin_lock_irqsave(hba->host->host_lock, flags);
1104 /*
1105 * Wait for all the outstanding tasks/transfer requests.
1106 * Verify by checking the doorbell registers are clear.
1107 */
1108 start = ktime_get();
1109 do {
1110 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1111 ret = -EBUSY;
1112 goto out;
1113 }
1114
1115 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
8d077ede
BVA
1116 tr_pending = ufshcd_pending_cmds(hba);
1117 if (!tm_doorbell && !tr_pending) {
a3cd5ec5 1118 timeout = false;
1119 break;
1120 } else if (do_last_check) {
1121 break;
1122 }
1123
1124 spin_unlock_irqrestore(hba->host->host_lock, flags);
1125 schedule();
1126 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1127 wait_timeout_us) {
1128 timeout = true;
1129 /*
1130 * We might have scheduled out for long time so make
1131 * sure to check if doorbells are cleared by this time
1132 * or not.
1133 */
1134 do_last_check = true;
1135 }
1136 spin_lock_irqsave(hba->host->host_lock, flags);
8d077ede 1137 } while (tm_doorbell || tr_pending);
a3cd5ec5 1138
1139 if (timeout) {
1140 dev_err(hba->dev,
1141 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
8d077ede 1142 __func__, tm_doorbell, tr_pending);
a3cd5ec5 1143 ret = -EBUSY;
1144 }
1145out:
1146 spin_unlock_irqrestore(hba->host->host_lock, flags);
1147 ufshcd_release(hba);
1148 return ret;
1149}
1150
1151/**
1152 * ufshcd_scale_gear - scale up/down UFS gear
1153 * @hba: per adapter instance
1154 * @scale_up: True for scaling up gear and false for scaling down
1155 *
1156 * Returns 0 for success,
1157 * Returns -EBUSY if scaling can't happen at this time
1158 * Returns non-zero for any other errors
1159 */
1160static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1161{
a3cd5ec5 1162 int ret = 0;
1163 struct ufs_pa_layer_attr new_pwr_info;
1164
1165 if (scale_up) {
1166 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1167 sizeof(struct ufs_pa_layer_attr));
1168 } else {
1169 memcpy(&new_pwr_info, &hba->pwr_info,
1170 sizeof(struct ufs_pa_layer_attr));
1171
29b87e92
CG
1172 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1173 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
a3cd5ec5 1174 /* save the current power mode */
1175 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1176 &hba->pwr_info,
1177 sizeof(struct ufs_pa_layer_attr));
1178
1179 /* scale down gear */
29b87e92
CG
1180 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1181 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
a3cd5ec5 1182 }
1183 }
1184
1185 /* check if the power mode needs to be changed or not? */
6a9df818 1186 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
a3cd5ec5 1187 if (ret)
1188 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1189 __func__, ret,
1190 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1191 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1192
1193 return ret;
1194}
1195
1196static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1197{
1198 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1199 int ret = 0;
1200 /*
1201 * make sure that there are no outstanding requests when
1202 * clock scaling is in progress
1203 */
38135535 1204 ufshcd_scsi_block_requests(hba);
a3cd5ec5 1205 down_write(&hba->clk_scaling_lock);
0e9d4ca4
CG
1206
1207 if (!hba->clk_scaling.is_allowed ||
1208 ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
a3cd5ec5 1209 ret = -EBUSY;
1210 up_write(&hba->clk_scaling_lock);
38135535 1211 ufshcd_scsi_unblock_requests(hba);
0e9d4ca4 1212 goto out;
a3cd5ec5 1213 }
1214
0e9d4ca4
CG
1215 /* let's not get into low power until clock scaling is completed */
1216 ufshcd_hold(hba, false);
1217
1218out:
a3cd5ec5 1219 return ret;
1220}
1221
0e9d4ca4 1222static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
a3cd5ec5 1223{
0e9d4ca4
CG
1224 if (writelock)
1225 up_write(&hba->clk_scaling_lock);
1226 else
1227 up_read(&hba->clk_scaling_lock);
38135535 1228 ufshcd_scsi_unblock_requests(hba);
0e9d4ca4 1229 ufshcd_release(hba);
a3cd5ec5 1230}
1231
1232/**
1233 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1234 * @hba: per adapter instance
1235 * @scale_up: True for scaling up and false for scalin down
1236 *
1237 * Returns 0 for success,
1238 * Returns -EBUSY if scaling can't happen at this time
1239 * Returns non-zero for any other errors
1240 */
1241static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1242{
1243 int ret = 0;
0e9d4ca4 1244 bool is_writelock = true;
401f1e44 1245
a3cd5ec5 1246 ret = ufshcd_clock_scaling_prepare(hba);
1247 if (ret)
0e9d4ca4 1248 return ret;
a3cd5ec5 1249
1250 /* scale down the gear before scaling down clocks */
1251 if (!scale_up) {
1252 ret = ufshcd_scale_gear(hba, false);
1253 if (ret)
394b949f 1254 goto out_unprepare;
a3cd5ec5 1255 }
1256
1257 ret = ufshcd_scale_clks(hba, scale_up);
1258 if (ret) {
1259 if (!scale_up)
1260 ufshcd_scale_gear(hba, true);
394b949f 1261 goto out_unprepare;
a3cd5ec5 1262 }
1263
1264 /* scale up the gear after scaling up clocks */
1265 if (scale_up) {
1266 ret = ufshcd_scale_gear(hba, true);
3d17b9b5 1267 if (ret) {
a3cd5ec5 1268 ufshcd_scale_clks(hba, false);
3d17b9b5
AD
1269 goto out_unprepare;
1270 }
a3cd5ec5 1271 }
1272
3d17b9b5 1273 /* Enable Write Booster if we have scaled up else disable it */
0e9d4ca4
CG
1274 downgrade_write(&hba->clk_scaling_lock);
1275 is_writelock = false;
3b5f3c0d 1276 ufshcd_wb_toggle(hba, scale_up);
3d17b9b5 1277
394b949f 1278out_unprepare:
0e9d4ca4 1279 ufshcd_clock_scaling_unprepare(hba, is_writelock);
a3cd5ec5 1280 return ret;
1281}
1282
401f1e44 1283static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1284{
1285 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1286 clk_scaling.suspend_work);
1287 unsigned long irq_flags;
1288
1289 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1290 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1291 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1292 return;
1293 }
1294 hba->clk_scaling.is_suspended = true;
1295 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1296
1297 __ufshcd_suspend_clkscaling(hba);
1298}
1299
1300static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1301{
1302 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1303 clk_scaling.resume_work);
1304 unsigned long irq_flags;
1305
1306 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1307 if (!hba->clk_scaling.is_suspended) {
1308 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1309 return;
1310 }
1311 hba->clk_scaling.is_suspended = false;
1312 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1313
1314 devfreq_resume_device(hba->devfreq);
1315}
1316
a3cd5ec5 1317static int ufshcd_devfreq_target(struct device *dev,
1318 unsigned long *freq, u32 flags)
1319{
1320 int ret = 0;
1321 struct ufs_hba *hba = dev_get_drvdata(dev);
1322 ktime_t start;
401f1e44 1323 bool scale_up, sched_clk_scaling_suspend_work = false;
092b4558
BA
1324 struct list_head *clk_list = &hba->clk_list_head;
1325 struct ufs_clk_info *clki;
a3cd5ec5 1326 unsigned long irq_flags;
1327
1328 if (!ufshcd_is_clkscaling_supported(hba))
1329 return -EINVAL;
1330
91831d33
AD
1331 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1332 /* Override with the closest supported frequency */
1333 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
a3cd5ec5 1334 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1335 if (ufshcd_eh_in_progress(hba)) {
1336 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1337 return 0;
1338 }
1339
401f1e44 1340 if (!hba->clk_scaling.active_reqs)
1341 sched_clk_scaling_suspend_work = true;
1342
092b4558
BA
1343 if (list_empty(clk_list)) {
1344 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1345 goto out;
1346 }
1347
91831d33 1348 /* Decide based on the rounded-off frequency and update */
51d1628f 1349 scale_up = *freq == clki->max_freq;
91831d33
AD
1350 if (!scale_up)
1351 *freq = clki->min_freq;
1352 /* Update the frequency */
401f1e44 1353 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1354 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1355 ret = 0;
1356 goto out; /* no state change required */
a3cd5ec5 1357 }
1358 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1359
1360 start = ktime_get();
a3cd5ec5 1361 ret = ufshcd_devfreq_scale(hba, scale_up);
1362
a3cd5ec5 1363 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1364 (scale_up ? "up" : "down"),
1365 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1366
401f1e44 1367out:
1368 if (sched_clk_scaling_suspend_work)
1369 queue_work(hba->clk_scaling.workq,
1370 &hba->clk_scaling.suspend_work);
1371
a3cd5ec5 1372 return ret;
1373}
1374
a3cd5ec5 1375static int ufshcd_devfreq_get_dev_status(struct device *dev,
1376 struct devfreq_dev_status *stat)
1377{
1378 struct ufs_hba *hba = dev_get_drvdata(dev);
1379 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1380 unsigned long flags;
91831d33
AD
1381 struct list_head *clk_list = &hba->clk_list_head;
1382 struct ufs_clk_info *clki;
b1bf66d1 1383 ktime_t curr_t;
a3cd5ec5 1384
1385 if (!ufshcd_is_clkscaling_supported(hba))
1386 return -EINVAL;
1387
1388 memset(stat, 0, sizeof(*stat));
1389
1390 spin_lock_irqsave(hba->host->host_lock, flags);
b1bf66d1 1391 curr_t = ktime_get();
a3cd5ec5 1392 if (!scaling->window_start_t)
1393 goto start_window;
1394
91831d33
AD
1395 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1396 /*
1397 * If current frequency is 0, then the ondemand governor considers
1398 * there's no initial frequency set. And it always requests to set
1399 * to max. frequency.
1400 */
1401 stat->current_frequency = clki->curr_freq;
a3cd5ec5 1402 if (scaling->is_busy_started)
b1bf66d1
SC
1403 scaling->tot_busy_t += ktime_us_delta(curr_t,
1404 scaling->busy_start_t);
a3cd5ec5 1405
b1bf66d1 1406 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
a3cd5ec5 1407 stat->busy_time = scaling->tot_busy_t;
1408start_window:
b1bf66d1 1409 scaling->window_start_t = curr_t;
a3cd5ec5 1410 scaling->tot_busy_t = 0;
1411
1412 if (hba->outstanding_reqs) {
b1bf66d1 1413 scaling->busy_start_t = curr_t;
a3cd5ec5 1414 scaling->is_busy_started = true;
1415 } else {
1416 scaling->busy_start_t = 0;
1417 scaling->is_busy_started = false;
1418 }
1419 spin_unlock_irqrestore(hba->host->host_lock, flags);
1420 return 0;
1421}
1422
deac444f
BA
1423static int ufshcd_devfreq_init(struct ufs_hba *hba)
1424{
092b4558
BA
1425 struct list_head *clk_list = &hba->clk_list_head;
1426 struct ufs_clk_info *clki;
deac444f
BA
1427 struct devfreq *devfreq;
1428 int ret;
1429
092b4558
BA
1430 /* Skip devfreq if we don't have any clocks in the list */
1431 if (list_empty(clk_list))
1432 return 0;
1433
1434 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1435 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1436 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1437
90b8491c
SC
1438 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1439 &hba->vps->ondemand_data);
092b4558 1440 devfreq = devfreq_add_device(hba->dev,
90b8491c 1441 &hba->vps->devfreq_profile,
deac444f 1442 DEVFREQ_GOV_SIMPLE_ONDEMAND,
90b8491c 1443 &hba->vps->ondemand_data);
deac444f
BA
1444 if (IS_ERR(devfreq)) {
1445 ret = PTR_ERR(devfreq);
1446 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
092b4558
BA
1447
1448 dev_pm_opp_remove(hba->dev, clki->min_freq);
1449 dev_pm_opp_remove(hba->dev, clki->max_freq);
deac444f
BA
1450 return ret;
1451 }
1452
1453 hba->devfreq = devfreq;
1454
1455 return 0;
1456}
1457
092b4558
BA
1458static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1459{
1460 struct list_head *clk_list = &hba->clk_list_head;
1461 struct ufs_clk_info *clki;
1462
1463 if (!hba->devfreq)
1464 return;
1465
1466 devfreq_remove_device(hba->devfreq);
1467 hba->devfreq = NULL;
1468
1469 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1470 dev_pm_opp_remove(hba->dev, clki->min_freq);
1471 dev_pm_opp_remove(hba->dev, clki->max_freq);
1472}
1473
401f1e44 1474static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1475{
1476 unsigned long flags;
1477
1478 devfreq_suspend_device(hba->devfreq);
1479 spin_lock_irqsave(hba->host->host_lock, flags);
1480 hba->clk_scaling.window_start_t = 0;
1481 spin_unlock_irqrestore(hba->host->host_lock, flags);
1482}
a3cd5ec5 1483
a508253d
GB
1484static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1485{
401f1e44 1486 unsigned long flags;
1487 bool suspend = false;
1488
f9a7fa34
SC
1489 cancel_work_sync(&hba->clk_scaling.suspend_work);
1490 cancel_work_sync(&hba->clk_scaling.resume_work);
fcb0c4b0 1491
401f1e44 1492 spin_lock_irqsave(hba->host->host_lock, flags);
1493 if (!hba->clk_scaling.is_suspended) {
1494 suspend = true;
1495 hba->clk_scaling.is_suspended = true;
1496 }
1497 spin_unlock_irqrestore(hba->host->host_lock, flags);
1498
1499 if (suspend)
1500 __ufshcd_suspend_clkscaling(hba);
a508253d
GB
1501}
1502
1503static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1504{
401f1e44 1505 unsigned long flags;
1506 bool resume = false;
1507
401f1e44 1508 spin_lock_irqsave(hba->host->host_lock, flags);
1509 if (hba->clk_scaling.is_suspended) {
1510 resume = true;
1511 hba->clk_scaling.is_suspended = false;
1512 }
1513 spin_unlock_irqrestore(hba->host->host_lock, flags);
1514
1515 if (resume)
1516 devfreq_resume_device(hba->devfreq);
fcb0c4b0
ST
1517}
1518
1519static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1520 struct device_attribute *attr, char *buf)
1521{
1522 struct ufs_hba *hba = dev_get_drvdata(dev);
1523
1481b7fe 1524 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
fcb0c4b0
ST
1525}
1526
1527static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1528 struct device_attribute *attr, const char *buf, size_t count)
1529{
1530 struct ufs_hba *hba = dev_get_drvdata(dev);
1531 u32 value;
9cd20d3f 1532 int err = 0;
fcb0c4b0
ST
1533
1534 if (kstrtou32(buf, 0, &value))
1535 return -EINVAL;
1536
9cd20d3f
CG
1537 down(&hba->host_sem);
1538 if (!ufshcd_is_user_access_allowed(hba)) {
1539 err = -EBUSY;
1540 goto out;
1541 }
1542
fcb0c4b0 1543 value = !!value;
0e9d4ca4 1544 if (value == hba->clk_scaling.is_enabled)
fcb0c4b0
ST
1545 goto out;
1546
b294ff3e 1547 ufshcd_rpm_get_sync(hba);
fcb0c4b0
ST
1548 ufshcd_hold(hba, false);
1549
0e9d4ca4 1550 hba->clk_scaling.is_enabled = value;
401f1e44 1551
fcb0c4b0
ST
1552 if (value) {
1553 ufshcd_resume_clkscaling(hba);
1554 } else {
1555 ufshcd_suspend_clkscaling(hba);
a3cd5ec5 1556 err = ufshcd_devfreq_scale(hba, true);
fcb0c4b0
ST
1557 if (err)
1558 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1559 __func__, err);
1560 }
fcb0c4b0
ST
1561
1562 ufshcd_release(hba);
b294ff3e 1563 ufshcd_rpm_put_sync(hba);
fcb0c4b0 1564out:
9cd20d3f
CG
1565 up(&hba->host_sem);
1566 return err ? err : count;
a508253d
GB
1567}
1568
4543d9d7 1569static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
a3cd5ec5 1570{
1571 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1572 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1573 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1574 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1575 hba->clk_scaling.enable_attr.attr.mode = 0644;
1576 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1577 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1578}
1579
4543d9d7
CG
1580static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1581{
1582 if (hba->clk_scaling.enable_attr.attr.name)
1583 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1584}
1585
1586static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1587{
1588 char wq_name[sizeof("ufs_clkscaling_00")];
1589
1590 if (!ufshcd_is_clkscaling_supported(hba))
1591 return;
1592
80d892f4
CG
1593 if (!hba->clk_scaling.min_gear)
1594 hba->clk_scaling.min_gear = UFS_HS_G1;
1595
4543d9d7
CG
1596 INIT_WORK(&hba->clk_scaling.suspend_work,
1597 ufshcd_clk_scaling_suspend_work);
1598 INIT_WORK(&hba->clk_scaling.resume_work,
1599 ufshcd_clk_scaling_resume_work);
1600
1601 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1602 hba->host->host_no);
1603 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1604
1605 hba->clk_scaling.is_initialized = true;
1606}
1607
1608static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1609{
1610 if (!hba->clk_scaling.is_initialized)
1611 return;
1612
1613 ufshcd_remove_clk_scaling_sysfs(hba);
1614 destroy_workqueue(hba->clk_scaling.workq);
1615 ufshcd_devfreq_remove(hba);
1616 hba->clk_scaling.is_initialized = false;
1617}
1618
1ab27c9c
ST
1619static void ufshcd_ungate_work(struct work_struct *work)
1620{
1621 int ret;
1622 unsigned long flags;
1623 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1624 clk_gating.ungate_work);
1625
1626 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1627
1628 spin_lock_irqsave(hba->host->host_lock, flags);
1629 if (hba->clk_gating.state == CLKS_ON) {
1630 spin_unlock_irqrestore(hba->host->host_lock, flags);
1631 goto unblock_reqs;
1632 }
1633
1634 spin_unlock_irqrestore(hba->host->host_lock, flags);
dd7143e2 1635 ufshcd_hba_vreg_set_hpm(hba);
1ab27c9c
ST
1636 ufshcd_setup_clocks(hba, true);
1637
8b0bbf00
SC
1638 ufshcd_enable_irq(hba);
1639
1ab27c9c
ST
1640 /* Exit from hibern8 */
1641 if (ufshcd_can_hibern8_during_gating(hba)) {
1642 /* Prevent gating in this path */
1643 hba->clk_gating.is_suspended = true;
1644 if (ufshcd_is_link_hibern8(hba)) {
1645 ret = ufshcd_uic_hibern8_exit(hba);
1646 if (ret)
1647 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1648 __func__, ret);
1649 else
1650 ufshcd_set_link_active(hba);
1651 }
1652 hba->clk_gating.is_suspended = false;
1653 }
1654unblock_reqs:
38135535 1655 ufshcd_scsi_unblock_requests(hba);
1ab27c9c
ST
1656}
1657
1658/**
1659 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1660 * Also, exit from hibern8 mode and set the link as active.
1661 * @hba: per adapter instance
1662 * @async: This indicates whether caller should ungate clocks asynchronously.
1663 */
1664int ufshcd_hold(struct ufs_hba *hba, bool async)
1665{
1666 int rc = 0;
93b6c5db 1667 bool flush_result;
1ab27c9c
ST
1668 unsigned long flags;
1669
3489c34b
BVA
1670 if (!ufshcd_is_clkgating_allowed(hba) ||
1671 !hba->clk_gating.is_initialized)
1ab27c9c 1672 goto out;
1ab27c9c
ST
1673 spin_lock_irqsave(hba->host->host_lock, flags);
1674 hba->clk_gating.active_reqs++;
1675
856b3483 1676start:
1ab27c9c
ST
1677 switch (hba->clk_gating.state) {
1678 case CLKS_ON:
f2a785ac
VG
1679 /*
1680 * Wait for the ungate work to complete if in progress.
1681 * Though the clocks may be in ON state, the link could
1682 * still be in hibner8 state if hibern8 is allowed
1683 * during clock gating.
1684 * Make sure we exit hibern8 state also in addition to
1685 * clocks being ON.
1686 */
1687 if (ufshcd_can_hibern8_during_gating(hba) &&
1688 ufshcd_is_link_hibern8(hba)) {
c63d6099
CG
1689 if (async) {
1690 rc = -EAGAIN;
1691 hba->clk_gating.active_reqs--;
1692 break;
1693 }
f2a785ac 1694 spin_unlock_irqrestore(hba->host->host_lock, flags);
93b6c5db
SC
1695 flush_result = flush_work(&hba->clk_gating.ungate_work);
1696 if (hba->clk_gating.is_suspended && !flush_result)
1697 goto out;
f2a785ac
VG
1698 spin_lock_irqsave(hba->host->host_lock, flags);
1699 goto start;
1700 }
1ab27c9c
ST
1701 break;
1702 case REQ_CLKS_OFF:
1703 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1704 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1705 trace_ufshcd_clk_gating(dev_name(hba->dev),
1706 hba->clk_gating.state);
1ab27c9c
ST
1707 break;
1708 }
1709 /*
9c490d2d 1710 * If we are here, it means gating work is either done or
1ab27c9c
ST
1711 * currently running. Hence, fall through to cancel gating
1712 * work and to enable clocks.
1713 */
df561f66 1714 fallthrough;
1ab27c9c 1715 case CLKS_OFF:
1ab27c9c 1716 hba->clk_gating.state = REQ_CLKS_ON;
7ff5ab47 1717 trace_ufshcd_clk_gating(dev_name(hba->dev),
1718 hba->clk_gating.state);
da3fecb0
CG
1719 if (queue_work(hba->clk_gating.clk_gating_workq,
1720 &hba->clk_gating.ungate_work))
1721 ufshcd_scsi_block_requests(hba);
1ab27c9c
ST
1722 /*
1723 * fall through to check if we should wait for this
1724 * work to be done or not.
1725 */
df561f66 1726 fallthrough;
1ab27c9c
ST
1727 case REQ_CLKS_ON:
1728 if (async) {
1729 rc = -EAGAIN;
1730 hba->clk_gating.active_reqs--;
1731 break;
1732 }
1733
1734 spin_unlock_irqrestore(hba->host->host_lock, flags);
1735 flush_work(&hba->clk_gating.ungate_work);
1736 /* Make sure state is CLKS_ON before returning */
856b3483 1737 spin_lock_irqsave(hba->host->host_lock, flags);
1ab27c9c
ST
1738 goto start;
1739 default:
1740 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1741 __func__, hba->clk_gating.state);
1742 break;
1743 }
1744 spin_unlock_irqrestore(hba->host->host_lock, flags);
1745out:
1746 return rc;
1747}
6e3fd44d 1748EXPORT_SYMBOL_GPL(ufshcd_hold);
1ab27c9c
ST
1749
1750static void ufshcd_gate_work(struct work_struct *work)
1751{
1752 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1753 clk_gating.gate_work.work);
1754 unsigned long flags;
4db7a236 1755 int ret;
1ab27c9c
ST
1756
1757 spin_lock_irqsave(hba->host->host_lock, flags);
3f0c06de
VG
1758 /*
1759 * In case you are here to cancel this work the gating state
1760 * would be marked as REQ_CLKS_ON. In this case save time by
1761 * skipping the gating work and exit after changing the clock
1762 * state to CLKS_ON.
1763 */
1764 if (hba->clk_gating.is_suspended ||
18f01374 1765 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1ab27c9c 1766 hba->clk_gating.state = CLKS_ON;
7ff5ab47 1767 trace_ufshcd_clk_gating(dev_name(hba->dev),
1768 hba->clk_gating.state);
1ab27c9c
ST
1769 goto rel_lock;
1770 }
1771
1772 if (hba->clk_gating.active_reqs
1773 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
bd0b3538 1774 || hba->outstanding_reqs || hba->outstanding_tasks
1ab27c9c
ST
1775 || hba->active_uic_cmd || hba->uic_async_done)
1776 goto rel_lock;
1777
1778 spin_unlock_irqrestore(hba->host->host_lock, flags);
1779
1780 /* put the link into hibern8 mode before turning off clocks */
1781 if (ufshcd_can_hibern8_during_gating(hba)) {
4db7a236
CG
1782 ret = ufshcd_uic_hibern8_enter(hba);
1783 if (ret) {
1ab27c9c 1784 hba->clk_gating.state = CLKS_ON;
4db7a236
CG
1785 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1786 __func__, ret);
7ff5ab47 1787 trace_ufshcd_clk_gating(dev_name(hba->dev),
1788 hba->clk_gating.state);
1ab27c9c
ST
1789 goto out;
1790 }
1791 ufshcd_set_link_hibern8(hba);
1792 }
1793
8b0bbf00
SC
1794 ufshcd_disable_irq(hba);
1795
81309c24 1796 ufshcd_setup_clocks(hba, false);
1ab27c9c 1797
dd7143e2
CG
1798 /* Put the host controller in low power mode if possible */
1799 ufshcd_hba_vreg_set_lpm(hba);
1ab27c9c
ST
1800 /*
1801 * In case you are here to cancel this work the gating state
1802 * would be marked as REQ_CLKS_ON. In this case keep the state
1803 * as REQ_CLKS_ON which would anyway imply that clocks are off
1804 * and a request to turn them on is pending. By doing this way,
1805 * we keep the state machine in tact and this would ultimately
1806 * prevent from doing cancel work multiple times when there are
1807 * new requests arriving before the current cancel work is done.
1808 */
1809 spin_lock_irqsave(hba->host->host_lock, flags);
7ff5ab47 1810 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1ab27c9c 1811 hba->clk_gating.state = CLKS_OFF;
7ff5ab47 1812 trace_ufshcd_clk_gating(dev_name(hba->dev),
1813 hba->clk_gating.state);
1814 }
1ab27c9c
ST
1815rel_lock:
1816 spin_unlock_irqrestore(hba->host->host_lock, flags);
1817out:
1818 return;
1819}
1820
1821/* host lock must be held before calling this variant */
1822static void __ufshcd_release(struct ufs_hba *hba)
1823{
1824 if (!ufshcd_is_clkgating_allowed(hba))
1825 return;
1826
1827 hba->clk_gating.active_reqs--;
1828
4db7a236
CG
1829 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1830 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
3489c34b 1831 hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
fd62de11
JK
1832 hba->active_uic_cmd || hba->uic_async_done ||
1833 hba->clk_gating.state == CLKS_OFF)
1ab27c9c
ST
1834 return;
1835
1836 hba->clk_gating.state = REQ_CLKS_OFF;
7ff5ab47 1837 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
f4bb7704
EG
1838 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1839 &hba->clk_gating.gate_work,
1840 msecs_to_jiffies(hba->clk_gating.delay_ms));
1ab27c9c
ST
1841}
1842
1843void ufshcd_release(struct ufs_hba *hba)
1844{
1845 unsigned long flags;
1846
1847 spin_lock_irqsave(hba->host->host_lock, flags);
1848 __ufshcd_release(hba);
1849 spin_unlock_irqrestore(hba->host->host_lock, flags);
1850}
6e3fd44d 1851EXPORT_SYMBOL_GPL(ufshcd_release);
1ab27c9c
ST
1852
1853static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1854 struct device_attribute *attr, char *buf)
1855{
1856 struct ufs_hba *hba = dev_get_drvdata(dev);
1857
bafd09f8 1858 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1ab27c9c
ST
1859}
1860
1861static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1862 struct device_attribute *attr, const char *buf, size_t count)
1863{
1864 struct ufs_hba *hba = dev_get_drvdata(dev);
1865 unsigned long flags, value;
1866
1867 if (kstrtoul(buf, 0, &value))
1868 return -EINVAL;
1869
1870 spin_lock_irqsave(hba->host->host_lock, flags);
1871 hba->clk_gating.delay_ms = value;
1872 spin_unlock_irqrestore(hba->host->host_lock, flags);
1873 return count;
1874}
1875
b427411a
ST
1876static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1877 struct device_attribute *attr, char *buf)
1878{
1879 struct ufs_hba *hba = dev_get_drvdata(dev);
1880
bafd09f8 1881 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
b427411a
ST
1882}
1883
1884static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1885 struct device_attribute *attr, const char *buf, size_t count)
1886{
1887 struct ufs_hba *hba = dev_get_drvdata(dev);
1888 unsigned long flags;
1889 u32 value;
1890
1891 if (kstrtou32(buf, 0, &value))
1892 return -EINVAL;
1893
1894 value = !!value;
b6645112
JK
1895
1896 spin_lock_irqsave(hba->host->host_lock, flags);
b427411a
ST
1897 if (value == hba->clk_gating.is_enabled)
1898 goto out;
1899
b6645112
JK
1900 if (value)
1901 __ufshcd_release(hba);
1902 else
b427411a 1903 hba->clk_gating.active_reqs++;
b427411a
ST
1904
1905 hba->clk_gating.is_enabled = value;
1906out:
b6645112 1907 spin_unlock_irqrestore(hba->host->host_lock, flags);
b427411a
ST
1908 return count;
1909}
1910
4543d9d7 1911static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
eebcc196 1912{
4543d9d7
CG
1913 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1914 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1915 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1916 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1917 hba->clk_gating.delay_attr.attr.mode = 0644;
1918 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1919 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
eebcc196 1920
4543d9d7
CG
1921 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1922 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1923 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1924 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1925 hba->clk_gating.enable_attr.attr.mode = 0644;
1926 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1927 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
eebcc196
VG
1928}
1929
4543d9d7 1930static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
eebcc196 1931{
4543d9d7
CG
1932 if (hba->clk_gating.delay_attr.attr.name)
1933 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1934 if (hba->clk_gating.enable_attr.attr.name)
1935 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
eebcc196
VG
1936}
1937
1ab27c9c
ST
1938static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1939{
10e5e375
VV
1940 char wq_name[sizeof("ufs_clk_gating_00")];
1941
1ab27c9c
ST
1942 if (!ufshcd_is_clkgating_allowed(hba))
1943 return;
1944
2dec9475
CG
1945 hba->clk_gating.state = CLKS_ON;
1946
1ab27c9c
ST
1947 hba->clk_gating.delay_ms = 150;
1948 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1949 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1950
10e5e375
VV
1951 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1952 hba->host->host_no);
1953 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
e93e6e49 1954 WQ_MEM_RECLAIM | WQ_HIGHPRI);
10e5e375 1955
4543d9d7 1956 ufshcd_init_clk_gating_sysfs(hba);
b427411a 1957
4543d9d7
CG
1958 hba->clk_gating.is_enabled = true;
1959 hba->clk_gating.is_initialized = true;
1ab27c9c
ST
1960}
1961
1962static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1963{
4543d9d7 1964 if (!hba->clk_gating.is_initialized)
1ab27c9c 1965 return;
3489c34b 1966
4543d9d7 1967 ufshcd_remove_clk_gating_sysfs(hba);
3489c34b
BVA
1968
1969 /* Ungate the clock if necessary. */
1970 ufshcd_hold(hba, false);
4543d9d7 1971 hba->clk_gating.is_initialized = false;
3489c34b
BVA
1972 ufshcd_release(hba);
1973
1974 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1ab27c9c
ST
1975}
1976
856b3483
ST
1977/* Must be called with host lock acquired */
1978static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1979{
401f1e44 1980 bool queue_resume_work = false;
b1bf66d1 1981 ktime_t curr_t = ktime_get();
a45f9371 1982 unsigned long flags;
401f1e44 1983
fcb0c4b0 1984 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1985 return;
1986
a45f9371 1987 spin_lock_irqsave(hba->host->host_lock, flags);
401f1e44 1988 if (!hba->clk_scaling.active_reqs++)
1989 queue_resume_work = true;
1990
a45f9371
CG
1991 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
1992 spin_unlock_irqrestore(hba->host->host_lock, flags);
401f1e44 1993 return;
a45f9371 1994 }
401f1e44 1995
1996 if (queue_resume_work)
1997 queue_work(hba->clk_scaling.workq,
1998 &hba->clk_scaling.resume_work);
1999
2000 if (!hba->clk_scaling.window_start_t) {
b1bf66d1 2001 hba->clk_scaling.window_start_t = curr_t;
401f1e44 2002 hba->clk_scaling.tot_busy_t = 0;
2003 hba->clk_scaling.is_busy_started = false;
2004 }
2005
856b3483 2006 if (!hba->clk_scaling.is_busy_started) {
b1bf66d1 2007 hba->clk_scaling.busy_start_t = curr_t;
856b3483
ST
2008 hba->clk_scaling.is_busy_started = true;
2009 }
a45f9371 2010 spin_unlock_irqrestore(hba->host->host_lock, flags);
856b3483
ST
2011}
2012
2013static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2014{
2015 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
a45f9371 2016 unsigned long flags;
856b3483 2017
fcb0c4b0 2018 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
2019 return;
2020
a45f9371
CG
2021 spin_lock_irqsave(hba->host->host_lock, flags);
2022 hba->clk_scaling.active_reqs--;
856b3483
ST
2023 if (!hba->outstanding_reqs && scaling->is_busy_started) {
2024 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2025 scaling->busy_start_t));
8b0e1953 2026 scaling->busy_start_t = 0;
856b3483
ST
2027 scaling->is_busy_started = false;
2028 }
a45f9371 2029 spin_unlock_irqrestore(hba->host->host_lock, flags);
856b3483 2030}
1d8613a2
CG
2031
2032static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2033{
2034 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2035 return READ;
2036 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2037 return WRITE;
2038 else
2039 return -EINVAL;
2040}
2041
2042static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2043 struct ufshcd_lrb *lrbp)
2044{
2045 struct ufs_hba_monitor *m = &hba->monitor;
2046
2047 return (m->enabled && lrbp && lrbp->cmd &&
2048 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2049 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2050}
2051
2052static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2053{
2054 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
a45f9371 2055 unsigned long flags;
1d8613a2 2056
a45f9371 2057 spin_lock_irqsave(hba->host->host_lock, flags);
1d8613a2
CG
2058 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2059 hba->monitor.busy_start_ts[dir] = ktime_get();
a45f9371 2060 spin_unlock_irqrestore(hba->host->host_lock, flags);
1d8613a2
CG
2061}
2062
2063static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2064{
2065 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
a45f9371 2066 unsigned long flags;
1d8613a2 2067
a45f9371 2068 spin_lock_irqsave(hba->host->host_lock, flags);
1d8613a2 2069 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
3f2c1002 2070 struct request *req = scsi_cmd_to_rq(lrbp->cmd);
1d8613a2
CG
2071 struct ufs_hba_monitor *m = &hba->monitor;
2072 ktime_t now, inc, lat;
2073
2074 now = lrbp->compl_time_stamp;
2075 inc = ktime_sub(now, m->busy_start_ts[dir]);
2076 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2077 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2078
2079 /* Update latencies */
2080 m->nr_req[dir]++;
2081 lat = ktime_sub(now, lrbp->issue_time_stamp);
2082 m->lat_sum[dir] += lat;
2083 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2084 m->lat_max[dir] = lat;
2085 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2086 m->lat_min[dir] = lat;
2087
2088 m->nr_queued[dir]--;
2089 /* Push forward the busy start of monitor */
2090 m->busy_start_ts[dir] = now;
2091 }
a45f9371 2092 spin_unlock_irqrestore(hba->host->host_lock, flags);
856b3483 2093}
1d8613a2 2094
7a3e97b0
SY
2095/**
2096 * ufshcd_send_command - Send SCSI or device management commands
2097 * @hba: per adapter instance
2098 * @task_tag: Task tag of the command
2099 */
2100static inline
2101void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2102{
6edfdcfe 2103 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
1f522c50 2104 unsigned long flags;
6edfdcfe
SC
2105
2106 lrbp->issue_time_stamp = ktime_get();
2107 lrbp->compl_time_stamp = ktime_set(0, 0);
28fa68fc 2108 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
856b3483 2109 ufshcd_clk_scaling_start_busy(hba);
1d8613a2
CG
2110 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2111 ufshcd_start_monitor(hba, lrbp);
169f5eb2
BVA
2112
2113 spin_lock_irqsave(&hba->outstanding_lock, flags);
a024ad0d
BVA
2114 if (hba->vops && hba->vops->setup_xfer_req)
2115 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
169f5eb2 2116 __set_bit(task_tag, &hba->outstanding_reqs);
1f522c50 2117 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
169f5eb2
BVA
2118 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2119
ad1a1b9c
GB
2120 /* Make sure that doorbell is committed immediately */
2121 wmb();
7a3e97b0
SY
2122}
2123
2124/**
2125 * ufshcd_copy_sense_data - Copy sense data in case of check condition
8aa29f19 2126 * @lrbp: pointer to local reference block
7a3e97b0
SY
2127 */
2128static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2129{
2130 int len;
1c2623c5
SJ
2131 if (lrbp->sense_buffer &&
2132 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
e3ce73d6
YG
2133 int len_to_copy;
2134
5a0b0cb9 2135 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
09a5a24f 2136 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
e3ce73d6 2137
09a5a24f
AA
2138 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2139 len_to_copy);
7a3e97b0
SY
2140 }
2141}
2142
68078d5c
DR
2143/**
2144 * ufshcd_copy_query_response() - Copy the Query Response and the data
2145 * descriptor
2146 * @hba: per adapter instance
8aa29f19 2147 * @lrbp: pointer to local reference block
68078d5c
DR
2148 */
2149static
c6d4a831 2150int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
68078d5c
DR
2151{
2152 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2153
68078d5c 2154 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
68078d5c 2155
68078d5c 2156 /* Get the descriptor */
1c90836f
AA
2157 if (hba->dev_cmd.query.descriptor &&
2158 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
d44a5f98 2159 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
68078d5c 2160 GENERAL_UPIU_REQUEST_SIZE;
c6d4a831
DR
2161 u16 resp_len;
2162 u16 buf_len;
68078d5c
DR
2163
2164 /* data segment length */
c6d4a831 2165 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
68078d5c 2166 MASK_QUERY_DATA_SEG_LEN;
ea2aab24
SRT
2167 buf_len = be16_to_cpu(
2168 hba->dev_cmd.query.request.upiu_req.length);
c6d4a831
DR
2169 if (likely(buf_len >= resp_len)) {
2170 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2171 } else {
2172 dev_warn(hba->dev,
3d4881d1
BH
2173 "%s: rsp size %d is bigger than buffer size %d",
2174 __func__, resp_len, buf_len);
c6d4a831
DR
2175 return -EINVAL;
2176 }
68078d5c 2177 }
c6d4a831
DR
2178
2179 return 0;
68078d5c
DR
2180}
2181
7a3e97b0
SY
2182/**
2183 * ufshcd_hba_capabilities - Read controller capabilities
2184 * @hba: per adapter instance
df043c74
ST
2185 *
2186 * Return: 0 on success, negative on error.
7a3e97b0 2187 */
df043c74 2188static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
7a3e97b0 2189{
df043c74
ST
2190 int err;
2191
b873a275 2192 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
7a3e97b0
SY
2193
2194 /* nutrs and nutmrs are 0 based values */
2195 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2196 hba->nutmrs =
2197 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
945c3cca 2198 hba->reserved_slot = hba->nutrs - 1;
df043c74
ST
2199
2200 /* Read crypto capabilities */
2201 err = ufshcd_hba_init_crypto_capabilities(hba);
2202 if (err)
2203 dev_err(hba->dev, "crypto setup failed\n");
2204
2205 return err;
7a3e97b0
SY
2206}
2207
2208/**
6ccf44fe
SJ
2209 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2210 * to accept UIC commands
7a3e97b0 2211 * @hba: per adapter instance
6ccf44fe
SJ
2212 * Return true on success, else false
2213 */
2214static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2215{
a858af9a 2216 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
6ccf44fe
SJ
2217}
2218
53b3d9c3
SJ
2219/**
2220 * ufshcd_get_upmcrs - Get the power mode change request status
2221 * @hba: Pointer to adapter instance
2222 *
2223 * This function gets the UPMCRS field of HCS register
2224 * Returns value of UPMCRS field
2225 */
2226static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2227{
2228 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2229}
2230
6ccf44fe 2231/**
35c7d874 2232 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
6ccf44fe
SJ
2233 * @hba: per adapter instance
2234 * @uic_cmd: UIC command
7a3e97b0
SY
2235 */
2236static inline void
6ccf44fe 2237ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
7a3e97b0 2238{
35c7d874
BVA
2239 lockdep_assert_held(&hba->uic_cmd_mutex);
2240
6ccf44fe
SJ
2241 WARN_ON(hba->active_uic_cmd);
2242
2243 hba->active_uic_cmd = uic_cmd;
2244
7a3e97b0 2245 /* Write Args */
6ccf44fe
SJ
2246 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2247 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2248 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
7a3e97b0 2249
28fa68fc 2250 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
aa5c6979 2251
7a3e97b0 2252 /* Write UIC Cmd */
6ccf44fe 2253 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
b873a275 2254 REG_UIC_COMMAND);
7a3e97b0
SY
2255}
2256
6ccf44fe 2257/**
35c7d874 2258 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
6ccf44fe 2259 * @hba: per adapter instance
8aa29f19 2260 * @uic_cmd: UIC command
6ccf44fe 2261 *
6ccf44fe
SJ
2262 * Returns 0 only if success.
2263 */
2264static int
2265ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2266{
2267 int ret;
2268 unsigned long flags;
2269
35c7d874
BVA
2270 lockdep_assert_held(&hba->uic_cmd_mutex);
2271
6ccf44fe 2272 if (wait_for_completion_timeout(&uic_cmd->done,
0f52fcb9 2273 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
6ccf44fe 2274 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
0f52fcb9 2275 } else {
6ccf44fe 2276 ret = -ETIMEDOUT;
0f52fcb9
CG
2277 dev_err(hba->dev,
2278 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2279 uic_cmd->command, uic_cmd->argument3);
2280
2281 if (!uic_cmd->cmd_active) {
2282 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2283 __func__);
2284 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2285 }
2286 }
6ccf44fe
SJ
2287
2288 spin_lock_irqsave(hba->host->host_lock, flags);
2289 hba->active_uic_cmd = NULL;
2290 spin_unlock_irqrestore(hba->host->host_lock, flags);
2291
2292 return ret;
2293}
2294
2295/**
2296 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2297 * @hba: per adapter instance
2298 * @uic_cmd: UIC command
d75f7fe4 2299 * @completion: initialize the completion only if this is set to true
6ccf44fe 2300 *
6ccf44fe
SJ
2301 * Returns 0 only if success.
2302 */
2303static int
d75f7fe4
YG
2304__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2305 bool completion)
6ccf44fe 2306{
35c7d874
BVA
2307 lockdep_assert_held(&hba->uic_cmd_mutex);
2308 lockdep_assert_held(hba->host->host_lock);
2309
6ccf44fe
SJ
2310 if (!ufshcd_ready_for_uic_cmd(hba)) {
2311 dev_err(hba->dev,
2312 "Controller not ready to accept UIC commands\n");
2313 return -EIO;
2314 }
2315
d75f7fe4
YG
2316 if (completion)
2317 init_completion(&uic_cmd->done);
6ccf44fe 2318
0f52fcb9 2319 uic_cmd->cmd_active = 1;
6ccf44fe 2320 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
6ccf44fe 2321
57d104c1 2322 return 0;
6ccf44fe
SJ
2323}
2324
2325/**
2326 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2327 * @hba: per adapter instance
2328 * @uic_cmd: UIC command
2329 *
2330 * Returns 0 only if success.
2331 */
e77044c5 2332int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
6ccf44fe
SJ
2333{
2334 int ret;
57d104c1 2335 unsigned long flags;
6ccf44fe 2336
a22bcfdb 2337 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2338 return 0;
2339
1ab27c9c 2340 ufshcd_hold(hba, false);
6ccf44fe 2341 mutex_lock(&hba->uic_cmd_mutex);
cad2e03d
YG
2342 ufshcd_add_delay_before_dme_cmd(hba);
2343
57d104c1 2344 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 2345 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
57d104c1
SJ
2346 spin_unlock_irqrestore(hba->host->host_lock, flags);
2347 if (!ret)
2348 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2349
6ccf44fe
SJ
2350 mutex_unlock(&hba->uic_cmd_mutex);
2351
1ab27c9c 2352 ufshcd_release(hba);
6ccf44fe
SJ
2353 return ret;
2354}
2355
7a3e97b0
SY
2356/**
2357 * ufshcd_map_sg - Map scatter-gather list to prdt
8aa29f19
BVA
2358 * @hba: per adapter instance
2359 * @lrbp: pointer to local reference block
7a3e97b0
SY
2360 *
2361 * Returns 0 in case of success, non-zero value in case of failure
2362 */
75b1cc4a 2363static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0
SY
2364{
2365 struct ufshcd_sg_entry *prd_table;
2366 struct scatterlist *sg;
2367 struct scsi_cmnd *cmd;
2368 int sg_segments;
2369 int i;
2370
2371 cmd = lrbp->cmd;
2372 sg_segments = scsi_dma_map(cmd);
2373 if (sg_segments < 0)
2374 return sg_segments;
2375
2376 if (sg_segments) {
26f968d7
AA
2377
2378 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2379 lrbp->utr_descriptor_ptr->prd_table_length =
2380 cpu_to_le16((sg_segments *
2381 sizeof(struct ufshcd_sg_entry)));
2382 else
2383 lrbp->utr_descriptor_ptr->prd_table_length =
3ad317a1 2384 cpu_to_le16(sg_segments);
7a3e97b0 2385
3ad317a1 2386 prd_table = lrbp->ucd_prdt_ptr;
7a3e97b0
SY
2387
2388 scsi_for_each_sg(cmd, sg, sg_segments, i) {
1ea7d802
BVA
2389 const unsigned int len = sg_dma_len(sg);
2390
2391 /*
2392 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2393 * based value that indicates the length, in bytes, of
2394 * the data block. A maximum of length of 256KB may
2395 * exist for any entry. Bits 1:0 of this field shall be
2396 * 11b to indicate Dword granularity. A value of '3'
2397 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2398 */
2399 WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
2400 prd_table[i].size = cpu_to_le32(len - 1);
2401 prd_table[i].addr = cpu_to_le64(sg->dma_address);
52ac95fe 2402 prd_table[i].reserved = 0;
7a3e97b0
SY
2403 }
2404 } else {
2405 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2406 }
2407
2408 return 0;
2409}
2410
2411/**
2fbd009b 2412 * ufshcd_enable_intr - enable interrupts
7a3e97b0 2413 * @hba: per adapter instance
2fbd009b 2414 * @intrs: interrupt bits
7a3e97b0 2415 */
2fbd009b 2416static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
7a3e97b0 2417{
2fbd009b
SJ
2418 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2419
51428818 2420 if (hba->ufs_version == ufshci_version(1, 0)) {
2fbd009b
SJ
2421 u32 rw;
2422 rw = set & INTERRUPT_MASK_RW_VER_10;
2423 set = rw | ((set ^ intrs) & intrs);
2424 } else {
2425 set |= intrs;
2426 }
2427
2428 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2429}
2430
2431/**
2432 * ufshcd_disable_intr - disable interrupts
2433 * @hba: per adapter instance
2434 * @intrs: interrupt bits
2435 */
2436static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2437{
2438 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2439
51428818 2440 if (hba->ufs_version == ufshci_version(1, 0)) {
2fbd009b
SJ
2441 u32 rw;
2442 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2443 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2444 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2445
2446 } else {
2447 set &= ~intrs;
7a3e97b0 2448 }
2fbd009b
SJ
2449
2450 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
7a3e97b0
SY
2451}
2452
5a0b0cb9
SRT
2453/**
2454 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2455 * descriptor according to request
2456 * @lrbp: pointer to local reference block
2457 * @upiu_flags: flags required in the header
2458 * @cmd_dir: requests data direction
2459 */
2460static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
a23064c4 2461 u8 *upiu_flags, enum dma_data_direction cmd_dir)
5a0b0cb9
SRT
2462{
2463 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2464 u32 data_direction;
2465 u32 dword_0;
df043c74
ST
2466 u32 dword_1 = 0;
2467 u32 dword_3 = 0;
5a0b0cb9
SRT
2468
2469 if (cmd_dir == DMA_FROM_DEVICE) {
2470 data_direction = UTP_DEVICE_TO_HOST;
2471 *upiu_flags = UPIU_CMD_FLAGS_READ;
2472 } else if (cmd_dir == DMA_TO_DEVICE) {
2473 data_direction = UTP_HOST_TO_DEVICE;
2474 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2475 } else {
2476 data_direction = UTP_NO_DATA_TRANSFER;
2477 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2478 }
2479
2480 dword_0 = data_direction | (lrbp->command_type
2481 << UPIU_COMMAND_TYPE_OFFSET);
2482 if (lrbp->intr_cmd)
2483 dword_0 |= UTP_REQ_DESC_INT_CMD;
2484
df043c74
ST
2485 /* Prepare crypto related dwords */
2486 ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2487
5a0b0cb9
SRT
2488 /* Transfer request descriptor header fields */
2489 req_desc->header.dword_0 = cpu_to_le32(dword_0);
df043c74 2490 req_desc->header.dword_1 = cpu_to_le32(dword_1);
5a0b0cb9
SRT
2491 /*
2492 * assigning invalid value for command status. Controller
2493 * updates OCS on command completion, with the command
2494 * status
2495 */
2496 req_desc->header.dword_2 =
2497 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
df043c74 2498 req_desc->header.dword_3 = cpu_to_le32(dword_3);
51047266
YG
2499
2500 req_desc->prd_table_length = 0;
5a0b0cb9
SRT
2501}
2502
2503/**
2504 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2505 * for scsi commands
8aa29f19
BVA
2506 * @lrbp: local reference block pointer
2507 * @upiu_flags: flags
5a0b0cb9
SRT
2508 */
2509static
a23064c4 2510void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
5a0b0cb9 2511{
1b21b8f0 2512 struct scsi_cmnd *cmd = lrbp->cmd;
5a0b0cb9 2513 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
52ac95fe 2514 unsigned short cdb_len;
5a0b0cb9
SRT
2515
2516 /* command descriptor fields */
2517 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2518 UPIU_TRANSACTION_COMMAND, upiu_flags,
2519 lrbp->lun, lrbp->task_tag);
2520 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2521 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2522
2523 /* Total EHS length and Data segment length will be zero */
2524 ucd_req_ptr->header.dword_2 = 0;
2525
1b21b8f0 2526 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
5a0b0cb9 2527
1b21b8f0 2528 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
a851b2bd 2529 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1b21b8f0 2530 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
52ac95fe
YG
2531
2532 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2533}
2534
68078d5c
DR
2535/**
2536 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2537 * for query requsts
2538 * @hba: UFS hba
2539 * @lrbp: local reference block pointer
2540 * @upiu_flags: flags
2541 */
2542static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
a23064c4 2543 struct ufshcd_lrb *lrbp, u8 upiu_flags)
68078d5c
DR
2544{
2545 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2546 struct ufs_query *query = &hba->dev_cmd.query;
e8c8e82a 2547 u16 len = be16_to_cpu(query->request.upiu_req.length);
68078d5c
DR
2548
2549 /* Query request header */
2550 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2551 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2552 lrbp->lun, lrbp->task_tag);
2553 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2554 0, query->request.query_func, 0, 0);
2555
6861285c
ZL
2556 /* Data segment length only need for WRITE_DESC */
2557 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2558 ucd_req_ptr->header.dword_2 =
2559 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2560 else
2561 ucd_req_ptr->header.dword_2 = 0;
68078d5c
DR
2562
2563 /* Copy the Query Request buffer as is */
2564 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2565 QUERY_OSF_SIZE);
68078d5c
DR
2566
2567 /* Copy the Descriptor */
c6d4a831 2568 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
220d17a6 2569 memcpy(ucd_req_ptr + 1, query->descriptor, len);
c6d4a831 2570
51047266 2571 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
68078d5c
DR
2572}
2573
5a0b0cb9
SRT
2574static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2575{
2576 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2577
2578 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2579
2580 /* command descriptor fields */
2581 ucd_req_ptr->header.dword_0 =
2582 UPIU_HEADER_DWORD(
2583 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
51047266
YG
2584 /* clear rest of the fields of basic header */
2585 ucd_req_ptr->header.dword_1 = 0;
2586 ucd_req_ptr->header.dword_2 = 0;
2587
2588 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2589}
2590
7a3e97b0 2591/**
f273c54b 2592 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
300bb13f 2593 * for Device Management Purposes
8aa29f19
BVA
2594 * @hba: per adapter instance
2595 * @lrbp: pointer to local reference block
7a3e97b0 2596 */
f273c54b
BH
2597static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2598 struct ufshcd_lrb *lrbp)
7a3e97b0 2599{
a23064c4 2600 u8 upiu_flags;
5a0b0cb9 2601 int ret = 0;
7a3e97b0 2602
51428818 2603 if (hba->ufs_version <= ufshci_version(1, 1))
300bb13f 2604 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
83dc7e3d 2605 else
2606 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2607
2608 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2609 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2610 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2611 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2612 ufshcd_prepare_utp_nop_upiu(lrbp);
2613 else
2614 ret = -EINVAL;
2615
2616 return ret;
2617}
2618
2619/**
2620 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
2621 * for SCSI Purposes
8aa29f19
BVA
2622 * @hba: per adapter instance
2623 * @lrbp: pointer to local reference block
300bb13f
JP
2624 */
2625static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2626{
a23064c4 2627 u8 upiu_flags;
300bb13f
JP
2628 int ret = 0;
2629
51428818 2630 if (hba->ufs_version <= ufshci_version(1, 1))
300bb13f 2631 lrbp->command_type = UTP_CMD_TYPE_SCSI;
83dc7e3d 2632 else
2633 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2634
2635 if (likely(lrbp->cmd)) {
2636 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2637 lrbp->cmd->sc_data_direction);
2638 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2639 } else {
2640 ret = -EINVAL;
2641 }
5a0b0cb9
SRT
2642
2643 return ret;
7a3e97b0
SY
2644}
2645
2a8fa600
SJ
2646/**
2647 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
8aa29f19 2648 * @upiu_wlun_id: UPIU W-LUN id
2a8fa600
SJ
2649 *
2650 * Returns SCSI W-LUN id
2651 */
2652static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2653{
2654 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2655}
2656
b294ff3e
AD
2657static inline bool is_device_wlun(struct scsi_device *sdev)
2658{
2659 return sdev->lun ==
2660 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2661}
2662
eaab9b57
BVA
2663/*
2664 * Associate the UFS controller queue with the default and poll HCTX types.
2665 * Initialize the mq_map[] arrays.
2666 */
2667static int ufshcd_map_queues(struct Scsi_Host *shost)
2668{
2669 int i, ret;
2670
2671 for (i = 0; i < shost->nr_maps; i++) {
2672 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2673
2674 switch (i) {
2675 case HCTX_TYPE_DEFAULT:
2676 case HCTX_TYPE_POLL:
2677 map->nr_queues = 1;
2678 break;
2679 case HCTX_TYPE_READ:
2680 map->nr_queues = 0;
10af1156 2681 continue;
eaab9b57
BVA
2682 default:
2683 WARN_ON_ONCE(true);
2684 }
2685 map->queue_offset = 0;
2686 ret = blk_mq_map_queues(map);
2687 WARN_ON_ONCE(ret);
2688 }
2689
2690 return 0;
2691}
2692
4d2b8d40
BVA
2693static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2694{
2695 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2696 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2697 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2698 i * sizeof(struct utp_transfer_cmd_desc);
2699 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2700 response_upiu);
2701 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2702
2703 lrb->utr_descriptor_ptr = utrdlp + i;
2704 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2705 i * sizeof(struct utp_transfer_req_desc);
2706 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2707 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2708 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2709 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
3ad317a1 2710 lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
4d2b8d40
BVA
2711 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2712}
2713
7a3e97b0
SY
2714/**
2715 * ufshcd_queuecommand - main entry point for SCSI requests
8aa29f19 2716 * @host: SCSI host pointer
7a3e97b0 2717 * @cmd: command from SCSI Midlayer
7a3e97b0
SY
2718 *
2719 * Returns 0 for success, non-zero in case of failure
2720 */
2721static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2722{
4728ab4a 2723 struct ufs_hba *hba = shost_priv(host);
3f2c1002 2724 int tag = scsi_cmd_to_rq(cmd)->tag;
7a3e97b0 2725 struct ufshcd_lrb *lrbp;
7a3e97b0
SY
2726 int err = 0;
2727
eaab9b57 2728 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
7a3e97b0 2729
5675c381
BVA
2730 /*
2731 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
2732 * calls.
2733 */
2734 rcu_read_lock();
2735
a45f9371
CG
2736 switch (hba->ufshcd_state) {
2737 case UFSHCD_STATE_OPERATIONAL:
d489f18a 2738 break;
a45f9371 2739 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
d489f18a
AH
2740 /*
2741 * SCSI error handler can call ->queuecommand() while UFS error
2742 * handler is in progress. Error interrupts could change the
2743 * state from UFSHCD_STATE_RESET to
2744 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2745 * being issued in that case.
2746 */
2747 if (ufshcd_eh_in_progress(hba)) {
2748 err = SCSI_MLQUEUE_HOST_BUSY;
2749 goto out;
2750 }
a45f9371
CG
2751 break;
2752 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2753 /*
2754 * pm_runtime_get_sync() is used at error handling preparation
2755 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2756 * PM ops, it can never be finished if we let SCSI layer keep
2757 * retrying it, which gets err handler stuck forever. Neither
2758 * can we let the scsi cmd pass through, because UFS is in bad
2759 * state, the scsi cmd may eventually time out, which will get
2760 * err handler blocked for too long. So, just fail the scsi cmd
2761 * sent from PM ops, err handler can recover PM error anyways.
2762 */
2763 if (hba->pm_op_in_progress) {
2764 hba->force_reset = true;
2765 set_host_byte(cmd, DID_BAD_TARGET);
35c3730a 2766 scsi_done(cmd);
a45f9371
CG
2767 goto out;
2768 }
2769 fallthrough;
2770 case UFSHCD_STATE_RESET:
2771 err = SCSI_MLQUEUE_HOST_BUSY;
2772 goto out;
2773 case UFSHCD_STATE_ERROR:
2774 set_host_byte(cmd, DID_ERROR);
35c3730a 2775 scsi_done(cmd);
a45f9371 2776 goto out;
a45f9371
CG
2777 }
2778
7fabb77b
GB
2779 hba->req_abort_count = 0;
2780
1ab27c9c
ST
2781 err = ufshcd_hold(hba, true);
2782 if (err) {
2783 err = SCSI_MLQUEUE_HOST_BUSY;
1ab27c9c
ST
2784 goto out;
2785 }
2dec9475
CG
2786 WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2787 (hba->clk_gating.state != CLKS_ON));
1ab27c9c 2788
a45f9371 2789 lrbp = &hba->lrb[tag];
5a0b0cb9 2790 WARN_ON(lrbp->cmd);
7a3e97b0 2791 lrbp->cmd = cmd;
7a3e97b0
SY
2792 lrbp->sense_buffer = cmd->sense_buffer;
2793 lrbp->task_tag = tag;
0ce147d4 2794 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
51d1628f 2795 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
df043c74 2796
3f2c1002 2797 ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
df043c74 2798
e0b299e3 2799 lrbp->req_abort_skip = false;
7a3e97b0 2800
09d9e4d0 2801 ufshpb_prep(hba, lrbp);
2fff76f8 2802
300bb13f
JP
2803 ufshcd_comp_scsi_upiu(hba, lrbp);
2804
75b1cc4a 2805 err = ufshcd_map_sg(hba, lrbp);
5a0b0cb9
SRT
2806 if (err) {
2807 lrbp->cmd = NULL;
17c7d35f 2808 ufshcd_release(hba);
7a3e97b0 2809 goto out;
5a0b0cb9 2810 }
7a3e97b0 2811
7a3e97b0 2812 ufshcd_send_command(hba, tag);
5675c381 2813
7a3e97b0 2814out:
5675c381
BVA
2815 rcu_read_unlock();
2816
88b09900
AH
2817 if (ufs_trigger_eh()) {
2818 unsigned long flags;
2819
2820 spin_lock_irqsave(hba->host->host_lock, flags);
2821 ufshcd_schedule_eh_work(hba);
2822 spin_unlock_irqrestore(hba->host->host_lock, flags);
2823 }
c11a1ae9 2824
7a3e97b0
SY
2825 return err;
2826}
2827
5a0b0cb9
SRT
2828static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2829 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2830{
2831 lrbp->cmd = NULL;
5a0b0cb9
SRT
2832 lrbp->sense_buffer = NULL;
2833 lrbp->task_tag = tag;
2834 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
5a0b0cb9 2835 lrbp->intr_cmd = true; /* No interrupt aggregation */
df043c74 2836 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
5a0b0cb9
SRT
2837 hba->dev_cmd.type = cmd_type;
2838
f273c54b 2839 return ufshcd_compose_devman_upiu(hba, lrbp);
5a0b0cb9
SRT
2840}
2841
2842static int
2843ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2844{
2845 int err = 0;
2846 unsigned long flags;
2847 u32 mask = 1 << tag;
2848
2849 /* clear outstanding transaction before retry */
2850 spin_lock_irqsave(hba->host->host_lock, flags);
2851 ufshcd_utrl_clear(hba, tag);
2852 spin_unlock_irqrestore(hba->host->host_lock, flags);
2853
2854 /*
32424902 2855 * wait for h/w to clear corresponding bit in door-bell.
5a0b0cb9
SRT
2856 * max. wait is 1 sec.
2857 */
2858 err = ufshcd_wait_for_register(hba,
2859 REG_UTP_TRANSFER_REQ_DOOR_BELL,
5cac1095 2860 mask, ~mask, 1000, 1000);
5a0b0cb9
SRT
2861
2862 return err;
2863}
2864
c6d4a831
DR
2865static int
2866ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2867{
2868 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2869
2870 /* Get the UPIU response */
2871 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2872 UPIU_RSP_CODE_OFFSET;
2873 return query_res->response;
2874}
2875
5a0b0cb9
SRT
2876/**
2877 * ufshcd_dev_cmd_completion() - handles device management command responses
2878 * @hba: per adapter instance
2879 * @lrbp: pointer to local reference block
2880 */
2881static int
2882ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2883{
2884 int resp;
2885 int err = 0;
2886
ff8e20c6 2887 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
2888 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2889
2890 switch (resp) {
2891 case UPIU_TRANSACTION_NOP_IN:
2892 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2893 err = -EINVAL;
2894 dev_err(hba->dev, "%s: unexpected response %x\n",
2895 __func__, resp);
2896 }
2897 break;
68078d5c 2898 case UPIU_TRANSACTION_QUERY_RSP:
c6d4a831
DR
2899 err = ufshcd_check_query_response(hba, lrbp);
2900 if (!err)
2901 err = ufshcd_copy_query_response(hba, lrbp);
68078d5c 2902 break;
5a0b0cb9
SRT
2903 case UPIU_TRANSACTION_REJECT_UPIU:
2904 /* TODO: handle Reject UPIU Response */
2905 err = -EPERM;
2906 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2907 __func__);
2908 break;
2909 default:
2910 err = -EINVAL;
2911 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2912 __func__, resp);
2913 break;
2914 }
2915
2916 return err;
2917}
2918
2919static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2920 struct ufshcd_lrb *lrbp, int max_timeout)
2921{
2922 int err = 0;
2923 unsigned long time_left;
2924 unsigned long flags;
2925
2926 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2927 msecs_to_jiffies(max_timeout));
2928
2929 spin_lock_irqsave(hba->host->host_lock, flags);
2930 hba->dev_cmd.complete = NULL;
2931 if (likely(time_left)) {
2932 err = ufshcd_get_tr_ocs(lrbp);
2933 if (!err)
2934 err = ufshcd_dev_cmd_completion(hba, lrbp);
2935 }
2936 spin_unlock_irqrestore(hba->host->host_lock, flags);
2937
2938 if (!time_left) {
2939 err = -ETIMEDOUT;
a48353f6
YG
2940 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2941 __func__, lrbp->task_tag);
5a0b0cb9 2942 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
a48353f6 2943 /* successfully cleared the command, retry if needed */
5a0b0cb9 2944 err = -EAGAIN;
a48353f6
YG
2945 /*
2946 * in case of an error, after clearing the doorbell,
2947 * we also need to clear the outstanding_request
2948 * field in hba
2949 */
169f5eb2
BVA
2950 spin_lock_irqsave(&hba->outstanding_lock, flags);
2951 __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
2952 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5a0b0cb9
SRT
2953 }
2954
2955 return err;
2956}
2957
5a0b0cb9
SRT
2958/**
2959 * ufshcd_exec_dev_cmd - API for sending device management requests
8aa29f19
BVA
2960 * @hba: UFS hba
2961 * @cmd_type: specifies the type (NOP, Query...)
d0b2b70e 2962 * @timeout: timeout in milliseconds
5a0b0cb9 2963 *
68078d5c
DR
2964 * NOTE: Since there is only one available tag for device management commands,
2965 * it is expected you hold the hba->dev_cmd.lock mutex.
5a0b0cb9
SRT
2966 */
2967static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2968 enum dev_cmd_type cmd_type, int timeout)
2969{
8a686f26 2970 DECLARE_COMPLETION_ONSTACK(wait);
945c3cca 2971 const u32 tag = hba->reserved_slot;
5a0b0cb9
SRT
2972 struct ufshcd_lrb *lrbp;
2973 int err;
5a0b0cb9 2974
945c3cca
BVA
2975 /* Protects use of hba->reserved_slot. */
2976 lockdep_assert_held(&hba->dev_cmd.lock);
a3cd5ec5 2977
945c3cca 2978 down_read(&hba->clk_scaling_lock);
5a0b0cb9 2979
a45f9371 2980 lrbp = &hba->lrb[tag];
5a0b0cb9
SRT
2981 WARN_ON(lrbp->cmd);
2982 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2983 if (unlikely(err))
eb783bb8 2984 goto out;
5a0b0cb9
SRT
2985
2986 hba->dev_cmd.complete = &wait;
2987
fb475b74 2988 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
5a0b0cb9 2989
a45f9371 2990 ufshcd_send_command(hba, tag);
5a0b0cb9 2991 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
fb475b74
AA
2992 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
2993 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
6667e6d9 2994
eb783bb8 2995out:
a3cd5ec5 2996 up_read(&hba->clk_scaling_lock);
5a0b0cb9
SRT
2997 return err;
2998}
2999
d44a5f98
DR
3000/**
3001 * ufshcd_init_query() - init the query response and request parameters
3002 * @hba: per-adapter instance
3003 * @request: address of the request pointer to be initialized
3004 * @response: address of the response pointer to be initialized
3005 * @opcode: operation to perform
3006 * @idn: flag idn to access
3007 * @index: LU number to access
3008 * @selector: query/flag/descriptor further identification
3009 */
3010static inline void ufshcd_init_query(struct ufs_hba *hba,
3011 struct ufs_query_req **request, struct ufs_query_res **response,
3012 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3013{
3014 *request = &hba->dev_cmd.query.request;
3015 *response = &hba->dev_cmd.query.response;
3016 memset(*request, 0, sizeof(struct ufs_query_req));
3017 memset(*response, 0, sizeof(struct ufs_query_res));
3018 (*request)->upiu_req.opcode = opcode;
3019 (*request)->upiu_req.idn = idn;
3020 (*request)->upiu_req.index = index;
3021 (*request)->upiu_req.selector = selector;
3022}
3023
dc3c8d3a 3024static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1f34eedf 3025 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
dc3c8d3a
YG
3026{
3027 int ret;
3028 int retries;
3029
3030 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1f34eedf 3031 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
dc3c8d3a
YG
3032 if (ret)
3033 dev_dbg(hba->dev,
3034 "%s: failed with error %d, retries %d\n",
3035 __func__, ret, retries);
3036 else
3037 break;
3038 }
3039
3040 if (ret)
3041 dev_err(hba->dev,
3042 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
3043 __func__, opcode, idn, ret, retries);
3044 return ret;
3045}
3046
68078d5c
DR
3047/**
3048 * ufshcd_query_flag() - API function for sending flag query requests
8aa29f19
BVA
3049 * @hba: per-adapter instance
3050 * @opcode: flag query to perform
3051 * @idn: flag idn to access
1f34eedf 3052 * @index: flag index to access
8aa29f19 3053 * @flag_res: the flag value after the query request completes
68078d5c
DR
3054 *
3055 * Returns 0 for success, non-zero in case of failure
3056 */
dc3c8d3a 3057int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1f34eedf 3058 enum flag_idn idn, u8 index, bool *flag_res)
68078d5c 3059{
d44a5f98
DR
3060 struct ufs_query_req *request = NULL;
3061 struct ufs_query_res *response = NULL;
1f34eedf 3062 int err, selector = 0;
e5ad406c 3063 int timeout = QUERY_REQ_TIMEOUT;
68078d5c
DR
3064
3065 BUG_ON(!hba);
3066
1ab27c9c 3067 ufshcd_hold(hba, false);
68078d5c 3068 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
3069 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3070 selector);
68078d5c
DR
3071
3072 switch (opcode) {
3073 case UPIU_QUERY_OPCODE_SET_FLAG:
3074 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3075 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3076 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3077 break;
3078 case UPIU_QUERY_OPCODE_READ_FLAG:
3079 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3080 if (!flag_res) {
3081 /* No dummy reads */
3082 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3083 __func__);
3084 err = -EINVAL;
3085 goto out_unlock;
3086 }
3087 break;
3088 default:
3089 dev_err(hba->dev,
3090 "%s: Expected query flag opcode but got = %d\n",
3091 __func__, opcode);
3092 err = -EINVAL;
3093 goto out_unlock;
3094 }
68078d5c 3095
e5ad406c 3096 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
68078d5c
DR
3097
3098 if (err) {
3099 dev_err(hba->dev,
3100 "%s: Sending flag query for idn %d failed, err = %d\n",
3101 __func__, idn, err);
3102 goto out_unlock;
3103 }
3104
3105 if (flag_res)
e8c8e82a 3106 *flag_res = (be32_to_cpu(response->upiu_res.value) &
68078d5c
DR
3107 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3108
3109out_unlock:
3110 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 3111 ufshcd_release(hba);
68078d5c
DR
3112 return err;
3113}
3114
66ec6d59
SRT
3115/**
3116 * ufshcd_query_attr - API function for sending attribute requests
8aa29f19
BVA
3117 * @hba: per-adapter instance
3118 * @opcode: attribute opcode
3119 * @idn: attribute idn to access
3120 * @index: index field
3121 * @selector: selector field
3122 * @attr_val: the attribute value after the query request completes
66ec6d59
SRT
3123 *
3124 * Returns 0 for success, non-zero in case of failure
3125*/
ec92b59c
SN
3126int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3127 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
66ec6d59 3128{
d44a5f98
DR
3129 struct ufs_query_req *request = NULL;
3130 struct ufs_query_res *response = NULL;
66ec6d59
SRT
3131 int err;
3132
3133 BUG_ON(!hba);
3134
3135 if (!attr_val) {
3136 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3137 __func__, opcode);
8ca1a40b 3138 return -EINVAL;
66ec6d59
SRT
3139 }
3140
8ca1a40b 3141 ufshcd_hold(hba, false);
3142
66ec6d59 3143 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
3144 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3145 selector);
66ec6d59
SRT
3146
3147 switch (opcode) {
3148 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3149 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
e8c8e82a 3150 request->upiu_req.value = cpu_to_be32(*attr_val);
66ec6d59
SRT
3151 break;
3152 case UPIU_QUERY_OPCODE_READ_ATTR:
3153 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3154 break;
3155 default:
3156 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3157 __func__, opcode);
3158 err = -EINVAL;
3159 goto out_unlock;
3160 }
3161
d44a5f98 3162 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
66ec6d59
SRT
3163
3164 if (err) {
4b761b58
YG
3165 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3166 __func__, opcode, idn, index, err);
66ec6d59
SRT
3167 goto out_unlock;
3168 }
3169
e8c8e82a 3170 *attr_val = be32_to_cpu(response->upiu_res.value);
66ec6d59
SRT
3171
3172out_unlock:
3173 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 3174 ufshcd_release(hba);
66ec6d59
SRT
3175 return err;
3176}
3177
5e86ae44
YG
3178/**
3179 * ufshcd_query_attr_retry() - API function for sending query
3180 * attribute with retries
3181 * @hba: per-adapter instance
3182 * @opcode: attribute opcode
3183 * @idn: attribute idn to access
3184 * @index: index field
3185 * @selector: selector field
3186 * @attr_val: the attribute value after the query request
3187 * completes
3188 *
3189 * Returns 0 for success, non-zero in case of failure
3190*/
41d8a933 3191int ufshcd_query_attr_retry(struct ufs_hba *hba,
5e86ae44
YG
3192 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3193 u32 *attr_val)
3194{
3195 int ret = 0;
3196 u32 retries;
3197
68c9fcfd 3198 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
5e86ae44
YG
3199 ret = ufshcd_query_attr(hba, opcode, idn, index,
3200 selector, attr_val);
3201 if (ret)
3202 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3203 __func__, ret, retries);
3204 else
3205 break;
3206 }
3207
3208 if (ret)
3209 dev_err(hba->dev,
3210 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
3211 __func__, idn, ret, QUERY_REQ_RETRIES);
3212 return ret;
3213}
3214
a70e91b8 3215static int __ufshcd_query_descriptor(struct ufs_hba *hba,
d44a5f98
DR
3216 enum query_opcode opcode, enum desc_idn idn, u8 index,
3217 u8 selector, u8 *desc_buf, int *buf_len)
3218{
3219 struct ufs_query_req *request = NULL;
3220 struct ufs_query_res *response = NULL;
3221 int err;
3222
3223 BUG_ON(!hba);
3224
3225 if (!desc_buf) {
3226 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3227 __func__, opcode);
8ca1a40b 3228 return -EINVAL;
d44a5f98
DR
3229 }
3230
a4b0e8a4 3231 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
d44a5f98
DR
3232 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3233 __func__, *buf_len);
8ca1a40b 3234 return -EINVAL;
d44a5f98
DR
3235 }
3236
8ca1a40b 3237 ufshcd_hold(hba, false);
3238
d44a5f98
DR
3239 mutex_lock(&hba->dev_cmd.lock);
3240 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3241 selector);
3242 hba->dev_cmd.query.descriptor = desc_buf;
ea2aab24 3243 request->upiu_req.length = cpu_to_be16(*buf_len);
d44a5f98
DR
3244
3245 switch (opcode) {
3246 case UPIU_QUERY_OPCODE_WRITE_DESC:
3247 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3248 break;
3249 case UPIU_QUERY_OPCODE_READ_DESC:
3250 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3251 break;
3252 default:
3253 dev_err(hba->dev,
3254 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3255 __func__, opcode);
3256 err = -EINVAL;
3257 goto out_unlock;
3258 }
3259
3260 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3261
3262 if (err) {
4b761b58
YG
3263 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3264 __func__, opcode, idn, index, err);
d44a5f98
DR
3265 goto out_unlock;
3266 }
3267
ea2aab24 3268 *buf_len = be16_to_cpu(response->upiu_res.length);
d44a5f98
DR
3269
3270out_unlock:
cfcbae38 3271 hba->dev_cmd.query.descriptor = NULL;
d44a5f98 3272 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 3273 ufshcd_release(hba);
d44a5f98
DR
3274 return err;
3275}
3276
a70e91b8 3277/**
8aa29f19
BVA
3278 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3279 * @hba: per-adapter instance
3280 * @opcode: attribute opcode
3281 * @idn: attribute idn to access
3282 * @index: index field
3283 * @selector: selector field
3284 * @desc_buf: the buffer that contains the descriptor
3285 * @buf_len: length parameter passed to the device
a70e91b8
YG
3286 *
3287 * Returns 0 for success, non-zero in case of failure.
3288 * The buf_len parameter will contain, on return, the length parameter
3289 * received on the response.
3290 */
2238d31c
SN
3291int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3292 enum query_opcode opcode,
3293 enum desc_idn idn, u8 index,
3294 u8 selector,
3295 u8 *desc_buf, int *buf_len)
a70e91b8
YG
3296{
3297 int err;
3298 int retries;
3299
3300 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3301 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3302 selector, desc_buf, buf_len);
3303 if (!err || err == -EINVAL)
3304 break;
3305 }
3306
3307 return err;
3308}
a70e91b8 3309
a4b0e8a4
PM
3310/**
3311 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3312 * @hba: Pointer to adapter instance
3313 * @desc_id: descriptor idn value
3314 * @desc_len: mapped desc length (out)
a4b0e8a4 3315 */
7a0bf85b
BH
3316void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3317 int *desc_len)
a4b0e8a4 3318{
7a0bf85b
BH
3319 if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3320 desc_id == QUERY_DESC_IDN_RFU_1)
a4b0e8a4 3321 *desc_len = 0;
7a0bf85b
BH
3322 else
3323 *desc_len = hba->desc_size[desc_id];
a4b0e8a4
PM
3324}
3325EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3326
7a0bf85b 3327static void ufshcd_update_desc_length(struct ufs_hba *hba,
72fb690e 3328 enum desc_idn desc_id, int desc_index,
7a0bf85b
BH
3329 unsigned char desc_len)
3330{
3331 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
72fb690e
BH
3332 desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3333 /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3334 * than the RPMB unit, however, both descriptors share the same
3335 * desc_idn, to cover both unit descriptors with one length, we
3336 * choose the normal unit descriptor length by desc_index.
3337 */
7a0bf85b
BH
3338 hba->desc_size[desc_id] = desc_len;
3339}
3340
da461cec
SJ
3341/**
3342 * ufshcd_read_desc_param - read the specified descriptor parameter
3343 * @hba: Pointer to adapter instance
3344 * @desc_id: descriptor idn value
3345 * @desc_index: descriptor index
3346 * @param_offset: offset of the parameter to read
3347 * @param_read_buf: pointer to buffer where parameter would be read
3348 * @param_size: sizeof(param_read_buf)
3349 *
3350 * Return 0 in case of success, non-zero otherwise
3351 */
45bced87
SN
3352int ufshcd_read_desc_param(struct ufs_hba *hba,
3353 enum desc_idn desc_id,
3354 int desc_index,
3355 u8 param_offset,
3356 u8 *param_read_buf,
3357 u8 param_size)
da461cec
SJ
3358{
3359 int ret;
3360 u8 *desc_buf;
a4b0e8a4 3361 int buff_len;
da461cec
SJ
3362 bool is_kmalloc = true;
3363
a4b0e8a4
PM
3364 /* Safety check */
3365 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
da461cec
SJ
3366 return -EINVAL;
3367
7a0bf85b
BH
3368 /* Get the length of descriptor */
3369 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3370 if (!buff_len) {
1699f980
CG
3371 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3372 return -EINVAL;
3373 }
3374
3375 if (param_offset >= buff_len) {
3376 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3377 __func__, param_offset, desc_id, buff_len);
7a0bf85b 3378 return -EINVAL;
a4b0e8a4
PM
3379 }
3380
3381 /* Check whether we need temp memory */
3382 if (param_offset != 0 || param_size < buff_len) {
1699f980 3383 desc_buf = kzalloc(buff_len, GFP_KERNEL);
da461cec
SJ
3384 if (!desc_buf)
3385 return -ENOMEM;
a4b0e8a4
PM
3386 } else {
3387 desc_buf = param_read_buf;
3388 is_kmalloc = false;
da461cec
SJ
3389 }
3390
a4b0e8a4 3391 /* Request for full descriptor */
a70e91b8 3392 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
a4b0e8a4
PM
3393 desc_id, desc_index, 0,
3394 desc_buf, &buff_len);
da461cec 3395
bde44bb6 3396 if (ret) {
1699f980 3397 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
bde44bb6 3398 __func__, desc_id, desc_index, param_offset, ret);
da461cec
SJ
3399 goto out;
3400 }
3401
bde44bb6 3402 /* Sanity check */
3403 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
1699f980 3404 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
bde44bb6 3405 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3406 ret = -EINVAL;
3407 goto out;
3408 }
3409
7a0bf85b
BH
3410 /* Update descriptor length */
3411 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
72fb690e 3412 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
7a0bf85b 3413
1699f980
CG
3414 if (is_kmalloc) {
3415 /* Make sure we don't copy more data than available */
d3d9c457
BVA
3416 if (param_offset >= buff_len)
3417 ret = -EINVAL;
3418 else
3419 memcpy(param_read_buf, &desc_buf[param_offset],
3420 min_t(u32, param_size, buff_len - param_offset));
1699f980 3421 }
da461cec
SJ
3422out:
3423 if (is_kmalloc)
3424 kfree(desc_buf);
3425 return ret;
3426}
3427
4b828fe1
TW
3428/**
3429 * struct uc_string_id - unicode string
3430 *
3431 * @len: size of this descriptor inclusive
3432 * @type: descriptor type
3433 * @uc: unicode string character
3434 */
3435struct uc_string_id {
3436 u8 len;
3437 u8 type;
ec38c0ad 3438 wchar_t uc[];
4b828fe1
TW
3439} __packed;
3440
3441/* replace non-printable or non-ASCII characters with spaces */
3442static inline char ufshcd_remove_non_printable(u8 ch)
3443{
3444 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3445}
3446
b573d484
YG
3447/**
3448 * ufshcd_read_string_desc - read string descriptor
3449 * @hba: pointer to adapter instance
3450 * @desc_index: descriptor index
4b828fe1
TW
3451 * @buf: pointer to buffer where descriptor would be read,
3452 * the caller should free the memory.
b573d484 3453 * @ascii: if true convert from unicode to ascii characters
4b828fe1 3454 * null terminated string.
b573d484 3455 *
4b828fe1
TW
3456 * Return:
3457 * * string size on success.
3458 * * -ENOMEM: on allocation failure
3459 * * -EINVAL: on a wrong parameter
b573d484 3460 */
4b828fe1
TW
3461int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3462 u8 **buf, bool ascii)
b573d484 3463{
4b828fe1
TW
3464 struct uc_string_id *uc_str;
3465 u8 *str;
3466 int ret;
b573d484 3467
4b828fe1
TW
3468 if (!buf)
3469 return -EINVAL;
b573d484 3470
4b828fe1
TW
3471 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3472 if (!uc_str)
3473 return -ENOMEM;
b573d484 3474
c4607a09
BH
3475 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3476 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
4b828fe1
TW
3477 if (ret < 0) {
3478 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3479 QUERY_REQ_RETRIES, ret);
3480 str = NULL;
3481 goto out;
3482 }
3483
3484 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3485 dev_dbg(hba->dev, "String Desc is of zero length\n");
3486 str = NULL;
3487 ret = 0;
b573d484
YG
3488 goto out;
3489 }
3490
3491 if (ascii) {
4b828fe1 3492 ssize_t ascii_len;
b573d484 3493 int i;
b573d484 3494 /* remove header and divide by 2 to move from UTF16 to UTF8 */
4b828fe1
TW
3495 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3496 str = kzalloc(ascii_len, GFP_KERNEL);
3497 if (!str) {
3498 ret = -ENOMEM;
fcbefc3b 3499 goto out;
b573d484
YG
3500 }
3501
3502 /*
3503 * the descriptor contains string in UTF16 format
3504 * we need to convert to utf-8 so it can be displayed
3505 */
4b828fe1
TW
3506 ret = utf16s_to_utf8s(uc_str->uc,
3507 uc_str->len - QUERY_DESC_HDR_SIZE,
3508 UTF16_BIG_ENDIAN, str, ascii_len);
b573d484
YG
3509
3510 /* replace non-printable or non-ASCII characters with spaces */
4b828fe1
TW
3511 for (i = 0; i < ret; i++)
3512 str[i] = ufshcd_remove_non_printable(str[i]);
b573d484 3513
4b828fe1
TW
3514 str[ret++] = '\0';
3515
3516 } else {
5f57704d 3517 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
4b828fe1
TW
3518 if (!str) {
3519 ret = -ENOMEM;
3520 goto out;
3521 }
4b828fe1 3522 ret = uc_str->len;
b573d484
YG
3523 }
3524out:
4b828fe1
TW
3525 *buf = str;
3526 kfree(uc_str);
3527 return ret;
b573d484 3528}
b573d484 3529
da461cec
SJ
3530/**
3531 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3532 * @hba: Pointer to adapter instance
3533 * @lun: lun id
3534 * @param_offset: offset of the parameter to read
3535 * @param_read_buf: pointer to buffer where parameter would be read
3536 * @param_size: sizeof(param_read_buf)
3537 *
3538 * Return 0 in case of success, non-zero otherwise
3539 */
3540static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3541 int lun,
3542 enum unit_desc_param param_offset,
3543 u8 *param_read_buf,
3544 u32 param_size)
3545{
3546 /*
3547 * Unit descriptors are only available for general purpose LUs (LUN id
3548 * from 0 to 7) and RPMB Well known LU.
3549 */
a2fca52e 3550 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
da461cec
SJ
3551 return -EOPNOTSUPP;
3552
3553 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3554 param_offset, param_read_buf, param_size);
3555}
3556
09f17791
CG
3557static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3558{
3559 int err = 0;
3560 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3561
3562 if (hba->dev_info.wspecversion >= 0x300) {
3563 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3564 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3565 &gating_wait);
3566 if (err)
3567 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3568 err, gating_wait);
3569
3570 if (gating_wait == 0) {
3571 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3572 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3573 gating_wait);
3574 }
3575
3576 hba->dev_info.clk_gating_wait_us = gating_wait;
3577 }
3578
3579 return err;
3580}
3581
7a3e97b0
SY
3582/**
3583 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3584 * @hba: per adapter instance
3585 *
3586 * 1. Allocate DMA memory for Command Descriptor array
3587 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3588 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3589 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3590 * (UTMRDL)
3591 * 4. Allocate memory for local reference block(lrb).
3592 *
3593 * Returns 0 for success, non-zero in case of failure
3594 */
3595static int ufshcd_memory_alloc(struct ufs_hba *hba)
3596{
3597 size_t utmrdl_size, utrdl_size, ucdl_size;
3598
3599 /* Allocate memory for UTP command descriptors */
3600 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2953f850
SJ
3601 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3602 ucdl_size,
3603 &hba->ucdl_dma_addr,
3604 GFP_KERNEL);
7a3e97b0
SY
3605
3606 /*
3607 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3608 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3609 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3610 * be aligned to 128 bytes as well
3611 */
3612 if (!hba->ucdl_base_addr ||
3613 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3614 dev_err(hba->dev,
7a3e97b0
SY
3615 "Command Descriptor Memory allocation failed\n");
3616 goto out;
3617 }
3618
3619 /*
3620 * Allocate memory for UTP Transfer descriptors
3621 * UFSHCI requires 1024 byte alignment of UTRD
3622 */
3623 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2953f850
SJ
3624 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3625 utrdl_size,
3626 &hba->utrdl_dma_addr,
3627 GFP_KERNEL);
7a3e97b0
SY
3628 if (!hba->utrdl_base_addr ||
3629 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3630 dev_err(hba->dev,
7a3e97b0
SY
3631 "Transfer Descriptor Memory allocation failed\n");
3632 goto out;
3633 }
3634
3635 /*
3636 * Allocate memory for UTP Task Management descriptors
3637 * UFSHCI requires 1024 byte alignment of UTMRD
3638 */
3639 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2953f850
SJ
3640 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3641 utmrdl_size,
3642 &hba->utmrdl_dma_addr,
3643 GFP_KERNEL);
7a3e97b0
SY
3644 if (!hba->utmrdl_base_addr ||
3645 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3646 dev_err(hba->dev,
7a3e97b0
SY
3647 "Task Management Descriptor Memory allocation failed\n");
3648 goto out;
3649 }
3650
3651 /* Allocate memory for local reference block */
a86854d0
KC
3652 hba->lrb = devm_kcalloc(hba->dev,
3653 hba->nutrs, sizeof(struct ufshcd_lrb),
2953f850 3654 GFP_KERNEL);
7a3e97b0 3655 if (!hba->lrb) {
3b1d0580 3656 dev_err(hba->dev, "LRB Memory allocation failed\n");
7a3e97b0
SY
3657 goto out;
3658 }
3659 return 0;
3660out:
7a3e97b0
SY
3661 return -ENOMEM;
3662}
3663
3664/**
3665 * ufshcd_host_memory_configure - configure local reference block with
3666 * memory offsets
3667 * @hba: per adapter instance
3668 *
3669 * Configure Host memory space
3670 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3671 * address.
3672 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3673 * and PRDT offset.
3674 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3675 * into local reference block.
3676 */
3677static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3678{
7a3e97b0
SY
3679 struct utp_transfer_req_desc *utrdlp;
3680 dma_addr_t cmd_desc_dma_addr;
3681 dma_addr_t cmd_desc_element_addr;
3682 u16 response_offset;
3683 u16 prdt_offset;
3684 int cmd_desc_size;
3685 int i;
3686
3687 utrdlp = hba->utrdl_base_addr;
7a3e97b0
SY
3688
3689 response_offset =
3690 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3691 prdt_offset =
3692 offsetof(struct utp_transfer_cmd_desc, prd_table);
3693
3694 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3695 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3696
3697 for (i = 0; i < hba->nutrs; i++) {
3698 /* Configure UTRD with command descriptor base address */
3699 cmd_desc_element_addr =
3700 (cmd_desc_dma_addr + (cmd_desc_size * i));
3701 utrdlp[i].command_desc_base_addr_lo =
3702 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3703 utrdlp[i].command_desc_base_addr_hi =
3704 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3705
3706 /* Response upiu and prdt offset should be in double words */
26f968d7
AA
3707 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3708 utrdlp[i].response_upiu_offset =
3709 cpu_to_le16(response_offset);
3710 utrdlp[i].prd_table_offset =
3711 cpu_to_le16(prdt_offset);
3712 utrdlp[i].response_upiu_length =
3713 cpu_to_le16(ALIGNED_UPIU_SIZE);
3714 } else {
3715 utrdlp[i].response_upiu_offset =
3716 cpu_to_le16(response_offset >> 2);
3717 utrdlp[i].prd_table_offset =
3718 cpu_to_le16(prdt_offset >> 2);
3719 utrdlp[i].response_upiu_length =
3720 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3721 }
7a3e97b0 3722
4d2b8d40 3723 ufshcd_init_lrb(hba, &hba->lrb[i], i);
7a3e97b0
SY
3724 }
3725}
3726
3727/**
3728 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3729 * @hba: per adapter instance
3730 *
3731 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3732 * in order to initialize the Unipro link startup procedure.
3733 * Once the Unipro links are up, the device connected to the controller
3734 * is detected.
3735 *
3736 * Returns 0 on success, non-zero value on failure
3737 */
3738static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3739{
6ccf44fe
SJ
3740 struct uic_command uic_cmd = {0};
3741 int ret;
7a3e97b0 3742
6ccf44fe 3743 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
7a3e97b0 3744
6ccf44fe
SJ
3745 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3746 if (ret)
ff8e20c6 3747 dev_dbg(hba->dev,
6ccf44fe
SJ
3748 "dme-link-startup: error code %d\n", ret);
3749 return ret;
7a3e97b0 3750}
39bf2d83
AA
3751/**
3752 * ufshcd_dme_reset - UIC command for DME_RESET
3753 * @hba: per adapter instance
3754 *
3755 * DME_RESET command is issued in order to reset UniPro stack.
3756 * This function now deals with cold reset.
3757 *
3758 * Returns 0 on success, non-zero value on failure
3759 */
3760static int ufshcd_dme_reset(struct ufs_hba *hba)
3761{
3762 struct uic_command uic_cmd = {0};
3763 int ret;
3764
3765 uic_cmd.command = UIC_CMD_DME_RESET;
3766
3767 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3768 if (ret)
3769 dev_err(hba->dev,
3770 "dme-reset: error code %d\n", ret);
3771
3772 return ret;
3773}
3774
fc85a74e
SC
3775int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3776 int agreed_gear,
3777 int adapt_val)
3778{
3779 int ret;
3780
3781 if (agreed_gear != UFS_HS_G4)
66df79cc 3782 adapt_val = PA_NO_ADAPT;
fc85a74e
SC
3783
3784 ret = ufshcd_dme_set(hba,
3785 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3786 adapt_val);
3787 return ret;
3788}
3789EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
3790
39bf2d83
AA
3791/**
3792 * ufshcd_dme_enable - UIC command for DME_ENABLE
3793 * @hba: per adapter instance
3794 *
3795 * DME_ENABLE command is issued in order to enable UniPro stack.
3796 *
3797 * Returns 0 on success, non-zero value on failure
3798 */
3799static int ufshcd_dme_enable(struct ufs_hba *hba)
3800{
3801 struct uic_command uic_cmd = {0};
3802 int ret;
3803
3804 uic_cmd.command = UIC_CMD_DME_ENABLE;
3805
3806 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3807 if (ret)
3808 dev_err(hba->dev,
1fa05700 3809 "dme-enable: error code %d\n", ret);
39bf2d83
AA
3810
3811 return ret;
3812}
7a3e97b0 3813
cad2e03d
YG
3814static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3815{
3816 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3817 unsigned long min_sleep_time_us;
3818
3819 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3820 return;
3821
3822 /*
3823 * last_dme_cmd_tstamp will be 0 only for 1st call to
3824 * this function
3825 */
3826 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3827 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3828 } else {
3829 unsigned long delta =
3830 (unsigned long) ktime_to_us(
3831 ktime_sub(ktime_get(),
3832 hba->last_dme_cmd_tstamp));
3833
3834 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3835 min_sleep_time_us =
3836 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3837 else
3838 return; /* no more delay required */
3839 }
3840
3841 /* allow sleep for extra 50us if needed */
3842 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3843}
3844
12b4fdb4
SJ
3845/**
3846 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3847 * @hba: per adapter instance
3848 * @attr_sel: uic command argument1
3849 * @attr_set: attribute set type as uic command argument2
3850 * @mib_val: setting value as uic command argument3
3851 * @peer: indicate whether peer or local
3852 *
3853 * Returns 0 on success, non-zero value on failure
3854 */
3855int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3856 u8 attr_set, u32 mib_val, u8 peer)
3857{
3858 struct uic_command uic_cmd = {0};
3859 static const char *const action[] = {
3860 "dme-set",
3861 "dme-peer-set"
3862 };
3863 const char *set = action[!!peer];
3864 int ret;
64238fbd 3865 int retries = UFS_UIC_COMMAND_RETRIES;
12b4fdb4
SJ
3866
3867 uic_cmd.command = peer ?
3868 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3869 uic_cmd.argument1 = attr_sel;
3870 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3871 uic_cmd.argument3 = mib_val;
3872
64238fbd
YG
3873 do {
3874 /* for peer attributes we retry upon failure */
3875 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3876 if (ret)
3877 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3878 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3879 } while (ret && peer && --retries);
3880
f37e9f8c 3881 if (ret)
64238fbd 3882 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
f37e9f8c
YG
3883 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3884 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4
SJ
3885
3886 return ret;
3887}
3888EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3889
3890/**
3891 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3892 * @hba: per adapter instance
3893 * @attr_sel: uic command argument1
3894 * @mib_val: the value of the attribute as returned by the UIC command
3895 * @peer: indicate whether peer or local
3896 *
3897 * Returns 0 on success, non-zero value on failure
3898 */
3899int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3900 u32 *mib_val, u8 peer)
3901{
3902 struct uic_command uic_cmd = {0};
3903 static const char *const action[] = {
3904 "dme-get",
3905 "dme-peer-get"
3906 };
3907 const char *get = action[!!peer];
3908 int ret;
64238fbd 3909 int retries = UFS_UIC_COMMAND_RETRIES;
874237f7
YG
3910 struct ufs_pa_layer_attr orig_pwr_info;
3911 struct ufs_pa_layer_attr temp_pwr_info;
3912 bool pwr_mode_change = false;
3913
3914 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3915 orig_pwr_info = hba->pwr_info;
3916 temp_pwr_info = orig_pwr_info;
3917
3918 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3919 orig_pwr_info.pwr_rx == FAST_MODE) {
3920 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3921 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3922 pwr_mode_change = true;
3923 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3924 orig_pwr_info.pwr_rx == SLOW_MODE) {
3925 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3926 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3927 pwr_mode_change = true;
3928 }
3929 if (pwr_mode_change) {
3930 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3931 if (ret)
3932 goto out;
3933 }
3934 }
12b4fdb4
SJ
3935
3936 uic_cmd.command = peer ?
3937 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3938 uic_cmd.argument1 = attr_sel;
3939
64238fbd
YG
3940 do {
3941 /* for peer attributes we retry upon failure */
3942 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3943 if (ret)
3944 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3945 get, UIC_GET_ATTR_ID(attr_sel), ret);
3946 } while (ret && peer && --retries);
3947
f37e9f8c 3948 if (ret)
64238fbd 3949 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
f37e9f8c
YG
3950 get, UIC_GET_ATTR_ID(attr_sel),
3951 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4 3952
64238fbd 3953 if (mib_val && !ret)
12b4fdb4 3954 *mib_val = uic_cmd.argument3;
874237f7
YG
3955
3956 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3957 && pwr_mode_change)
3958 ufshcd_change_power_mode(hba, &orig_pwr_info);
12b4fdb4
SJ
3959out:
3960 return ret;
3961}
3962EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3963
53b3d9c3 3964/**
57d104c1
SJ
3965 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3966 * state) and waits for it to take effect.
3967 *
53b3d9c3 3968 * @hba: per adapter instance
57d104c1
SJ
3969 * @cmd: UIC command to execute
3970 *
3971 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3972 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3973 * and device UniPro link and hence it's final completion would be indicated by
3974 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3975 * addition to normal UIC command completion Status (UCCS). This function only
3976 * returns after the relevant status bits indicate the completion.
53b3d9c3
SJ
3977 *
3978 * Returns 0 on success, non-zero value on failure
3979 */
57d104c1 3980static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
53b3d9c3 3981{
8a686f26 3982 DECLARE_COMPLETION_ONSTACK(uic_async_done);
53b3d9c3
SJ
3983 unsigned long flags;
3984 u8 status;
3985 int ret;
d75f7fe4 3986 bool reenable_intr = false;
53b3d9c3 3987
53b3d9c3 3988 mutex_lock(&hba->uic_cmd_mutex);
cad2e03d 3989 ufshcd_add_delay_before_dme_cmd(hba);
53b3d9c3
SJ
3990
3991 spin_lock_irqsave(hba->host->host_lock, flags);
4db7a236
CG
3992 if (ufshcd_is_link_broken(hba)) {
3993 ret = -ENOLINK;
3994 goto out_unlock;
3995 }
57d104c1 3996 hba->uic_async_done = &uic_async_done;
d75f7fe4
YG
3997 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3998 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3999 /*
4000 * Make sure UIC command completion interrupt is disabled before
4001 * issuing UIC command.
4002 */
4003 wmb();
4004 reenable_intr = true;
57d104c1 4005 }
d75f7fe4
YG
4006 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4007 spin_unlock_irqrestore(hba->host->host_lock, flags);
57d104c1
SJ
4008 if (ret) {
4009 dev_err(hba->dev,
4010 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4011 cmd->command, cmd->argument3, ret);
53b3d9c3
SJ
4012 goto out;
4013 }
4014
57d104c1 4015 if (!wait_for_completion_timeout(hba->uic_async_done,
53b3d9c3
SJ
4016 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4017 dev_err(hba->dev,
57d104c1
SJ
4018 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4019 cmd->command, cmd->argument3);
0f52fcb9
CG
4020
4021 if (!cmd->cmd_active) {
4022 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4023 __func__);
4024 goto check_upmcrs;
4025 }
4026
53b3d9c3
SJ
4027 ret = -ETIMEDOUT;
4028 goto out;
4029 }
4030
0f52fcb9 4031check_upmcrs:
53b3d9c3
SJ
4032 status = ufshcd_get_upmcrs(hba);
4033 if (status != PWR_LOCAL) {
4034 dev_err(hba->dev,
479da360 4035 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
57d104c1 4036 cmd->command, status);
53b3d9c3
SJ
4037 ret = (status != PWR_OK) ? status : -1;
4038 }
4039out:
7942f7b5
VG
4040 if (ret) {
4041 ufshcd_print_host_state(hba);
4042 ufshcd_print_pwr_info(hba);
e965e5e0 4043 ufshcd_print_evt_hist(hba);
7942f7b5
VG
4044 }
4045
53b3d9c3 4046 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 4047 hba->active_uic_cmd = NULL;
57d104c1 4048 hba->uic_async_done = NULL;
d75f7fe4
YG
4049 if (reenable_intr)
4050 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4db7a236
CG
4051 if (ret) {
4052 ufshcd_set_link_broken(hba);
88b09900 4053 ufshcd_schedule_eh_work(hba);
4db7a236
CG
4054 }
4055out_unlock:
53b3d9c3
SJ
4056 spin_unlock_irqrestore(hba->host->host_lock, flags);
4057 mutex_unlock(&hba->uic_cmd_mutex);
1ab27c9c 4058
53b3d9c3
SJ
4059 return ret;
4060}
4061
57d104c1
SJ
4062/**
4063 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
4064 * using DME_SET primitives.
4065 * @hba: per adapter instance
4066 * @mode: powr mode value
4067 *
4068 * Returns 0 on success, non-zero value on failure
4069 */
4070static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4071{
4072 struct uic_command uic_cmd = {0};
1ab27c9c 4073 int ret;
57d104c1 4074
c3a2f9ee
YG
4075 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4076 ret = ufshcd_dme_set(hba,
4077 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4078 if (ret) {
4079 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4080 __func__, ret);
4081 goto out;
4082 }
4083 }
4084
57d104c1
SJ
4085 uic_cmd.command = UIC_CMD_DME_SET;
4086 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4087 uic_cmd.argument3 = mode;
1ab27c9c
ST
4088 ufshcd_hold(hba, false);
4089 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4090 ufshcd_release(hba);
57d104c1 4091
c3a2f9ee 4092out:
1ab27c9c 4093 return ret;
57d104c1
SJ
4094}
4095
087c5efa 4096int ufshcd_link_recovery(struct ufs_hba *hba)
53c12d0e
YG
4097{
4098 int ret;
4099 unsigned long flags;
4100
4101 spin_lock_irqsave(hba->host->host_lock, flags);
4102 hba->ufshcd_state = UFSHCD_STATE_RESET;
4103 ufshcd_set_eh_in_progress(hba);
4104 spin_unlock_irqrestore(hba->host->host_lock, flags);
4105
ebdd1dfd 4106 /* Reset the attached device */
31a5d9ca 4107 ufshcd_device_reset(hba);
ebdd1dfd 4108
53c12d0e
YG
4109 ret = ufshcd_host_reset_and_restore(hba);
4110
4111 spin_lock_irqsave(hba->host->host_lock, flags);
4112 if (ret)
4113 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4114 ufshcd_clear_eh_in_progress(hba);
4115 spin_unlock_irqrestore(hba->host->host_lock, flags);
4116
4117 if (ret)
4118 dev_err(hba->dev, "%s: link recovery failed, err %d",
4119 __func__, ret);
4120
4121 return ret;
4122}
087c5efa 4123EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
53c12d0e 4124
525943a5 4125int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
57d104c1 4126{
87d0b4a6 4127 int ret;
57d104c1 4128 struct uic_command uic_cmd = {0};
911a0771 4129 ktime_t start = ktime_get();
57d104c1 4130
ee32c909
KK
4131 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4132
57d104c1 4133 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
87d0b4a6 4134 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771 4135 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4136 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
87d0b4a6 4137
4db7a236 4138 if (ret)
87d0b4a6
YG
4139 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4140 __func__, ret);
4db7a236 4141 else
ee32c909
KK
4142 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4143 POST_CHANGE);
53c12d0e 4144
87d0b4a6
YG
4145 return ret;
4146}
525943a5 4147EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
87d0b4a6 4148
9d19bf7a 4149int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
57d104c1
SJ
4150{
4151 struct uic_command uic_cmd = {0};
4152 int ret;
911a0771 4153 ktime_t start = ktime_get();
57d104c1 4154
ee32c909
KK
4155 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4156
57d104c1
SJ
4157 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4158 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771 4159 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4160 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4161
57d104c1 4162 if (ret) {
53c12d0e
YG
4163 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4164 __func__, ret);
ff8e20c6 4165 } else {
ee32c909
KK
4166 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4167 POST_CHANGE);
ff8e20c6
DR
4168 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4169 hba->ufs_stats.hibern8_exit_cnt++;
4170 }
57d104c1
SJ
4171
4172 return ret;
4173}
9d19bf7a 4174EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
57d104c1 4175
ba7af5ec
SC
4176void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4177{
4178 unsigned long flags;
be7594a4 4179 bool update = false;
ba7af5ec 4180
be7594a4 4181 if (!ufshcd_is_auto_hibern8_supported(hba))
ba7af5ec
SC
4182 return;
4183
4184 spin_lock_irqsave(hba->host->host_lock, flags);
be7594a4
CG
4185 if (hba->ahit != ahit) {
4186 hba->ahit = ahit;
4187 update = true;
4188 }
ba7af5ec 4189 spin_unlock_irqrestore(hba->host->host_lock, flags);
be7594a4 4190
b294ff3e
AD
4191 if (update &&
4192 !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
4193 ufshcd_rpm_get_sync(hba);
be7594a4
CG
4194 ufshcd_hold(hba, false);
4195 ufshcd_auto_hibern8_enable(hba);
4196 ufshcd_release(hba);
b294ff3e 4197 ufshcd_rpm_put_sync(hba);
be7594a4 4198 }
ba7af5ec
SC
4199}
4200EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4201
71d848b8 4202void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
ad448378
AH
4203{
4204 unsigned long flags;
4205
499f7a96 4206 if (!ufshcd_is_auto_hibern8_supported(hba))
ad448378
AH
4207 return;
4208
4209 spin_lock_irqsave(hba->host->host_lock, flags);
4210 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4211 spin_unlock_irqrestore(hba->host->host_lock, flags);
4212}
4213
5064636c
YG
4214 /**
4215 * ufshcd_init_pwr_info - setting the POR (power on reset)
4216 * values in hba power info
4217 * @hba: per-adapter instance
4218 */
4219static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4220{
4221 hba->pwr_info.gear_rx = UFS_PWM_G1;
4222 hba->pwr_info.gear_tx = UFS_PWM_G1;
4223 hba->pwr_info.lane_rx = 1;
4224 hba->pwr_info.lane_tx = 1;
4225 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4226 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4227 hba->pwr_info.hs_rate = 0;
4228}
4229
d3e89bac 4230/**
7eb584db
DR
4231 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4232 * @hba: per-adapter instance
d3e89bac 4233 */
7eb584db 4234static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
d3e89bac 4235{
7eb584db
DR
4236 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4237
4238 if (hba->max_pwr_info.is_valid)
4239 return 0;
4240
2349b533 4241 pwr_info->pwr_tx = FAST_MODE;
4242 pwr_info->pwr_rx = FAST_MODE;
7eb584db 4243 pwr_info->hs_rate = PA_HS_MODE_B;
d3e89bac
SJ
4244
4245 /* Get the connected lane count */
7eb584db
DR
4246 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4247 &pwr_info->lane_rx);
4248 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4249 &pwr_info->lane_tx);
4250
4251 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4252 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4253 __func__,
4254 pwr_info->lane_rx,
4255 pwr_info->lane_tx);
4256 return -EINVAL;
4257 }
d3e89bac
SJ
4258
4259 /*
4260 * First, get the maximum gears of HS speed.
4261 * If a zero value, it means there is no HSGEAR capability.
4262 * Then, get the maximum gears of PWM speed.
4263 */
7eb584db
DR
4264 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4265 if (!pwr_info->gear_rx) {
4266 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4267 &pwr_info->gear_rx);
4268 if (!pwr_info->gear_rx) {
4269 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4270 __func__, pwr_info->gear_rx);
4271 return -EINVAL;
4272 }
2349b533 4273 pwr_info->pwr_rx = SLOW_MODE;
d3e89bac
SJ
4274 }
4275
7eb584db
DR
4276 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4277 &pwr_info->gear_tx);
4278 if (!pwr_info->gear_tx) {
d3e89bac 4279 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
7eb584db
DR
4280 &pwr_info->gear_tx);
4281 if (!pwr_info->gear_tx) {
4282 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4283 __func__, pwr_info->gear_tx);
4284 return -EINVAL;
4285 }
2349b533 4286 pwr_info->pwr_tx = SLOW_MODE;
7eb584db
DR
4287 }
4288
4289 hba->max_pwr_info.is_valid = true;
4290 return 0;
4291}
4292
4293static int ufshcd_change_power_mode(struct ufs_hba *hba,
4294 struct ufs_pa_layer_attr *pwr_mode)
4295{
4296 int ret;
4297
4298 /* if already configured to the requested pwr_mode */
2355b66e
CG
4299 if (!hba->force_pmc &&
4300 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
7eb584db
DR
4301 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4302 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4303 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4304 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4305 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4306 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4307 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4308 return 0;
d3e89bac
SJ
4309 }
4310
4311 /*
4312 * Configure attributes for power mode change with below.
4313 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4314 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4315 * - PA_HSSERIES
4316 */
7eb584db
DR
4317 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4318 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4319 pwr_mode->lane_rx);
4320 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4321 pwr_mode->pwr_rx == FAST_MODE)
d3e89bac 4322 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
7eb584db
DR
4323 else
4324 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
d3e89bac 4325
7eb584db
DR
4326 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4327 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4328 pwr_mode->lane_tx);
4329 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4330 pwr_mode->pwr_tx == FAST_MODE)
d3e89bac 4331 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
7eb584db
DR
4332 else
4333 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
d3e89bac 4334
7eb584db
DR
4335 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4336 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4337 pwr_mode->pwr_rx == FAST_MODE ||
4338 pwr_mode->pwr_tx == FAST_MODE)
4339 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4340 pwr_mode->hs_rate);
d3e89bac 4341
b1d0d2eb
KK
4342 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4343 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4344 DL_FC0ProtectionTimeOutVal_Default);
4345 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4346 DL_TC0ReplayTimeOutVal_Default);
4347 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4348 DL_AFC0ReqTimeOutVal_Default);
4349 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4350 DL_FC1ProtectionTimeOutVal_Default);
4351 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4352 DL_TC1ReplayTimeOutVal_Default);
4353 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4354 DL_AFC1ReqTimeOutVal_Default);
4355
4356 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4357 DL_FC0ProtectionTimeOutVal_Default);
4358 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4359 DL_TC0ReplayTimeOutVal_Default);
4360 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4361 DL_AFC0ReqTimeOutVal_Default);
4362 }
08342537 4363
7eb584db
DR
4364 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4365 | pwr_mode->pwr_tx);
4366
4367 if (ret) {
d3e89bac 4368 dev_err(hba->dev,
7eb584db
DR
4369 "%s: power mode change failed %d\n", __func__, ret);
4370 } else {
0263bcd0
YG
4371 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4372 pwr_mode);
7eb584db
DR
4373
4374 memcpy(&hba->pwr_info, pwr_mode,
4375 sizeof(struct ufs_pa_layer_attr));
4376 }
4377
4378 return ret;
4379}
4380
4381/**
4382 * ufshcd_config_pwr_mode - configure a new power mode
4383 * @hba: per-adapter instance
4384 * @desired_pwr_mode: desired power configuration
4385 */
0d846e70 4386int ufshcd_config_pwr_mode(struct ufs_hba *hba,
7eb584db
DR
4387 struct ufs_pa_layer_attr *desired_pwr_mode)
4388{
4389 struct ufs_pa_layer_attr final_params = { 0 };
4390 int ret;
4391
0263bcd0
YG
4392 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4393 desired_pwr_mode, &final_params);
4394
4395 if (ret)
7eb584db
DR
4396 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4397
4398 ret = ufshcd_change_power_mode(hba, &final_params);
d3e89bac
SJ
4399
4400 return ret;
4401}
0d846e70 4402EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
d3e89bac 4403
68078d5c
DR
4404/**
4405 * ufshcd_complete_dev_init() - checks device readiness
8aa29f19 4406 * @hba: per-adapter instance
68078d5c
DR
4407 *
4408 * Set fDeviceInit flag and poll until device toggles it.
4409 */
4410static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4411{
dc3c8d3a 4412 int err;
7dfdcc39 4413 bool flag_res = true;
29707fab 4414 ktime_t timeout;
68078d5c 4415
dc3c8d3a 4416 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1f34eedf 4417 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
68078d5c
DR
4418 if (err) {
4419 dev_err(hba->dev,
4420 "%s setting fDeviceInit flag failed with error %d\n",
4421 __func__, err);
4422 goto out;
4423 }
4424
29707fab
KK
4425 /* Poll fDeviceInit flag to be cleared */
4426 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4427 do {
4428 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4429 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4430 if (!flag_res)
4431 break;
a4e6496f 4432 usleep_range(500, 1000);
29707fab 4433 } while (ktime_before(ktime_get(), timeout));
dc3c8d3a 4434
29707fab 4435 if (err) {
68078d5c 4436 dev_err(hba->dev,
29707fab
KK
4437 "%s reading fDeviceInit flag failed with error %d\n",
4438 __func__, err);
4439 } else if (flag_res) {
68078d5c 4440 dev_err(hba->dev,
29707fab
KK
4441 "%s fDeviceInit was not cleared by the device\n",
4442 __func__);
4443 err = -EBUSY;
4444 }
68078d5c
DR
4445out:
4446 return err;
4447}
4448
7a3e97b0
SY
4449/**
4450 * ufshcd_make_hba_operational - Make UFS controller operational
4451 * @hba: per adapter instance
4452 *
4453 * To bring UFS host controller to operational state,
5c0c28a8
SRT
4454 * 1. Enable required interrupts
4455 * 2. Configure interrupt aggregation
897efe62 4456 * 3. Program UTRL and UTMRL base address
5c0c28a8 4457 * 4. Configure run-stop-registers
7a3e97b0
SY
4458 *
4459 * Returns 0 on success, non-zero value on failure
4460 */
9d19bf7a 4461int ufshcd_make_hba_operational(struct ufs_hba *hba)
7a3e97b0
SY
4462{
4463 int err = 0;
4464 u32 reg;
4465
6ccf44fe
SJ
4466 /* Enable required interrupts */
4467 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4468
4469 /* Configure interrupt aggregation */
b852190e
YG
4470 if (ufshcd_is_intr_aggr_allowed(hba))
4471 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4472 else
4473 ufshcd_disable_intr_aggr(hba);
6ccf44fe
SJ
4474
4475 /* Configure UTRL and UTMRL base address registers */
4476 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4477 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4478 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4479 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4480 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4481 REG_UTP_TASK_REQ_LIST_BASE_L);
4482 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4483 REG_UTP_TASK_REQ_LIST_BASE_H);
4484
897efe62
YG
4485 /*
4486 * Make sure base address and interrupt setup are updated before
4487 * enabling the run/stop registers below.
4488 */
4489 wmb();
4490
7a3e97b0
SY
4491 /*
4492 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
7a3e97b0 4493 */
5c0c28a8 4494 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
7a3e97b0
SY
4495 if (!(ufshcd_get_lists_status(reg))) {
4496 ufshcd_enable_run_stop_reg(hba);
4497 } else {
3b1d0580 4498 dev_err(hba->dev,
7a3e97b0
SY
4499 "Host controller not ready to process requests");
4500 err = -EIO;
7a3e97b0
SY
4501 }
4502
7a3e97b0
SY
4503 return err;
4504}
9d19bf7a 4505EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
7a3e97b0 4506
596585a2
YG
4507/**
4508 * ufshcd_hba_stop - Send controller to reset state
4509 * @hba: per adapter instance
596585a2 4510 */
3a95f5b3 4511void ufshcd_hba_stop(struct ufs_hba *hba)
596585a2 4512{
5cac1095 4513 unsigned long flags;
596585a2
YG
4514 int err;
4515
5cac1095
BVA
4516 /*
4517 * Obtain the host lock to prevent that the controller is disabled
4518 * while the UFS interrupt handler is active on another CPU.
4519 */
4520 spin_lock_irqsave(hba->host->host_lock, flags);
596585a2 4521 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
5cac1095
BVA
4522 spin_unlock_irqrestore(hba->host->host_lock, flags);
4523
596585a2
YG
4524 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4525 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
5cac1095 4526 10, 1);
596585a2
YG
4527 if (err)
4528 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4529}
3a95f5b3 4530EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
596585a2 4531
7a3e97b0 4532/**
39bf2d83 4533 * ufshcd_hba_execute_hce - initialize the controller
7a3e97b0
SY
4534 * @hba: per adapter instance
4535 *
4536 * The controller resets itself and controller firmware initialization
4537 * sequence kicks off. When controller is ready it will set
4538 * the Host Controller Enable bit to 1.
4539 *
4540 * Returns 0 on success, non-zero value on failure
4541 */
39bf2d83 4542static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
7a3e97b0 4543{
6081b12c
SC
4544 int retry_outer = 3;
4545 int retry_inner;
7a3e97b0 4546
6081b12c 4547start:
596585a2 4548 if (!ufshcd_is_hba_active(hba))
7a3e97b0 4549 /* change controller state to "reset state" */
5cac1095 4550 ufshcd_hba_stop(hba);
7a3e97b0 4551
57d104c1
SJ
4552 /* UniPro link is disabled at this point */
4553 ufshcd_set_link_off(hba);
4554
0263bcd0 4555 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
5c0c28a8 4556
7a3e97b0
SY
4557 /* start controller initialization sequence */
4558 ufshcd_hba_start(hba);
4559
4560 /*
4561 * To initialize a UFS host controller HCE bit must be set to 1.
4562 * During initialization the HCE bit value changes from 1->0->1.
4563 * When the host controller completes initialization sequence
4564 * it sets the value of HCE bit to 1. The same HCE bit is read back
4565 * to check if the controller has completed initialization sequence.
4566 * So without this delay the value HCE = 1, set in the previous
4567 * instruction might be read back.
4568 * This delay can be changed based on the controller.
4569 */
90b8491c 4570 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
7a3e97b0
SY
4571
4572 /* wait for the host controller to complete initialization */
6081b12c 4573 retry_inner = 50;
7a3e97b0 4574 while (ufshcd_is_hba_active(hba)) {
6081b12c
SC
4575 if (retry_inner) {
4576 retry_inner--;
7a3e97b0 4577 } else {
3b1d0580 4578 dev_err(hba->dev,
7a3e97b0 4579 "Controller enable failed\n");
6081b12c
SC
4580 if (retry_outer) {
4581 retry_outer--;
4582 goto start;
4583 }
7a3e97b0
SY
4584 return -EIO;
4585 }
9fc305ef 4586 usleep_range(1000, 1100);
7a3e97b0 4587 }
5c0c28a8 4588
1d337ec2 4589 /* enable UIC related interrupts */
57d104c1 4590 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
1d337ec2 4591
0263bcd0 4592 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
5c0c28a8 4593
7a3e97b0
SY
4594 return 0;
4595}
39bf2d83
AA
4596
4597int ufshcd_hba_enable(struct ufs_hba *hba)
4598{
4599 int ret;
4600
4601 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4602 ufshcd_set_link_off(hba);
4603 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4604
4605 /* enable UIC related interrupts */
4606 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4607 ret = ufshcd_dme_reset(hba);
4608 if (!ret) {
4609 ret = ufshcd_dme_enable(hba);
4610 if (!ret)
4611 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4612 if (ret)
4613 dev_err(hba->dev,
4614 "Host controller enable failed with non-hce\n");
4615 }
4616 } else {
4617 ret = ufshcd_hba_execute_hce(hba);
4618 }
4619
4620 return ret;
4621}
9d19bf7a
SC
4622EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4623
7ca38cf3
YG
4624static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4625{
ba0320fb 4626 int tx_lanes = 0, i, err = 0;
7ca38cf3
YG
4627
4628 if (!peer)
4629 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4630 &tx_lanes);
4631 else
4632 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4633 &tx_lanes);
4634 for (i = 0; i < tx_lanes; i++) {
4635 if (!peer)
4636 err = ufshcd_dme_set(hba,
4637 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4638 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4639 0);
4640 else
4641 err = ufshcd_dme_peer_set(hba,
4642 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4643 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4644 0);
4645 if (err) {
4646 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4647 __func__, peer, i, err);
4648 break;
4649 }
4650 }
4651
4652 return err;
4653}
4654
4655static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4656{
4657 return ufshcd_disable_tx_lcc(hba, true);
4658}
4659
e965e5e0 4660void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
8808b4e9 4661{
e965e5e0
SC
4662 struct ufs_event_hist *e;
4663
4664 if (id >= UFS_EVT_CNT)
4665 return;
4666
4667 e = &hba->ufs_stats.event[id];
4668 e->val[e->pos] = val;
4669 e->tstamp[e->pos] = ktime_get();
b6cacaf2 4670 e->cnt += 1;
e965e5e0 4671 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
172614a9
SC
4672
4673 ufshcd_vops_event_notify(hba, id, &val);
8808b4e9 4674}
e965e5e0 4675EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
8808b4e9 4676
7a3e97b0 4677/**
6ccf44fe 4678 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
4679 * @hba: per adapter instance
4680 *
6ccf44fe 4681 * Returns 0 for success, non-zero in case of failure
7a3e97b0 4682 */
6ccf44fe 4683static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 4684{
6ccf44fe 4685 int ret;
1d337ec2 4686 int retries = DME_LINKSTARTUP_RETRIES;
7caf489b 4687 bool link_startup_again = false;
7a3e97b0 4688
7caf489b 4689 /*
4690 * If UFS device isn't active then we will have to issue link startup
4691 * 2 times to make sure the device state move to active.
4692 */
4693 if (!ufshcd_is_ufs_dev_active(hba))
4694 link_startup_again = true;
7a3e97b0 4695
7caf489b 4696link_startup:
1d337ec2 4697 do {
0263bcd0 4698 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
6ccf44fe 4699
1d337ec2 4700 ret = ufshcd_dme_link_startup(hba);
5c0c28a8 4701
1d337ec2
SRT
4702 /* check if device is detected by inter-connect layer */
4703 if (!ret && !ufshcd_is_device_present(hba)) {
e965e5e0
SC
4704 ufshcd_update_evt_hist(hba,
4705 UFS_EVT_LINK_STARTUP_FAIL,
8808b4e9 4706 0);
1d337ec2
SRT
4707 dev_err(hba->dev, "%s: Device not present\n", __func__);
4708 ret = -ENXIO;
4709 goto out;
4710 }
6ccf44fe 4711
1d337ec2
SRT
4712 /*
4713 * DME link lost indication is only received when link is up,
4714 * but we can't be sure if the link is up until link startup
4715 * succeeds. So reset the local Uni-Pro and try again.
4716 */
8808b4e9 4717 if (ret && ufshcd_hba_enable(hba)) {
e965e5e0
SC
4718 ufshcd_update_evt_hist(hba,
4719 UFS_EVT_LINK_STARTUP_FAIL,
8808b4e9 4720 (u32)ret);
1d337ec2 4721 goto out;
8808b4e9 4722 }
1d337ec2
SRT
4723 } while (ret && retries--);
4724
8808b4e9 4725 if (ret) {
1d337ec2 4726 /* failed to get the link up... retire */
e965e5e0
SC
4727 ufshcd_update_evt_hist(hba,
4728 UFS_EVT_LINK_STARTUP_FAIL,
8808b4e9 4729 (u32)ret);
5c0c28a8 4730 goto out;
8808b4e9 4731 }
5c0c28a8 4732
7caf489b 4733 if (link_startup_again) {
4734 link_startup_again = false;
4735 retries = DME_LINKSTARTUP_RETRIES;
4736 goto link_startup;
4737 }
4738
d2aebb9b 4739 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4740 ufshcd_init_pwr_info(hba);
4741 ufshcd_print_pwr_info(hba);
4742
7ca38cf3
YG
4743 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4744 ret = ufshcd_disable_device_tx_lcc(hba);
4745 if (ret)
4746 goto out;
4747 }
4748
5c0c28a8 4749 /* Include any host controller configuration via UIC commands */
0263bcd0
YG
4750 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4751 if (ret)
4752 goto out;
7a3e97b0 4753
2355b66e
CG
4754 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4755 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5c0c28a8 4756 ret = ufshcd_make_hba_operational(hba);
6ccf44fe 4757out:
7942f7b5 4758 if (ret) {
6ccf44fe 4759 dev_err(hba->dev, "link startup failed %d\n", ret);
7942f7b5
VG
4760 ufshcd_print_host_state(hba);
4761 ufshcd_print_pwr_info(hba);
e965e5e0 4762 ufshcd_print_evt_hist(hba);
7942f7b5 4763 }
6ccf44fe 4764 return ret;
7a3e97b0
SY
4765}
4766
5a0b0cb9
SRT
4767/**
4768 * ufshcd_verify_dev_init() - Verify device initialization
4769 * @hba: per-adapter instance
4770 *
4771 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4772 * device Transport Protocol (UTP) layer is ready after a reset.
4773 * If the UTP layer at the device side is not initialized, it may
4774 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4775 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4776 */
4777static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4778{
4779 int err = 0;
4780 int retries;
4781
1ab27c9c 4782 ufshcd_hold(hba, false);
5a0b0cb9
SRT
4783 mutex_lock(&hba->dev_cmd.lock);
4784 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4785 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1cbc9ad3 4786 hba->nop_out_timeout);
5a0b0cb9
SRT
4787
4788 if (!err || err == -ETIMEDOUT)
4789 break;
4790
4791 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4792 }
4793 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 4794 ufshcd_release(hba);
5a0b0cb9
SRT
4795
4796 if (err)
4797 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4798 return err;
4799}
4800
0ce147d4
SJ
4801/**
4802 * ufshcd_set_queue_depth - set lun queue depth
4803 * @sdev: pointer to SCSI device
4804 *
4805 * Read bLUQueueDepth value and activate scsi tagged command
4806 * queueing. For WLUN, queue depth is set to 1. For best-effort
4807 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4808 * value that host can queue.
4809 */
4810static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4811{
4812 int ret = 0;
4813 u8 lun_qdepth;
4814 struct ufs_hba *hba;
4815
4816 hba = shost_priv(sdev->host);
4817
4818 lun_qdepth = hba->nutrs;
dbd34a61
SM
4819 ret = ufshcd_read_unit_desc_param(hba,
4820 ufshcd_scsi_to_upiu_lun(sdev->lun),
4821 UNIT_DESC_PARAM_LU_Q_DEPTH,
4822 &lun_qdepth,
4823 sizeof(lun_qdepth));
0ce147d4
SJ
4824
4825 /* Some WLUN doesn't support unit descriptor */
4826 if (ret == -EOPNOTSUPP)
4827 lun_qdepth = 1;
4828 else if (!lun_qdepth)
4829 /* eventually, we can figure out the real queue depth */
4830 lun_qdepth = hba->nutrs;
4831 else
4832 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4833
4834 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4835 __func__, lun_qdepth);
db5ed4df 4836 scsi_change_queue_depth(sdev, lun_qdepth);
0ce147d4
SJ
4837}
4838
57d104c1
SJ
4839/*
4840 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4841 * @hba: per-adapter instance
4842 * @lun: UFS device lun id
4843 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4844 *
4845 * Returns 0 in case of success and b_lu_write_protect status would be returned
4846 * @b_lu_write_protect parameter.
4847 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4848 * Returns -EINVAL in case of invalid parameters passed to this function.
4849 */
4850static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4851 u8 lun,
4852 u8 *b_lu_write_protect)
4853{
4854 int ret;
4855
4856 if (!b_lu_write_protect)
4857 ret = -EINVAL;
4858 /*
4859 * According to UFS device spec, RPMB LU can't be write
4860 * protected so skip reading bLUWriteProtect parameter for
4861 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4862 */
1baa8011 4863 else if (lun >= hba->dev_info.max_lu_supported)
57d104c1
SJ
4864 ret = -ENOTSUPP;
4865 else
4866 ret = ufshcd_read_unit_desc_param(hba,
4867 lun,
4868 UNIT_DESC_PARAM_LU_WR_PROTECT,
4869 b_lu_write_protect,
4870 sizeof(*b_lu_write_protect));
4871 return ret;
4872}
4873
4874/**
4875 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4876 * status
4877 * @hba: per-adapter instance
4878 * @sdev: pointer to SCSI device
4879 *
4880 */
4881static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4882 struct scsi_device *sdev)
4883{
4884 if (hba->dev_info.f_power_on_wp_en &&
4885 !hba->dev_info.is_lu_power_on_wp) {
4886 u8 b_lu_write_protect;
4887
4888 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4889 &b_lu_write_protect) &&
4890 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4891 hba->dev_info.is_lu_power_on_wp = true;
4892 }
4893}
4894
b294ff3e
AD
4895/**
4896 * ufshcd_setup_links - associate link b/w device wlun and other luns
4897 * @sdev: pointer to SCSI device
4898 * @hba: pointer to ufs hba
4899 */
4900static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
4901{
4902 struct device_link *link;
4903
4904 /*
4905 * Device wlun is the supplier & rest of the luns are consumers.
4906 * This ensures that device wlun suspends after all other luns.
4907 */
4908 if (hba->sdev_ufs_device) {
4909 link = device_link_add(&sdev->sdev_gendev,
4910 &hba->sdev_ufs_device->sdev_gendev,
4911 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
4912 if (!link) {
4913 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
4914 dev_name(&hba->sdev_ufs_device->sdev_gendev));
4915 return;
4916 }
4917 hba->luns_avail--;
4918 /* Ignore REPORT_LUN wlun probing */
4919 if (hba->luns_avail == 1) {
4920 ufshcd_rpm_put(hba);
4921 return;
4922 }
4923 } else {
4924 /*
4925 * Device wlun is probed. The assumption is that WLUNs are
4926 * scanned before other LUNs.
4927 */
4928 hba->luns_avail--;
4929 }
4930}
4931
7a3e97b0
SY
4932/**
4933 * ufshcd_slave_alloc - handle initial SCSI device configurations
4934 * @sdev: pointer to SCSI device
4935 *
4936 * Returns success
4937 */
4938static int ufshcd_slave_alloc(struct scsi_device *sdev)
4939{
4940 struct ufs_hba *hba;
4941
4942 hba = shost_priv(sdev->host);
7a3e97b0
SY
4943
4944 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4945 sdev->use_10_for_ms = 1;
a3a76391
CG
4946
4947 /* DBD field should be set to 1 in mode sense(10) */
4948 sdev->set_dbd_for_ms = 1;
7a3e97b0 4949
e8e7f271
SRT
4950 /* allow SCSI layer to restart the device in case of errors */
4951 sdev->allow_restart = 1;
4264fd61 4952
b2a6c522
SRT
4953 /* REPORT SUPPORTED OPERATION CODES is not supported */
4954 sdev->no_report_opcodes = 1;
4955
84af7e8b
SRT
4956 /* WRITE_SAME command is not supported */
4957 sdev->no_write_same = 1;
e8e7f271 4958
0ce147d4 4959 ufshcd_set_queue_depth(sdev);
4264fd61 4960
57d104c1
SJ
4961 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4962
b294ff3e
AD
4963 ufshcd_setup_links(hba, sdev);
4964
7a3e97b0
SY
4965 return 0;
4966}
4967
4264fd61
SRT
4968/**
4969 * ufshcd_change_queue_depth - change queue depth
4970 * @sdev: pointer to SCSI device
4971 * @depth: required depth to set
4264fd61 4972 *
db5ed4df 4973 * Change queue depth and make sure the max. limits are not crossed.
4264fd61 4974 */
db5ed4df 4975static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4264fd61 4976{
fc21da8a 4977 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
4264fd61
SRT
4978}
4979
f02bc975
DP
4980static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
4981{
4982 /* skip well-known LU */
41d8a933
DP
4983 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4984 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
f02bc975
DP
4985 return;
4986
4987 ufshpb_destroy_lu(hba, sdev);
4988}
4989
4990static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
4991{
4992 /* skip well-known LU */
4993 if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4994 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4995 return;
4996
4997 ufshpb_init_hpb_lu(hba, sdev);
4998}
4999
eeda4749
AM
5000/**
5001 * ufshcd_slave_configure - adjust SCSI device configurations
5002 * @sdev: pointer to SCSI device
5003 */
5004static int ufshcd_slave_configure(struct scsi_device *sdev)
5005{
49615ba1 5006 struct ufs_hba *hba = shost_priv(sdev->host);
eeda4749
AM
5007 struct request_queue *q = sdev->request_queue;
5008
f02bc975
DP
5009 ufshcd_hpb_configure(hba, sdev);
5010
eeda4749 5011 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
2b2bfc8a
KK
5012 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
5013 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
b294ff3e
AD
5014 /*
5015 * Block runtime-pm until all consumers are added.
5016 * Refer ufshcd_setup_links().
5017 */
5018 if (is_device_wlun(sdev))
5019 pm_runtime_get_noresume(&sdev->sdev_gendev);
5020 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
49615ba1 5021 sdev->rpm_autosuspend = 1;
71bb9ab6
AH
5022 /*
5023 * Do not print messages during runtime PM to avoid never-ending cycles
5024 * of messages written back to storage by user space causing runtime
5025 * resume, causing more messages and so on.
5026 */
5027 sdev->silence_suspend = 1;
49615ba1 5028
cb77cb5a 5029 ufshcd_crypto_register(hba, q);
df043c74 5030
eeda4749
AM
5031 return 0;
5032}
5033
7a3e97b0
SY
5034/**
5035 * ufshcd_slave_destroy - remove SCSI device configurations
5036 * @sdev: pointer to SCSI device
5037 */
5038static void ufshcd_slave_destroy(struct scsi_device *sdev)
5039{
5040 struct ufs_hba *hba;
bf25967a 5041 unsigned long flags;
7a3e97b0
SY
5042
5043 hba = shost_priv(sdev->host);
f02bc975
DP
5044
5045 ufshcd_hpb_destroy(hba, sdev);
5046
0ce147d4 5047 /* Drop the reference as it won't be needed anymore */
7c48bfd0 5048 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
7c48bfd0 5049 spin_lock_irqsave(hba->host->host_lock, flags);
0ce147d4 5050 hba->sdev_ufs_device = NULL;
7c48bfd0 5051 spin_unlock_irqrestore(hba->host->host_lock, flags);
bf25967a
AH
5052 } else if (hba->sdev_ufs_device) {
5053 struct device *supplier = NULL;
5054
5055 /* Ensure UFS Device WLUN exists and does not disappear */
5056 spin_lock_irqsave(hba->host->host_lock, flags);
5057 if (hba->sdev_ufs_device) {
5058 supplier = &hba->sdev_ufs_device->sdev_gendev;
5059 get_device(supplier);
5060 }
5061 spin_unlock_irqrestore(hba->host->host_lock, flags);
5062
5063 if (supplier) {
5064 /*
5065 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5066 * device will not have been registered but can still
5067 * have a device link holding a reference to the device.
5068 */
5069 device_link_remove(&sdev->sdev_gendev, supplier);
5070 put_device(supplier);
5071 }
7c48bfd0 5072 }
7a3e97b0
SY
5073}
5074
7a3e97b0
SY
5075/**
5076 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
8aa29f19 5077 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
5078 * @scsi_status: SCSI command status
5079 *
5080 * Returns value base on SCSI command status
5081 */
5082static inline int
5083ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5084{
5085 int result = 0;
5086
5087 switch (scsi_status) {
7a3e97b0 5088 case SAM_STAT_CHECK_CONDITION:
1c2623c5 5089 ufshcd_copy_sense_data(lrbp);
df561f66 5090 fallthrough;
1c2623c5 5091 case SAM_STAT_GOOD:
db83d8a5 5092 result |= DID_OK << 16 | scsi_status;
7a3e97b0
SY
5093 break;
5094 case SAM_STAT_TASK_SET_FULL:
1c2623c5 5095 case SAM_STAT_BUSY:
7a3e97b0 5096 case SAM_STAT_TASK_ABORTED:
1c2623c5
SJ
5097 ufshcd_copy_sense_data(lrbp);
5098 result |= scsi_status;
7a3e97b0
SY
5099 break;
5100 default:
5101 result |= DID_ERROR << 16;
5102 break;
5103 } /* end of switch */
5104
5105 return result;
5106}
5107
5108/**
5109 * ufshcd_transfer_rsp_status - Get overall status of the response
5110 * @hba: per adapter instance
8aa29f19 5111 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
5112 *
5113 * Returns result of the command to notify SCSI midlayer
5114 */
5115static inline int
5116ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
5117{
5118 int result = 0;
5119 int scsi_status;
957d63e7 5120 enum utp_ocs ocs;
7a3e97b0
SY
5121
5122 /* overall command status of utrd */
5123 ocs = ufshcd_get_tr_ocs(lrbp);
5124
d779a6e9
KK
5125 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5126 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5127 MASK_RSP_UPIU_RESULT)
5128 ocs = OCS_SUCCESS;
5129 }
5130
7a3e97b0
SY
5131 switch (ocs) {
5132 case OCS_SUCCESS:
5a0b0cb9 5133 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
ff8e20c6 5134 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
5135 switch (result) {
5136 case UPIU_TRANSACTION_RESPONSE:
5137 /*
5138 * get the response UPIU result to extract
5139 * the SCSI command status
5140 */
5141 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
5142
5143 /*
5144 * get the result based on SCSI status response
5145 * to notify the SCSI midlayer of the command status
5146 */
5147 scsi_status = result & MASK_SCSI_STATUS;
5148 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
66ec6d59 5149
f05ac2e5
YG
5150 /*
5151 * Currently we are only supporting BKOPs exception
5152 * events hence we can ignore BKOPs exception event
5153 * during power management callbacks. BKOPs exception
5154 * event is not expected to be raised in runtime suspend
5155 * callback as it allows the urgent bkops.
5156 * During system suspend, we are anyway forcefully
5157 * disabling the bkops and if urgent bkops is needed
5158 * it will be enabled on system resume. Long term
5159 * solution could be to abort the system suspend if
5160 * UFS device needs urgent BKOPs.
5161 */
5162 if (!hba->pm_op_in_progress &&
aa53f580 5163 !ufshcd_eh_in_progress(hba) &&
b294ff3e
AD
5164 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5165 /* Flushed in suspend */
5166 schedule_work(&hba->eeh_work);
4b5f4907
DP
5167
5168 if (scsi_status == SAM_STAT_GOOD)
5169 ufshpb_rsp_upiu(hba, lrbp);
5a0b0cb9
SRT
5170 break;
5171 case UPIU_TRANSACTION_REJECT_UPIU:
5172 /* TODO: handle Reject UPIU Response */
5173 result = DID_ERROR << 16;
3b1d0580 5174 dev_err(hba->dev,
5a0b0cb9
SRT
5175 "Reject UPIU not fully implemented\n");
5176 break;
5177 default:
5a0b0cb9
SRT
5178 dev_err(hba->dev,
5179 "Unexpected request response code = %x\n",
5180 result);
e0347d89 5181 result = DID_ERROR << 16;
7a3e97b0
SY
5182 break;
5183 }
7a3e97b0
SY
5184 break;
5185 case OCS_ABORTED:
5186 result |= DID_ABORT << 16;
5187 break;
e8e7f271
SRT
5188 case OCS_INVALID_COMMAND_STATUS:
5189 result |= DID_REQUEUE << 16;
5190 break;
7a3e97b0
SY
5191 case OCS_INVALID_CMD_TABLE_ATTR:
5192 case OCS_INVALID_PRDT_ATTR:
5193 case OCS_MISMATCH_DATA_BUF_SIZE:
5194 case OCS_MISMATCH_RESP_UPIU_SIZE:
5195 case OCS_PEER_COMM_FAILURE:
5196 case OCS_FATAL_ERROR:
5e7341e1
ST
5197 case OCS_DEVICE_FATAL_ERROR:
5198 case OCS_INVALID_CRYPTO_CONFIG:
5199 case OCS_GENERAL_CRYPTO_ERROR:
7a3e97b0
SY
5200 default:
5201 result |= DID_ERROR << 16;
3b1d0580 5202 dev_err(hba->dev,
ff8e20c6
DR
5203 "OCS error from controller = %x for tag %d\n",
5204 ocs, lrbp->task_tag);
e965e5e0 5205 ufshcd_print_evt_hist(hba);
6ba65588 5206 ufshcd_print_host_state(hba);
7a3e97b0
SY
5207 break;
5208 } /* end of switch */
5209
eeb1b55b
JK
5210 if ((host_byte(result) != DID_OK) &&
5211 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
66cc820f 5212 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
7a3e97b0
SY
5213 return result;
5214}
5215
a45f9371
CG
5216static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5217 u32 intr_mask)
5218{
5219 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5220 !ufshcd_is_auto_hibern8_enabled(hba))
5221 return false;
5222
5223 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5224 return false;
5225
5226 if (hba->active_uic_cmd &&
5227 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5228 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5229 return false;
5230
5231 return true;
5232}
5233
6ccf44fe
SJ
5234/**
5235 * ufshcd_uic_cmd_compl - handle completion of uic command
5236 * @hba: per adapter instance
53b3d9c3 5237 * @intr_status: interrupt status generated by the controller
9333d775
VG
5238 *
5239 * Returns
5240 * IRQ_HANDLED - If interrupt is valid
5241 * IRQ_NONE - If invalid interrupt
6ccf44fe 5242 */
9333d775 5243static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 5244{
9333d775
VG
5245 irqreturn_t retval = IRQ_NONE;
5246
a45f9371
CG
5247 spin_lock(hba->host->host_lock);
5248 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5249 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5250
53b3d9c3 5251 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
5252 hba->active_uic_cmd->argument2 |=
5253 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
5254 hba->active_uic_cmd->argument3 =
5255 ufshcd_get_dme_attr_val(hba);
0f52fcb9
CG
5256 if (!hba->uic_async_done)
5257 hba->active_uic_cmd->cmd_active = 0;
6ccf44fe 5258 complete(&hba->active_uic_cmd->done);
9333d775 5259 retval = IRQ_HANDLED;
6ccf44fe 5260 }
53b3d9c3 5261
9333d775 5262 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
0f52fcb9 5263 hba->active_uic_cmd->cmd_active = 0;
57d104c1 5264 complete(hba->uic_async_done);
9333d775
VG
5265 retval = IRQ_HANDLED;
5266 }
aa5c6979
SC
5267
5268 if (retval == IRQ_HANDLED)
5269 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
28fa68fc 5270 UFS_CMD_COMP);
a45f9371 5271 spin_unlock(hba->host->host_lock);
9333d775 5272 return retval;
6ccf44fe
SJ
5273}
5274
6f8dafde
BVA
5275/* Release the resources allocated for processing a SCSI command. */
5276static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5277 struct ufshcd_lrb *lrbp)
5278{
5279 struct scsi_cmnd *cmd = lrbp->cmd;
5280
5281 scsi_dma_unmap(cmd);
5282 lrbp->cmd = NULL; /* Mark the command as completed. */
5283 ufshcd_release(hba);
5284 ufshcd_clk_scaling_update_busy(hba);
5285}
5286
7a3e97b0 5287/**
9a47ec7c 5288 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
7a3e97b0 5289 * @hba: per adapter instance
73dc3c4a 5290 * @completed_reqs: bitmask that indicates which requests to complete
7a3e97b0 5291 */
9a47ec7c 5292static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
11682523 5293 unsigned long completed_reqs)
7a3e97b0 5294{
5a0b0cb9
SRT
5295 struct ufshcd_lrb *lrbp;
5296 struct scsi_cmnd *cmd;
7a3e97b0 5297 int index;
e9d501b1 5298
e9d501b1
DR
5299 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5300 lrbp = &hba->lrb[index];
a3170376 5301 lrbp->compl_time_stamp = ktime_get();
e9d501b1
DR
5302 cmd = lrbp->cmd;
5303 if (cmd) {
1d8613a2
CG
5304 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5305 ufshcd_update_monitor(hba, lrbp);
28fa68fc 5306 ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
6f8dafde
BVA
5307 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
5308 ufshcd_release_scsi_cmd(hba, lrbp);
e9d501b1 5309 /* Do not touch lrbp after scsi done */
35c3730a 5310 scsi_done(cmd);
300bb13f
JP
5311 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5312 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
1a07f2d9
LS
5313 if (hba->dev_cmd.complete) {
5314 ufshcd_add_command_trace(hba, index,
28fa68fc 5315 UFS_DEV_COMP);
e9d501b1 5316 complete(hba->dev_cmd.complete);
3eb9dcc0 5317 ufshcd_clk_scaling_update_busy(hba);
1a07f2d9 5318 }
e9d501b1
DR
5319 }
5320 }
7a3e97b0
SY
5321}
5322
eaab9b57
BVA
5323/*
5324 * Returns > 0 if one or more commands have been completed or 0 if no
5325 * requests have been completed.
5326 */
5327static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5328{
5329 struct ufs_hba *hba = shost_priv(shost);
5330 unsigned long completed_reqs, flags;
5331 u32 tr_doorbell;
5332
5333 spin_lock_irqsave(&hba->outstanding_lock, flags);
5334 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5335 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5336 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5337 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5338 hba->outstanding_reqs);
5339 hba->outstanding_reqs &= ~completed_reqs;
5340 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5341
5342 if (completed_reqs)
5343 __ufshcd_transfer_req_compl(hba, completed_reqs);
5344
5345 return completed_reqs;
5346}
5347
9a47ec7c 5348/**
1f522c50 5349 * ufshcd_transfer_req_compl - handle SCSI and query command completion
9a47ec7c 5350 * @hba: per adapter instance
9333d775
VG
5351 *
5352 * Returns
5353 * IRQ_HANDLED - If interrupt is valid
5354 * IRQ_NONE - If invalid interrupt
9a47ec7c 5355 */
11682523 5356static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
9a47ec7c 5357{
9a47ec7c
YG
5358 /* Resetting interrupt aggregation counters first and reading the
5359 * DOOR_BELL afterward allows us to handle all the completed requests.
5360 * In order to prevent other interrupts starvation the DB is read once
5361 * after reset. The down side of this solution is the possibility of
5362 * false interrupt if device completes another request after resetting
5363 * aggregation and before reading the DB.
5364 */
b638b5eb
AA
5365 if (ufshcd_is_intr_aggr_allowed(hba) &&
5366 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
9a47ec7c
YG
5367 ufshcd_reset_intr_aggr(hba);
5368
c11a1ae9
BVA
5369 if (ufs_fail_completion())
5370 return IRQ_HANDLED;
5371
eaab9b57
BVA
5372 /*
5373 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5374 * do not want polling to trigger spurious interrupt complaints.
5375 */
5376 ufshcd_poll(hba->host, 0);
9a47ec7c 5377
eaab9b57 5378 return IRQ_HANDLED;
9a47ec7c
YG
5379}
5380
7deedfda 5381int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
cd469475
AH
5382{
5383 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5384 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5385 &ee_ctrl_mask);
5386}
5387
7deedfda 5388int ufshcd_write_ee_control(struct ufs_hba *hba)
cd469475
AH
5389{
5390 int err;
5391
5392 mutex_lock(&hba->ee_ctrl_mutex);
5393 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5394 mutex_unlock(&hba->ee_ctrl_mutex);
5395 if (err)
5396 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5397 __func__, err);
5398 return err;
5399}
5400
5401int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
5402 u16 set, u16 clr)
5403{
5404 u16 new_mask, ee_ctrl_mask;
5405 int err = 0;
5406
5407 mutex_lock(&hba->ee_ctrl_mutex);
5408 new_mask = (*mask & ~clr) | set;
5409 ee_ctrl_mask = new_mask | *other_mask;
5410 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5411 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5412 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5413 if (!err) {
5414 hba->ee_ctrl_mask = ee_ctrl_mask;
5415 *mask = new_mask;
5416 }
5417 mutex_unlock(&hba->ee_ctrl_mutex);
5418 return err;
5419}
5420
66ec6d59
SRT
5421/**
5422 * ufshcd_disable_ee - disable exception event
5423 * @hba: per-adapter instance
5424 * @mask: exception event to disable
5425 *
5426 * Disables exception event in the device so that the EVENT_ALERT
5427 * bit is not set.
5428 *
5429 * Returns zero on success, non-zero error value on failure.
5430 */
cd469475 5431static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
66ec6d59 5432{
cd469475 5433 return ufshcd_update_ee_drv_mask(hba, 0, mask);
66ec6d59
SRT
5434}
5435
5436/**
5437 * ufshcd_enable_ee - enable exception event
5438 * @hba: per-adapter instance
5439 * @mask: exception event to enable
5440 *
5441 * Enable corresponding exception event in the device to allow
5442 * device to alert host in critical scenarios.
5443 *
5444 * Returns zero on success, non-zero error value on failure.
5445 */
cd469475 5446static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
66ec6d59 5447{
cd469475 5448 return ufshcd_update_ee_drv_mask(hba, mask, 0);
66ec6d59
SRT
5449}
5450
5451/**
5452 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5453 * @hba: per-adapter instance
5454 *
5455 * Allow device to manage background operations on its own. Enabling
5456 * this might lead to inconsistent latencies during normal data transfers
5457 * as the device is allowed to manage its own way of handling background
5458 * operations.
5459 *
5460 * Returns zero on success, non-zero on failure.
5461 */
5462static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5463{
5464 int err = 0;
5465
5466 if (hba->auto_bkops_enabled)
5467 goto out;
5468
dc3c8d3a 5469 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1f34eedf 5470 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
66ec6d59
SRT
5471 if (err) {
5472 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5473 __func__, err);
5474 goto out;
5475 }
5476
5477 hba->auto_bkops_enabled = true;
7ff5ab47 5478 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
66ec6d59
SRT
5479
5480 /* No need of URGENT_BKOPS exception from the device */
5481 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5482 if (err)
5483 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5484 __func__, err);
5485out:
5486 return err;
5487}
5488
5489/**
5490 * ufshcd_disable_auto_bkops - block device in doing background operations
5491 * @hba: per-adapter instance
5492 *
5493 * Disabling background operations improves command response latency but
5494 * has drawback of device moving into critical state where the device is
5495 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5496 * host is idle so that BKOPS are managed effectively without any negative
5497 * impacts.
5498 *
5499 * Returns zero on success, non-zero on failure.
5500 */
5501static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5502{
5503 int err = 0;
5504
5505 if (!hba->auto_bkops_enabled)
5506 goto out;
5507
5508 /*
5509 * If host assisted BKOPs is to be enabled, make sure
5510 * urgent bkops exception is allowed.
5511 */
5512 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5513 if (err) {
5514 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5515 __func__, err);
5516 goto out;
5517 }
5518
dc3c8d3a 5519 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
1f34eedf 5520 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
66ec6d59
SRT
5521 if (err) {
5522 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5523 __func__, err);
5524 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5525 goto out;
5526 }
5527
5528 hba->auto_bkops_enabled = false;
7ff5ab47 5529 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
24366c2a 5530 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5531out:
5532 return err;
5533}
5534
5535/**
4e768e76 5536 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
66ec6d59
SRT
5537 * @hba: per adapter instance
5538 *
5539 * After a device reset the device may toggle the BKOPS_EN flag
5540 * to default value. The s/w tracking variables should be updated
4e768e76 5541 * as well. This function would change the auto-bkops state based on
5542 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
66ec6d59 5543 */
4e768e76 5544static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
66ec6d59 5545{
4e768e76 5546 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5547 hba->auto_bkops_enabled = false;
5548 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5549 ufshcd_enable_auto_bkops(hba);
5550 } else {
5551 hba->auto_bkops_enabled = true;
5552 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5553 ufshcd_disable_auto_bkops(hba);
5554 }
7b6668d8 5555 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
24366c2a 5556 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5557}
5558
5559static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5560{
5e86ae44 5561 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5562 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5563}
5564
5565/**
57d104c1 5566 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
66ec6d59 5567 * @hba: per-adapter instance
57d104c1 5568 * @status: bkops_status value
66ec6d59 5569 *
57d104c1
SJ
5570 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5571 * flag in the device to permit background operations if the device
5572 * bkops_status is greater than or equal to "status" argument passed to
5573 * this function, disable otherwise.
5574 *
5575 * Returns 0 for success, non-zero in case of failure.
5576 *
5577 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5578 * to know whether auto bkops is enabled or disabled after this function
5579 * returns control to it.
66ec6d59 5580 */
57d104c1
SJ
5581static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5582 enum bkops_status status)
66ec6d59
SRT
5583{
5584 int err;
57d104c1 5585 u32 curr_status = 0;
66ec6d59 5586
57d104c1 5587 err = ufshcd_get_bkops_status(hba, &curr_status);
66ec6d59
SRT
5588 if (err) {
5589 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5590 __func__, err);
5591 goto out;
57d104c1
SJ
5592 } else if (curr_status > BKOPS_STATUS_MAX) {
5593 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5594 __func__, curr_status);
5595 err = -EINVAL;
5596 goto out;
66ec6d59
SRT
5597 }
5598
57d104c1 5599 if (curr_status >= status)
66ec6d59 5600 err = ufshcd_enable_auto_bkops(hba);
57d104c1
SJ
5601 else
5602 err = ufshcd_disable_auto_bkops(hba);
66ec6d59
SRT
5603out:
5604 return err;
5605}
5606
57d104c1
SJ
5607/**
5608 * ufshcd_urgent_bkops - handle urgent bkops exception event
5609 * @hba: per-adapter instance
5610 *
5611 * Enable fBackgroundOpsEn flag in the device to permit background
5612 * operations.
5613 *
5614 * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
5615 * and negative error value for any other failure.
5616 */
5617static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5618{
afdfff59 5619 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
57d104c1
SJ
5620}
5621
66ec6d59
SRT
5622static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5623{
5e86ae44 5624 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5625 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5626}
5627
afdfff59
YG
5628static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5629{
5630 int err;
5631 u32 curr_status = 0;
5632
5633 if (hba->is_urgent_bkops_lvl_checked)
5634 goto enable_auto_bkops;
5635
5636 err = ufshcd_get_bkops_status(hba, &curr_status);
5637 if (err) {
5638 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5639 __func__, err);
5640 goto out;
5641 }
5642
5643 /*
5644 * We are seeing that some devices are raising the urgent bkops
5645 * exception events even when BKOPS status doesn't indicate performace
5646 * impacted or critical. Handle these device by determining their urgent
5647 * bkops status at runtime.
5648 */
5649 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5650 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5651 __func__, curr_status);
5652 /* update the current status as the urgent bkops level */
5653 hba->urgent_bkops_lvl = curr_status;
5654 hba->is_urgent_bkops_lvl_checked = true;
5655 }
5656
5657enable_auto_bkops:
5658 err = ufshcd_enable_auto_bkops(hba);
5659out:
5660 if (err < 0)
5661 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5662 __func__, err);
5663}
5664
322c4b29
AA
5665static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5666{
5667 u32 value;
5668
5669 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5670 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5671 return;
5672
5673 dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5674
5675 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5676
5677 /*
5678 * A placeholder for the platform vendors to add whatever additional
5679 * steps required
5680 */
5681}
5682
3b5f3c0d 5683static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
3d17b9b5 5684{
6f8d5a6a 5685 u8 index;
3b5f3c0d
YH
5686 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5687 UPIU_QUERY_OPCODE_CLEAR_FLAG;
5688
5689 index = ufshcd_wb_get_query_index(hba);
5690 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5691}
5692
5693int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5694{
5695 int ret;
3d17b9b5 5696
79e3520f 5697 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5
AD
5698 return 0;
5699
4cd48995 5700 if (!(enable ^ hba->dev_info.wb_enabled))
3d17b9b5 5701 return 0;
3d17b9b5 5702
3b5f3c0d 5703 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
3d17b9b5 5704 if (ret) {
3b5f3c0d 5705 dev_err(hba->dev, "%s Write Booster %s failed %d\n",
3d17b9b5
AD
5706 __func__, enable ? "enable" : "disable", ret);
5707 return ret;
5708 }
5709
4cd48995 5710 hba->dev_info.wb_enabled = enable;
3b5f3c0d
YH
5711 dev_info(hba->dev, "%s Write Booster %s\n",
5712 __func__, enable ? "enabled" : "disabled");
3d17b9b5
AD
5713
5714 return ret;
5715}
5716
3b5f3c0d 5717static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
3d17b9b5 5718{
3b5f3c0d 5719 int ret;
3d17b9b5 5720
3b5f3c0d
YH
5721 ret = __ufshcd_wb_toggle(hba, set,
5722 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
5723 if (ret) {
5724 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
5725 __func__, set ? "enable" : "disable", ret);
5726 return;
5727 }
5728 dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
5729 __func__, set ? "enabled" : "disabled");
3d17b9b5
AD
5730}
5731
3b5f3c0d 5732static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
3d17b9b5
AD
5733{
5734 int ret;
5735
d3ba622d
BH
5736 if (!ufshcd_is_wb_allowed(hba) ||
5737 hba->dev_info.wb_buf_flush_enabled == enable)
3b5f3c0d 5738 return;
3d17b9b5 5739
3b5f3c0d 5740 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
3d17b9b5 5741 if (ret) {
d3ba622d
BH
5742 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
5743 enable ? "enable" : "disable", ret);
3b5f3c0d 5744 return;
3d17b9b5
AD
5745 }
5746
d3ba622d
BH
5747 hba->dev_info.wb_buf_flush_enabled = enable;
5748
3b5f3c0d
YH
5749 dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
5750 __func__, enable ? "enabled" : "disabled");
3d17b9b5
AD
5751}
5752
5753static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5754 u32 avail_buf)
5755{
5756 u32 cur_buf;
5757 int ret;
e31011ab 5758 u8 index;
3d17b9b5 5759
e31011ab 5760 index = ufshcd_wb_get_query_index(hba);
3d17b9b5
AD
5761 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5762 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
e31011ab 5763 index, 0, &cur_buf);
3d17b9b5
AD
5764 if (ret) {
5765 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5766 __func__, ret);
5767 return false;
5768 }
5769
5770 if (!cur_buf) {
5771 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5772 cur_buf);
5773 return false;
5774 }
d14734ae 5775 /* Let it continue to flush when available buffer exceeds threshold */
a858af9a 5776 return avail_buf < hba->vps->wb_flush_threshold;
3d17b9b5
AD
5777}
5778
f681d107
JC
5779static void ufshcd_wb_force_disable(struct ufs_hba *hba)
5780{
5781 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
5782 ufshcd_wb_toggle_flush(hba, false);
5783
5784 ufshcd_wb_toggle_flush_during_h8(hba, false);
5785 ufshcd_wb_toggle(hba, false);
5786 hba->caps &= ~UFSHCD_CAP_WB_EN;
5787
5788 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
5789}
5790
5791static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
5792{
5793 u32 lifetime;
5794 int ret;
5795 u8 index;
5796
5797 index = ufshcd_wb_get_query_index(hba);
5798 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5799 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
5800 index, 0, &lifetime);
5801 if (ret) {
5802 dev_err(hba->dev,
5803 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
5804 __func__, ret);
5805 return false;
5806 }
5807
5808 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
5809 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
5810 __func__, lifetime);
5811 return false;
5812 }
5813
5814 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
5815 __func__, lifetime);
5816
5817 return true;
5818}
5819
51dd905b 5820static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
3d17b9b5
AD
5821{
5822 int ret;
5823 u32 avail_buf;
e31011ab 5824 u8 index;
3d17b9b5 5825
79e3520f 5826 if (!ufshcd_is_wb_allowed(hba))
3d17b9b5 5827 return false;
f681d107
JC
5828
5829 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
5830 ufshcd_wb_force_disable(hba);
5831 return false;
5832 }
5833
3d17b9b5
AD
5834 /*
5835 * The ufs device needs the vcc to be ON to flush.
5836 * With user-space reduction enabled, it's enough to enable flush
5837 * by checking only the available buffer. The threshold
5838 * defined here is > 90% full.
5839 * With user-space preserved enabled, the current-buffer
5840 * should be checked too because the wb buffer size can reduce
5841 * when disk tends to be full. This info is provided by current
5842 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5843 * keeping vcc on when current buffer is empty.
5844 */
e31011ab 5845 index = ufshcd_wb_get_query_index(hba);
3d17b9b5
AD
5846 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5847 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
e31011ab 5848 index, 0, &avail_buf);
3d17b9b5
AD
5849 if (ret) {
5850 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5851 __func__, ret);
5852 return false;
5853 }
5854
a858af9a
BVA
5855 if (!hba->dev_info.b_presrv_uspc_en)
5856 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
3d17b9b5
AD
5857
5858 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5859}
5860
51dd905b
SC
5861static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5862{
5863 struct ufs_hba *hba = container_of(to_delayed_work(work),
5864 struct ufs_hba,
5865 rpm_dev_flush_recheck_work);
5866 /*
5867 * To prevent unnecessary VCC power drain after device finishes
5868 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5869 * after a certain delay to recheck the threshold by next runtime
5870 * suspend.
5871 */
b294ff3e
AD
5872 ufshcd_rpm_get_sync(hba);
5873 ufshcd_rpm_put_sync(hba);
51dd905b
SC
5874}
5875
66ec6d59
SRT
5876/**
5877 * ufshcd_exception_event_handler - handle exceptions raised by device
5878 * @work: pointer to work data
5879 *
5880 * Read bExceptionEventStatus attribute from the device and handle the
5881 * exception event accordingly.
5882 */
5883static void ufshcd_exception_event_handler(struct work_struct *work)
5884{
5885 struct ufs_hba *hba;
5886 int err;
5887 u32 status = 0;
5888 hba = container_of(work, struct ufs_hba, eeh_work);
5889
03e1d28e 5890 ufshcd_scsi_block_requests(hba);
66ec6d59
SRT
5891 err = ufshcd_get_ee_status(hba, &status);
5892 if (err) {
5893 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5894 __func__, err);
5895 goto out;
5896 }
5897
f7733625
AH
5898 trace_ufshcd_exception_event(dev_name(hba->dev), status);
5899
cd469475 5900 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
afdfff59
YG
5901 ufshcd_bkops_exception_event_handler(hba);
5902
322c4b29
AA
5903 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
5904 ufshcd_temp_exception_event_handler(hba, status);
5905
7deedfda 5906 ufs_debugfs_exception_event(hba, status);
66ec6d59 5907out:
03e1d28e 5908 ufshcd_scsi_unblock_requests(hba);
66ec6d59
SRT
5909}
5910
9a47ec7c
YG
5911/* Complete requests that have door-bell cleared */
5912static void ufshcd_complete_requests(struct ufs_hba *hba)
5913{
11682523 5914 ufshcd_transfer_req_compl(hba);
9a47ec7c
YG
5915 ufshcd_tmc_handler(hba);
5916}
5917
583fa62d
YG
5918/**
5919 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5920 * to recover from the DL NAC errors or not.
5921 * @hba: per-adapter instance
5922 *
5923 * Returns true if error handling is required, false otherwise
5924 */
5925static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5926{
5927 unsigned long flags;
5928 bool err_handling = true;
5929
5930 spin_lock_irqsave(hba->host->host_lock, flags);
5931 /*
5932 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
5933 * device fatal error and/or DL NAC & REPLAY timeout errors.
5934 */
5935 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5936 goto out;
5937
5938 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5939 ((hba->saved_err & UIC_ERROR) &&
5940 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5941 goto out;
5942
5943 if ((hba->saved_err & UIC_ERROR) &&
5944 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5945 int err;
5946 /*
5947 * wait for 50ms to see if we can get any other errors or not.
5948 */
5949 spin_unlock_irqrestore(hba->host->host_lock, flags);
5950 msleep(50);
5951 spin_lock_irqsave(hba->host->host_lock, flags);
5952
5953 /*
5954 * now check if we have got any other severe errors other than
5955 * DL NAC error?
5956 */
5957 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5958 ((hba->saved_err & UIC_ERROR) &&
5959 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5960 goto out;
5961
5962 /*
5963 * As DL NAC is the only error received so far, send out NOP
5964 * command to confirm if link is still active or not.
5965 * - If we don't get any response then do error recovery.
5966 * - If we get response then clear the DL NAC error bit.
5967 */
5968
5969 spin_unlock_irqrestore(hba->host->host_lock, flags);
5970 err = ufshcd_verify_dev_init(hba);
5971 spin_lock_irqsave(hba->host->host_lock, flags);
5972
5973 if (err)
5974 goto out;
5975
5976 /* Link seems to be alive hence ignore the DL NAC errors */
5977 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5978 hba->saved_err &= ~UIC_ERROR;
5979 /* clear NAC error */
5980 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
b0008625 5981 if (!hba->saved_uic_err)
583fa62d 5982 err_handling = false;
583fa62d
YG
5983 }
5984out:
5985 spin_unlock_irqrestore(hba->host->host_lock, flags);
5986 return err_handling;
5987}
5988
88b09900
AH
5989/* host lock must be held before calling this func */
5990static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5991{
5992 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5993 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5994}
5995
267a59f6 5996void ufshcd_schedule_eh_work(struct ufs_hba *hba)
88b09900 5997{
267a59f6
BVA
5998 lockdep_assert_held(hba->host->host_lock);
5999
88b09900
AH
6000 /* handle fatal errors only when link is not in error state */
6001 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6002 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6003 ufshcd_is_saved_err_fatal(hba))
6004 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6005 else
6006 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6007 queue_work(hba->eh_wq, &hba->eh_work);
6008 }
6009}
6010
348e1bc5
SC
6011static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6012{
6013 down_write(&hba->clk_scaling_lock);
6014 hba->clk_scaling.is_allowed = allow;
6015 up_write(&hba->clk_scaling_lock);
6016}
6017
6018static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6019{
6020 if (suspend) {
6021 if (hba->clk_scaling.is_enabled)
6022 ufshcd_suspend_clkscaling(hba);
6023 ufshcd_clk_scaling_allow(hba, false);
6024 } else {
6025 ufshcd_clk_scaling_allow(hba, true);
6026 if (hba->clk_scaling.is_enabled)
6027 ufshcd_resume_clkscaling(hba);
6028 }
6029}
6030
c72e79c0
CG
6031static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6032{
b294ff3e
AD
6033 ufshcd_rpm_get_sync(hba);
6034 if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
6035 hba->is_sys_suspended) {
88a92d6a
CG
6036 enum ufs_pm_op pm_op;
6037
c72e79c0 6038 /*
b294ff3e 6039 * Don't assume anything of resume, if
c72e79c0
CG
6040 * resume fails, irq and clocks can be OFF, and powers
6041 * can be OFF or in LPM.
6042 */
6043 ufshcd_setup_hba_vreg(hba, true);
6044 ufshcd_enable_irq(hba);
6045 ufshcd_setup_vreg(hba, true);
6046 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6047 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6048 ufshcd_hold(hba, false);
6049 if (!ufshcd_is_clkgating_allowed(hba))
6050 ufshcd_setup_clocks(hba, true);
6051 ufshcd_release(hba);
88a92d6a
CG
6052 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6053 ufshcd_vops_resume(hba, pm_op);
c72e79c0
CG
6054 } else {
6055 ufshcd_hold(hba, false);
348e1bc5
SC
6056 if (ufshcd_is_clkscaling_supported(hba) &&
6057 hba->clk_scaling.is_enabled)
c72e79c0 6058 ufshcd_suspend_clkscaling(hba);
348e1bc5 6059 ufshcd_clk_scaling_allow(hba, false);
c72e79c0 6060 }
aa53f580
CG
6061 ufshcd_scsi_block_requests(hba);
6062 /* Drain ufshcd_queuecommand() */
5675c381 6063 synchronize_rcu();
aa53f580 6064 cancel_work_sync(&hba->eeh_work);
c72e79c0
CG
6065}
6066
6067static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6068{
aa53f580 6069 ufshcd_scsi_unblock_requests(hba);
c72e79c0 6070 ufshcd_release(hba);
348e1bc5
SC
6071 if (ufshcd_is_clkscaling_supported(hba))
6072 ufshcd_clk_scaling_suspend(hba, false);
b294ff3e 6073 ufshcd_rpm_put(hba);
c72e79c0
CG
6074}
6075
6076static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6077{
9cd20d3f 6078 return (!hba->is_powered || hba->shutting_down ||
b294ff3e 6079 !hba->sdev_ufs_device ||
9cd20d3f 6080 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
c72e79c0 6081 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
9cd20d3f 6082 ufshcd_is_link_broken(hba))));
c72e79c0
CG
6083}
6084
6085#ifdef CONFIG_PM
6086static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6087{
6088 struct Scsi_Host *shost = hba->host;
6089 struct scsi_device *sdev;
6090 struct request_queue *q;
6091 int ret;
6092
88a92d6a 6093 hba->is_sys_suspended = false;
c72e79c0 6094 /*
b294ff3e 6095 * Set RPM status of wlun device to RPM_ACTIVE,
c72e79c0
CG
6096 * this also clears its runtime error.
6097 */
b294ff3e
AD
6098 ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
6099
6100 /* hba device might have a runtime error otherwise */
6101 if (ret)
6102 ret = pm_runtime_set_active(hba->dev);
c72e79c0 6103 /*
b294ff3e
AD
6104 * If wlun device had runtime error, we also need to resume those
6105 * consumer scsi devices in case any of them has failed to be
6106 * resumed due to supplier runtime resume failure. This is to unblock
c72e79c0
CG
6107 * blk_queue_enter in case there are bios waiting inside it.
6108 */
6109 if (!ret) {
6110 shost_for_each_device(sdev, shost) {
6111 q = sdev->request_queue;
6112 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6113 q->rpm_status == RPM_SUSPENDING))
6114 pm_request_resume(q->dev);
6115 }
6116 }
6117}
6118#else
6119static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6120{
6121}
6122#endif
6123
2355b66e
CG
6124static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6125{
6126 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6127 u32 mode;
6128
6129 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6130
6131 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6132 return true;
6133
6134 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6135 return true;
6136
6137 return false;
6138}
6139
7a3e97b0 6140/**
e8e7f271 6141 * ufshcd_err_handler - handle UFS errors that require s/w attention
88b09900 6142 * @work: pointer to work structure
7a3e97b0 6143 */
88b09900 6144static void ufshcd_err_handler(struct work_struct *work)
7a3e97b0 6145{
87bf6a6b 6146 int retries = MAX_ERR_HANDLER_RETRIES;
88b09900 6147 struct ufs_hba *hba;
e8e7f271 6148 unsigned long flags;
87bf6a6b
AH
6149 bool needs_restore;
6150 bool needs_reset;
6151 bool err_xfer;
6152 bool err_tm;
6153 int pmc_err;
e8e7f271
SRT
6154 int tag;
6155
88b09900
AH
6156 hba = container_of(work, struct ufs_hba, eh_work);
6157
4693fad7
BVA
6158 dev_info(hba->dev,
6159 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6160 __func__, ufshcd_state_name[hba->ufshcd_state],
6161 hba->is_powered, hba->shutting_down, hba->saved_err,
6162 hba->saved_uic_err, hba->force_reset,
6163 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6164
9cd20d3f 6165 down(&hba->host_sem);
e8e7f271 6166 spin_lock_irqsave(hba->host->host_lock, flags);
c72e79c0 6167 if (ufshcd_err_handling_should_stop(hba)) {
4db7a236
CG
6168 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6169 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6170 spin_unlock_irqrestore(hba->host->host_lock, flags);
9cd20d3f 6171 up(&hba->host_sem);
4db7a236
CG
6172 return;
6173 }
6174 ufshcd_set_eh_in_progress(hba);
6175 spin_unlock_irqrestore(hba->host->host_lock, flags);
c72e79c0 6176 ufshcd_err_handling_prepare(hba);
a45f9371
CG
6177 /* Complete requests that have door-bell cleared by h/w */
6178 ufshcd_complete_requests(hba);
e8e7f271 6179 spin_lock_irqsave(hba->host->host_lock, flags);
87bf6a6b
AH
6180again:
6181 needs_restore = false;
6182 needs_reset = false;
6183 err_xfer = false;
6184 err_tm = false;
6185
aa53f580
CG
6186 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6187 hba->ufshcd_state = UFSHCD_STATE_RESET;
88a92d6a
CG
6188 /*
6189 * A full reset and restore might have happened after preparation
6190 * is finished, double check whether we should stop.
6191 */
6192 if (ufshcd_err_handling_should_stop(hba))
6193 goto skip_err_handling;
6194
583fa62d
YG
6195 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6196 bool ret;
6197
6198 spin_unlock_irqrestore(hba->host->host_lock, flags);
6199 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6200 ret = ufshcd_quirk_dl_nac_errors(hba);
6201 spin_lock_irqsave(hba->host->host_lock, flags);
88a92d6a 6202 if (!ret && ufshcd_err_handling_should_stop(hba))
583fa62d
YG
6203 goto skip_err_handling;
6204 }
4db7a236 6205
2355b66e
CG
6206 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6207 (hba->saved_uic_err &&
6208 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
c3be8d1e
CG
6209 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6210
6211 spin_unlock_irqrestore(hba->host->host_lock, flags);
6212 ufshcd_print_host_state(hba);
6213 ufshcd_print_pwr_info(hba);
e965e5e0 6214 ufshcd_print_evt_hist(hba);
c3be8d1e
CG
6215 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6216 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6217 spin_lock_irqsave(hba->host->host_lock, flags);
6218 }
6219
9a47ec7c
YG
6220 /*
6221 * if host reset is required then skip clearing the pending
2df74b69
CG
6222 * transfers forcefully because they will get cleared during
6223 * host reset and restore
9a47ec7c 6224 */
88a92d6a
CG
6225 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6226 ufshcd_is_saved_err_fatal(hba) ||
6227 ((hba->saved_err & UIC_ERROR) &&
6228 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6229 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6230 needs_reset = true;
2355b66e 6231 goto do_reset;
88a92d6a 6232 }
9a47ec7c 6233
2355b66e
CG
6234 /*
6235 * If LINERESET was caught, UFS might have been put to PWM mode,
6236 * check if power mode restore is needed.
6237 */
6238 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6239 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6240 if (!hba->saved_uic_err)
6241 hba->saved_err &= ~UIC_ERROR;
6242 spin_unlock_irqrestore(hba->host->host_lock, flags);
6243 if (ufshcd_is_pwr_mode_restore_needed(hba))
6244 needs_restore = true;
6245 spin_lock_irqsave(hba->host->host_lock, flags);
6246 if (!hba->saved_err && !needs_restore)
6247 goto skip_err_handling;
6248 }
9a47ec7c 6249
2355b66e 6250 hba->silence_err_logs = true;
9a47ec7c
YG
6251 /* release lock as clear command might sleep */
6252 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 6253 /* Clear pending transfer requests */
9a47ec7c 6254 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
307348f6 6255 if (ufshcd_try_to_abort_task(hba, tag)) {
9a47ec7c
YG
6256 err_xfer = true;
6257 goto lock_skip_pending_xfer_clear;
6258 }
4693fad7
BVA
6259 dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
6260 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
9a47ec7c 6261 }
e8e7f271
SRT
6262
6263 /* Clear pending task management requests */
9a47ec7c
YG
6264 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6265 if (ufshcd_clear_tm_cmd(hba, tag)) {
6266 err_tm = true;
6267 goto lock_skip_pending_xfer_clear;
6268 }
6269 }
e8e7f271 6270
9a47ec7c 6271lock_skip_pending_xfer_clear:
11682523
BVA
6272 /* Complete the requests that are cleared by s/w */
6273 ufshcd_complete_requests(hba);
9a47ec7c 6274
a45f9371
CG
6275 spin_lock_irqsave(hba->host->host_lock, flags);
6276 hba->silence_err_logs = false;
2355b66e 6277 if (err_xfer || err_tm) {
9a47ec7c 6278 needs_reset = true;
2355b66e
CG
6279 goto do_reset;
6280 }
9a47ec7c 6281
2355b66e
CG
6282 /*
6283 * After all reqs and tasks are cleared from doorbell,
6284 * now it is safe to retore power mode.
6285 */
6286 if (needs_restore) {
6287 spin_unlock_irqrestore(hba->host->host_lock, flags);
6288 /*
6289 * Hold the scaling lock just in case dev cmds
6290 * are sent via bsg and/or sysfs.
6291 */
6292 down_write(&hba->clk_scaling_lock);
6293 hba->force_pmc = true;
6294 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6295 if (pmc_err) {
6296 needs_reset = true;
6297 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6298 __func__, pmc_err);
6299 }
6300 hba->force_pmc = false;
6301 ufshcd_print_pwr_info(hba);
6302 up_write(&hba->clk_scaling_lock);
6303 spin_lock_irqsave(hba->host->host_lock, flags);
6304 }
9a47ec7c 6305
2355b66e 6306do_reset:
e8e7f271 6307 /* Fatal errors need reset */
9a47ec7c 6308 if (needs_reset) {
87bf6a6b
AH
6309 int err;
6310
4db7a236 6311 hba->force_reset = false;
9a47ec7c 6312 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 6313 err = ufshcd_reset_and_restore(hba);
4db7a236
CG
6314 if (err)
6315 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6316 __func__, err);
c72e79c0
CG
6317 else
6318 ufshcd_recover_pm_error(hba);
9a47ec7c 6319 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 6320 }
9a47ec7c 6321
583fa62d 6322skip_err_handling:
9a47ec7c 6323 if (!needs_reset) {
4db7a236
CG
6324 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6325 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
9a47ec7c
YG
6326 if (hba->saved_err || hba->saved_uic_err)
6327 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6328 __func__, hba->saved_err, hba->saved_uic_err);
6329 }
87bf6a6b
AH
6330 /* Exit in an operational state or dead */
6331 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6332 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6333 if (--retries)
6334 goto again;
6335 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6336 }
e8e7f271 6337 ufshcd_clear_eh_in_progress(hba);
9a47ec7c 6338 spin_unlock_irqrestore(hba->host->host_lock, flags);
c72e79c0 6339 ufshcd_err_handling_unprepare(hba);
9cd20d3f 6340 up(&hba->host_sem);
4693fad7
BVA
6341
6342 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6343 ufshcd_state_name[hba->ufshcd_state]);
7a3e97b0
SY
6344}
6345
6346/**
e8e7f271
SRT
6347 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6348 * @hba: per-adapter instance
9333d775
VG
6349 *
6350 * Returns
6351 * IRQ_HANDLED - If interrupt is valid
6352 * IRQ_NONE - If invalid interrupt
7a3e97b0 6353 */
9333d775 6354static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
7a3e97b0
SY
6355{
6356 u32 reg;
9333d775 6357 irqreturn_t retval = IRQ_NONE;
7a3e97b0 6358
2355b66e 6359 /* PHY layer error */
fb7b45f0 6360 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
fb7b45f0 6361 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
2355b66e 6362 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6363 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
fb7b45f0
DR
6364 /*
6365 * To know whether this error is fatal or not, DB timeout
6366 * must be checked but this error is handled separately.
6367 */
2355b66e
CG
6368 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6369 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6370 __func__);
6371
6372 /* Got a LINERESET indication. */
6373 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6374 struct uic_command *cmd = NULL;
6375
6376 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6377 if (hba->uic_async_done && hba->active_uic_cmd)
6378 cmd = hba->active_uic_cmd;
6379 /*
6380 * Ignore the LINERESET during power mode change
6381 * operation via DME_SET command.
6382 */
6383 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6384 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6385 }
9333d775 6386 retval |= IRQ_HANDLED;
ff8e20c6 6387 }
fb7b45f0 6388
e8e7f271
SRT
6389 /* PA_INIT_ERROR is fatal and needs UIC reset */
6390 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
9333d775
VG
6391 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6392 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6393 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
ff8e20c6 6394
9333d775
VG
6395 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6396 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6397 else if (hba->dev_quirks &
6398 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6399 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6400 hba->uic_error |=
6401 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6402 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6403 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6404 }
6405 retval |= IRQ_HANDLED;
583fa62d 6406 }
e8e7f271
SRT
6407
6408 /* UIC NL/TL/DME errors needs software retry */
6409 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
9333d775
VG
6410 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6411 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6412 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
e8e7f271 6413 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
9333d775 6414 retval |= IRQ_HANDLED;
ff8e20c6 6415 }
e8e7f271
SRT
6416
6417 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
9333d775
VG
6418 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6419 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
e965e5e0 6420 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
e8e7f271 6421 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
9333d775 6422 retval |= IRQ_HANDLED;
ff8e20c6 6423 }
e8e7f271
SRT
6424
6425 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
9333d775
VG
6426 if ((reg & UIC_DME_ERROR) &&
6427 (reg & UIC_DME_ERROR_CODE_MASK)) {
e965e5e0 6428 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
e8e7f271 6429 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
9333d775 6430 retval |= IRQ_HANDLED;
ff8e20c6 6431 }
e8e7f271
SRT
6432
6433 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6434 __func__, hba->uic_error);
9333d775 6435 return retval;
e8e7f271
SRT
6436}
6437
6438/**
6439 * ufshcd_check_errors - Check for errors that need s/w attention
6440 * @hba: per-adapter instance
a45f9371 6441 * @intr_status: interrupt status generated by the controller
9333d775
VG
6442 *
6443 * Returns
6444 * IRQ_HANDLED - If interrupt is valid
6445 * IRQ_NONE - If invalid interrupt
e8e7f271 6446 */
a45f9371 6447static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
e8e7f271
SRT
6448{
6449 bool queue_eh_work = false;
9333d775 6450 irqreturn_t retval = IRQ_NONE;
e8e7f271 6451
a45f9371
CG
6452 spin_lock(hba->host->host_lock);
6453 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6454
d3c615bf 6455 if (hba->errors & INT_FATAL_ERRORS) {
e965e5e0
SC
6456 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6457 hba->errors);
e8e7f271 6458 queue_eh_work = true;
d3c615bf 6459 }
7a3e97b0
SY
6460
6461 if (hba->errors & UIC_ERROR) {
e8e7f271 6462 hba->uic_error = 0;
9333d775 6463 retval = ufshcd_update_uic_error(hba);
e8e7f271
SRT
6464 if (hba->uic_error)
6465 queue_eh_work = true;
7a3e97b0 6466 }
e8e7f271 6467
82174440
SC
6468 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6469 dev_err(hba->dev,
6470 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6471 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6472 "Enter" : "Exit",
6473 hba->errors, ufshcd_get_upmcrs(hba));
e965e5e0 6474 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
d3c615bf 6475 hba->errors);
4db7a236 6476 ufshcd_set_link_broken(hba);
82174440
SC
6477 queue_eh_work = true;
6478 }
6479
e8e7f271 6480 if (queue_eh_work) {
9a47ec7c
YG
6481 /*
6482 * update the transfer error masks to sticky bits, let's do this
6483 * irrespective of current ufshcd_state.
6484 */
6485 hba->saved_err |= hba->errors;
6486 hba->saved_uic_err |= hba->uic_error;
6487
4db7a236 6488 /* dump controller state before resetting */
ace3804b
CG
6489 if ((hba->saved_err &
6490 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
2355b66e
CG
6491 (hba->saved_uic_err &&
6492 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
4db7a236 6493 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
66cc820f
DR
6494 __func__, hba->saved_err,
6495 hba->saved_uic_err);
c3be8d1e
CG
6496 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6497 "host_regs: ");
4db7a236 6498 ufshcd_print_pwr_info(hba);
e8e7f271 6499 }
88b09900 6500 ufshcd_schedule_eh_work(hba);
9333d775 6501 retval |= IRQ_HANDLED;
3441da7d 6502 }
e8e7f271
SRT
6503 /*
6504 * if (!queue_eh_work) -
6505 * Other errors are either non-fatal where host recovers
6506 * itself without s/w intervention or errors that will be
6507 * handled by the SCSI core layer.
6508 */
a45f9371
CG
6509 hba->errors = 0;
6510 hba->uic_error = 0;
6511 spin_unlock(hba->host->host_lock);
9333d775 6512 return retval;
7a3e97b0
SY
6513}
6514
6515/**
6516 * ufshcd_tmc_handler - handle task management function completion
6517 * @hba: per adapter instance
9333d775
VG
6518 *
6519 * Returns
6520 * IRQ_HANDLED - If interrupt is valid
6521 * IRQ_NONE - If invalid interrupt
7a3e97b0 6522 */
9333d775 6523static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
7a3e97b0 6524{
f5ef336f
AH
6525 unsigned long flags, pending, issued;
6526 irqreturn_t ret = IRQ_NONE;
6527 int tag;
6528
a45f9371 6529 spin_lock_irqsave(hba->host->host_lock, flags);
5cb37a26 6530 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
f5ef336f
AH
6531 issued = hba->outstanding_tasks & ~pending;
6532 for_each_set_bit(tag, &issued, hba->nutmrs) {
6533 struct request *req = hba->tmf_rqs[tag];
6534 struct completion *c = req->end_io_data;
6535
6536 complete(c);
6537 ret = IRQ_HANDLED;
6538 }
a45f9371
CG
6539 spin_unlock_irqrestore(hba->host->host_lock, flags);
6540
f5ef336f 6541 return ret;
7a3e97b0
SY
6542}
6543
6544/**
6545 * ufshcd_sl_intr - Interrupt service routine
6546 * @hba: per adapter instance
6547 * @intr_status: contains interrupts generated by the controller
9333d775
VG
6548 *
6549 * Returns
6550 * IRQ_HANDLED - If interrupt is valid
6551 * IRQ_NONE - If invalid interrupt
7a3e97b0 6552 */
9333d775 6553static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
7a3e97b0 6554{
9333d775
VG
6555 irqreturn_t retval = IRQ_NONE;
6556
53b3d9c3 6557 if (intr_status & UFSHCD_UIC_MASK)
9333d775 6558 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
7a3e97b0 6559
a45f9371
CG
6560 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6561 retval |= ufshcd_check_errors(hba, intr_status);
6562
7a3e97b0 6563 if (intr_status & UTP_TASK_REQ_COMPL)
9333d775 6564 retval |= ufshcd_tmc_handler(hba);
7a3e97b0
SY
6565
6566 if (intr_status & UTP_TRANSFER_REQ_COMPL)
11682523 6567 retval |= ufshcd_transfer_req_compl(hba);
9333d775
VG
6568
6569 return retval;
7a3e97b0
SY
6570}
6571
6572/**
6573 * ufshcd_intr - Main interrupt service routine
6574 * @irq: irq number
6575 * @__hba: pointer to adapter instance
6576 *
9333d775
VG
6577 * Returns
6578 * IRQ_HANDLED - If interrupt is valid
6579 * IRQ_NONE - If invalid interrupt
7a3e97b0
SY
6580 */
6581static irqreturn_t ufshcd_intr(int irq, void *__hba)
6582{
127d5f7c 6583 u32 intr_status, enabled_intr_status = 0;
7a3e97b0
SY
6584 irqreturn_t retval = IRQ_NONE;
6585 struct ufs_hba *hba = __hba;
7f6ba4f1 6586 int retries = hba->nutrs;
7a3e97b0 6587
b873a275 6588 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
3f8af604
CG
6589 hba->ufs_stats.last_intr_status = intr_status;
6590 hba->ufs_stats.last_intr_ts = ktime_get();
7a3e97b0 6591
7f6ba4f1
VG
6592 /*
6593 * There could be max of hba->nutrs reqs in flight and in worst case
6594 * if the reqs get finished 1 by 1 after the interrupt status is
6595 * read, make sure we handle them by checking the interrupt status
6596 * again in a loop until we process all of the reqs before returning.
6597 */
127d5f7c 6598 while (intr_status && retries--) {
7f6ba4f1
VG
6599 enabled_intr_status =
6600 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
60ec3755 6601 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
9333d775
VG
6602 if (enabled_intr_status)
6603 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7f6ba4f1
VG
6604
6605 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
127d5f7c 6606 }
d75f7fe4 6607
eeb1b55b 6608 if (enabled_intr_status && retval == IRQ_NONE &&
40d2fd05
BVA
6609 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6610 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
eeb1b55b
JK
6611 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6612 __func__,
6613 intr_status,
6614 hba->ufs_stats.last_intr_status,
6615 enabled_intr_status);
9333d775
VG
6616 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6617 }
6618
7a3e97b0
SY
6619 return retval;
6620}
6621
e2933132
SRT
6622static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6623{
6624 int err = 0;
6625 u32 mask = 1 << tag;
6626 unsigned long flags;
6627
6628 if (!test_bit(tag, &hba->outstanding_tasks))
6629 goto out;
6630
6631 spin_lock_irqsave(hba->host->host_lock, flags);
1399c5b0 6632 ufshcd_utmrl_clear(hba, tag);
e2933132
SRT
6633 spin_unlock_irqrestore(hba->host->host_lock, flags);
6634
6635 /* poll for max. 1 sec to clear door bell register by h/w */
6636 err = ufshcd_wait_for_register(hba,
6637 REG_UTP_TASK_REQ_DOOR_BELL,
5cac1095 6638 mask, 0, 1000, 1000);
4693fad7
BVA
6639
6640 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
6641 tag, err ? "succeeded" : "failed");
6642
e2933132
SRT
6643out:
6644 return err;
6645}
6646
c6049cd9
CH
6647static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6648 struct utp_task_req_desc *treq, u8 tm_function)
7a3e97b0 6649{
69a6c269 6650 struct request_queue *q = hba->tmf_queue;
c6049cd9 6651 struct Scsi_Host *host = hba->host;
69a6c269
BVA
6652 DECLARE_COMPLETION_ONSTACK(wait);
6653 struct request *req;
7a3e97b0 6654 unsigned long flags;
4b42d557 6655 int task_tag, err;
7a3e97b0 6656
e2933132 6657 /*
0bf6d96c 6658 * blk_mq_alloc_request() is used here only to get a free tag.
e2933132 6659 */
0bf6d96c 6660 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
eeb1b55b
JK
6661 if (IS_ERR(req))
6662 return PTR_ERR(req);
6663
69a6c269 6664 req->end_io_data = &wait;
1ab27c9c 6665 ufshcd_hold(hba, false);
7a3e97b0 6666
e2933132 6667 spin_lock_irqsave(host->host_lock, flags);
7a3e97b0 6668
4b42d557 6669 task_tag = req->tag;
eaab9b57
BVA
6670 WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
6671 task_tag);
f5ef336f 6672 hba->tmf_rqs[req->tag] = req;
1352eec8 6673 treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
c6049cd9 6674
4b42d557
CG
6675 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6676 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
d2877be4 6677
7a3e97b0 6678 /* send command to the controller */
4b42d557 6679 __set_bit(task_tag, &hba->outstanding_tasks);
897efe62 6680
4b42d557 6681 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
ad1a1b9c
GB
6682 /* Make sure that doorbell is committed immediately */
6683 wmb();
7a3e97b0
SY
6684
6685 spin_unlock_irqrestore(host->host_lock, flags);
6686
28fa68fc 6687 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6667e6d9 6688
7a3e97b0 6689 /* wait until the task management command is completed */
69a6c269 6690 err = wait_for_completion_io_timeout(&wait,
e2933132 6691 msecs_to_jiffies(TM_CMD_TIMEOUT));
7a3e97b0 6692 if (!err) {
28fa68fc 6693 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
e2933132
SRT
6694 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6695 __func__, tm_function);
4b42d557
CG
6696 if (ufshcd_clear_tm_cmd(hba, task_tag))
6697 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6698 __func__, task_tag);
e2933132
SRT
6699 err = -ETIMEDOUT;
6700 } else {
c6049cd9 6701 err = 0;
4b42d557 6702 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
c6049cd9 6703
28fa68fc 6704 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7a3e97b0 6705 }
e2933132 6706
b557217c 6707 spin_lock_irqsave(hba->host->host_lock, flags);
f5ef336f 6708 hba->tmf_rqs[req->tag] = NULL;
4b42d557 6709 __clear_bit(task_tag, &hba->outstanding_tasks);
b557217c
SC
6710 spin_unlock_irqrestore(hba->host->host_lock, flags);
6711
4b42d557 6712 ufshcd_release(hba);
0bf6d96c 6713 blk_mq_free_request(req);
e2933132 6714
7a3e97b0
SY
6715 return err;
6716}
6717
c6049cd9
CH
6718/**
6719 * ufshcd_issue_tm_cmd - issues task management commands to controller
6720 * @hba: per adapter instance
6721 * @lun_id: LUN ID to which TM command is sent
6722 * @task_id: task ID to which the TM command is applicable
6723 * @tm_function: task management function opcode
6724 * @tm_response: task management service response return value
6725 *
6726 * Returns non-zero value on error, zero on success.
6727 */
6728static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6729 u8 tm_function, u8 *tm_response)
6730{
6731 struct utp_task_req_desc treq = { { 0 }, };
957d63e7
BVA
6732 enum utp_ocs ocs_value;
6733 int err;
c6049cd9
CH
6734
6735 /* Configure task request descriptor */
6736 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6737 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6738
6739 /* Configure task request UPIU */
1352eec8 6740 treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
c6049cd9 6741 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
1352eec8 6742 treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
c6049cd9
CH
6743
6744 /*
6745 * The host shall provide the same value for LUN field in the basic
6746 * header and for Input Parameter.
6747 */
1352eec8
GS
6748 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
6749 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
c6049cd9
CH
6750
6751 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6752 if (err == -ETIMEDOUT)
6753 return err;
6754
6755 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6756 if (ocs_value != OCS_SUCCESS)
6757 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6758 __func__, ocs_value);
6759 else if (tm_response)
1352eec8 6760 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
c6049cd9
CH
6761 MASK_TM_SERVICE_RESP;
6762 return err;
6763}
6764
5e0a86ee
AA
6765/**
6766 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6767 * @hba: per-adapter instance
6768 * @req_upiu: upiu request
6769 * @rsp_upiu: upiu reply
5e0a86ee
AA
6770 * @desc_buff: pointer to descriptor buffer, NULL if NA
6771 * @buff_len: descriptor size, 0 if NA
d0e9760d 6772 * @cmd_type: specifies the type (NOP, Query...)
5e0a86ee
AA
6773 * @desc_op: descriptor operation
6774 *
6775 * Those type of requests uses UTP Transfer Request Descriptor - utrd.
6776 * Therefore, it "rides" the device management infrastructure: uses its tag and
6777 * tasks work queues.
6778 *
6779 * Since there is only one available tag for device management commands,
6780 * the caller is expected to hold the hba->dev_cmd.lock mutex.
6781 */
6782static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6783 struct utp_upiu_req *req_upiu,
6784 struct utp_upiu_req *rsp_upiu,
6785 u8 *desc_buff, int *buff_len,
7f674c38 6786 enum dev_cmd_type cmd_type,
5e0a86ee
AA
6787 enum query_opcode desc_op)
6788{
8a686f26 6789 DECLARE_COMPLETION_ONSTACK(wait);
945c3cca 6790 const u32 tag = hba->reserved_slot;
5e0a86ee
AA
6791 struct ufshcd_lrb *lrbp;
6792 int err = 0;
a23064c4 6793 u8 upiu_flags;
5e0a86ee 6794
945c3cca
BVA
6795 /* Protects use of hba->reserved_slot. */
6796 lockdep_assert_held(&hba->dev_cmd.lock);
5e0a86ee 6797
945c3cca 6798 down_read(&hba->clk_scaling_lock);
5e0a86ee 6799
a45f9371 6800 lrbp = &hba->lrb[tag];
7a7e66c6 6801 WARN_ON(lrbp->cmd);
5e0a86ee 6802 lrbp->cmd = NULL;
5e0a86ee
AA
6803 lrbp->sense_buffer = NULL;
6804 lrbp->task_tag = tag;
6805 lrbp->lun = 0;
6806 lrbp->intr_cmd = true;
df043c74 6807 ufshcd_prepare_lrbp_crypto(NULL, lrbp);
5e0a86ee
AA
6808 hba->dev_cmd.type = cmd_type;
6809
51428818 6810 if (hba->ufs_version <= ufshci_version(1, 1))
5e0a86ee 6811 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
51428818 6812 else
5e0a86ee 6813 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5e0a86ee
AA
6814
6815 /* update the task tag in the request upiu */
6816 req_upiu->header.dword_0 |= cpu_to_be32(tag);
6817
6818 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6819
6820 /* just copy the upiu request as it is */
6821 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6822 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6823 /* The Data Segment Area is optional depending upon the query
6824 * function value. for WRITE DESCRIPTOR, the data segment
6825 * follows right after the tsf.
6826 */
6827 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6828 *buff_len = 0;
6829 }
6830
6831 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6832
6833 hba->dev_cmd.complete = &wait;
6834
10542489 6835 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
5e0a86ee 6836
a45f9371 6837 ufshcd_send_command(hba, tag);
5e0a86ee
AA
6838 /*
6839 * ignore the returning value here - ufshcd_check_query_response is
6840 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6841 * read the response directly ignoring all errors.
6842 */
6843 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6844
6845 /* just copy the upiu response as it is */
6846 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
4bbbe242
AA
6847 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6848 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6849 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6850 MASK_QUERY_DATA_SEG_LEN;
6851
6852 if (*buff_len >= resp_len) {
6853 memcpy(desc_buff, descp, resp_len);
6854 *buff_len = resp_len;
6855 } else {
3d4881d1
BH
6856 dev_warn(hba->dev,
6857 "%s: rsp size %d is bigger than buffer size %d",
6858 __func__, resp_len, *buff_len);
4bbbe242
AA
6859 *buff_len = 0;
6860 err = -EINVAL;
6861 }
6862 }
10542489
BH
6863 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
6864 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
5e0a86ee 6865
5e0a86ee
AA
6866 up_read(&hba->clk_scaling_lock);
6867 return err;
6868}
6869
6870/**
6871 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6872 * @hba: per-adapter instance
6873 * @req_upiu: upiu request
6874 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6875 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6876 * @desc_buff: pointer to descriptor buffer, NULL if NA
6877 * @buff_len: descriptor size, 0 if NA
6878 * @desc_op: descriptor operation
6879 *
6880 * Supports UTP Transfer requests (nop and query), and UTP Task
6881 * Management requests.
6882 * It is up to the caller to fill the upiu conent properly, as it will
6883 * be copied without any further input validations.
6884 */
6885int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6886 struct utp_upiu_req *req_upiu,
6887 struct utp_upiu_req *rsp_upiu,
6888 int msgcode,
6889 u8 *desc_buff, int *buff_len,
6890 enum query_opcode desc_op)
6891{
6892 int err;
7f674c38 6893 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
5e0a86ee 6894 struct utp_task_req_desc treq = { { 0 }, };
957d63e7 6895 enum utp_ocs ocs_value;
5e0a86ee
AA
6896 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6897
5e0a86ee
AA
6898 switch (msgcode) {
6899 case UPIU_TRANSACTION_NOP_OUT:
6900 cmd_type = DEV_CMD_TYPE_NOP;
df561f66 6901 fallthrough;
5e0a86ee
AA
6902 case UPIU_TRANSACTION_QUERY_REQ:
6903 ufshcd_hold(hba, false);
6904 mutex_lock(&hba->dev_cmd.lock);
6905 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6906 desc_buff, buff_len,
6907 cmd_type, desc_op);
6908 mutex_unlock(&hba->dev_cmd.lock);
6909 ufshcd_release(hba);
6910
6911 break;
6912 case UPIU_TRANSACTION_TASK_REQ:
6913 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6914 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6915
1352eec8 6916 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
5e0a86ee
AA
6917
6918 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6919 if (err == -ETIMEDOUT)
6920 break;
6921
6922 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6923 if (ocs_value != OCS_SUCCESS) {
6924 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6925 ocs_value);
6926 break;
6927 }
6928
1352eec8 6929 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
5e0a86ee
AA
6930
6931 break;
6932 default:
6933 err = -EINVAL;
6934
6935 break;
6936 }
6937
5e0a86ee
AA
6938 return err;
6939}
6940
7a3e97b0 6941/**
3441da7d
SRT
6942 * ufshcd_eh_device_reset_handler - device reset handler registered to
6943 * scsi layer.
7a3e97b0
SY
6944 * @cmd: SCSI command pointer
6945 *
6946 * Returns SUCCESS/FAILED
6947 */
3441da7d 6948static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7a3e97b0
SY
6949{
6950 struct Scsi_Host *host;
6951 struct ufs_hba *hba;
7a3e97b0
SY
6952 u32 pos;
6953 int err;
35fc4cd3 6954 u8 resp = 0xF, lun;
7a3e97b0
SY
6955
6956 host = cmd->device->host;
6957 hba = shost_priv(host);
7a3e97b0 6958
35fc4cd3
CG
6959 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
6960 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
e2933132 6961 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3441da7d
SRT
6962 if (!err)
6963 err = resp;
7a3e97b0 6964 goto out;
e2933132 6965 }
7a3e97b0 6966
3441da7d
SRT
6967 /* clear the commands that were pending for corresponding LUN */
6968 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
35fc4cd3 6969 if (hba->lrb[pos].lun == lun) {
3441da7d
SRT
6970 err = ufshcd_clear_cmd(hba, pos);
6971 if (err)
6972 break;
11682523 6973 __ufshcd_transfer_req_compl(hba, 1U << pos);
7a3e97b0 6974 }
3441da7d 6975 }
7fabb77b 6976
7a3e97b0 6977out:
7fabb77b 6978 hba->req_abort_count = 0;
e965e5e0 6979 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
3441da7d
SRT
6980 if (!err) {
6981 err = SUCCESS;
6982 } else {
6983 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6984 err = FAILED;
6985 }
7a3e97b0
SY
6986 return err;
6987}
6988
e0b299e3
GB
6989static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6990{
6991 struct ufshcd_lrb *lrbp;
6992 int tag;
6993
6994 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6995 lrbp = &hba->lrb[tag];
6996 lrbp->req_abort_skip = true;
6997 }
6998}
6999
7a3e97b0 7000/**
307348f6 7001 * ufshcd_try_to_abort_task - abort a specific task
d23ec0b6
LJ
7002 * @hba: Pointer to adapter instance
7003 * @tag: Task tag/index to be aborted
7a3e97b0 7004 *
f20810d8
SRT
7005 * Abort the pending command in device by sending UFS_ABORT_TASK task management
7006 * command, and in host controller by clearing the door-bell register. There can
7007 * be race between controller sending the command to the device while abort is
7008 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
7009 * really issued and then try to abort it.
7010 *
307348f6
CG
7011 * Returns zero on success, non-zero on failure
7012 */
7013static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7014{
7015 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7016 int err = 0;
7017 int poll_cnt;
7018 u8 resp = 0xF;
7019 u32 reg;
7020
7021 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7022 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7023 UFS_QUERY_TASK, &resp);
7024 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7025 /* cmd pending in the device */
7026 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7027 __func__, tag);
7028 break;
7029 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7030 /*
7031 * cmd not pending in the device, check if it is
7032 * in transition.
7033 */
7034 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7035 __func__, tag);
7036 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7037 if (reg & (1 << tag)) {
7038 /* sleep for max. 200us to stabilize */
7039 usleep_range(100, 200);
7040 continue;
7041 }
7042 /* command completed already */
7043 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7044 __func__, tag);
7045 goto out;
7046 } else {
7047 dev_err(hba->dev,
7048 "%s: no response from device. tag = %d, err %d\n",
7049 __func__, tag, err);
7050 if (!err)
7051 err = resp; /* service response error */
7052 goto out;
7053 }
7054 }
7055
7056 if (!poll_cnt) {
7057 err = -EBUSY;
7058 goto out;
7059 }
7060
7061 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7062 UFS_ABORT_TASK, &resp);
7063 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7064 if (!err) {
7065 err = resp; /* service response error */
7066 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7067 __func__, tag, err);
7068 }
7069 goto out;
7070 }
7071
7072 err = ufshcd_clear_cmd(hba, tag);
7073 if (err)
7074 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7075 __func__, tag, err);
7076
7077out:
7078 return err;
7079}
7080
7081/**
7082 * ufshcd_abort - scsi host template eh_abort_handler callback
7083 * @cmd: SCSI command pointer
7084 *
7a3e97b0
SY
7085 * Returns SUCCESS/FAILED
7086 */
7087static int ufshcd_abort(struct scsi_cmnd *cmd)
7088{
4728ab4a
BVA
7089 struct Scsi_Host *host = cmd->device->host;
7090 struct ufs_hba *hba = shost_priv(host);
3f2c1002 7091 int tag = scsi_cmd_to_rq(cmd)->tag;
4728ab4a 7092 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7a3e97b0 7093 unsigned long flags;
64180742 7094 int err = FAILED;
1fbaa02d 7095 bool outstanding;
e9d501b1 7096 u32 reg;
7a3e97b0 7097
4728ab4a 7098 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
7a3e97b0 7099
1ab27c9c 7100 ufshcd_hold(hba, false);
14497328 7101 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
64180742 7102 /* If command is already aborted/completed, return FAILED. */
14497328
YG
7103 if (!(test_bit(tag, &hba->outstanding_reqs))) {
7104 dev_err(hba->dev,
7105 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7106 __func__, tag, hba->outstanding_reqs, reg);
64180742 7107 goto release;
14497328 7108 }
7a3e97b0 7109
66cc820f 7110 /* Print Transfer Request of aborted task */
d87a1f6d 7111 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
66cc820f 7112
7fabb77b
GB
7113 /*
7114 * Print detailed info about aborted request.
7115 * As more than one request might get aborted at the same time,
7116 * print full information only for the first aborted request in order
7117 * to reduce repeated printouts. For other aborted requests only print
7118 * basic details.
7119 */
7a7e66c6 7120 scsi_print_command(cmd);
7fabb77b 7121 if (!hba->req_abort_count) {
e965e5e0
SC
7122 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7123 ufshcd_print_evt_hist(hba);
6ba65588 7124 ufshcd_print_host_state(hba);
7fabb77b
GB
7125 ufshcd_print_pwr_info(hba);
7126 ufshcd_print_trs(hba, 1 << tag, true);
7127 } else {
7128 ufshcd_print_trs(hba, 1 << tag, false);
7129 }
7130 hba->req_abort_count++;
e0b299e3 7131
d87a1f6d
BH
7132 if (!(reg & (1 << tag))) {
7133 dev_err(hba->dev,
7134 "%s: cmd was completed, but without a notifying intr, tag = %d",
7135 __func__, tag);
11682523 7136 __ufshcd_transfer_req_compl(hba, 1UL << tag);
64180742 7137 goto release;
d87a1f6d
BH
7138 }
7139
7a7e66c6
CG
7140 /*
7141 * Task abort to the device W-LUN is illegal. When this command
7142 * will fail, due to spec violation, scsi err handling next step
7143 * will be to send LU reset which, again, is a spec violation.
7144 * To avoid these unnecessary/illegal steps, first we clean up
a45f9371 7145 * the lrb taken by this cmd and re-set it in outstanding_reqs,
88b09900 7146 * then queue the eh_work and bail.
7a7e66c6
CG
7147 */
7148 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7149 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
64180742 7150
7a7e66c6 7151 spin_lock_irqsave(host->host_lock, flags);
a45f9371 7152 hba->force_reset = true;
88b09900 7153 ufshcd_schedule_eh_work(hba);
7a7e66c6 7154 spin_unlock_irqrestore(host->host_lock, flags);
64180742 7155 goto release;
7a7e66c6
CG
7156 }
7157
e0b299e3 7158 /* Skip task abort in case previous aborts failed and report failure */
64180742
BVA
7159 if (lrbp->req_abort_skip) {
7160 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7161 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7162 goto release;
7163 }
f20810d8 7164
64180742
BVA
7165 err = ufshcd_try_to_abort_task(hba, tag);
7166 if (err) {
f20810d8 7167 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
e0b299e3 7168 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
f20810d8 7169 err = FAILED;
64180742 7170 goto release;
f20810d8
SRT
7171 }
7172
1fbaa02d
BVA
7173 /*
7174 * Clear the corresponding bit from outstanding_reqs since the command
7175 * has been aborted successfully.
7176 */
7177 spin_lock_irqsave(&hba->outstanding_lock, flags);
7178 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7179 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7180
7181 if (outstanding)
7182 ufshcd_release_scsi_cmd(hba, lrbp);
7183
64180742
BVA
7184 err = SUCCESS;
7185
7186release:
7187 /* Matches the ufshcd_hold() call at the start of this function. */
1ab27c9c 7188 ufshcd_release(hba);
7a3e97b0
SY
7189 return err;
7190}
7191
3441da7d
SRT
7192/**
7193 * ufshcd_host_reset_and_restore - reset and restore host controller
7194 * @hba: per-adapter instance
7195 *
7196 * Note that host controller reset may issue DME_RESET to
7197 * local and remote (device) Uni-Pro stack and the attributes
7198 * are reset to default state.
7199 *
7200 * Returns zero on success, non-zero on failure
7201 */
7202static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7203{
7204 int err;
3441da7d 7205
2df74b69
CG
7206 /*
7207 * Stop the host controller and complete the requests
7208 * cleared by h/w
7209 */
f02bc975 7210 ufshpb_reset_host(hba);
5cac1095 7211 ufshcd_hba_stop(hba);
2df74b69 7212 hba->silence_err_logs = true;
11682523 7213 ufshcd_complete_requests(hba);
2df74b69 7214 hba->silence_err_logs = false;
3441da7d 7215
a3cd5ec5 7216 /* scale up clocks to max frequency before full reinitialization */
394b949f 7217 ufshcd_set_clk_freq(hba, true);
a3cd5ec5 7218
3441da7d 7219 err = ufshcd_hba_enable(hba);
3441da7d
SRT
7220
7221 /* Establish the link again and restore the device */
1918651f 7222 if (!err)
4ee7ee53
JK
7223 err = ufshcd_probe_hba(hba, false);
7224
3441da7d
SRT
7225 if (err)
7226 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
e965e5e0 7227 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
3441da7d
SRT
7228 return err;
7229}
7230
7231/**
7232 * ufshcd_reset_and_restore - reset and re-initialize host/device
7233 * @hba: per-adapter instance
7234 *
7235 * Reset and recover device, host and re-establish link. This
7236 * is helpful to recover the communication in fatal error conditions.
7237 *
7238 * Returns zero on success, non-zero on failure
7239 */
7240static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7241{
54a40453
AH
7242 u32 saved_err = 0;
7243 u32 saved_uic_err = 0;
3441da7d 7244 int err = 0;
4db7a236 7245 unsigned long flags;
1d337ec2 7246 int retries = MAX_HOST_RESET_RETRIES;
3441da7d 7247
4db7a236 7248 spin_lock_irqsave(hba->host->host_lock, flags);
1d337ec2 7249 do {
54a40453
AH
7250 /*
7251 * This is a fresh start, cache and clear saved error first,
7252 * in case new error generated during reset and restore.
7253 */
7254 saved_err |= hba->saved_err;
7255 saved_uic_err |= hba->saved_uic_err;
7256 hba->saved_err = 0;
7257 hba->saved_uic_err = 0;
7258 hba->force_reset = false;
7259 hba->ufshcd_state = UFSHCD_STATE_RESET;
7260 spin_unlock_irqrestore(hba->host->host_lock, flags);
7261
d8d9f793 7262 /* Reset the attached device */
31a5d9ca 7263 ufshcd_device_reset(hba);
d8d9f793 7264
1d337ec2 7265 err = ufshcd_host_reset_and_restore(hba);
54a40453
AH
7266
7267 spin_lock_irqsave(hba->host->host_lock, flags);
7268 if (err)
7269 continue;
7270 /* Do not exit unless operational or dead */
7271 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7272 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7273 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7274 err = -EAGAIN;
1d337ec2 7275 } while (err && --retries);
3441da7d 7276
4db7a236
CG
7277 /*
7278 * Inform scsi mid-layer that we did reset and allow to handle
7279 * Unit Attention properly.
7280 */
7281 scsi_report_bus_reset(hba->host, 0);
7282 if (err) {
88a92d6a 7283 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4db7a236
CG
7284 hba->saved_err |= saved_err;
7285 hba->saved_uic_err |= saved_uic_err;
7286 }
7287 spin_unlock_irqrestore(hba->host->host_lock, flags);
7288
3441da7d
SRT
7289 return err;
7290}
7291
7292/**
7293 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
8aa29f19 7294 * @cmd: SCSI command pointer
3441da7d
SRT
7295 *
7296 * Returns SUCCESS/FAILED
7297 */
7298static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7299{
4db7a236 7300 int err = SUCCESS;
3441da7d
SRT
7301 unsigned long flags;
7302 struct ufs_hba *hba;
7303
7304 hba = shost_priv(cmd->device->host);
7305
4db7a236
CG
7306 spin_lock_irqsave(hba->host->host_lock, flags);
7307 hba->force_reset = true;
88b09900 7308 ufshcd_schedule_eh_work(hba);
4db7a236 7309 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
3441da7d
SRT
7310 spin_unlock_irqrestore(hba->host->host_lock, flags);
7311
88b09900 7312 flush_work(&hba->eh_work);
3441da7d
SRT
7313
7314 spin_lock_irqsave(hba->host->host_lock, flags);
4db7a236 7315 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
3441da7d 7316 err = FAILED;
3441da7d
SRT
7317 spin_unlock_irqrestore(hba->host->host_lock, flags);
7318
7319 return err;
7320}
7321
3a4bf06d
YG
7322/**
7323 * ufshcd_get_max_icc_level - calculate the ICC level
7324 * @sup_curr_uA: max. current supported by the regulator
7325 * @start_scan: row at the desc table to start scan from
7326 * @buff: power descriptor buffer
7327 *
7328 * Returns calculated max ICC level for specific regulator
7329 */
7330static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7331{
7332 int i;
7333 int curr_uA;
7334 u16 data;
7335 u16 unit;
7336
7337 for (i = start_scan; i >= 0; i--) {
d79713f9 7338 data = be16_to_cpup((__be16 *)&buff[2 * i]);
3a4bf06d
YG
7339 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7340 ATTR_ICC_LVL_UNIT_OFFSET;
7341 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7342 switch (unit) {
7343 case UFSHCD_NANO_AMP:
7344 curr_uA = curr_uA / 1000;
7345 break;
7346 case UFSHCD_MILI_AMP:
7347 curr_uA = curr_uA * 1000;
7348 break;
7349 case UFSHCD_AMP:
7350 curr_uA = curr_uA * 1000 * 1000;
7351 break;
7352 case UFSHCD_MICRO_AMP:
7353 default:
7354 break;
7355 }
7356 if (sup_curr_uA >= curr_uA)
7357 break;
7358 }
7359 if (i < 0) {
7360 i = 0;
7361 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7362 }
7363
7364 return (u32)i;
7365}
7366
7367/**
11eea9b3 7368 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
3a4bf06d
YG
7369 * In case regulators are not initialized we'll return 0
7370 * @hba: per-adapter instance
7371 * @desc_buf: power descriptor buffer to extract ICC levels from.
7372 * @len: length of desc_buff
7373 *
7374 * Returns calculated ICC level
7375 */
7376static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7377 u8 *desc_buf, int len)
7378{
7379 u32 icc_level = 0;
7380
7381 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7382 !hba->vreg_info.vccq2) {
71bb9ab6
AH
7383 /*
7384 * Using dev_dbg to avoid messages during runtime PM to avoid
7385 * never-ending cycles of messages written back to storage by
7386 * user space causing runtime resume, causing more messages and
7387 * so on.
7388 */
7389 dev_dbg(hba->dev,
3a4bf06d
YG
7390 "%s: Regulator capability was not set, actvIccLevel=%d",
7391 __func__, icc_level);
7392 goto out;
7393 }
7394
0873045f 7395 if (hba->vreg_info.vcc->max_uA)
3a4bf06d
YG
7396 icc_level = ufshcd_get_max_icc_level(
7397 hba->vreg_info.vcc->max_uA,
7398 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7399 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7400
0873045f 7401 if (hba->vreg_info.vccq->max_uA)
3a4bf06d
YG
7402 icc_level = ufshcd_get_max_icc_level(
7403 hba->vreg_info.vccq->max_uA,
7404 icc_level,
7405 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7406
0873045f 7407 if (hba->vreg_info.vccq2->max_uA)
3a4bf06d
YG
7408 icc_level = ufshcd_get_max_icc_level(
7409 hba->vreg_info.vccq2->max_uA,
7410 icc_level,
7411 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7412out:
7413 return icc_level;
7414}
7415
e89860f1 7416static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
3a4bf06d
YG
7417{
7418 int ret;
7a0bf85b 7419 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
bbe21d7a 7420 u8 *desc_buf;
e89860f1 7421 u32 icc_level;
bbe21d7a
KC
7422
7423 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7424 if (!desc_buf)
7425 return;
3a4bf06d 7426
c4607a09
BH
7427 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7428 desc_buf, buff_len);
3a4bf06d
YG
7429 if (ret) {
7430 dev_err(hba->dev,
7431 "%s: Failed reading power descriptor.len = %d ret = %d",
7432 __func__, buff_len, ret);
bbe21d7a 7433 goto out;
3a4bf06d
YG
7434 }
7435
e89860f1
CG
7436 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7437 buff_len);
7438 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
3a4bf06d 7439
dbd34a61 7440 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
e89860f1 7441 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
3a4bf06d
YG
7442
7443 if (ret)
7444 dev_err(hba->dev,
7445 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
e89860f1 7446 __func__, icc_level, ret);
3a4bf06d 7447
bbe21d7a
KC
7448out:
7449 kfree(desc_buf);
3a4bf06d
YG
7450}
7451
fb276f77
CG
7452static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7453{
7454 scsi_autopm_get_device(sdev);
7455 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7456 if (sdev->rpm_autosuspend)
7457 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7458 RPM_AUTOSUSPEND_DELAY_MS);
7459 scsi_autopm_put_device(sdev);
7460}
7461
2a8fa600
SJ
7462/**
7463 * ufshcd_scsi_add_wlus - Adds required W-LUs
7464 * @hba: per-adapter instance
7465 *
7466 * UFS device specification requires the UFS devices to support 4 well known
7467 * logical units:
7468 * "REPORT_LUNS" (address: 01h)
7469 * "UFS Device" (address: 50h)
7470 * "RPMB" (address: 44h)
7471 * "BOOT" (address: 30h)
7472 * UFS device's power management needs to be controlled by "POWER CONDITION"
7473 * field of SSU (START STOP UNIT) command. But this "power condition" field
7474 * will take effect only when its sent to "UFS device" well known logical unit
7475 * hence we require the scsi_device instance to represent this logical unit in
7476 * order for the UFS host driver to send the SSU command for power management.
8aa29f19 7477 *
2a8fa600
SJ
7478 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
7479 * Block) LU so user space process can control this LU. User space may also
7480 * want to have access to BOOT LU.
8aa29f19 7481 *
2a8fa600
SJ
7482 * This function adds scsi device instances for each of all well known LUs
7483 * (except "REPORT LUNS" LU).
7484 *
7485 * Returns zero on success (all required W-LUs are added successfully),
7486 * non-zero error value on failure (if failed to add any of the required W-LU).
7487 */
7488static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7489{
7490 int ret = 0;
59830c09 7491 struct scsi_device *sdev_boot, *sdev_rpmb;
2a8fa600
SJ
7492
7493 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7494 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7495 if (IS_ERR(hba->sdev_ufs_device)) {
7496 ret = PTR_ERR(hba->sdev_ufs_device);
7497 hba->sdev_ufs_device = NULL;
7498 goto out;
7499 }
7c48bfd0 7500 scsi_device_put(hba->sdev_ufs_device);
2a8fa600 7501
59830c09 7502 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
2a8fa600 7503 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
59830c09
BVA
7504 if (IS_ERR(sdev_rpmb)) {
7505 ret = PTR_ERR(sdev_rpmb);
3d21fbde 7506 goto remove_sdev_ufs_device;
2a8fa600 7507 }
59830c09
BVA
7508 ufshcd_blk_pm_runtime_init(sdev_rpmb);
7509 scsi_device_put(sdev_rpmb);
3d21fbde
HK
7510
7511 sdev_boot = __scsi_add_device(hba->host, 0, 0,
7512 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
fb276f77 7513 if (IS_ERR(sdev_boot)) {
3d21fbde 7514 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
fb276f77
CG
7515 } else {
7516 ufshcd_blk_pm_runtime_init(sdev_boot);
3d21fbde 7517 scsi_device_put(sdev_boot);
fb276f77 7518 }
2a8fa600
SJ
7519 goto out;
7520
2a8fa600
SJ
7521remove_sdev_ufs_device:
7522 scsi_remove_device(hba->sdev_ufs_device);
7523out:
7524 return ret;
7525}
7526
3d17b9b5
AD
7527static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7528{
a7f1e69d 7529 struct ufs_dev_info *dev_info = &hba->dev_info;
6f8d5a6a
SC
7530 u8 lun;
7531 u32 d_lu_wb_buf_alloc;
e8d03813 7532 u32 ext_ufs_feature;
6f8d5a6a 7533
817d7e14
SC
7534 if (!ufshcd_is_wb_allowed(hba))
7535 return;
f681d107 7536
a7f1e69d
SC
7537 /*
7538 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7539 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7540 * enabled
7541 */
7542 if (!(dev_info->wspecversion >= 0x310 ||
7543 dev_info->wspecversion == 0x220 ||
7544 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7545 goto wb_disabled;
817d7e14 7546
7a0bf85b
BH
7547 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7548 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
817d7e14
SC
7549 goto wb_disabled;
7550
e8d03813
BH
7551 ext_ufs_feature = get_unaligned_be32(desc_buf +
7552 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
817d7e14 7553
e8d03813 7554 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
817d7e14
SC
7555 goto wb_disabled;
7556
3d17b9b5 7557 /*
ae1ce1fc
BH
7558 * WB may be supported but not configured while provisioning. The spec
7559 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
7560 * buffer configured.
3d17b9b5 7561 */
4cd48995 7562 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
3d17b9b5 7563
a7f1e69d 7564 dev_info->b_presrv_uspc_en =
3d17b9b5
AD
7565 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7566
4cd48995 7567 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
e8d03813
BH
7568 if (!get_unaligned_be32(desc_buf +
7569 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
6f8d5a6a
SC
7570 goto wb_disabled;
7571 } else {
7572 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7573 d_lu_wb_buf_alloc = 0;
7574 ufshcd_read_unit_desc_param(hba,
7575 lun,
7576 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7577 (u8 *)&d_lu_wb_buf_alloc,
7578 sizeof(d_lu_wb_buf_alloc));
7579 if (d_lu_wb_buf_alloc) {
a7f1e69d 7580 dev_info->wb_dedicated_lu = lun;
6f8d5a6a
SC
7581 break;
7582 }
7583 }
817d7e14 7584
6f8d5a6a
SC
7585 if (!d_lu_wb_buf_alloc)
7586 goto wb_disabled;
7587 }
f681d107
JC
7588
7589 if (!ufshcd_is_wb_buf_lifetime_available(hba))
7590 goto wb_disabled;
7591
817d7e14
SC
7592 return;
7593
7594wb_disabled:
7595 hba->caps &= ~UFSHCD_CAP_WB_EN;
7596}
7597
e88e2d32
AA
7598static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
7599{
7600 struct ufs_dev_info *dev_info = &hba->dev_info;
7601 u32 ext_ufs_feature;
7602 u8 mask = 0;
7603
7604 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
7605 return;
7606
7607 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7608
7609 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
7610 mask |= MASK_EE_TOO_LOW_TEMP;
7611
7612 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
7613 mask |= MASK_EE_TOO_HIGH_TEMP;
7614
7615 if (mask) {
7616 ufshcd_enable_ee(hba, mask);
7617 ufs_hwmon_probe(hba, mask);
7618 }
7619}
7620
8db269a5 7621void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
817d7e14
SC
7622{
7623 struct ufs_dev_fix *f;
7624 struct ufs_dev_info *dev_info = &hba->dev_info;
7625
8db269a5
SC
7626 if (!fixups)
7627 return;
7628
7629 for (f = fixups; f->quirk; f++) {
817d7e14
SC
7630 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7631 f->wmanufacturerid == UFS_ANY_VENDOR) &&
7632 ((dev_info->model &&
7633 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7634 !strcmp(f->model, UFS_ANY_MODEL)))
7635 hba->dev_quirks |= f->quirk;
7636 }
3d17b9b5 7637}
8db269a5 7638EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
3d17b9b5 7639
c28c00ba
SC
7640static void ufs_fixup_device_setup(struct ufs_hba *hba)
7641{
7642 /* fix by general quirk table */
8db269a5 7643 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
c28c00ba
SC
7644
7645 /* allow vendors to fix quirks */
7646 ufshcd_vops_fixup_dev_quirks(hba);
7647}
7648
09750066 7649static int ufs_get_device_desc(struct ufs_hba *hba)
c58ab7aa
YG
7650{
7651 int err;
7652 u8 model_index;
f02bc975 7653 u8 b_ufs_feature_sup;
bbe21d7a 7654 u8 *desc_buf;
09750066 7655 struct ufs_dev_info *dev_info = &hba->dev_info;
4b828fe1 7656
458a45f5 7657 desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
bbe21d7a
KC
7658 if (!desc_buf) {
7659 err = -ENOMEM;
7660 goto out;
7661 }
c58ab7aa 7662
c4607a09 7663 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7a0bf85b 7664 hba->desc_size[QUERY_DESC_IDN_DEVICE]);
c58ab7aa
YG
7665 if (err) {
7666 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7667 __func__, err);
7668 goto out;
7669 }
7670
7671 /*
7672 * getting vendor (manufacturerID) and Bank Index in big endian
7673 * format
7674 */
09750066 7675 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
c58ab7aa
YG
7676 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7677
09f17791
CG
7678 /* getting Specification Version in big endian format */
7679 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7680 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
f02bc975 7681 b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
09f17791 7682
c58ab7aa 7683 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
3d17b9b5 7684
f02bc975
DP
7685 if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7686 (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
41d8a933
DP
7687 bool hpb_en = false;
7688
f02bc975 7689 ufshpb_get_dev_info(hba, desc_buf);
41d8a933
DP
7690
7691 if (!ufshpb_is_legacy(hba))
7692 err = ufshcd_query_flag_retry(hba,
7693 UPIU_QUERY_OPCODE_READ_FLAG,
7694 QUERY_FLAG_IDN_HPB_EN, 0,
7695 &hpb_en);
7696
7697 if (ufshpb_is_legacy(hba) || (!err && hpb_en))
7698 dev_info->hpb_enabled = true;
f02bc975
DP
7699 }
7700
4b828fe1 7701 err = ufshcd_read_string_desc(hba, model_index,
09750066 7702 &dev_info->model, SD_ASCII_STD);
4b828fe1 7703 if (err < 0) {
c58ab7aa
YG
7704 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7705 __func__, err);
7706 goto out;
7707 }
7708
b294ff3e
AD
7709 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
7710 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
7711
817d7e14
SC
7712 ufs_fixup_device_setup(hba);
7713
a7f1e69d 7714 ufshcd_wb_probe(hba, desc_buf);
817d7e14 7715
e88e2d32
AA
7716 ufshcd_temp_notif_probe(hba, desc_buf);
7717
4b828fe1
TW
7718 /*
7719 * ufshcd_read_string_desc returns size of the string
7720 * reset the error value
7721 */
7722 err = 0;
c58ab7aa
YG
7723
7724out:
bbe21d7a 7725 kfree(desc_buf);
c58ab7aa
YG
7726 return err;
7727}
7728
09750066 7729static void ufs_put_device_desc(struct ufs_hba *hba)
4b828fe1 7730{
09750066
BH
7731 struct ufs_dev_info *dev_info = &hba->dev_info;
7732
7733 kfree(dev_info->model);
7734 dev_info->model = NULL;
4b828fe1
TW
7735}
7736
37113106
YG
7737/**
7738 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7739 * @hba: per-adapter instance
7740 *
7741 * PA_TActivate parameter can be tuned manually if UniPro version is less than
7742 * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
7743 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7744 * the hibern8 exit latency.
7745 *
7746 * Returns zero on success, non-zero error value on failure.
7747 */
7748static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7749{
7750 int ret = 0;
7751 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7752
7753 ret = ufshcd_dme_peer_get(hba,
7754 UIC_ARG_MIB_SEL(
7755 RX_MIN_ACTIVATETIME_CAPABILITY,
7756 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7757 &peer_rx_min_activatetime);
7758 if (ret)
7759 goto out;
7760
7761 /* make sure proper unit conversion is applied */
7762 tuned_pa_tactivate =
7763 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7764 / PA_TACTIVATE_TIME_UNIT_US);
7765 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7766 tuned_pa_tactivate);
7767
7768out:
7769 return ret;
7770}
7771
7772/**
7773 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7774 * @hba: per-adapter instance
7775 *
7776 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7777 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
7778 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7779 * This optimal value can help reduce the hibern8 exit latency.
7780 *
7781 * Returns zero on success, non-zero error value on failure.
7782 */
7783static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7784{
7785 int ret = 0;
7786 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7787 u32 max_hibern8_time, tuned_pa_hibern8time;
7788
7789 ret = ufshcd_dme_get(hba,
7790 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7791 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7792 &local_tx_hibern8_time_cap);
7793 if (ret)
7794 goto out;
7795
7796 ret = ufshcd_dme_peer_get(hba,
7797 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7798 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7799 &peer_rx_hibern8_time_cap);
7800 if (ret)
7801 goto out;
7802
7803 max_hibern8_time = max(local_tx_hibern8_time_cap,
7804 peer_rx_hibern8_time_cap);
7805 /* make sure proper unit conversion is applied */
7806 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7807 / PA_HIBERN8_TIME_UNIT_US);
7808 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7809 tuned_pa_hibern8time);
7810out:
7811 return ret;
7812}
7813
c6a6db43 7814/**
7815 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7816 * less than device PA_TACTIVATE time.
7817 * @hba: per-adapter instance
7818 *
7819 * Some UFS devices require host PA_TACTIVATE to be lower than device
7820 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7821 * for such devices.
7822 *
7823 * Returns zero on success, non-zero error value on failure.
7824 */
7825static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7826{
7827 int ret = 0;
7828 u32 granularity, peer_granularity;
7829 u32 pa_tactivate, peer_pa_tactivate;
7830 u32 pa_tactivate_us, peer_pa_tactivate_us;
7831 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7832
7833 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7834 &granularity);
7835 if (ret)
7836 goto out;
7837
7838 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7839 &peer_granularity);
7840 if (ret)
7841 goto out;
7842
7843 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7844 (granularity > PA_GRANULARITY_MAX_VAL)) {
7845 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7846 __func__, granularity);
7847 return -EINVAL;
7848 }
7849
7850 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7851 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7852 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7853 __func__, peer_granularity);
7854 return -EINVAL;
7855 }
7856
7857 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7858 if (ret)
7859 goto out;
7860
7861 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7862 &peer_pa_tactivate);
7863 if (ret)
7864 goto out;
7865
7866 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7867 peer_pa_tactivate_us = peer_pa_tactivate *
7868 gran_to_us_table[peer_granularity - 1];
7869
9008661e 7870 if (pa_tactivate_us >= peer_pa_tactivate_us) {
c6a6db43 7871 u32 new_peer_pa_tactivate;
7872
7873 new_peer_pa_tactivate = pa_tactivate_us /
7874 gran_to_us_table[peer_granularity - 1];
7875 new_peer_pa_tactivate++;
7876 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7877 new_peer_pa_tactivate);
7878 }
7879
7880out:
7881 return ret;
7882}
7883
09750066 7884static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
37113106
YG
7885{
7886 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7887 ufshcd_tune_pa_tactivate(hba);
7888 ufshcd_tune_pa_hibern8time(hba);
7889 }
7890
e91ed9e0
CG
7891 ufshcd_vops_apply_dev_quirks(hba);
7892
37113106
YG
7893 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7894 /* set 1ms timeout for PA_TACTIVATE */
7895 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
c6a6db43 7896
7897 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7898 ufshcd_quirk_tune_host_pa_tactivate(hba);
37113106
YG
7899}
7900
ff8e20c6
DR
7901static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7902{
ff8e20c6
DR
7903 hba->ufs_stats.hibern8_exit_cnt = 0;
7904 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7fabb77b 7905 hba->req_abort_count = 0;
ff8e20c6
DR
7906}
7907
731f0621
BH
7908static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7909{
7910 int err;
7911 size_t buff_len;
7912 u8 *desc_buf;
7913
7a0bf85b 7914 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
731f0621
BH
7915 desc_buf = kmalloc(buff_len, GFP_KERNEL);
7916 if (!desc_buf) {
7917 err = -ENOMEM;
7918 goto out;
7919 }
7920
c4607a09
BH
7921 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7922 desc_buf, buff_len);
731f0621
BH
7923 if (err) {
7924 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7925 __func__, err);
7926 goto out;
7927 }
7928
7929 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7930 hba->dev_info.max_lu_supported = 32;
7931 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7932 hba->dev_info.max_lu_supported = 8;
7933
f02bc975
DP
7934 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
7935 GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
7936 ufshpb_get_geo_info(hba, desc_buf);
7937
731f0621
BH
7938out:
7939 kfree(desc_buf);
7940 return err;
7941}
7942
9e1e8a75
SJ
7943static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7944 {19200000, REF_CLK_FREQ_19_2_MHZ},
7945 {26000000, REF_CLK_FREQ_26_MHZ},
7946 {38400000, REF_CLK_FREQ_38_4_MHZ},
7947 {52000000, REF_CLK_FREQ_52_MHZ},
7948 {0, REF_CLK_FREQ_INVAL},
7949};
7950
7951static enum ufs_ref_clk_freq
7952ufs_get_bref_clk_from_hz(unsigned long freq)
7953{
7954 int i;
7955
7956 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7957 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7958 return ufs_ref_clk_freqs[i].val;
7959
7960 return REF_CLK_FREQ_INVAL;
7961}
7962
7963void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7964{
7965 unsigned long freq;
7966
7967 freq = clk_get_rate(refclk);
7968
7969 hba->dev_ref_clk_freq =
7970 ufs_get_bref_clk_from_hz(freq);
7971
7972 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7973 dev_err(hba->dev,
7974 "invalid ref_clk setting = %ld\n", freq);
7975}
7976
7977static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7978{
7979 int err;
7980 u32 ref_clk;
7981 u32 freq = hba->dev_ref_clk_freq;
7982
7983 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7984 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7985
7986 if (err) {
7987 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7988 err);
7989 goto out;
7990 }
7991
7992 if (ref_clk == freq)
7993 goto out; /* nothing to update */
7994
7995 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7996 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7997
7998 if (err) {
7999 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8000 ufs_ref_clk_freqs[freq].freq_hz);
8001 goto out;
8002 }
8003
8004 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8005 ufs_ref_clk_freqs[freq].freq_hz);
8006
8007out:
8008 return err;
8009}
8010
1b9e2141
BH
8011static int ufshcd_device_params_init(struct ufs_hba *hba)
8012{
8013 bool flag;
7a0bf85b 8014 int ret, i;
1b9e2141 8015
7a0bf85b
BH
8016 /* Init device descriptor sizes */
8017 for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
8018 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
1b9e2141 8019
731f0621
BH
8020 /* Init UFS geometry descriptor related parameters */
8021 ret = ufshcd_device_geo_params_init(hba);
8022 if (ret)
8023 goto out;
8024
1b9e2141
BH
8025 /* Check and apply UFS device quirks */
8026 ret = ufs_get_device_desc(hba);
8027 if (ret) {
8028 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8029 __func__, ret);
8030 goto out;
8031 }
8032
09f17791
CG
8033 ufshcd_get_ref_clk_gating_wait(hba);
8034
1b9e2141 8035 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1f34eedf 8036 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
1b9e2141
BH
8037 hba->dev_info.f_power_on_wp_en = flag;
8038
2b35b2ad
BH
8039 /* Probe maximum power mode co-supported by both UFS host and device */
8040 if (ufshcd_get_max_pwr_mode(hba))
8041 dev_err(hba->dev,
8042 "%s: Failed getting max supported power mode\n",
8043 __func__);
1b9e2141
BH
8044out:
8045 return ret;
8046}
8047
8048/**
8049 * ufshcd_add_lus - probe and add UFS logical units
8050 * @hba: per-adapter instance
8051 */
8052static int ufshcd_add_lus(struct ufs_hba *hba)
8053{
8054 int ret;
8055
1b9e2141
BH
8056 /* Add required well known logical units to scsi mid layer */
8057 ret = ufshcd_scsi_add_wlus(hba);
8058 if (ret)
8059 goto out;
8060
8061 /* Initialize devfreq after UFS device is detected */
8062 if (ufshcd_is_clkscaling_supported(hba)) {
8063 memcpy(&hba->clk_scaling.saved_pwr_info.info,
8064 &hba->pwr_info,
8065 sizeof(struct ufs_pa_layer_attr));
8066 hba->clk_scaling.saved_pwr_info.is_valid = true;
1b9e2141 8067 hba->clk_scaling.is_allowed = true;
1b9e2141 8068
b058fa86
SC
8069 ret = ufshcd_devfreq_init(hba);
8070 if (ret)
8071 goto out;
8072
8073 hba->clk_scaling.is_enabled = true;
8074 ufshcd_init_clk_scaling_sysfs(hba);
1b9e2141
BH
8075 }
8076
8077 ufs_bsg_probe(hba);
f02bc975 8078 ufshpb_init(hba);
1b9e2141
BH
8079 scsi_scan_host(hba->host);
8080 pm_runtime_put_sync(hba->dev);
8081
1b9e2141
BH
8082out:
8083 return ret;
8084}
8085
6ccf44fe 8086/**
568dd995 8087 * ufshcd_probe_hba - probe hba to detect device and initialize it
1d337ec2 8088 * @hba: per-adapter instance
568dd995 8089 * @init_dev_params: whether or not to call ufshcd_device_params_init().
1d337ec2
SRT
8090 *
8091 * Execute link-startup and verify device initialization
6ccf44fe 8092 */
568dd995 8093static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
6ccf44fe 8094{
6ccf44fe 8095 int ret;
4db7a236 8096 unsigned long flags;
7ff5ab47 8097 ktime_t start = ktime_get();
6ccf44fe 8098
aa53f580
CG
8099 hba->ufshcd_state = UFSHCD_STATE_RESET;
8100
6ccf44fe 8101 ret = ufshcd_link_startup(hba);
5a0b0cb9
SRT
8102 if (ret)
8103 goto out;
8104
10fb4f87 8105 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8106 goto out;
8107
ff8e20c6
DR
8108 /* Debug counters initialization */
8109 ufshcd_clear_dbg_ufs_stats(hba);
8110
57d104c1
SJ
8111 /* UniPro link is active now */
8112 ufshcd_set_link_active(hba);
d3e89bac 8113
1b9e2141 8114 /* Verify device initialization by sending NOP OUT UPIU */
5a0b0cb9
SRT
8115 ret = ufshcd_verify_dev_init(hba);
8116 if (ret)
8117 goto out;
68078d5c 8118
1b9e2141 8119 /* Initiate UFS initialization, and waiting until completion */
68078d5c
DR
8120 ret = ufshcd_complete_dev_init(hba);
8121 if (ret)
8122 goto out;
5a0b0cb9 8123
1b9e2141
BH
8124 /*
8125 * Initialize UFS device parameters used by driver, these
8126 * parameters are associated with UFS descriptors.
8127 */
568dd995 8128 if (init_dev_params) {
1b9e2141
BH
8129 ret = ufshcd_device_params_init(hba);
8130 if (ret)
8131 goto out;
93fdd5ac
TW
8132 }
8133
09750066 8134 ufshcd_tune_unipro_params(hba);
4b828fe1 8135
57d104c1
SJ
8136 /* UFS device is also active now */
8137 ufshcd_set_ufs_dev_active(hba);
66ec6d59 8138 ufshcd_force_reset_auto_bkops(hba);
57d104c1 8139
2b35b2ad
BH
8140 /* Gear up to HS gear if supported */
8141 if (hba->max_pwr_info.is_valid) {
9e1e8a75
SJ
8142 /*
8143 * Set the right value to bRefClkFreq before attempting to
8144 * switch to HS gears.
8145 */
8146 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8147 ufshcd_set_dev_ref_clk(hba);
7eb584db 8148 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8643ae66 8149 if (ret) {
7eb584db
DR
8150 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8151 __func__, ret);
8643ae66
DL
8152 goto out;
8153 }
6a9df818 8154 ufshcd_print_pwr_info(hba);
7eb584db 8155 }
57d104c1 8156
e89860f1
CG
8157 /*
8158 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
8159 * and for removable UFS card as well, hence always set the parameter.
8160 * Note: Error handler may issue the device reset hence resetting
8161 * bActiveICCLevel as well so it is always safe to set this here.
8162 */
8163 ufshcd_set_active_icc_lvl(hba);
8164
3d17b9b5 8165 ufshcd_wb_config(hba);
cd469475
AH
8166 if (hba->ee_usr_mask)
8167 ufshcd_write_ee_control(hba);
71d848b8
CG
8168 /* Enable Auto-Hibernate if configured */
8169 ufshcd_auto_hibern8_enable(hba);
8170
f02bc975 8171 ufshpb_reset(hba);
5a0b0cb9 8172out:
4db7a236
CG
8173 spin_lock_irqsave(hba->host->host_lock, flags);
8174 if (ret)
8175 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8176 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8177 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8178 spin_unlock_irqrestore(hba->host->host_lock, flags);
1d337ec2 8179
7ff5ab47 8180 trace_ufshcd_init(dev_name(hba->dev), ret,
8181 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8182 hba->curr_dev_pwr_mode, hba->uic_link_state);
1d337ec2
SRT
8183 return ret;
8184}
8185
8186/**
8187 * ufshcd_async_scan - asynchronous execution for probing hba
8188 * @data: data pointer to pass to this function
8189 * @cookie: cookie data
8190 */
8191static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8192{
8193 struct ufs_hba *hba = (struct ufs_hba *)data;
1b9e2141 8194 int ret;
1d337ec2 8195
9cd20d3f 8196 down(&hba->host_sem);
1b9e2141
BH
8197 /* Initialize hba, detect and initialize UFS device */
8198 ret = ufshcd_probe_hba(hba, true);
9cd20d3f 8199 up(&hba->host_sem);
1b9e2141
BH
8200 if (ret)
8201 goto out;
8202
8203 /* Probe and add UFS logical units */
8204 ret = ufshcd_add_lus(hba);
8205out:
8206 /*
8207 * If we failed to initialize the device or the device is not
8208 * present, turn off the power/clocks etc.
8209 */
8210 if (ret) {
8211 pm_runtime_put_sync(hba->dev);
1b9e2141
BH
8212 ufshcd_hba_exit(hba);
8213 }
6ccf44fe
SJ
8214}
8215
d829fc8a
SN
8216static const struct attribute_group *ufshcd_driver_groups[] = {
8217 &ufs_sysfs_unit_descriptor_group,
ec92b59c 8218 &ufs_sysfs_lun_attributes_group,
f02bc975
DP
8219#ifdef CONFIG_SCSI_UFS_HPB
8220 &ufs_sysfs_hpb_stat_group,
41d8a933 8221 &ufs_sysfs_hpb_param_group,
f02bc975 8222#endif
d829fc8a
SN
8223 NULL,
8224};
8225
90b8491c
SC
8226static struct ufs_hba_variant_params ufs_hba_vps = {
8227 .hba_enable_delay_us = 1000,
d14734ae 8228 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
90b8491c
SC
8229 .devfreq_profile.polling_ms = 100,
8230 .devfreq_profile.target = ufshcd_devfreq_target,
8231 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8232 .ondemand_data.upthreshold = 70,
8233 .ondemand_data.downdifferential = 5,
8234};
8235
7a3e97b0
SY
8236static struct scsi_host_template ufshcd_driver_template = {
8237 .module = THIS_MODULE,
8238 .name = UFSHCD,
8239 .proc_name = UFSHCD,
eaab9b57 8240 .map_queues = ufshcd_map_queues,
7a3e97b0 8241 .queuecommand = ufshcd_queuecommand,
eaab9b57 8242 .mq_poll = ufshcd_poll,
7a3e97b0 8243 .slave_alloc = ufshcd_slave_alloc,
eeda4749 8244 .slave_configure = ufshcd_slave_configure,
7a3e97b0 8245 .slave_destroy = ufshcd_slave_destroy,
4264fd61 8246 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 8247 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
8248 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
8249 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7a3e97b0
SY
8250 .this_id = -1,
8251 .sg_tablesize = SG_ALL,
8252 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
8253 .can_queue = UFSHCD_CAN_QUEUE,
552a990c 8254 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
1ab27c9c 8255 .max_host_blocked = 1,
c40ecc12 8256 .track_queue_depth = 1,
d829fc8a 8257 .sdev_groups = ufshcd_driver_groups,
4af14d11 8258 .dma_boundary = PAGE_SIZE - 1,
49615ba1 8259 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
7a3e97b0
SY
8260};
8261
57d104c1
SJ
8262static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8263 int ua)
8264{
7b16a07c 8265 int ret;
57d104c1 8266
7b16a07c
BA
8267 if (!vreg)
8268 return 0;
57d104c1 8269
0487fff7
SC
8270 /*
8271 * "set_load" operation shall be required on those regulators
8272 * which specifically configured current limitation. Otherwise
8273 * zero max_uA may cause unexpected behavior when regulator is
8274 * enabled or set as high power mode.
8275 */
8276 if (!vreg->max_uA)
8277 return 0;
8278
7b16a07c
BA
8279 ret = regulator_set_load(vreg->reg, ua);
8280 if (ret < 0) {
8281 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8282 __func__, vreg->name, ua, ret);
57d104c1
SJ
8283 }
8284
8285 return ret;
8286}
8287
8288static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8289 struct ufs_vreg *vreg)
8290{
73067981 8291 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
57d104c1
SJ
8292}
8293
8294static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8295 struct ufs_vreg *vreg)
8296{
7c7cfdcf
AH
8297 if (!vreg)
8298 return 0;
8299
73067981 8300 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
57d104c1
SJ
8301}
8302
aa497613
SRT
8303static int ufshcd_config_vreg(struct device *dev,
8304 struct ufs_vreg *vreg, bool on)
8305{
8306 int ret = 0;
72753590
GS
8307 struct regulator *reg;
8308 const char *name;
aa497613
SRT
8309 int min_uV, uA_load;
8310
8311 BUG_ON(!vreg);
8312
72753590
GS
8313 reg = vreg->reg;
8314 name = vreg->name;
8315
aa497613 8316 if (regulator_count_voltages(reg) > 0) {
90d88f47
AD
8317 uA_load = on ? vreg->max_uA : 0;
8318 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8319 if (ret)
8320 goto out;
8321
3b141e8c
SC
8322 if (vreg->min_uV && vreg->max_uV) {
8323 min_uV = on ? vreg->min_uV : 0;
8324 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
b0008625 8325 if (ret)
3b141e8c
SC
8326 dev_err(dev,
8327 "%s: %s set voltage failed, err=%d\n",
aa497613 8328 __func__, name, ret);
aa497613 8329 }
aa497613
SRT
8330 }
8331out:
8332 return ret;
8333}
8334
8335static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8336{
8337 int ret = 0;
8338
73067981 8339 if (!vreg || vreg->enabled)
aa497613
SRT
8340 goto out;
8341
8342 ret = ufshcd_config_vreg(dev, vreg, true);
8343 if (!ret)
8344 ret = regulator_enable(vreg->reg);
8345
8346 if (!ret)
8347 vreg->enabled = true;
8348 else
8349 dev_err(dev, "%s: %s enable failed, err=%d\n",
8350 __func__, vreg->name, ret);
8351out:
8352 return ret;
8353}
8354
8355static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8356{
8357 int ret = 0;
8358
f8162ac7 8359 if (!vreg || !vreg->enabled || vreg->always_on)
aa497613
SRT
8360 goto out;
8361
8362 ret = regulator_disable(vreg->reg);
8363
8364 if (!ret) {
8365 /* ignore errors on applying disable config */
8366 ufshcd_config_vreg(dev, vreg, false);
8367 vreg->enabled = false;
8368 } else {
8369 dev_err(dev, "%s: %s disable failed, err=%d\n",
8370 __func__, vreg->name, ret);
8371 }
8372out:
8373 return ret;
8374}
8375
8376static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8377{
8378 int ret = 0;
8379 struct device *dev = hba->dev;
8380 struct ufs_vreg_info *info = &hba->vreg_info;
8381
aa497613
SRT
8382 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8383 if (ret)
8384 goto out;
8385
8386 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8387 if (ret)
8388 goto out;
8389
8390 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
aa497613
SRT
8391
8392out:
8393 if (ret) {
8394 ufshcd_toggle_vreg(dev, info->vccq2, false);
8395 ufshcd_toggle_vreg(dev, info->vccq, false);
8396 ufshcd_toggle_vreg(dev, info->vcc, false);
8397 }
8398 return ret;
8399}
8400
6a771a65
RS
8401static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8402{
8403 struct ufs_vreg_info *info = &hba->vreg_info;
8404
60b7b823 8405 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6a771a65
RS
8406}
8407
aa497613
SRT
8408static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8409{
8410 int ret = 0;
8411
8412 if (!vreg)
8413 goto out;
8414
8415 vreg->reg = devm_regulator_get(dev, vreg->name);
8416 if (IS_ERR(vreg->reg)) {
8417 ret = PTR_ERR(vreg->reg);
8418 dev_err(dev, "%s: %s get failed, err=%d\n",
8419 __func__, vreg->name, ret);
8420 }
8421out:
8422 return ret;
8423}
8424
8425static int ufshcd_init_vreg(struct ufs_hba *hba)
8426{
8427 int ret = 0;
8428 struct device *dev = hba->dev;
8429 struct ufs_vreg_info *info = &hba->vreg_info;
8430
aa497613
SRT
8431 ret = ufshcd_get_vreg(dev, info->vcc);
8432 if (ret)
8433 goto out;
8434
8435 ret = ufshcd_get_vreg(dev, info->vccq);
b0008625
BH
8436 if (!ret)
8437 ret = ufshcd_get_vreg(dev, info->vccq2);
aa497613
SRT
8438out:
8439 return ret;
8440}
8441
6a771a65
RS
8442static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8443{
8444 struct ufs_vreg_info *info = &hba->vreg_info;
8445
8446 if (info)
8447 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8448
8449 return 0;
8450}
8451
81309c24 8452static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
c6e79dac
SRT
8453{
8454 int ret = 0;
8455 struct ufs_clk_info *clki;
8456 struct list_head *head = &hba->clk_list_head;
1ab27c9c 8457 unsigned long flags;
911a0771 8458 ktime_t start = ktime_get();
8459 bool clk_state_changed = false;
c6e79dac 8460
566ec9ad 8461 if (list_empty(head))
c6e79dac
SRT
8462 goto out;
8463
38f3242e
CG
8464 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8465 if (ret)
8466 return ret;
1e879e8f 8467
c6e79dac
SRT
8468 list_for_each_entry(clki, head, list) {
8469 if (!IS_ERR_OR_NULL(clki->clk)) {
81309c24
CG
8470 /*
8471 * Don't disable clocks which are needed
8472 * to keep the link active.
8473 */
8474 if (ufshcd_is_link_active(hba) &&
8475 clki->keep_link_active)
57d104c1
SJ
8476 continue;
8477
911a0771 8478 clk_state_changed = on ^ clki->enabled;
c6e79dac
SRT
8479 if (on && !clki->enabled) {
8480 ret = clk_prepare_enable(clki->clk);
8481 if (ret) {
8482 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8483 __func__, clki->name, ret);
8484 goto out;
8485 }
8486 } else if (!on && clki->enabled) {
8487 clk_disable_unprepare(clki->clk);
8488 }
8489 clki->enabled = on;
8490 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8491 clki->name, on ? "en" : "dis");
8492 }
8493 }
1ab27c9c 8494
38f3242e
CG
8495 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8496 if (ret)
8497 return ret;
1e879e8f 8498
c6e79dac
SRT
8499out:
8500 if (ret) {
8501 list_for_each_entry(clki, head, list) {
8502 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8503 clk_disable_unprepare(clki->clk);
8504 }
7ff5ab47 8505 } else if (!ret && on) {
1ab27c9c
ST
8506 spin_lock_irqsave(hba->host->host_lock, flags);
8507 hba->clk_gating.state = CLKS_ON;
7ff5ab47 8508 trace_ufshcd_clk_gating(dev_name(hba->dev),
8509 hba->clk_gating.state);
1ab27c9c 8510 spin_unlock_irqrestore(hba->host->host_lock, flags);
c6e79dac 8511 }
7ff5ab47 8512
911a0771 8513 if (clk_state_changed)
8514 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8515 (on ? "on" : "off"),
8516 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
c6e79dac
SRT
8517 return ret;
8518}
8519
8520static int ufshcd_init_clocks(struct ufs_hba *hba)
8521{
8522 int ret = 0;
8523 struct ufs_clk_info *clki;
8524 struct device *dev = hba->dev;
8525 struct list_head *head = &hba->clk_list_head;
8526
566ec9ad 8527 if (list_empty(head))
c6e79dac
SRT
8528 goto out;
8529
8530 list_for_each_entry(clki, head, list) {
8531 if (!clki->name)
8532 continue;
8533
8534 clki->clk = devm_clk_get(dev, clki->name);
8535 if (IS_ERR(clki->clk)) {
8536 ret = PTR_ERR(clki->clk);
8537 dev_err(dev, "%s: %s clk get failed, %d\n",
8538 __func__, clki->name, ret);
8539 goto out;
8540 }
8541
9e1e8a75
SJ
8542 /*
8543 * Parse device ref clk freq as per device tree "ref_clk".
8544 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8545 * in ufshcd_alloc_host().
8546 */
8547 if (!strcmp(clki->name, "ref_clk"))
8548 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8549
c6e79dac
SRT
8550 if (clki->max_freq) {
8551 ret = clk_set_rate(clki->clk, clki->max_freq);
8552 if (ret) {
8553 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8554 __func__, clki->name,
8555 clki->max_freq, ret);
8556 goto out;
8557 }
856b3483 8558 clki->curr_freq = clki->max_freq;
c6e79dac
SRT
8559 }
8560 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8561 clki->name, clk_get_rate(clki->clk));
8562 }
8563out:
8564 return ret;
8565}
8566
5c0c28a8
SRT
8567static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8568{
8569 int err = 0;
8570
8571 if (!hba->vops)
8572 goto out;
8573
0263bcd0 8574 err = ufshcd_vops_init(hba);
5c0c28a8
SRT
8575 if (err)
8576 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
0263bcd0 8577 __func__, ufshcd_get_var_name(hba), err);
ade921a8 8578out:
5c0c28a8
SRT
8579 return err;
8580}
8581
8582static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8583{
8584 if (!hba->vops)
8585 return;
8586
0263bcd0 8587 ufshcd_vops_exit(hba);
5c0c28a8
SRT
8588}
8589
aa497613
SRT
8590static int ufshcd_hba_init(struct ufs_hba *hba)
8591{
8592 int err;
8593
6a771a65
RS
8594 /*
8595 * Handle host controller power separately from the UFS device power
8596 * rails as it will help controlling the UFS host controller power
8597 * collapse easily which is different than UFS device power collapse.
8598 * Also, enable the host controller power before we go ahead with rest
8599 * of the initialization here.
8600 */
8601 err = ufshcd_init_hba_vreg(hba);
aa497613
SRT
8602 if (err)
8603 goto out;
8604
6a771a65 8605 err = ufshcd_setup_hba_vreg(hba, true);
aa497613
SRT
8606 if (err)
8607 goto out;
8608
6a771a65
RS
8609 err = ufshcd_init_clocks(hba);
8610 if (err)
8611 goto out_disable_hba_vreg;
8612
8613 err = ufshcd_setup_clocks(hba, true);
8614 if (err)
8615 goto out_disable_hba_vreg;
8616
c6e79dac
SRT
8617 err = ufshcd_init_vreg(hba);
8618 if (err)
8619 goto out_disable_clks;
8620
8621 err = ufshcd_setup_vreg(hba, true);
8622 if (err)
8623 goto out_disable_clks;
8624
aa497613
SRT
8625 err = ufshcd_variant_hba_init(hba);
8626 if (err)
8627 goto out_disable_vreg;
8628
b6cacaf2
AH
8629 ufs_debugfs_hba_init(hba);
8630
1d337ec2 8631 hba->is_powered = true;
aa497613
SRT
8632 goto out;
8633
8634out_disable_vreg:
8635 ufshcd_setup_vreg(hba, false);
c6e79dac
SRT
8636out_disable_clks:
8637 ufshcd_setup_clocks(hba, false);
6a771a65
RS
8638out_disable_hba_vreg:
8639 ufshcd_setup_hba_vreg(hba, false);
aa497613
SRT
8640out:
8641 return err;
8642}
8643
8644static void ufshcd_hba_exit(struct ufs_hba *hba)
8645{
1d337ec2 8646 if (hba->is_powered) {
4543d9d7
CG
8647 ufshcd_exit_clk_scaling(hba);
8648 ufshcd_exit_clk_gating(hba);
88b09900
AH
8649 if (hba->eh_wq)
8650 destroy_workqueue(hba->eh_wq);
b6cacaf2 8651 ufs_debugfs_hba_exit(hba);
1d337ec2
SRT
8652 ufshcd_variant_hba_exit(hba);
8653 ufshcd_setup_vreg(hba, false);
8654 ufshcd_setup_clocks(hba, false);
8655 ufshcd_setup_hba_vreg(hba, false);
8656 hba->is_powered = false;
09750066 8657 ufs_put_device_desc(hba);
1d337ec2 8658 }
aa497613
SRT
8659}
8660
57d104c1
SJ
8661/**
8662 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8663 * power mode
8664 * @hba: per adapter instance
8665 * @pwr_mode: device power mode to set
8666 *
8667 * Returns 0 if requested power mode is set successfully
ad6c8a42 8668 * Returns < 0 if failed to set the requested power mode
57d104c1
SJ
8669 */
8670static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8671 enum ufs_dev_pwr_mode pwr_mode)
8672{
8673 unsigned char cmd[6] = { START_STOP };
8674 struct scsi_sense_hdr sshdr;
7c48bfd0
AM
8675 struct scsi_device *sdp;
8676 unsigned long flags;
af21c3fd 8677 int ret, retries;
57d104c1 8678
7c48bfd0
AM
8679 spin_lock_irqsave(hba->host->host_lock, flags);
8680 sdp = hba->sdev_ufs_device;
8681 if (sdp) {
8682 ret = scsi_device_get(sdp);
8683 if (!ret && !scsi_device_online(sdp)) {
8684 ret = -ENODEV;
8685 scsi_device_put(sdp);
8686 }
8687 } else {
8688 ret = -ENODEV;
8689 }
8690 spin_unlock_irqrestore(hba->host->host_lock, flags);
8691
8692 if (ret)
8693 return ret;
57d104c1
SJ
8694
8695 /*
8696 * If scsi commands fail, the scsi mid-layer schedules scsi error-
8697 * handling, which would wait for host to be resumed. Since we know
8698 * we are functional while we are here, skip host resume in error
8699 * handling context.
8700 */
8701 hba->host->eh_noresume = 1;
57d104c1
SJ
8702
8703 cmd[4] = pwr_mode << 4;
8704
8705 /*
8706 * Current function would be generally called from the power management
e8064021 8707 * callbacks hence set the RQF_PM flag so that it doesn't resume the
57d104c1
SJ
8708 * already suspended childs.
8709 */
af21c3fd
JK
8710 for (retries = 3; retries > 0; --retries) {
8711 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8712 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8713 if (!scsi_status_is_check_condition(ret) ||
8714 !scsi_sense_valid(&sshdr) ||
8715 sshdr.sense_key != UNIT_ATTENTION)
8716 break;
8717 }
57d104c1
SJ
8718 if (ret) {
8719 sdev_printk(KERN_WARNING, sdp,
ef61329d
HR
8720 "START_STOP failed for power mode: %d, result %x\n",
8721 pwr_mode, ret);
ad6c8a42
KK
8722 if (ret > 0) {
8723 if (scsi_sense_valid(&sshdr))
8724 scsi_print_sense_hdr(sdp, NULL, &sshdr);
8725 ret = -EIO;
8726 }
57d104c1
SJ
8727 }
8728
8729 if (!ret)
8730 hba->curr_dev_pwr_mode = pwr_mode;
1918651f 8731
7c48bfd0 8732 scsi_device_put(sdp);
57d104c1
SJ
8733 hba->host->eh_noresume = 0;
8734 return ret;
8735}
8736
8737static int ufshcd_link_state_transition(struct ufs_hba *hba,
8738 enum uic_link_state req_link_state,
8739 int check_for_bkops)
8740{
8741 int ret = 0;
8742
8743 if (req_link_state == hba->uic_link_state)
8744 return 0;
8745
8746 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8747 ret = ufshcd_uic_hibern8_enter(hba);
4db7a236 8748 if (!ret) {
57d104c1 8749 ufshcd_set_link_hibern8(hba);
4db7a236
CG
8750 } else {
8751 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8752 __func__, ret);
57d104c1 8753 goto out;
4db7a236 8754 }
57d104c1
SJ
8755 }
8756 /*
8757 * If autobkops is enabled, link can't be turned off because
fe1d4c2e
AH
8758 * turning off the link would also turn off the device, except in the
8759 * case of DeepSleep where the device is expected to remain powered.
57d104c1
SJ
8760 */
8761 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
dc30c9e6 8762 (!check_for_bkops || !hba->auto_bkops_enabled)) {
f3099fbd
YG
8763 /*
8764 * Let's make sure that link is in low power mode, we are doing
8765 * this currently by putting the link in Hibern8. Otherway to
8766 * put the link in low power mode is to send the DME end point
8767 * to device and then send the DME reset command to local
8768 * unipro. But putting the link in hibern8 is much faster.
fe1d4c2e
AH
8769 *
8770 * Note also that putting the link in Hibern8 is a requirement
8771 * for entering DeepSleep.
f3099fbd
YG
8772 */
8773 ret = ufshcd_uic_hibern8_enter(hba);
4db7a236
CG
8774 if (ret) {
8775 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8776 __func__, ret);
f3099fbd 8777 goto out;
4db7a236 8778 }
57d104c1
SJ
8779 /*
8780 * Change controller state to "reset state" which
8781 * should also put the link in off/reset state
8782 */
5cac1095 8783 ufshcd_hba_stop(hba);
57d104c1
SJ
8784 /*
8785 * TODO: Check if we need any delay to make sure that
8786 * controller is reset
8787 */
8788 ufshcd_set_link_off(hba);
8789 }
8790
8791out:
8792 return ret;
8793}
8794
8795static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8796{
c4df6eed
SC
8797 bool vcc_off = false;
8798
b799fdf7
YG
8799 /*
8800 * It seems some UFS devices may keep drawing more than sleep current
8801 * (atleast for 500us) from UFS rails (especially from VCCQ rail).
8802 * To avoid this situation, add 2ms delay before putting these UFS
8803 * rails in LPM mode.
8804 */
8805 if (!ufshcd_is_link_active(hba) &&
8806 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8807 usleep_range(2000, 2100);
8808
57d104c1
SJ
8809 /*
8810 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
8811 * power.
8812 *
8813 * If UFS device and link is in OFF state, all power supplies (VCC,
8814 * VCCQ, VCCQ2) can be turned off if power on write protect is not
8815 * required. If UFS link is inactive (Hibern8 or OFF state) and device
8816 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8817 *
8818 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8819 * in low power state which would save some power.
3d17b9b5
AD
8820 *
8821 * If Write Booster is enabled and the device needs to flush the WB
8822 * buffer OR if bkops status is urgent for WB, keep Vcc on.
57d104c1
SJ
8823 */
8824 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8825 !hba->dev_info.is_lu_power_on_wp) {
8826 ufshcd_setup_vreg(hba, false);
c4df6eed 8827 vcc_off = true;
57d104c1 8828 } else if (!ufshcd_is_ufs_dev_active(hba)) {
51dd905b 8829 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
c4df6eed 8830 vcc_off = true;
23043dd8 8831 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
57d104c1
SJ
8832 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8833 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8834 }
8835 }
c4df6eed
SC
8836
8837 /*
8838 * Some UFS devices require delay after VCC power rail is turned-off.
8839 */
8840 if (vcc_off && hba->vreg_info.vcc &&
8841 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8842 usleep_range(5000, 5100);
57d104c1
SJ
8843}
8844
9bb25e5d 8845#ifdef CONFIG_PM
57d104c1
SJ
8846static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8847{
8848 int ret = 0;
8849
8850 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8851 !hba->dev_info.is_lu_power_on_wp) {
8852 ret = ufshcd_setup_vreg(hba, true);
8853 } else if (!ufshcd_is_ufs_dev_active(hba)) {
23043dd8 8854 if (!ufshcd_is_link_active(hba)) {
57d104c1
SJ
8855 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8856 if (ret)
8857 goto vcc_disable;
8858 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8859 if (ret)
8860 goto vccq_lpm;
8861 }
69d72ac8 8862 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
57d104c1
SJ
8863 }
8864 goto out;
8865
8866vccq_lpm:
8867 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8868vcc_disable:
8869 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8870out:
8871 return ret;
8872}
9bb25e5d 8873#endif /* CONFIG_PM */
57d104c1
SJ
8874
8875static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8876{
dd7143e2 8877 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
57d104c1
SJ
8878 ufshcd_setup_hba_vreg(hba, false);
8879}
8880
8881static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8882{
dd7143e2 8883 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
57d104c1
SJ
8884 ufshcd_setup_hba_vreg(hba, true);
8885}
8886
b294ff3e 8887static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 8888{
57d104c1 8889 int ret = 0;
fe1d4c2e 8890 int check_for_bkops;
57d104c1
SJ
8891 enum ufs_pm_level pm_lvl;
8892 enum ufs_dev_pwr_mode req_dev_pwr_mode;
8893 enum uic_link_state req_link_state;
8894
b294ff3e 8895 hba->pm_op_in_progress = true;
4c6cb9ed
BVA
8896 if (pm_op != UFS_SHUTDOWN_PM) {
8897 pm_lvl = pm_op == UFS_RUNTIME_PM ?
57d104c1
SJ
8898 hba->rpm_lvl : hba->spm_lvl;
8899 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8900 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8901 } else {
8902 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8903 req_link_state = UIC_LINK_OFF_STATE;
8904 }
8905
f02bc975
DP
8906 ufshpb_suspend(hba);
8907
7a3e97b0 8908 /*
57d104c1
SJ
8909 * If we can't transition into any of the low power modes
8910 * just gate the clocks.
7a3e97b0 8911 */
1ab27c9c
ST
8912 ufshcd_hold(hba, false);
8913 hba->clk_gating.is_suspended = true;
8914
348e1bc5
SC
8915 if (ufshcd_is_clkscaling_supported(hba))
8916 ufshcd_clk_scaling_suspend(hba, true);
d6fcf81a 8917
57d104c1
SJ
8918 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8919 req_link_state == UIC_LINK_ACTIVE_STATE) {
b294ff3e 8920 goto vops_suspend;
57d104c1 8921 }
7a3e97b0 8922
57d104c1
SJ
8923 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8924 (req_link_state == hba->uic_link_state))
b294ff3e 8925 goto enable_scaling;
57d104c1
SJ
8926
8927 /* UFS device & link must be active before we enter in this function */
8928 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8929 ret = -EINVAL;
b294ff3e 8930 goto enable_scaling;
57d104c1
SJ
8931 }
8932
4c6cb9ed 8933 if (pm_op == UFS_RUNTIME_PM) {
374a246e
SJ
8934 if (ufshcd_can_autobkops_during_suspend(hba)) {
8935 /*
8936 * The device is idle with no requests in the queue,
8937 * allow background operations if bkops status shows
8938 * that performance might be impacted.
8939 */
8940 ret = ufshcd_urgent_bkops(hba);
8941 if (ret)
b294ff3e 8942 goto enable_scaling;
374a246e
SJ
8943 } else {
8944 /* make sure that auto bkops is disabled */
8945 ufshcd_disable_auto_bkops(hba);
8946 }
3d17b9b5 8947 /*
51dd905b
SC
8948 * If device needs to do BKOP or WB buffer flush during
8949 * Hibern8, keep device power mode as "active power mode"
8950 * and VCC supply.
3d17b9b5 8951 */
51dd905b
SC
8952 hba->dev_info.b_rpm_dev_flush_capable =
8953 hba->auto_bkops_enabled ||
8954 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8955 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8956 ufshcd_is_auto_hibern8_enabled(hba))) &&
8957 ufshcd_wb_need_flush(hba));
8958 }
8959
6948a96a
KK
8960 flush_work(&hba->eeh_work);
8961
9561f584
PW
8962 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
8963 if (ret)
8964 goto enable_scaling;
8965
51dd905b 8966 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
4c6cb9ed 8967 if (pm_op != UFS_RUNTIME_PM)
51dd905b
SC
8968 /* ensure that bkops is disabled */
8969 ufshcd_disable_auto_bkops(hba);
57d104c1 8970
51dd905b
SC
8971 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8972 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8973 if (ret)
b294ff3e 8974 goto enable_scaling;
51dd905b 8975 }
57d104c1
SJ
8976 }
8977
fe1d4c2e
AH
8978 /*
8979 * In the case of DeepSleep, the device is expected to remain powered
8980 * with the link off, so do not check for bkops.
8981 */
8982 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
8983 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
57d104c1
SJ
8984 if (ret)
8985 goto set_dev_active;
8986
b294ff3e 8987vops_suspend:
57d104c1
SJ
8988 /*
8989 * Call vendor specific suspend callback. As these callbacks may access
8990 * vendor specific host controller register space call them before the
8991 * host clocks are ON.
8992 */
9561f584 8993 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
0263bcd0
YG
8994 if (ret)
8995 goto set_link_active;
57d104c1
SJ
8996 goto out;
8997
57d104c1 8998set_link_active:
fe1d4c2e
AH
8999 /*
9000 * Device hardware reset is required to exit DeepSleep. Also, for
9001 * DeepSleep, the link is off so host reset and restore will be done
9002 * further below.
9003 */
9004 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
31a5d9ca 9005 ufshcd_device_reset(hba);
fe1d4c2e
AH
9006 WARN_ON(!ufshcd_is_link_off(hba));
9007 }
57d104c1
SJ
9008 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9009 ufshcd_set_link_active(hba);
9010 else if (ufshcd_is_link_off(hba))
9011 ufshcd_host_reset_and_restore(hba);
9012set_dev_active:
fe1d4c2e
AH
9013 /* Can also get here needing to exit DeepSleep */
9014 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
31a5d9ca 9015 ufshcd_device_reset(hba);
fe1d4c2e
AH
9016 ufshcd_host_reset_and_restore(hba);
9017 }
57d104c1
SJ
9018 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9019 ufshcd_disable_auto_bkops(hba);
b294ff3e 9020enable_scaling:
348e1bc5
SC
9021 if (ufshcd_is_clkscaling_supported(hba))
9022 ufshcd_clk_scaling_suspend(hba, false);
9023
51dd905b 9024 hba->dev_info.b_rpm_dev_flush_capable = false;
57d104c1 9025out:
51dd905b
SC
9026 if (hba->dev_info.b_rpm_dev_flush_capable) {
9027 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9028 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9029 }
9030
b294ff3e
AD
9031 if (ret) {
9032 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9033 hba->clk_gating.is_suspended = false;
9034 ufshcd_release(hba);
f02bc975 9035 ufshpb_resume(hba);
b294ff3e
AD
9036 }
9037 hba->pm_op_in_progress = false;
57d104c1 9038 return ret;
7a3e97b0
SY
9039}
9040
75d645a6 9041#ifdef CONFIG_PM
b294ff3e 9042static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 9043{
57d104c1 9044 int ret;
b294ff3e 9045 enum uic_link_state old_link_state = hba->uic_link_state;
57d104c1 9046
b294ff3e 9047 hba->pm_op_in_progress = true;
57d104c1 9048
7a3e97b0 9049 /*
57d104c1
SJ
9050 * Call vendor specific resume callback. As these callbacks may access
9051 * vendor specific host controller register space call them when the
9052 * host clocks are ON.
7a3e97b0 9053 */
0263bcd0
YG
9054 ret = ufshcd_vops_resume(hba, pm_op);
9055 if (ret)
b294ff3e 9056 goto out;
57d104c1 9057
fe1d4c2e
AH
9058 /* For DeepSleep, the only supported option is to have the link off */
9059 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9060
57d104c1
SJ
9061 if (ufshcd_is_link_hibern8(hba)) {
9062 ret = ufshcd_uic_hibern8_exit(hba);
4db7a236 9063 if (!ret) {
57d104c1 9064 ufshcd_set_link_active(hba);
4db7a236
CG
9065 } else {
9066 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9067 __func__, ret);
57d104c1 9068 goto vendor_suspend;
4db7a236 9069 }
57d104c1 9070 } else if (ufshcd_is_link_off(hba)) {
57d104c1 9071 /*
089f5b64
AD
9072 * A full initialization of the host and the device is
9073 * required since the link was put to off during suspend.
fe1d4c2e
AH
9074 * Note, in the case of DeepSleep, the device will exit
9075 * DeepSleep due to device reset.
089f5b64
AD
9076 */
9077 ret = ufshcd_reset_and_restore(hba);
9078 /*
9079 * ufshcd_reset_and_restore() should have already
57d104c1
SJ
9080 * set the link state as active
9081 */
9082 if (ret || !ufshcd_is_link_active(hba))
9083 goto vendor_suspend;
9084 }
9085
9086 if (!ufshcd_is_ufs_dev_active(hba)) {
9087 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9088 if (ret)
9089 goto set_old_link_state;
9090 }
9091
4e768e76 9092 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9093 ufshcd_enable_auto_bkops(hba);
9094 else
9095 /*
9096 * If BKOPs operations are urgently needed at this moment then
9097 * keep auto-bkops enabled or else disable it.
9098 */
9099 ufshcd_urgent_bkops(hba);
9100
cd469475
AH
9101 if (hba->ee_usr_mask)
9102 ufshcd_write_ee_control(hba);
9103
348e1bc5
SC
9104 if (ufshcd_is_clkscaling_supported(hba))
9105 ufshcd_clk_scaling_suspend(hba, false);
856b3483 9106
51dd905b
SC
9107 if (hba->dev_info.b_rpm_dev_flush_capable) {
9108 hba->dev_info.b_rpm_dev_flush_capable = false;
9109 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9110 }
9111
b294ff3e
AD
9112 /* Enable Auto-Hibernate if configured */
9113 ufshcd_auto_hibern8_enable(hba);
f02bc975
DP
9114
9115 ufshpb_resume(hba);
57d104c1
SJ
9116 goto out;
9117
9118set_old_link_state:
9119 ufshcd_link_state_transition(hba, old_link_state, 0);
9120vendor_suspend:
9561f584
PW
9121 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9122 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
b294ff3e
AD
9123out:
9124 if (ret)
9125 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9126 hba->clk_gating.is_suspended = false;
9127 ufshcd_release(hba);
9128 hba->pm_op_in_progress = false;
9129 return ret;
9130}
9131
9132static int ufshcd_wl_runtime_suspend(struct device *dev)
9133{
9134 struct scsi_device *sdev = to_scsi_device(dev);
9135 struct ufs_hba *hba;
9136 int ret;
9137 ktime_t start = ktime_get();
9138
9139 hba = shost_priv(sdev->host);
9140
9141 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9142 if (ret)
9143 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9144
9145 trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9146 ktime_to_us(ktime_sub(ktime_get(), start)),
9147 hba->curr_dev_pwr_mode, hba->uic_link_state);
9148
9149 return ret;
9150}
9151
9152static int ufshcd_wl_runtime_resume(struct device *dev)
9153{
9154 struct scsi_device *sdev = to_scsi_device(dev);
9155 struct ufs_hba *hba;
9156 int ret = 0;
9157 ktime_t start = ktime_get();
9158
9159 hba = shost_priv(sdev->host);
9160
9161 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9162 if (ret)
9163 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9164
9165 trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
9166 ktime_to_us(ktime_sub(ktime_get(), start)),
9167 hba->curr_dev_pwr_mode, hba->uic_link_state);
9168
9169 return ret;
9170}
75d645a6 9171#endif
b294ff3e
AD
9172
9173#ifdef CONFIG_PM_SLEEP
9174static int ufshcd_wl_suspend(struct device *dev)
9175{
9176 struct scsi_device *sdev = to_scsi_device(dev);
9177 struct ufs_hba *hba;
9178 int ret = 0;
9179 ktime_t start = ktime_get();
9180
9181 hba = shost_priv(sdev->host);
9182 down(&hba->host_sem);
9183
9184 if (pm_runtime_suspended(dev))
9185 goto out;
9186
9187 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
9188 if (ret) {
9189 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9190 up(&hba->host_sem);
9191 }
9192
9193out:
9194 if (!ret)
9195 hba->is_sys_suspended = true;
9196 trace_ufshcd_wl_suspend(dev_name(dev), ret,
9197 ktime_to_us(ktime_sub(ktime_get(), start)),
9198 hba->curr_dev_pwr_mode, hba->uic_link_state);
9199
9200 return ret;
9201}
9202
9203static int ufshcd_wl_resume(struct device *dev)
9204{
9205 struct scsi_device *sdev = to_scsi_device(dev);
9206 struct ufs_hba *hba;
9207 int ret = 0;
9208 ktime_t start = ktime_get();
9209
9210 hba = shost_priv(sdev->host);
9211
9212 if (pm_runtime_suspended(dev))
9213 goto out;
9214
9215 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
9216 if (ret)
9217 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9218out:
9219 trace_ufshcd_wl_resume(dev_name(dev), ret,
9220 ktime_to_us(ktime_sub(ktime_get(), start)),
9221 hba->curr_dev_pwr_mode, hba->uic_link_state);
9222 if (!ret)
9223 hba->is_sys_suspended = false;
9224 up(&hba->host_sem);
9225 return ret;
9226}
9227#endif
9228
9229static void ufshcd_wl_shutdown(struct device *dev)
9230{
9231 struct scsi_device *sdev = to_scsi_device(dev);
9232 struct ufs_hba *hba;
9233
9234 hba = shost_priv(sdev->host);
9235
9236 down(&hba->host_sem);
9237 hba->shutting_down = true;
9238 up(&hba->host_sem);
9239
9240 /* Turn on everything while shutting down */
9241 ufshcd_rpm_get_sync(hba);
9242 scsi_device_quiesce(sdev);
9243 shost_for_each_device(sdev, hba->host) {
9244 if (sdev == hba->sdev_ufs_device)
9245 continue;
9246 scsi_device_quiesce(sdev);
9247 }
9248 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9249}
9250
9251/**
9252 * ufshcd_suspend - helper function for suspend operations
9253 * @hba: per adapter instance
9254 *
9255 * This function will put disable irqs, turn off clocks
9256 * and set vreg and hba-vreg in lpm mode.
b294ff3e
AD
9257 */
9258static int ufshcd_suspend(struct ufs_hba *hba)
9259{
9260 int ret;
9261
9262 if (!hba->is_powered)
9263 return 0;
9264 /*
9265 * Disable the host irq as host controller as there won't be any
9266 * host controller transaction expected till resume.
9267 */
57d104c1 9268 ufshcd_disable_irq(hba);
b294ff3e
AD
9269 ret = ufshcd_setup_clocks(hba, false);
9270 if (ret) {
9271 ufshcd_enable_irq(hba);
9272 return ret;
9273 }
2dec9475
CG
9274 if (ufshcd_is_clkgating_allowed(hba)) {
9275 hba->clk_gating.state = CLKS_OFF;
9276 trace_ufshcd_clk_gating(dev_name(hba->dev),
9277 hba->clk_gating.state);
9278 }
b294ff3e
AD
9279
9280 ufshcd_vreg_set_lpm(hba);
9281 /* Put the host controller in low power mode if possible */
9282 ufshcd_hba_vreg_set_lpm(hba);
9283 return ret;
9284}
9285
9bb25e5d 9286#ifdef CONFIG_PM
b294ff3e
AD
9287/**
9288 * ufshcd_resume - helper function for resume operations
9289 * @hba: per adapter instance
9290 *
9291 * This function basically turns on the regulators, clocks and
9292 * irqs of the hba.
b294ff3e
AD
9293 *
9294 * Returns 0 for success and non-zero for failure
9295 */
9296static int ufshcd_resume(struct ufs_hba *hba)
9297{
9298 int ret;
9299
9300 if (!hba->is_powered)
9301 return 0;
9302
9303 ufshcd_hba_vreg_set_hpm(hba);
9304 ret = ufshcd_vreg_set_hpm(hba);
9305 if (ret)
9306 goto out;
9307
9308 /* Make sure clocks are enabled before accessing controller */
9309 ret = ufshcd_setup_clocks(hba, true);
9310 if (ret)
9311 goto disable_vreg;
9312
9313 /* enable the host irq as host controller would be active soon */
9314 ufshcd_enable_irq(hba);
9315 goto out;
9316
528db9e5
ZC
9317disable_vreg:
9318 ufshcd_vreg_set_lpm(hba);
57d104c1 9319out:
8808b4e9 9320 if (ret)
e965e5e0 9321 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
57d104c1
SJ
9322 return ret;
9323}
9bb25e5d 9324#endif /* CONFIG_PM */
57d104c1 9325
9bb25e5d 9326#ifdef CONFIG_PM_SLEEP
57d104c1 9327/**
f1ecbe1e
BVA
9328 * ufshcd_system_suspend - system suspend callback
9329 * @dev: Device associated with the UFS controller.
57d104c1 9330 *
f1ecbe1e
BVA
9331 * Executed before putting the system into a sleep state in which the contents
9332 * of main memory are preserved.
57d104c1
SJ
9333 *
9334 * Returns 0 for success and non-zero for failure
9335 */
f1ecbe1e 9336int ufshcd_system_suspend(struct device *dev)
57d104c1 9337{
f1ecbe1e 9338 struct ufs_hba *hba = dev_get_drvdata(dev);
57d104c1 9339 int ret = 0;
7ff5ab47 9340 ktime_t start = ktime_get();
57d104c1 9341
b294ff3e 9342 if (pm_runtime_suspended(hba->dev))
0b257734 9343 goto out;
57d104c1 9344
b294ff3e 9345 ret = ufshcd_suspend(hba);
57d104c1 9346out:
7ff5ab47 9347 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9348 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9349 hba->curr_dev_pwr_mode, hba->uic_link_state);
57d104c1
SJ
9350 return ret;
9351}
9352EXPORT_SYMBOL(ufshcd_system_suspend);
9353
9354/**
f1ecbe1e
BVA
9355 * ufshcd_system_resume - system resume callback
9356 * @dev: Device associated with the UFS controller.
9357 *
9358 * Executed after waking the system up from a sleep state in which the contents
9359 * of main memory were preserved.
57d104c1
SJ
9360 *
9361 * Returns 0 for success and non-zero for failure
9362 */
f1ecbe1e 9363int ufshcd_system_resume(struct device *dev)
57d104c1 9364{
f1ecbe1e 9365 struct ufs_hba *hba = dev_get_drvdata(dev);
7ff5ab47 9366 ktime_t start = ktime_get();
f1ecbe1e 9367 int ret = 0;
7ff5ab47 9368
b294ff3e 9369 if (pm_runtime_suspended(hba->dev))
7ff5ab47 9370 goto out;
b294ff3e
AD
9371
9372 ret = ufshcd_resume(hba);
9373
7ff5ab47 9374out:
9375 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9376 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9377 hba->curr_dev_pwr_mode, hba->uic_link_state);
b294ff3e 9378
7ff5ab47 9379 return ret;
7a3e97b0 9380}
57d104c1 9381EXPORT_SYMBOL(ufshcd_system_resume);
9bb25e5d 9382#endif /* CONFIG_PM_SLEEP */
3b1d0580 9383
9bb25e5d 9384#ifdef CONFIG_PM
57d104c1 9385/**
f1ecbe1e
BVA
9386 * ufshcd_runtime_suspend - runtime suspend callback
9387 * @dev: Device associated with the UFS controller.
57d104c1
SJ
9388 *
9389 * Check the description of ufshcd_suspend() function for more details.
9390 *
9391 * Returns 0 for success and non-zero for failure
9392 */
f1ecbe1e 9393int ufshcd_runtime_suspend(struct device *dev)
66ec6d59 9394{
f1ecbe1e 9395 struct ufs_hba *hba = dev_get_drvdata(dev);
b294ff3e 9396 int ret;
7ff5ab47 9397 ktime_t start = ktime_get();
9398
b294ff3e
AD
9399 ret = ufshcd_suspend(hba);
9400
7ff5ab47 9401 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9402 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9403 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 9404 return ret;
66ec6d59
SRT
9405}
9406EXPORT_SYMBOL(ufshcd_runtime_suspend);
9407
57d104c1
SJ
9408/**
9409 * ufshcd_runtime_resume - runtime resume routine
f1ecbe1e 9410 * @dev: Device associated with the UFS controller.
57d104c1 9411 *
b294ff3e 9412 * This function basically brings controller
57d104c1
SJ
9413 * to active state. Following operations are done in this function:
9414 *
9415 * 1. Turn on all the controller related clocks
b294ff3e 9416 * 2. Turn ON VCC rail
57d104c1 9417 */
f1ecbe1e 9418int ufshcd_runtime_resume(struct device *dev)
66ec6d59 9419{
f1ecbe1e 9420 struct ufs_hba *hba = dev_get_drvdata(dev);
b294ff3e 9421 int ret;
7ff5ab47 9422 ktime_t start = ktime_get();
9423
b294ff3e
AD
9424 ret = ufshcd_resume(hba);
9425
7ff5ab47 9426 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9427 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 9428 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 9429 return ret;
66ec6d59
SRT
9430}
9431EXPORT_SYMBOL(ufshcd_runtime_resume);
9bb25e5d 9432#endif /* CONFIG_PM */
66ec6d59 9433
57d104c1
SJ
9434/**
9435 * ufshcd_shutdown - shutdown routine
9436 * @hba: per adapter instance
9437 *
b294ff3e
AD
9438 * This function would turn off both UFS device and UFS hba
9439 * regulators. It would also disable clocks.
57d104c1
SJ
9440 *
9441 * Returns 0 always to allow force shutdown even in case of errors.
9442 */
9443int ufshcd_shutdown(struct ufs_hba *hba)
9444{
57d104c1
SJ
9445 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9446 goto out;
9447
e92643db 9448 pm_runtime_get_sync(hba->dev);
57d104c1 9449
b294ff3e 9450 ufshcd_suspend(hba);
57d104c1 9451out:
88a92d6a 9452 hba->is_powered = false;
57d104c1
SJ
9453 /* allow force shutdown even in case of errors */
9454 return 0;
9455}
9456EXPORT_SYMBOL(ufshcd_shutdown);
9457
7a3e97b0 9458/**
3b1d0580 9459 * ufshcd_remove - de-allocate SCSI host and host memory space
7a3e97b0 9460 * data structure memory
8aa29f19 9461 * @hba: per adapter instance
7a3e97b0 9462 */
3b1d0580 9463void ufshcd_remove(struct ufs_hba *hba)
7a3e97b0 9464{
b294ff3e
AD
9465 if (hba->sdev_ufs_device)
9466 ufshcd_rpm_get_sync(hba);
e88e2d32 9467 ufs_hwmon_remove(hba);
df032bf2 9468 ufs_bsg_remove(hba);
4b5f4907 9469 ufshpb_remove(hba);
cbb6813e 9470 ufs_sysfs_remove_nodes(hba->dev);
69a6c269
BVA
9471 blk_cleanup_queue(hba->tmf_queue);
9472 blk_mq_free_tag_set(&hba->tmf_tag_set);
cfdf9c91 9473 scsi_remove_host(hba->host);
7a3e97b0 9474 /* disable interrupts */
2fbd009b 9475 ufshcd_disable_intr(hba, hba->intr_mask);
5cac1095 9476 ufshcd_hba_stop(hba);
aa497613 9477 ufshcd_hba_exit(hba);
3b1d0580
VH
9478}
9479EXPORT_SYMBOL_GPL(ufshcd_remove);
9480
47555a5c
YG
9481/**
9482 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9483 * @hba: pointer to Host Bus Adapter (HBA)
9484 */
9485void ufshcd_dealloc_host(struct ufs_hba *hba)
9486{
9487 scsi_host_put(hba->host);
9488}
9489EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
9490
ca3d7bf9
AM
9491/**
9492 * ufshcd_set_dma_mask - Set dma mask based on the controller
9493 * addressing capability
9494 * @hba: per adapter instance
9495 *
9496 * Returns 0 for success, non-zero for failure
9497 */
9498static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9499{
9500 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9501 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9502 return 0;
9503 }
9504 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9505}
9506
7a3e97b0 9507/**
5c0c28a8 9508 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3b1d0580
VH
9509 * @dev: pointer to device handle
9510 * @hba_handle: driver private handle
7a3e97b0
SY
9511 * Returns 0 on success, non-zero value on failure
9512 */
5c0c28a8 9513int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7a3e97b0
SY
9514{
9515 struct Scsi_Host *host;
9516 struct ufs_hba *hba;
5c0c28a8 9517 int err = 0;
7a3e97b0 9518
3b1d0580
VH
9519 if (!dev) {
9520 dev_err(dev,
9521 "Invalid memory reference for dev is NULL\n");
9522 err = -ENODEV;
7a3e97b0
SY
9523 goto out_error;
9524 }
9525
7a3e97b0
SY
9526 host = scsi_host_alloc(&ufshcd_driver_template,
9527 sizeof(struct ufs_hba));
9528 if (!host) {
3b1d0580 9529 dev_err(dev, "scsi_host_alloc failed\n");
7a3e97b0 9530 err = -ENOMEM;
3b1d0580 9531 goto out_error;
7a3e97b0 9532 }
eaab9b57 9533 host->nr_maps = HCTX_TYPE_POLL + 1;
7a3e97b0 9534 hba = shost_priv(host);
7a3e97b0 9535 hba->host = host;
3b1d0580 9536 hba->dev = dev;
9e1e8a75 9537 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
1cbc9ad3 9538 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
566ec9ad 9539 INIT_LIST_HEAD(&hba->clk_list_head);
169f5eb2
BVA
9540 spin_lock_init(&hba->outstanding_lock);
9541
9542 *hba_handle = hba;
566ec9ad 9543
5c0c28a8
SRT
9544out_error:
9545 return err;
9546}
9547EXPORT_SYMBOL(ufshcd_alloc_host);
9548
69a6c269
BVA
9549/* This function exists because blk_mq_alloc_tag_set() requires this. */
9550static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9551 const struct blk_mq_queue_data *qd)
9552{
9553 WARN_ON_ONCE(true);
9554 return BLK_STS_NOTSUPP;
9555}
9556
9557static const struct blk_mq_ops ufshcd_tmf_ops = {
9558 .queue_rq = ufshcd_queue_tmf,
9559};
9560
5c0c28a8
SRT
9561/**
9562 * ufshcd_init - Driver initialization routine
9563 * @hba: per-adapter instance
9564 * @mmio_base: base register address
9565 * @irq: Interrupt line of device
9566 * Returns 0 on success, non-zero value on failure
9567 */
9568int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9569{
9570 int err;
9571 struct Scsi_Host *host = hba->host;
9572 struct device *dev = hba->dev;
88b09900 9573 char eh_wq_name[sizeof("ufs_eh_wq_00")];
5c0c28a8 9574
21ad0e49
BVA
9575 /*
9576 * dev_set_drvdata() must be called before any callbacks are registered
9577 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
9578 * sysfs).
9579 */
9580 dev_set_drvdata(dev, hba);
9581
5c0c28a8
SRT
9582 if (!mmio_base) {
9583 dev_err(hba->dev,
9584 "Invalid memory reference for mmio_base is NULL\n");
9585 err = -ENODEV;
9586 goto out_error;
9587 }
9588
3b1d0580
VH
9589 hba->mmio_base = mmio_base;
9590 hba->irq = irq;
90b8491c 9591 hba->vps = &ufs_hba_vps;
7a3e97b0 9592
aa497613 9593 err = ufshcd_hba_init(hba);
5c0c28a8
SRT
9594 if (err)
9595 goto out_error;
9596
7a3e97b0 9597 /* Read capabilities registers */
df043c74
ST
9598 err = ufshcd_hba_capabilities(hba);
9599 if (err)
9600 goto out_disable;
7a3e97b0
SY
9601
9602 /* Get UFS version supported by the controller */
9603 hba->ufs_version = ufshcd_get_ufs_version(hba);
9604
2fbd009b
SJ
9605 /* Get Interrupt bit mask per version */
9606 hba->intr_mask = ufshcd_get_intr_mask(hba);
9607
ca3d7bf9
AM
9608 err = ufshcd_set_dma_mask(hba);
9609 if (err) {
9610 dev_err(hba->dev, "set dma mask failed\n");
9611 goto out_disable;
9612 }
9613
7a3e97b0
SY
9614 /* Allocate memory for host memory space */
9615 err = ufshcd_memory_alloc(hba);
9616 if (err) {
3b1d0580
VH
9617 dev_err(hba->dev, "Memory allocation failed\n");
9618 goto out_disable;
7a3e97b0
SY
9619 }
9620
9621 /* Configure LRB */
9622 ufshcd_host_memory_configure(hba);
9623
945c3cca
BVA
9624 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
9625 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
7a3e97b0 9626 host->max_id = UFSHCD_MAX_ID;
0ce147d4 9627 host->max_lun = UFS_MAX_LUNS;
7a3e97b0
SY
9628 host->max_channel = UFSHCD_MAX_CHANNEL;
9629 host->unique_id = host->host_no;
a851b2bd 9630 host->max_cmd_len = UFS_CDB_SIZE;
7a3e97b0 9631
7eb584db
DR
9632 hba->max_pwr_info.is_valid = false;
9633
88b09900
AH
9634 /* Initialize work queues */
9635 snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9636 hba->host->host_no);
9637 hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9638 if (!hba->eh_wq) {
9639 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9640 __func__);
9641 err = -ENOMEM;
9642 goto out_disable;
9643 }
9644 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
66ec6d59 9645 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7a3e97b0 9646
9cd20d3f 9647 sema_init(&hba->host_sem, 1);
88a92d6a 9648
6ccf44fe
SJ
9649 /* Initialize UIC command mutex */
9650 mutex_init(&hba->uic_cmd_mutex);
9651
5a0b0cb9
SRT
9652 /* Initialize mutex for device management commands */
9653 mutex_init(&hba->dev_cmd.lock);
9654
cd469475
AH
9655 /* Initialize mutex for exception event control */
9656 mutex_init(&hba->ee_ctrl_mutex);
9657
a3cd5ec5 9658 init_rwsem(&hba->clk_scaling_lock);
9659
1ab27c9c 9660 ufshcd_init_clk_gating(hba);
199ef13c 9661
eebcc196
VG
9662 ufshcd_init_clk_scaling(hba);
9663
199ef13c
YG
9664 /*
9665 * In order to avoid any spurious interrupt immediately after
9666 * registering UFS controller interrupt handler, clear any pending UFS
9667 * interrupt status and disable all the UFS interrupts.
9668 */
9669 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9670 REG_INTERRUPT_STATUS);
9671 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9672 /*
9673 * Make sure that UFS interrupts are disabled and any pending interrupt
9674 * status is cleared before registering UFS interrupt handler.
9675 */
9676 mb();
9677
7a3e97b0 9678 /* IRQ registration */
2953f850 9679 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7a3e97b0 9680 if (err) {
3b1d0580 9681 dev_err(hba->dev, "request irq failed\n");
4543d9d7 9682 goto out_disable;
57d104c1
SJ
9683 } else {
9684 hba->is_irq_enabled = true;
7a3e97b0
SY
9685 }
9686
3b1d0580 9687 err = scsi_add_host(host, hba->dev);
7a3e97b0 9688 if (err) {
3b1d0580 9689 dev_err(hba->dev, "scsi_add_host failed\n");
4543d9d7 9690 goto out_disable;
7a3e97b0
SY
9691 }
9692
69a6c269
BVA
9693 hba->tmf_tag_set = (struct blk_mq_tag_set) {
9694 .nr_hw_queues = 1,
9695 .queue_depth = hba->nutmrs,
9696 .ops = &ufshcd_tmf_ops,
9697 .flags = BLK_MQ_F_NO_SCHED,
9698 };
9699 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9700 if (err < 0)
511a083b 9701 goto out_remove_scsi_host;
69a6c269
BVA
9702 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9703 if (IS_ERR(hba->tmf_queue)) {
9704 err = PTR_ERR(hba->tmf_queue);
9705 goto free_tmf_tag_set;
9706 }
f5ef336f
AH
9707 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
9708 sizeof(*hba->tmf_rqs), GFP_KERNEL);
9709 if (!hba->tmf_rqs) {
9710 err = -ENOMEM;
9711 goto free_tmf_queue;
9712 }
69a6c269 9713
d8d9f793 9714 /* Reset the attached device */
31a5d9ca 9715 ufshcd_device_reset(hba);
d8d9f793 9716
df043c74
ST
9717 ufshcd_init_crypto(hba);
9718
6ccf44fe
SJ
9719 /* Host controller enable */
9720 err = ufshcd_hba_enable(hba);
7a3e97b0 9721 if (err) {
6ccf44fe 9722 dev_err(hba->dev, "Host controller enable failed\n");
e965e5e0 9723 ufshcd_print_evt_hist(hba);
6ba65588 9724 ufshcd_print_host_state(hba);
69a6c269 9725 goto free_tmf_queue;
7a3e97b0 9726 }
6ccf44fe 9727
0c8f7586 9728 /*
9729 * Set the default power management level for runtime and system PM.
9730 * Default power saving mode is to keep UFS link in Hibern8 state
9731 * and UFS device in sleep state.
9732 */
9733 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9734 UFS_SLEEP_PWR_MODE,
9735 UIC_LINK_HIBERN8_STATE);
9736 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9737 UFS_SLEEP_PWR_MODE,
9738 UIC_LINK_HIBERN8_STATE);
9739
51dd905b
SC
9740 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9741 ufshcd_rpm_dev_flush_recheck_work);
9742
ad448378 9743 /* Set the default auto-hiberate idle timer value to 150 ms */
f571b377 9744 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
ad448378
AH
9745 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9746 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9747 }
9748
62694735
SRT
9749 /* Hold auto suspend until async scan completes */
9750 pm_runtime_get_sync(dev);
38135535 9751 atomic_set(&hba->scsi_block_reqs_cnt, 0);
57d104c1 9752 /*
7caf489b 9753 * We are assuming that device wasn't put in sleep/power-down
9754 * state exclusively during the boot stage before kernel.
9755 * This assumption helps avoid doing link startup twice during
9756 * ufshcd_probe_hba().
57d104c1 9757 */
7caf489b 9758 ufshcd_set_ufs_dev_active(hba);
57d104c1 9759
6ccf44fe 9760 async_schedule(ufshcd_async_scan, hba);
cbb6813e 9761 ufs_sysfs_add_nodes(hba->dev);
6ccf44fe 9762
1084514c 9763 device_enable_async_suspend(dev);
7a3e97b0
SY
9764 return 0;
9765
69a6c269
BVA
9766free_tmf_queue:
9767 blk_cleanup_queue(hba->tmf_queue);
9768free_tmf_tag_set:
9769 blk_mq_free_tag_set(&hba->tmf_tag_set);
3b1d0580
VH
9770out_remove_scsi_host:
9771 scsi_remove_host(hba->host);
3b1d0580 9772out_disable:
57d104c1 9773 hba->is_irq_enabled = false;
aa497613 9774 ufshcd_hba_exit(hba);
3b1d0580
VH
9775out_error:
9776 return err;
9777}
9778EXPORT_SYMBOL_GPL(ufshcd_init);
9779
b294ff3e
AD
9780void ufshcd_resume_complete(struct device *dev)
9781{
9782 struct ufs_hba *hba = dev_get_drvdata(dev);
9783
9784 if (hba->complete_put) {
9785 ufshcd_rpm_put(hba);
9786 hba->complete_put = false;
9787 }
b294ff3e
AD
9788}
9789EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
9790
ddba1cf7
AH
9791static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
9792{
9793 struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
9794 enum ufs_dev_pwr_mode dev_pwr_mode;
9795 enum uic_link_state link_state;
9796 unsigned long flags;
9797 bool res;
9798
9799 spin_lock_irqsave(&dev->power.lock, flags);
9800 dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
9801 link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
9802 res = pm_runtime_suspended(dev) &&
9803 hba->curr_dev_pwr_mode == dev_pwr_mode &&
9804 hba->uic_link_state == link_state &&
9805 !hba->dev_info.b_rpm_dev_flush_capable;
9806 spin_unlock_irqrestore(&dev->power.lock, flags);
9807
9808 return res;
9809}
9810
9811int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
b294ff3e
AD
9812{
9813 struct ufs_hba *hba = dev_get_drvdata(dev);
9814 int ret;
9815
9816 /*
9817 * SCSI assumes that runtime-pm and system-pm for scsi drivers
9818 * are same. And it doesn't wake up the device for system-suspend
9819 * if it's runtime suspended. But ufs doesn't follow that.
9820 * Refer ufshcd_resume_complete()
9821 */
9822 if (hba->sdev_ufs_device) {
ddba1cf7
AH
9823 /* Prevent runtime suspend */
9824 ufshcd_rpm_get_noresume(hba);
9825 /*
9826 * Check if already runtime suspended in same state as system
9827 * suspend would be.
9828 */
9829 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
9830 /* RPM state is not ok for SPM, so runtime resume */
9831 ret = ufshcd_rpm_resume(hba);
9832 if (ret < 0 && ret != -EACCES) {
9833 ufshcd_rpm_put(hba);
9834 return ret;
9835 }
b294ff3e
AD
9836 }
9837 hba->complete_put = true;
9838 }
b294ff3e
AD
9839 return 0;
9840}
ddba1cf7
AH
9841EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
9842
9843int ufshcd_suspend_prepare(struct device *dev)
9844{
9845 return __ufshcd_suspend_prepare(dev, true);
9846}
b294ff3e
AD
9847EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
9848
9849#ifdef CONFIG_PM_SLEEP
9850static int ufshcd_wl_poweroff(struct device *dev)
9851{
9852 struct scsi_device *sdev = to_scsi_device(dev);
9853 struct ufs_hba *hba = shost_priv(sdev->host);
9854
9855 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
9856 return 0;
9857}
9858#endif
9859
9860static int ufshcd_wl_probe(struct device *dev)
9861{
9862 struct scsi_device *sdev = to_scsi_device(dev);
9863
9864 if (!is_device_wlun(sdev))
9865 return -ENODEV;
9866
9867 blk_pm_runtime_init(sdev->request_queue, dev);
9868 pm_runtime_set_autosuspend_delay(dev, 0);
9869 pm_runtime_allow(dev);
9870
9871 return 0;
9872}
9873
9874static int ufshcd_wl_remove(struct device *dev)
9875{
9876 pm_runtime_forbid(dev);
9877 return 0;
9878}
9879
9880static const struct dev_pm_ops ufshcd_wl_pm_ops = {
9881#ifdef CONFIG_PM_SLEEP
9882 .suspend = ufshcd_wl_suspend,
9883 .resume = ufshcd_wl_resume,
9884 .freeze = ufshcd_wl_suspend,
9885 .thaw = ufshcd_wl_resume,
9886 .poweroff = ufshcd_wl_poweroff,
9887 .restore = ufshcd_wl_resume,
9888#endif
9889 SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
9890};
9891
9892/*
9893 * ufs_dev_wlun_template - describes ufs device wlun
9894 * ufs-device wlun - used to send pm commands
9895 * All luns are consumers of ufs-device wlun.
9896 *
9897 * Currently, no sd driver is present for wluns.
9898 * Hence the no specific pm operations are performed.
9899 * With ufs design, SSU should be sent to ufs-device wlun.
9900 * Hence register a scsi driver for ufs wluns only.
9901 */
9902static struct scsi_driver ufs_dev_wlun_template = {
9903 .gendrv = {
9904 .name = "ufs_device_wlun",
9905 .owner = THIS_MODULE,
9906 .probe = ufshcd_wl_probe,
9907 .remove = ufshcd_wl_remove,
9908 .pm = &ufshcd_wl_pm_ops,
9909 .shutdown = ufshcd_wl_shutdown,
9910 },
9911};
9912
b6cacaf2
AH
9913static int __init ufshcd_core_init(void)
9914{
b294ff3e
AD
9915 int ret;
9916
9a868c8a
BVA
9917 /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
9918 static_assert(sizeof(struct utp_transfer_cmd_desc) ==
9919 2 * ALIGNED_UPIU_SIZE +
9920 SG_ALL * sizeof(struct ufshcd_sg_entry));
9921
b6cacaf2 9922 ufs_debugfs_init();
b294ff3e
AD
9923
9924 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
9925 if (ret)
edc0596c 9926 ufs_debugfs_exit();
b294ff3e 9927 return ret;
b6cacaf2
AH
9928}
9929
9930static void __exit ufshcd_core_exit(void)
9931{
9932 ufs_debugfs_exit();
b294ff3e 9933 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
b6cacaf2
AH
9934}
9935
9936module_init(ufshcd_core_init);
9937module_exit(ufshcd_core_exit);
9938
3b1d0580
VH
9939MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
9940MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
e0eca63e 9941MODULE_DESCRIPTION("Generic UFS host controller driver Core");
7a3e97b0
SY
9942MODULE_LICENSE("GPL");
9943MODULE_VERSION(UFSHCD_DRIVER_VERSION);