scsi: ufshcd: Update the set frequency to devfreq
[linux-2.6-block.git] drivers/scsi/ufs/ufshcd.c
/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);

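/*
 * Illustrative usage only (not part of the original file): dump the whole
 * UFSHCI register space with a "host_regs: " prefix, exactly as
 * ufshcd_print_host_regs() further below does:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 *
 * Both offset and length must be 4-byte aligned, otherwise -EINVAL is
 * returned.
 */
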
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

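/*
 * Illustrative example only (not part of the original file), assuming the
 * UFS_PM_LVL_* values enumerate the rows of ufs_pm_lvl_states[] in order,
 * as the helpers above rely on: row 3 is
 * {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, so the reverse lookup
 *
 *	lvl = ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *						UIC_LINK_HIBERN8_STATE);
 *
 * returns UFS_PM_LVL_3, while an unknown combination falls back to
 * UFS_PM_LVL_0.
 */
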
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
					const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     const char *str)
{
	int off = (int)tag - hba->nutrs;
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			  &descp->input_param1);
}

static void ufshcd_add_command_trace(struct ufs_hba *hba,
				     unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace UPIU W/O tracing command */
		if (cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = cmd->cmnd[0];
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (cmd->request && cmd->request->bio)
				lba = cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
			     doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_err_hist(struct ufs_hba *hba,
				  struct ufs_err_reg_hist *err_hist,
				  char *err_name)
{
	int i;
	bool found = false;

	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;

		if (err_hist->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
			      "auto_hibern8_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
			      "link_startup_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
			      "suspend_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");

	ufshcd_print_clk_freqs(hba);

	ufshcd_vops_dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);

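/*
 * Illustrative usage only (not part of the original file): wait roughly
 * 100 us, allowing the sleep to stretch up to 110 us. Delays shorter than
 * 10 us are busy-waited with udelay() instead of sleeping:
 *
 *	ufshcd_delay_us(100, 10);
 */
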
/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

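/*
 * Illustrative usage only (not part of the original file): poll the
 * controller-enable bit until it reads back as set, checking every 100 us
 * for up to 10 ms and sleeping between reads. The register and bit names
 * come from this driver's ufshci.h; the interval and timeout values are
 * arbitrary for the example:
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *				       CONTROLLER_ENABLE, CONTROLLER_ENABLE,
 *				       100, 10, true);
 *	if (err == -ETIMEDOUT)
 *		dev_err(hba->dev, "controller enable timed out\n");
 */
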
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

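/*
 * Illustrative usage only (not part of the original file): program the
 * default aggregation settings. INT_AGGR_DEF_TO is expressed in 40 us
 * units, so its value of 0x02 corresponds to an aggregation timeout of
 * roughly 80 us. The counter threshold of hba->nutrs - 1 (one less than
 * the transfer request queue depth) is an assumed example value:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */
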
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);

	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	/* Override with the closest supported frequency */
	*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the rounded-off frequency and update */
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!scale_up)
		*freq = clki->min_freq;
	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}

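/*
 * Illustrative walk-through only (not part of the original file), assuming
 * a single clock with min_freq = 50 MHz and max_freq = 200 MHz: the
 * requested *freq is first clamped by clk_round_rate(). If the rounded
 * rate equals clki->max_freq, the clocks are scaled up and *freq already
 * holds that value; otherwise *freq is rewritten to clki->min_freq before
 * returning, so devfreq records the frequency that ufshcd_scale_clks()
 * actually applied rather than the governor's raw request.
 */
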
static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
	int *busy = priv;

	WARN_ON_ONCE(reserved);
	(*busy)++;
	return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
	struct request_queue *q = hba->cmd_queue;
	int busy = 0;

	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
	return busy;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
	stat->current_frequency = clki->curr_freq;
	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	devfreq = devfreq_add_device(hba->dev,
			&ufs_devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}

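/*
 * Illustrative note only (not part of the original file): the attribute
 * created above lives on the hba device, so user space can toggle clock
 * scaling with something like
 *
 *	echo 0 > /sys/devices/.../<ufs host device>/clkscale_enable
 *
 * where the exact path depends on how the host controller is probed
 * (platform or PCI device).
 */
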
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			if (async) {
				rc = -EAGAIN;
				hba->clk_gating.active_reqs--;
				break;
			}
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		/* fallthrough */
	case CLKS_OFF:
		ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		/* fallthrough */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

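/*
 * Illustrative usage only (not part of the original file): the typical
 * pairing used throughout this driver is to grab the clocks around any
 * host register access or command submission and drop the reference when
 * done, which (re)arms the delayed gate work:
 *
 *	ufshcd_hold(hba, false);
 *	... access host registers / issue commands ...
 *	ufshcd_release(hba);
 */
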
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_disable_irq(hba);

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

1704static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1705 struct device_attribute *attr, char *buf)
1706{
1707 struct ufs_hba *hba = dev_get_drvdata(dev);
1708
1709 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1710}
1711
1712static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1713 struct device_attribute *attr, const char *buf, size_t count)
1714{
1715 struct ufs_hba *hba = dev_get_drvdata(dev);
1716 unsigned long flags, value;
1717
1718 if (kstrtoul(buf, 0, &value))
1719 return -EINVAL;
1720
1721 spin_lock_irqsave(hba->host->host_lock, flags);
1722 hba->clk_gating.delay_ms = value;
1723 spin_unlock_irqrestore(hba->host->host_lock, flags);
1724 return count;
1725}
1726
b427411a
ST
1727static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1728 struct device_attribute *attr, char *buf)
1729{
1730 struct ufs_hba *hba = dev_get_drvdata(dev);
1731
1732 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1733}
1734
1735static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1736 struct device_attribute *attr, const char *buf, size_t count)
1737{
1738 struct ufs_hba *hba = dev_get_drvdata(dev);
1739 unsigned long flags;
1740 u32 value;
1741
1742 if (kstrtou32(buf, 0, &value))
1743 return -EINVAL;
1744
1745 value = !!value;
1746 if (value == hba->clk_gating.is_enabled)
1747 goto out;
1748
1749 if (value) {
1750 ufshcd_release(hba);
1751 } else {
1752 spin_lock_irqsave(hba->host->host_lock, flags);
1753 hba->clk_gating.active_reqs++;
1754 spin_unlock_irqrestore(hba->host->host_lock, flags);
1755 }
1756
1757 hba->clk_gating.is_enabled = value;
1758out:
1759 return count;
1760}
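/*
 * The delay and enable knobs above are exposed through the sysfs attributes
 * registered in ufshcd_init_clk_gating() below ("clkgate_delay_ms" and
 * "clkgate_enable"). Illustrative usage, with the host device path elided:
 *
 *   echo 200 > /sys/devices/.../clkgate_delay_ms    (gate 200 ms after idle)
 *   echo 0   > /sys/devices/.../clkgate_enable      (disable clock gating)
 */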
1761
eebcc196
VG
1762static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1763{
1764 char wq_name[sizeof("ufs_clkscaling_00")];
1765
1766 if (!ufshcd_is_clkscaling_supported(hba))
1767 return;
1768
1769 INIT_WORK(&hba->clk_scaling.suspend_work,
1770 ufshcd_clk_scaling_suspend_work);
1771 INIT_WORK(&hba->clk_scaling.resume_work,
1772 ufshcd_clk_scaling_resume_work);
1773
1774 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1775 hba->host->host_no);
1776 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1777
1778 ufshcd_clkscaling_init_sysfs(hba);
1779}
1780
1781static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1782{
1783 if (!ufshcd_is_clkscaling_supported(hba))
1784 return;
1785
1786 destroy_workqueue(hba->clk_scaling.workq);
1787 ufshcd_devfreq_remove(hba);
1788}
1789
1ab27c9c
ST
1790static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1791{
10e5e375
VV
1792 char wq_name[sizeof("ufs_clk_gating_00")];
1793
1ab27c9c
ST
1794 if (!ufshcd_is_clkgating_allowed(hba))
1795 return;
1796
1797 hba->clk_gating.delay_ms = 150;
1798 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1799 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1800
10e5e375
VV
1801 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1802 hba->host->host_no);
1803 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1804 WQ_MEM_RECLAIM);
1805
b427411a
ST
1806 hba->clk_gating.is_enabled = true;
1807
1ab27c9c
ST
1808 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1809 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1810 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1811 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
b427411a 1812 hba->clk_gating.delay_attr.attr.mode = 0644;
1ab27c9c
ST
1813 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1814 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
b427411a
ST
1815
1816 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1817 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1818 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1819 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1820 hba->clk_gating.enable_attr.attr.mode = 0644;
1821 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1822 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1ab27c9c
ST
1823}
1824
1825static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1826{
1827 if (!ufshcd_is_clkgating_allowed(hba))
1828 return;
1829 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
b427411a 1830 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
97cd6805
AM
1831 cancel_work_sync(&hba->clk_gating.ungate_work);
1832 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
10e5e375 1833 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1ab27c9c
ST
1834}
1835
856b3483
ST
1836/* Must be called with host lock acquired */
1837static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1838{
401f1e44 1839 bool queue_resume_work = false;
1840
fcb0c4b0 1841 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1842 return;
1843
401f1e44 1844 if (!hba->clk_scaling.active_reqs++)
1845 queue_resume_work = true;
1846
1847 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1848 return;
1849
1850 if (queue_resume_work)
1851 queue_work(hba->clk_scaling.workq,
1852 &hba->clk_scaling.resume_work);
1853
1854 if (!hba->clk_scaling.window_start_t) {
1855 hba->clk_scaling.window_start_t = jiffies;
1856 hba->clk_scaling.tot_busy_t = 0;
1857 hba->clk_scaling.is_busy_started = false;
1858 }
1859
856b3483
ST
1860 if (!hba->clk_scaling.is_busy_started) {
1861 hba->clk_scaling.busy_start_t = ktime_get();
1862 hba->clk_scaling.is_busy_started = true;
1863 }
1864}
1865
1866static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1867{
1868 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1869
fcb0c4b0 1870 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1871 return;
1872
1873 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1874 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1875 scaling->busy_start_t));
8b0e1953 1876 scaling->busy_start_t = 0;
856b3483
ST
1877 scaling->is_busy_started = false;
1878 }
1879}
7a3e97b0
SY
1880/**
1881 * ufshcd_send_command - Send SCSI or device management commands
1882 * @hba: per adapter instance
1883 * @task_tag: Task tag of the command
1884 */
1885static inline
1886void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1887{
ff8e20c6 1888 hba->lrb[task_tag].issue_time_stamp = ktime_get();
09017188 1889 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
eacf36f5 1890 ufshcd_add_command_trace(hba, task_tag, "send");
856b3483 1891 ufshcd_clk_scaling_start_busy(hba);
7a3e97b0 1892 __set_bit(task_tag, &hba->outstanding_reqs);
b873a275 1893 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
ad1a1b9c
GB
1894 /* Make sure that doorbell is committed immediately */
1895 wmb();
7a3e97b0
SY
1896}
1897
1898/**
1899 * ufshcd_copy_sense_data - Copy sense data in case of check condition
8aa29f19 1900 * @lrbp: pointer to local reference block
7a3e97b0
SY
1901 */
1902static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1903{
1904 int len;
1c2623c5
SJ
1905 if (lrbp->sense_buffer &&
1906 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
e3ce73d6
YG
1907 int len_to_copy;
1908
5a0b0cb9 1909 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
09a5a24f 1910 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
e3ce73d6 1911
09a5a24f
AA
1912 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1913 len_to_copy);
7a3e97b0
SY
1914 }
1915}
1916
68078d5c
DR
1917/**
1918 * ufshcd_copy_query_response() - Copy the Query Response and the data
1919 * descriptor
1920 * @hba: per adapter instance
8aa29f19 1921 * @lrbp: pointer to local reference block
68078d5c
DR
1922 */
1923static
c6d4a831 1924int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
68078d5c
DR
1925{
1926 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1927
68078d5c 1928 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
68078d5c 1929
68078d5c 1930 /* Get the descriptor */
1c90836f
AA
1931 if (hba->dev_cmd.query.descriptor &&
1932 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
d44a5f98 1933 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
68078d5c 1934 GENERAL_UPIU_REQUEST_SIZE;
c6d4a831
DR
1935 u16 resp_len;
1936 u16 buf_len;
68078d5c
DR
1937
1938 /* data segment length */
c6d4a831 1939 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
68078d5c 1940 MASK_QUERY_DATA_SEG_LEN;
ea2aab24
SRT
1941 buf_len = be16_to_cpu(
1942 hba->dev_cmd.query.request.upiu_req.length);
c6d4a831
DR
1943 if (likely(buf_len >= resp_len)) {
1944 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1945 } else {
1946 dev_warn(hba->dev,
3d4881d1
BH
1947 "%s: rsp size %d is bigger than buffer size %d",
1948 __func__, resp_len, buf_len);
c6d4a831
DR
1949 return -EINVAL;
1950 }
68078d5c 1951 }
c6d4a831
DR
1952
1953 return 0;
68078d5c
DR
1954}
1955
7a3e97b0
SY
1956/**
1957 * ufshcd_hba_capabilities - Read controller capabilities
1958 * @hba: per adapter instance
1959 */
1960static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1961{
b873a275 1962 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
7a3e97b0
SY
1963
1964 /* nutrs and nutmrs are 0 based values */
1965 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1966 hba->nutmrs =
1967 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1968}
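/*
 * Worked example, assuming the standard UFSHCI capability layout where
 * NUTRS occupies bits 4:0 and NUTMRS bits 18:16 (both zero-based): a
 * controller reporting 0x1f in bits 4:0 and 0x7 in bits 18:16 ends up with
 * nutrs = 32 transfer request slots and nutmrs = 8 task management slots.
 */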
1969
1970/**
6ccf44fe
SJ
1971 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1972 * to accept UIC commands
7a3e97b0 1973 * @hba: per adapter instance
6ccf44fe
SJ
1974 * Return true on success, else false
1975 */
1976static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1977{
1978 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1979 return true;
1980 else
1981 return false;
1982}
1983
53b3d9c3
SJ
1984/**
1985 * ufshcd_get_upmcrs - Get the power mode change request status
1986 * @hba: Pointer to adapter instance
1987 *
1988 * This function gets the UPMCRS field of HCS register
1989 * Returns value of UPMCRS field
1990 */
1991static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1992{
1993 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1994}
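/*
 * UPMCRS is bits 10:8 of the HCS register; the value returned here is
 * compared elsewhere in this driver against the PWR_* status codes
 * (e.g. PWR_OK, PWR_LOCAL, PWR_REMOTE, PWR_BUSY, PWR_ERROR_CAP,
 * PWR_FATAL_ERROR).
 */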
1995
6ccf44fe
SJ
1996/**
1997 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1998 * @hba: per adapter instance
1999 * @uic_cmd: UIC command
2000 *
2001 * Mutex must be held.
7a3e97b0
SY
2002 */
2003static inline void
6ccf44fe 2004ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
7a3e97b0 2005{
6ccf44fe
SJ
2006 WARN_ON(hba->active_uic_cmd);
2007
2008 hba->active_uic_cmd = uic_cmd;
2009
7a3e97b0 2010 /* Write Args */
6ccf44fe
SJ
2011 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2012 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2013 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
7a3e97b0
SY
2014
2015 /* Write UIC Cmd */
6ccf44fe 2016 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
b873a275 2017 REG_UIC_COMMAND);
7a3e97b0
SY
2018}
2019
6ccf44fe
SJ
2020/**
 2021 * ufshcd_wait_for_uic_cmd - Wait for the completion of a UIC command
2022 * @hba: per adapter instance
8aa29f19 2023 * @uic_cmd: UIC command
6ccf44fe
SJ
2024 *
2025 * Must be called with mutex held.
2026 * Returns 0 only if success.
2027 */
2028static int
2029ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2030{
2031 int ret;
2032 unsigned long flags;
2033
2034 if (wait_for_completion_timeout(&uic_cmd->done,
2035 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2036 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2037 else
2038 ret = -ETIMEDOUT;
2039
2040 spin_lock_irqsave(hba->host->host_lock, flags);
2041 hba->active_uic_cmd = NULL;
2042 spin_unlock_irqrestore(hba->host->host_lock, flags);
2043
2044 return ret;
2045}
2046
2047/**
2048 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2049 * @hba: per adapter instance
2050 * @uic_cmd: UIC command
d75f7fe4 2051 * @completion: initialize the completion only if this is set to true
6ccf44fe
SJ
2052 *
 2053 * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be called
57d104c1 2054 * with mutex held and host_lock locked.
6ccf44fe
SJ
2055 * Returns 0 only if success.
2056 */
2057static int
d75f7fe4
YG
2058__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2059 bool completion)
6ccf44fe 2060{
6ccf44fe
SJ
2061 if (!ufshcd_ready_for_uic_cmd(hba)) {
2062 dev_err(hba->dev,
2063 "Controller not ready to accept UIC commands\n");
2064 return -EIO;
2065 }
2066
d75f7fe4
YG
2067 if (completion)
2068 init_completion(&uic_cmd->done);
6ccf44fe 2069
6ccf44fe 2070 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
6ccf44fe 2071
57d104c1 2072 return 0;
6ccf44fe
SJ
2073}
2074
2075/**
2076 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2077 * @hba: per adapter instance
2078 * @uic_cmd: UIC command
2079 *
2080 * Returns 0 only if success.
2081 */
e77044c5 2082int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
6ccf44fe
SJ
2083{
2084 int ret;
57d104c1 2085 unsigned long flags;
6ccf44fe 2086
1ab27c9c 2087 ufshcd_hold(hba, false);
6ccf44fe 2088 mutex_lock(&hba->uic_cmd_mutex);
cad2e03d
YG
2089 ufshcd_add_delay_before_dme_cmd(hba);
2090
57d104c1 2091 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 2092 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
57d104c1
SJ
2093 spin_unlock_irqrestore(hba->host->host_lock, flags);
2094 if (!ret)
2095 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2096
6ccf44fe
SJ
2097 mutex_unlock(&hba->uic_cmd_mutex);
2098
1ab27c9c 2099 ufshcd_release(hba);
6ccf44fe
SJ
2100 return ret;
2101}
2102
7a3e97b0
SY
2103/**
2104 * ufshcd_map_sg - Map scatter-gather list to prdt
8aa29f19
BVA
2105 * @hba: per adapter instance
2106 * @lrbp: pointer to local reference block
7a3e97b0
SY
2107 *
2108 * Returns 0 in case of success, non-zero value in case of failure
2109 */
75b1cc4a 2110static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0
SY
2111{
2112 struct ufshcd_sg_entry *prd_table;
2113 struct scatterlist *sg;
2114 struct scsi_cmnd *cmd;
2115 int sg_segments;
2116 int i;
2117
2118 cmd = lrbp->cmd;
2119 sg_segments = scsi_dma_map(cmd);
2120 if (sg_segments < 0)
2121 return sg_segments;
2122
2123 if (sg_segments) {
49200199
CH
2124 lrbp->utr_descriptor_ptr->prd_table_length =
2125 cpu_to_le16((u16)sg_segments);
7a3e97b0
SY
2126
2127 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2128
2129 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2130 prd_table[i].size =
2131 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2132 prd_table[i].base_addr =
2133 cpu_to_le32(lower_32_bits(sg->dma_address));
2134 prd_table[i].upper_addr =
2135 cpu_to_le32(upper_32_bits(sg->dma_address));
52ac95fe 2136 prd_table[i].reserved = 0;
7a3e97b0
SY
2137 }
2138 } else {
2139 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2140 }
2141
2142 return 0;
2143}
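/*
 * Note on the "- 1" above: the PRDT data byte count field is zero-based
 * per UFSHCI, so e.g. a 4 KiB scatterlist segment is encoded as 0xfff.
 */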
2144
2145/**
2fbd009b 2146 * ufshcd_enable_intr - enable interrupts
7a3e97b0 2147 * @hba: per adapter instance
2fbd009b 2148 * @intrs: interrupt bits
7a3e97b0 2149 */
2fbd009b 2150static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
7a3e97b0 2151{
2fbd009b
SJ
2152 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2153
2154 if (hba->ufs_version == UFSHCI_VERSION_10) {
2155 u32 rw;
2156 rw = set & INTERRUPT_MASK_RW_VER_10;
2157 set = rw | ((set ^ intrs) & intrs);
2158 } else {
2159 set |= intrs;
2160 }
2161
2162 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2163}
2164
2165/**
2166 * ufshcd_disable_intr - disable interrupts
2167 * @hba: per adapter instance
2168 * @intrs: interrupt bits
2169 */
2170static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2171{
2172 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2173
2174 if (hba->ufs_version == UFSHCI_VERSION_10) {
2175 u32 rw;
2176 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2177 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2178 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2179
2180 } else {
2181 set &= ~intrs;
7a3e97b0 2182 }
2fbd009b
SJ
2183
2184 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
7a3e97b0
SY
2185}
2186
5a0b0cb9
SRT
2187/**
 2188 * ufshcd_prepare_req_desc_hdr() - Fill the request's header
 2189 * descriptor according to the request
2190 * @lrbp: pointer to local reference block
2191 * @upiu_flags: flags required in the header
2192 * @cmd_dir: requests data direction
2193 */
2194static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
300bb13f 2195 u32 *upiu_flags, enum dma_data_direction cmd_dir)
5a0b0cb9
SRT
2196{
2197 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2198 u32 data_direction;
2199 u32 dword_0;
2200
2201 if (cmd_dir == DMA_FROM_DEVICE) {
2202 data_direction = UTP_DEVICE_TO_HOST;
2203 *upiu_flags = UPIU_CMD_FLAGS_READ;
2204 } else if (cmd_dir == DMA_TO_DEVICE) {
2205 data_direction = UTP_HOST_TO_DEVICE;
2206 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2207 } else {
2208 data_direction = UTP_NO_DATA_TRANSFER;
2209 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2210 }
2211
2212 dword_0 = data_direction | (lrbp->command_type
2213 << UPIU_COMMAND_TYPE_OFFSET);
2214 if (lrbp->intr_cmd)
2215 dword_0 |= UTP_REQ_DESC_INT_CMD;
2216
2217 /* Transfer request descriptor header fields */
2218 req_desc->header.dword_0 = cpu_to_le32(dword_0);
52ac95fe
YG
2219 /* dword_1 is reserved, hence it is set to 0 */
2220 req_desc->header.dword_1 = 0;
5a0b0cb9
SRT
2221 /*
2222 * assigning invalid value for command status. Controller
2223 * updates OCS on command completion, with the command
2224 * status
2225 */
2226 req_desc->header.dword_2 =
2227 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
52ac95fe
YG
2228 /* dword_3 is reserved, hence it is set to 0 */
2229 req_desc->header.dword_3 = 0;
51047266
YG
2230
2231 req_desc->prd_table_length = 0;
5a0b0cb9
SRT
2232}
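/*
 * For reference, dword_0 assembled above follows the UTRD header layout
 * implied by the definitions used here: the command type shifted in by
 * UPIU_COMMAND_TYPE_OFFSET, the data direction encoded by the
 * UTP_HOST_TO_DEVICE/UTP_DEVICE_TO_HOST values, and the "interrupt on
 * completion" flag set by UTP_REQ_DESC_INT_CMD.
 */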
2233
2234/**
2235 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2236 * for scsi commands
8aa29f19
BVA
2237 * @lrbp: local reference block pointer
2238 * @upiu_flags: flags
5a0b0cb9
SRT
2239 */
2240static
2241void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2242{
1b21b8f0 2243 struct scsi_cmnd *cmd = lrbp->cmd;
5a0b0cb9 2244 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
52ac95fe 2245 unsigned short cdb_len;
5a0b0cb9
SRT
2246
2247 /* command descriptor fields */
2248 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2249 UPIU_TRANSACTION_COMMAND, upiu_flags,
2250 lrbp->lun, lrbp->task_tag);
2251 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2252 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2253
2254 /* Total EHS length and Data segment length will be zero */
2255 ucd_req_ptr->header.dword_2 = 0;
2256
1b21b8f0 2257 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
5a0b0cb9 2258
1b21b8f0 2259 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
a851b2bd 2260 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
1b21b8f0 2261 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
52ac95fe
YG
2262
2263 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2264}
2265
68078d5c
DR
2266/**
2267 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 2268 * for query requests
2269 * @hba: UFS hba
2270 * @lrbp: local reference block pointer
2271 * @upiu_flags: flags
2272 */
2273static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2274 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2275{
2276 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2277 struct ufs_query *query = &hba->dev_cmd.query;
e8c8e82a 2278 u16 len = be16_to_cpu(query->request.upiu_req.length);
68078d5c
DR
2279
2280 /* Query request header */
2281 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2282 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2283 lrbp->lun, lrbp->task_tag);
2284 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2285 0, query->request.query_func, 0, 0);
2286
6861285c
ZL
 2287 /* Data segment length is only needed for WRITE_DESC */
2288 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2289 ucd_req_ptr->header.dword_2 =
2290 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2291 else
2292 ucd_req_ptr->header.dword_2 = 0;
68078d5c
DR
2293
2294 /* Copy the Query Request buffer as is */
2295 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2296 QUERY_OSF_SIZE);
68078d5c
DR
2297
2298 /* Copy the Descriptor */
c6d4a831 2299 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
220d17a6 2300 memcpy(ucd_req_ptr + 1, query->descriptor, len);
c6d4a831 2301
51047266 2302 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
68078d5c
DR
2303}
2304
5a0b0cb9
SRT
2305static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2306{
2307 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2308
2309 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2310
2311 /* command descriptor fields */
2312 ucd_req_ptr->header.dword_0 =
2313 UPIU_HEADER_DWORD(
2314 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
51047266
YG
2315 /* clear rest of the fields of basic header */
2316 ucd_req_ptr->header.dword_1 = 0;
2317 ucd_req_ptr->header.dword_2 = 0;
2318
2319 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2320}
2321
7a3e97b0 2322/**
300bb13f
JP
 2323 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
2324 * for Device Management Purposes
8aa29f19
BVA
2325 * @hba: per adapter instance
2326 * @lrbp: pointer to local reference block
7a3e97b0 2327 */
300bb13f 2328static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0 2329{
7a3e97b0 2330 u32 upiu_flags;
5a0b0cb9 2331 int ret = 0;
7a3e97b0 2332
83dc7e3d 2333 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2334 (hba->ufs_version == UFSHCI_VERSION_11))
300bb13f 2335 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
83dc7e3d 2336 else
2337 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2338
2339 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2340 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2341 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2342 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2343 ufshcd_prepare_utp_nop_upiu(lrbp);
2344 else
2345 ret = -EINVAL;
2346
2347 return ret;
2348}
2349
2350/**
 2351 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2352 * for SCSI Purposes
8aa29f19
BVA
2353 * @hba: per adapter instance
2354 * @lrbp: pointer to local reference block
300bb13f
JP
2355 */
2356static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2357{
2358 u32 upiu_flags;
2359 int ret = 0;
2360
83dc7e3d 2361 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2362 (hba->ufs_version == UFSHCI_VERSION_11))
300bb13f 2363 lrbp->command_type = UTP_CMD_TYPE_SCSI;
83dc7e3d 2364 else
2365 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2366
2367 if (likely(lrbp->cmd)) {
2368 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2369 lrbp->cmd->sc_data_direction);
2370 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2371 } else {
2372 ret = -EINVAL;
2373 }
5a0b0cb9
SRT
2374
2375 return ret;
7a3e97b0
SY
2376}
2377
2a8fa600
SJ
2378/**
2379 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
8aa29f19 2380 * @upiu_wlun_id: UPIU W-LUN id
2a8fa600
SJ
2381 *
2382 * Returns SCSI W-LUN id
2383 */
2384static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2385{
2386 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2387}
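/*
 * Illustrative mappings, assuming the usual UFS_UPIU_WLUN_ID (0x80) and
 * SCSI_W_LUN_BASE (0xc100) values:
 *   UPIU REPORT LUNS W-LUN 0x81 -> SCSI W-LUN 0xc101
 *   UPIU RPMB W-LUN        0xc4 -> SCSI W-LUN 0xc144
 *   UPIU UFS Device W-LUN  0xd0 -> SCSI W-LUN 0xc150
 */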
2388
4d2b8d40
BVA
2389static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2390{
2391 struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2392 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2393 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2394 i * sizeof(struct utp_transfer_cmd_desc);
2395 u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2396 response_upiu);
2397 u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2398
2399 lrb->utr_descriptor_ptr = utrdlp + i;
2400 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2401 i * sizeof(struct utp_transfer_req_desc);
2402 lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2403 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2404 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2405 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2406 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2407 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2408}
2409
7a3e97b0
SY
2410/**
2411 * ufshcd_queuecommand - main entry point for SCSI requests
8aa29f19 2412 * @host: SCSI host pointer
7a3e97b0 2413 * @cmd: command from SCSI Midlayer
7a3e97b0
SY
2414 *
2415 * Returns 0 for success, non-zero in case of failure
2416 */
2417static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2418{
2419 struct ufshcd_lrb *lrbp;
2420 struct ufs_hba *hba;
2421 unsigned long flags;
2422 int tag;
2423 int err = 0;
2424
2425 hba = shost_priv(host);
2426
2427 tag = cmd->request->tag;
14497328
YG
2428 if (!ufshcd_valid_tag(hba, tag)) {
2429 dev_err(hba->dev,
2430 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2431 __func__, tag, cmd, cmd->request);
2432 BUG();
2433 }
7a3e97b0 2434
a3cd5ec5 2435 if (!down_read_trylock(&hba->clk_scaling_lock))
2436 return SCSI_MLQUEUE_HOST_BUSY;
2437
3441da7d
SRT
2438 spin_lock_irqsave(hba->host->host_lock, flags);
2439 switch (hba->ufshcd_state) {
2440 case UFSHCD_STATE_OPERATIONAL:
2441 break;
141f8165 2442 case UFSHCD_STATE_EH_SCHEDULED:
3441da7d 2443 case UFSHCD_STATE_RESET:
7a3e97b0 2444 err = SCSI_MLQUEUE_HOST_BUSY;
3441da7d
SRT
2445 goto out_unlock;
2446 case UFSHCD_STATE_ERROR:
2447 set_host_byte(cmd, DID_ERROR);
2448 cmd->scsi_done(cmd);
2449 goto out_unlock;
2450 default:
2451 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2452 __func__, hba->ufshcd_state);
2453 set_host_byte(cmd, DID_BAD_TARGET);
2454 cmd->scsi_done(cmd);
2455 goto out_unlock;
7a3e97b0 2456 }
53c12d0e
YG
2457
2458 /* if error handling is in progress, don't issue commands */
2459 if (ufshcd_eh_in_progress(hba)) {
2460 set_host_byte(cmd, DID_ERROR);
2461 cmd->scsi_done(cmd);
2462 goto out_unlock;
2463 }
3441da7d 2464 spin_unlock_irqrestore(hba->host->host_lock, flags);
7a3e97b0 2465
7fabb77b
GB
2466 hba->req_abort_count = 0;
2467
1ab27c9c
ST
2468 err = ufshcd_hold(hba, true);
2469 if (err) {
2470 err = SCSI_MLQUEUE_HOST_BUSY;
1ab27c9c
ST
2471 goto out;
2472 }
2473 WARN_ON(hba->clk_gating.state != CLKS_ON);
2474
7a3e97b0
SY
2475 lrbp = &hba->lrb[tag];
2476
5a0b0cb9 2477 WARN_ON(lrbp->cmd);
7a3e97b0 2478 lrbp->cmd = cmd;
09a5a24f 2479 lrbp->sense_bufflen = UFS_SENSE_SIZE;
7a3e97b0
SY
2480 lrbp->sense_buffer = cmd->sense_buffer;
2481 lrbp->task_tag = tag;
0ce147d4 2482 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
b852190e 2483 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
e0b299e3 2484 lrbp->req_abort_skip = false;
7a3e97b0 2485
300bb13f
JP
2486 ufshcd_comp_scsi_upiu(hba, lrbp);
2487
75b1cc4a 2488 err = ufshcd_map_sg(hba, lrbp);
5a0b0cb9
SRT
2489 if (err) {
2490 lrbp->cmd = NULL;
17c7d35f 2491 ufshcd_release(hba);
7a3e97b0 2492 goto out;
5a0b0cb9 2493 }
ad1a1b9c
GB
2494 /* Make sure descriptors are ready before ringing the doorbell */
2495 wmb();
7a3e97b0
SY
2496
2497 /* issue command to the controller */
2498 spin_lock_irqsave(hba->host->host_lock, flags);
5905d464 2499 ufshcd_vops_setup_xfer_req(hba, tag, true);
7a3e97b0 2500 ufshcd_send_command(hba, tag);
3441da7d 2501out_unlock:
7a3e97b0
SY
2502 spin_unlock_irqrestore(hba->host->host_lock, flags);
2503out:
a3cd5ec5 2504 up_read(&hba->clk_scaling_lock);
7a3e97b0
SY
2505 return err;
2506}
2507
5a0b0cb9
SRT
2508static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2509 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2510{
2511 lrbp->cmd = NULL;
2512 lrbp->sense_bufflen = 0;
2513 lrbp->sense_buffer = NULL;
2514 lrbp->task_tag = tag;
2515 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
5a0b0cb9
SRT
2516 lrbp->intr_cmd = true; /* No interrupt aggregation */
2517 hba->dev_cmd.type = cmd_type;
2518
300bb13f 2519 return ufshcd_comp_devman_upiu(hba, lrbp);
5a0b0cb9
SRT
2520}
2521
2522static int
2523ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2524{
2525 int err = 0;
2526 unsigned long flags;
2527 u32 mask = 1 << tag;
2528
2529 /* clear outstanding transaction before retry */
2530 spin_lock_irqsave(hba->host->host_lock, flags);
2531 ufshcd_utrl_clear(hba, tag);
2532 spin_unlock_irqrestore(hba->host->host_lock, flags);
2533
2534 /*
 2535 * wait for h/w to clear the corresponding bit in the doorbell.
2536 * max. wait is 1 sec.
2537 */
2538 err = ufshcd_wait_for_register(hba,
2539 REG_UTP_TRANSFER_REQ_DOOR_BELL,
596585a2 2540 mask, ~mask, 1000, 1000, true);
5a0b0cb9
SRT
2541
2542 return err;
2543}
2544
c6d4a831
DR
2545static int
2546ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2547{
2548 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2549
2550 /* Get the UPIU response */
2551 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2552 UPIU_RSP_CODE_OFFSET;
2553 return query_res->response;
2554}
2555
5a0b0cb9
SRT
2556/**
2557 * ufshcd_dev_cmd_completion() - handles device management command responses
2558 * @hba: per adapter instance
2559 * @lrbp: pointer to local reference block
2560 */
2561static int
2562ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2563{
2564 int resp;
2565 int err = 0;
2566
ff8e20c6 2567 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
2568 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2569
2570 switch (resp) {
2571 case UPIU_TRANSACTION_NOP_IN:
2572 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2573 err = -EINVAL;
2574 dev_err(hba->dev, "%s: unexpected response %x\n",
2575 __func__, resp);
2576 }
2577 break;
68078d5c 2578 case UPIU_TRANSACTION_QUERY_RSP:
c6d4a831
DR
2579 err = ufshcd_check_query_response(hba, lrbp);
2580 if (!err)
2581 err = ufshcd_copy_query_response(hba, lrbp);
68078d5c 2582 break;
5a0b0cb9
SRT
2583 case UPIU_TRANSACTION_REJECT_UPIU:
2584 /* TODO: handle Reject UPIU Response */
2585 err = -EPERM;
2586 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2587 __func__);
2588 break;
2589 default:
2590 err = -EINVAL;
2591 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2592 __func__, resp);
2593 break;
2594 }
2595
2596 return err;
2597}
2598
2599static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2600 struct ufshcd_lrb *lrbp, int max_timeout)
2601{
2602 int err = 0;
2603 unsigned long time_left;
2604 unsigned long flags;
2605
2606 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2607 msecs_to_jiffies(max_timeout));
2608
ad1a1b9c
GB
2609 /* Make sure descriptors are ready before ringing the doorbell */
2610 wmb();
5a0b0cb9
SRT
2611 spin_lock_irqsave(hba->host->host_lock, flags);
2612 hba->dev_cmd.complete = NULL;
2613 if (likely(time_left)) {
2614 err = ufshcd_get_tr_ocs(lrbp);
2615 if (!err)
2616 err = ufshcd_dev_cmd_completion(hba, lrbp);
2617 }
2618 spin_unlock_irqrestore(hba->host->host_lock, flags);
2619
2620 if (!time_left) {
2621 err = -ETIMEDOUT;
a48353f6
YG
2622 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2623 __func__, lrbp->task_tag);
5a0b0cb9 2624 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
a48353f6 2625 /* successfully cleared the command, retry if needed */
5a0b0cb9 2626 err = -EAGAIN;
a48353f6
YG
2627 /*
2628 * in case of an error, after clearing the doorbell,
2629 * we also need to clear the outstanding_request
2630 * field in hba
2631 */
2632 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
5a0b0cb9
SRT
2633 }
2634
2635 return err;
2636}
2637
5a0b0cb9
SRT
2638/**
2639 * ufshcd_exec_dev_cmd - API for sending device management requests
8aa29f19
BVA
2640 * @hba: UFS hba
2641 * @cmd_type: specifies the type (NOP, Query...)
 2642 * @timeout: timeout in milliseconds
5a0b0cb9 2643 *
68078d5c
DR
2644 * NOTE: Since there is only one available tag for device management commands,
 2645 * it is expected that the caller holds the hba->dev_cmd.lock mutex.
5a0b0cb9
SRT
2646 */
2647static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2648 enum dev_cmd_type cmd_type, int timeout)
2649{
7252a360
BVA
2650 struct request_queue *q = hba->cmd_queue;
2651 struct request *req;
5a0b0cb9
SRT
2652 struct ufshcd_lrb *lrbp;
2653 int err;
2654 int tag;
2655 struct completion wait;
2656 unsigned long flags;
2657
a3cd5ec5 2658 down_read(&hba->clk_scaling_lock);
2659
5a0b0cb9
SRT
2660 /*
2661 * Get free slot, sleep if slots are unavailable.
 2662 * Even though the request allocation below may sleep indefinitely,
 2663 * the maximum wait time is bounded by the SCSI request timeout.
2664 */
7252a360 2665 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
bb14dd15
DC
2666 if (IS_ERR(req)) {
2667 err = PTR_ERR(req);
2668 goto out_unlock;
2669 }
7252a360
BVA
2670 tag = req->tag;
2671 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5a0b0cb9
SRT
2672
2673 init_completion(&wait);
2674 lrbp = &hba->lrb[tag];
2675 WARN_ON(lrbp->cmd);
2676 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2677 if (unlikely(err))
2678 goto out_put_tag;
2679
2680 hba->dev_cmd.complete = &wait;
2681
6667e6d9 2682 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
e3dfdc53
YG
2683 /* Make sure descriptors are ready before ringing the doorbell */
2684 wmb();
5a0b0cb9 2685 spin_lock_irqsave(hba->host->host_lock, flags);
5905d464 2686 ufshcd_vops_setup_xfer_req(hba, tag, false);
5a0b0cb9
SRT
2687 ufshcd_send_command(hba, tag);
2688 spin_unlock_irqrestore(hba->host->host_lock, flags);
2689
2690 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2691
6667e6d9
OS
2692 ufshcd_add_query_upiu_trace(hba, tag,
2693 err ? "query_complete_err" : "query_complete");
2694
5a0b0cb9 2695out_put_tag:
7252a360 2696 blk_put_request(req);
bb14dd15 2697out_unlock:
a3cd5ec5 2698 up_read(&hba->clk_scaling_lock);
5a0b0cb9
SRT
2699 return err;
2700}
2701
d44a5f98
DR
2702/**
2703 * ufshcd_init_query() - init the query response and request parameters
2704 * @hba: per-adapter instance
2705 * @request: address of the request pointer to be initialized
2706 * @response: address of the response pointer to be initialized
2707 * @opcode: operation to perform
2708 * @idn: flag idn to access
2709 * @index: LU number to access
2710 * @selector: query/flag/descriptor further identification
2711 */
2712static inline void ufshcd_init_query(struct ufs_hba *hba,
2713 struct ufs_query_req **request, struct ufs_query_res **response,
2714 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2715{
2716 *request = &hba->dev_cmd.query.request;
2717 *response = &hba->dev_cmd.query.response;
2718 memset(*request, 0, sizeof(struct ufs_query_req));
2719 memset(*response, 0, sizeof(struct ufs_query_res));
2720 (*request)->upiu_req.opcode = opcode;
2721 (*request)->upiu_req.idn = idn;
2722 (*request)->upiu_req.index = index;
2723 (*request)->upiu_req.selector = selector;
2724}
2725
dc3c8d3a
YG
2726static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2727 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2728{
2729 int ret;
2730 int retries;
2731
2732 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2733 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2734 if (ret)
2735 dev_dbg(hba->dev,
2736 "%s: failed with error %d, retries %d\n",
2737 __func__, ret, retries);
2738 else
2739 break;
2740 }
2741
2742 if (ret)
2743 dev_err(hba->dev,
 2744 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2745 __func__, opcode, idn, ret, retries);
2746 return ret;
2747}
2748
68078d5c
DR
2749/**
2750 * ufshcd_query_flag() - API function for sending flag query requests
8aa29f19
BVA
2751 * @hba: per-adapter instance
2752 * @opcode: flag query to perform
2753 * @idn: flag idn to access
2754 * @flag_res: the flag value after the query request completes
68078d5c
DR
2755 *
2756 * Returns 0 for success, non-zero in case of failure
2757 */
dc3c8d3a 2758int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
68078d5c
DR
2759 enum flag_idn idn, bool *flag_res)
2760{
d44a5f98
DR
2761 struct ufs_query_req *request = NULL;
2762 struct ufs_query_res *response = NULL;
2763 int err, index = 0, selector = 0;
e5ad406c 2764 int timeout = QUERY_REQ_TIMEOUT;
68078d5c
DR
2765
2766 BUG_ON(!hba);
2767
1ab27c9c 2768 ufshcd_hold(hba, false);
68078d5c 2769 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
2770 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2771 selector);
68078d5c
DR
2772
2773 switch (opcode) {
2774 case UPIU_QUERY_OPCODE_SET_FLAG:
2775 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2776 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2777 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2778 break;
2779 case UPIU_QUERY_OPCODE_READ_FLAG:
2780 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2781 if (!flag_res) {
2782 /* No dummy reads */
2783 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2784 __func__);
2785 err = -EINVAL;
2786 goto out_unlock;
2787 }
2788 break;
2789 default:
2790 dev_err(hba->dev,
2791 "%s: Expected query flag opcode but got = %d\n",
2792 __func__, opcode);
2793 err = -EINVAL;
2794 goto out_unlock;
2795 }
68078d5c 2796
e5ad406c 2797 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
68078d5c
DR
2798
2799 if (err) {
2800 dev_err(hba->dev,
2801 "%s: Sending flag query for idn %d failed, err = %d\n",
2802 __func__, idn, err);
2803 goto out_unlock;
2804 }
2805
2806 if (flag_res)
e8c8e82a 2807 *flag_res = (be32_to_cpu(response->upiu_res.value) &
68078d5c
DR
2808 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2809
2810out_unlock:
2811 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 2812 ufshcd_release(hba);
68078d5c
DR
2813 return err;
2814}
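/*
 * Typical use, sketched (e.g. polling fDeviceInit during initialization;
 * QUERY_FLAG_IDN_FDEVICEINIT is the flag IDN assumed here):
 *
 *   bool flag_res = true;
 *   int err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *                                     QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *   if (!err && !flag_res)
 *           dev_dbg(hba->dev, "fDeviceInit cleared, device initialized\n");
 */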
2815
66ec6d59
SRT
2816/**
2817 * ufshcd_query_attr - API function for sending attribute requests
8aa29f19
BVA
2818 * @hba: per-adapter instance
2819 * @opcode: attribute opcode
2820 * @idn: attribute idn to access
2821 * @index: index field
2822 * @selector: selector field
2823 * @attr_val: the attribute value after the query request completes
66ec6d59
SRT
2824 *
2825 * Returns 0 for success, non-zero in case of failure
2826*/
ec92b59c
SN
2827int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2828 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
66ec6d59 2829{
d44a5f98
DR
2830 struct ufs_query_req *request = NULL;
2831 struct ufs_query_res *response = NULL;
66ec6d59
SRT
2832 int err;
2833
2834 BUG_ON(!hba);
2835
1ab27c9c 2836 ufshcd_hold(hba, false);
66ec6d59
SRT
2837 if (!attr_val) {
2838 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2839 __func__, opcode);
2840 err = -EINVAL;
2841 goto out;
2842 }
2843
2844 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
2845 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2846 selector);
66ec6d59
SRT
2847
2848 switch (opcode) {
2849 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2850 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
e8c8e82a 2851 request->upiu_req.value = cpu_to_be32(*attr_val);
66ec6d59
SRT
2852 break;
2853 case UPIU_QUERY_OPCODE_READ_ATTR:
2854 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2855 break;
2856 default:
2857 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2858 __func__, opcode);
2859 err = -EINVAL;
2860 goto out_unlock;
2861 }
2862
d44a5f98 2863 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
66ec6d59
SRT
2864
2865 if (err) {
4b761b58
YG
2866 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2867 __func__, opcode, idn, index, err);
66ec6d59
SRT
2868 goto out_unlock;
2869 }
2870
e8c8e82a 2871 *attr_val = be32_to_cpu(response->upiu_res.value);
66ec6d59
SRT
2872
2873out_unlock:
2874 mutex_unlock(&hba->dev_cmd.lock);
2875out:
1ab27c9c 2876 ufshcd_release(hba);
66ec6d59
SRT
2877 return err;
2878}
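/*
 * Sketch of a typical attribute read, e.g. the active ICC level
 * (QUERY_ATTR_IDN_ACTIVE_ICC_LVL is the attribute IDN assumed here):
 *
 *   u32 icc_level;
 *   int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *                               QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 *                               &icc_level);
 */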
2879
5e86ae44
YG
2880/**
2881 * ufshcd_query_attr_retry() - API function for sending query
2882 * attribute with retries
2883 * @hba: per-adapter instance
2884 * @opcode: attribute opcode
2885 * @idn: attribute idn to access
2886 * @index: index field
2887 * @selector: selector field
2888 * @attr_val: the attribute value after the query request
2889 * completes
2890 *
2891 * Returns 0 for success, non-zero in case of failure
2892*/
2893static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2894 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2895 u32 *attr_val)
2896{
2897 int ret = 0;
2898 u32 retries;
2899
68c9fcfd 2900 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
5e86ae44
YG
2901 ret = ufshcd_query_attr(hba, opcode, idn, index,
2902 selector, attr_val);
2903 if (ret)
2904 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2905 __func__, ret, retries);
2906 else
2907 break;
2908 }
2909
2910 if (ret)
2911 dev_err(hba->dev,
 2912 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2913 __func__, idn, ret, QUERY_REQ_RETRIES);
2914 return ret;
2915}
2916
a70e91b8 2917static int __ufshcd_query_descriptor(struct ufs_hba *hba,
d44a5f98
DR
2918 enum query_opcode opcode, enum desc_idn idn, u8 index,
2919 u8 selector, u8 *desc_buf, int *buf_len)
2920{
2921 struct ufs_query_req *request = NULL;
2922 struct ufs_query_res *response = NULL;
2923 int err;
2924
2925 BUG_ON(!hba);
2926
1ab27c9c 2927 ufshcd_hold(hba, false);
d44a5f98
DR
2928 if (!desc_buf) {
2929 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2930 __func__, opcode);
2931 err = -EINVAL;
2932 goto out;
2933 }
2934
a4b0e8a4 2935 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
d44a5f98
DR
2936 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2937 __func__, *buf_len);
2938 err = -EINVAL;
2939 goto out;
2940 }
2941
2942 mutex_lock(&hba->dev_cmd.lock);
2943 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2944 selector);
2945 hba->dev_cmd.query.descriptor = desc_buf;
ea2aab24 2946 request->upiu_req.length = cpu_to_be16(*buf_len);
d44a5f98
DR
2947
2948 switch (opcode) {
2949 case UPIU_QUERY_OPCODE_WRITE_DESC:
2950 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2951 break;
2952 case UPIU_QUERY_OPCODE_READ_DESC:
2953 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2954 break;
2955 default:
2956 dev_err(hba->dev,
2957 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2958 __func__, opcode);
2959 err = -EINVAL;
2960 goto out_unlock;
2961 }
2962
2963 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2964
2965 if (err) {
4b761b58
YG
2966 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2967 __func__, opcode, idn, index, err);
d44a5f98
DR
2968 goto out_unlock;
2969 }
2970
ea2aab24 2971 *buf_len = be16_to_cpu(response->upiu_res.length);
d44a5f98
DR
2972
2973out_unlock:
cfcbae38 2974 hba->dev_cmd.query.descriptor = NULL;
d44a5f98
DR
2975 mutex_unlock(&hba->dev_cmd.lock);
2976out:
1ab27c9c 2977 ufshcd_release(hba);
d44a5f98
DR
2978 return err;
2979}
2980
a70e91b8 2981/**
8aa29f19
BVA
2982 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
2983 * @hba: per-adapter instance
2984 * @opcode: attribute opcode
2985 * @idn: attribute idn to access
2986 * @index: index field
2987 * @selector: selector field
2988 * @desc_buf: the buffer that contains the descriptor
2989 * @buf_len: length parameter passed to the device
a70e91b8
YG
2990 *
2991 * Returns 0 for success, non-zero in case of failure.
2992 * The buf_len parameter will contain, on return, the length parameter
2993 * received on the response.
2994 */
2238d31c
SN
2995int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2996 enum query_opcode opcode,
2997 enum desc_idn idn, u8 index,
2998 u8 selector,
2999 u8 *desc_buf, int *buf_len)
a70e91b8
YG
3000{
3001 int err;
3002 int retries;
3003
3004 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3005 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3006 selector, desc_buf, buf_len);
3007 if (!err || err == -EINVAL)
3008 break;
3009 }
3010
3011 return err;
3012}
a70e91b8 3013
a4b0e8a4
PM
3014/**
3015 * ufshcd_read_desc_length - read the specified descriptor length from header
3016 * @hba: Pointer to adapter instance
3017 * @desc_id: descriptor idn value
3018 * @desc_index: descriptor index
3019 * @desc_length: pointer to variable to read the length of descriptor
3020 *
3021 * Return 0 in case of success, non-zero otherwise
3022 */
3023static int ufshcd_read_desc_length(struct ufs_hba *hba,
3024 enum desc_idn desc_id,
3025 int desc_index,
3026 int *desc_length)
3027{
3028 int ret;
3029 u8 header[QUERY_DESC_HDR_SIZE];
3030 int header_len = QUERY_DESC_HDR_SIZE;
3031
3032 if (desc_id >= QUERY_DESC_IDN_MAX)
3033 return -EINVAL;
3034
3035 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3036 desc_id, desc_index, 0, header,
3037 &header_len);
3038
3039 if (ret) {
3040 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3041 __func__, desc_id);
3042 return ret;
3043 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3044 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3045 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3046 desc_id);
3047 ret = -EINVAL;
3048 }
3049
3050 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3051 return ret;
3052
3053}
3054
3055/**
3056 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3057 * @hba: Pointer to adapter instance
3058 * @desc_id: descriptor idn value
3059 * @desc_len: mapped desc length (out)
3060 *
3061 * Return 0 in case of success, non-zero otherwise
3062 */
3063int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3064 enum desc_idn desc_id, int *desc_len)
3065{
3066 switch (desc_id) {
3067 case QUERY_DESC_IDN_DEVICE:
3068 *desc_len = hba->desc_size.dev_desc;
3069 break;
3070 case QUERY_DESC_IDN_POWER:
3071 *desc_len = hba->desc_size.pwr_desc;
3072 break;
3073 case QUERY_DESC_IDN_GEOMETRY:
3074 *desc_len = hba->desc_size.geom_desc;
3075 break;
3076 case QUERY_DESC_IDN_CONFIGURATION:
3077 *desc_len = hba->desc_size.conf_desc;
3078 break;
3079 case QUERY_DESC_IDN_UNIT:
3080 *desc_len = hba->desc_size.unit_desc;
3081 break;
3082 case QUERY_DESC_IDN_INTERCONNECT:
3083 *desc_len = hba->desc_size.interc_desc;
3084 break;
3085 case QUERY_DESC_IDN_STRING:
3086 *desc_len = QUERY_DESC_MAX_SIZE;
3087 break;
c648c2d2
SN
3088 case QUERY_DESC_IDN_HEALTH:
3089 *desc_len = hba->desc_size.hlth_desc;
3090 break;
a4b0e8a4
PM
3091 case QUERY_DESC_IDN_RFU_0:
3092 case QUERY_DESC_IDN_RFU_1:
3093 *desc_len = 0;
3094 break;
3095 default:
3096 *desc_len = 0;
3097 return -EINVAL;
3098 }
3099 return 0;
3100}
3101EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3102
da461cec
SJ
3103/**
3104 * ufshcd_read_desc_param - read the specified descriptor parameter
3105 * @hba: Pointer to adapter instance
3106 * @desc_id: descriptor idn value
3107 * @desc_index: descriptor index
3108 * @param_offset: offset of the parameter to read
3109 * @param_read_buf: pointer to buffer where parameter would be read
3110 * @param_size: sizeof(param_read_buf)
3111 *
3112 * Return 0 in case of success, non-zero otherwise
3113 */
45bced87
SN
3114int ufshcd_read_desc_param(struct ufs_hba *hba,
3115 enum desc_idn desc_id,
3116 int desc_index,
3117 u8 param_offset,
3118 u8 *param_read_buf,
3119 u8 param_size)
da461cec
SJ
3120{
3121 int ret;
3122 u8 *desc_buf;
a4b0e8a4 3123 int buff_len;
da461cec
SJ
3124 bool is_kmalloc = true;
3125
a4b0e8a4
PM
3126 /* Safety check */
3127 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
da461cec
SJ
3128 return -EINVAL;
3129
a4b0e8a4
PM
3130 /* Get the max length of descriptor from structure filled up at probe
3131 * time.
3132 */
3133 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
da461cec 3134
a4b0e8a4
PM
3135 /* Sanity checks */
3136 if (ret || !buff_len) {
3137 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3138 __func__);
3139 return ret;
3140 }
3141
3142 /* Check whether we need temp memory */
3143 if (param_offset != 0 || param_size < buff_len) {
da461cec
SJ
3144 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3145 if (!desc_buf)
3146 return -ENOMEM;
a4b0e8a4
PM
3147 } else {
3148 desc_buf = param_read_buf;
3149 is_kmalloc = false;
da461cec
SJ
3150 }
3151
a4b0e8a4 3152 /* Request for full descriptor */
a70e91b8 3153 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
a4b0e8a4
PM
3154 desc_id, desc_index, 0,
3155 desc_buf, &buff_len);
da461cec 3156
bde44bb6 3157 if (ret) {
3158 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3159 __func__, desc_id, desc_index, param_offset, ret);
da461cec
SJ
3160 goto out;
3161 }
3162
bde44bb6 3163 /* Sanity check */
3164 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3165 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3166 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3167 ret = -EINVAL;
3168 goto out;
3169 }
3170
a4b0e8a4
PM
 3171 /* Check that we will not copy more data than is available */
3172 if (is_kmalloc && param_size > buff_len)
3173 param_size = buff_len;
bde44bb6 3174
da461cec
SJ
3175 if (is_kmalloc)
3176 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3177out:
3178 if (is_kmalloc)
3179 kfree(desc_buf);
3180 return ret;
3181}
3182
3183static inline int ufshcd_read_desc(struct ufs_hba *hba,
3184 enum desc_idn desc_id,
3185 int desc_index,
4b828fe1 3186 void *buf,
da461cec
SJ
3187 u32 size)
3188{
3189 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3190}
3191
b573d484 3192
4b828fe1
TW
3193/**
3194 * struct uc_string_id - unicode string
3195 *
3196 * @len: size of this descriptor inclusive
3197 * @type: descriptor type
3198 * @uc: unicode string character
3199 */
3200struct uc_string_id {
3201 u8 len;
3202 u8 type;
3203 wchar_t uc[0];
3204} __packed;
3205
3206/* replace non-printable or non-ASCII characters with spaces */
3207static inline char ufshcd_remove_non_printable(u8 ch)
3208{
3209 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3210}
3211
b573d484
YG
3212/**
3213 * ufshcd_read_string_desc - read string descriptor
3214 * @hba: pointer to adapter instance
3215 * @desc_index: descriptor index
4b828fe1
TW
3216 * @buf: pointer to buffer where descriptor would be read,
3217 * the caller should free the memory.
b573d484 3218 * @ascii: if true convert from unicode to ascii characters
4b828fe1 3219 * null terminated string.
b573d484 3220 *
4b828fe1
TW
3221 * Return:
3222 * * string size on success.
3223 * * -ENOMEM: on allocation failure
3224 * * -EINVAL: on a wrong parameter
b573d484 3225 */
4b828fe1
TW
3226int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3227 u8 **buf, bool ascii)
b573d484 3228{
4b828fe1
TW
3229 struct uc_string_id *uc_str;
3230 u8 *str;
3231 int ret;
b573d484 3232
4b828fe1
TW
3233 if (!buf)
3234 return -EINVAL;
b573d484 3235
4b828fe1
TW
3236 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3237 if (!uc_str)
3238 return -ENOMEM;
b573d484 3239
4b828fe1
TW
3240 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3241 desc_index, uc_str,
3242 QUERY_DESC_MAX_SIZE);
3243 if (ret < 0) {
3244 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3245 QUERY_REQ_RETRIES, ret);
3246 str = NULL;
3247 goto out;
3248 }
3249
3250 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3251 dev_dbg(hba->dev, "String Desc is of zero length\n");
3252 str = NULL;
3253 ret = 0;
b573d484
YG
3254 goto out;
3255 }
3256
3257 if (ascii) {
4b828fe1 3258 ssize_t ascii_len;
b573d484 3259 int i;
b573d484 3260 /* remove header and divide by 2 to move from UTF16 to UTF8 */
4b828fe1
TW
3261 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3262 str = kzalloc(ascii_len, GFP_KERNEL);
3263 if (!str) {
3264 ret = -ENOMEM;
fcbefc3b 3265 goto out;
b573d484
YG
3266 }
3267
3268 /*
3269 * the descriptor contains string in UTF16 format
3270 * we need to convert to utf-8 so it can be displayed
3271 */
4b828fe1
TW
3272 ret = utf16s_to_utf8s(uc_str->uc,
3273 uc_str->len - QUERY_DESC_HDR_SIZE,
3274 UTF16_BIG_ENDIAN, str, ascii_len);
b573d484
YG
3275
3276 /* replace non-printable or non-ASCII characters with spaces */
4b828fe1
TW
3277 for (i = 0; i < ret; i++)
3278 str[i] = ufshcd_remove_non_printable(str[i]);
b573d484 3279
4b828fe1
TW
3280 str[ret++] = '\0';
3281
3282 } else {
5f57704d 3283 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
4b828fe1
TW
3284 if (!str) {
3285 ret = -ENOMEM;
3286 goto out;
3287 }
4b828fe1 3288 ret = uc_str->len;
b573d484
YG
3289 }
3290out:
4b828fe1
TW
3291 *buf = str;
3292 kfree(uc_str);
3293 return ret;
b573d484 3294}
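/*
 * Sketch of typical use: the string index is read out of another descriptor
 * first ("prod_name_index" below is just an illustrative name), then the
 * string itself is fetched in ASCII form:
 *
 *   u8 *name = NULL;
 *   int len = ufshcd_read_string_desc(hba, prod_name_index, &name, true);
 *   if (len > 0)
 *           dev_info(hba->dev, "product name: %s\n", name);
 *   kfree(name);
 */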
b573d484 3295
da461cec
SJ
3296/**
3297 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3298 * @hba: Pointer to adapter instance
3299 * @lun: lun id
3300 * @param_offset: offset of the parameter to read
3301 * @param_read_buf: pointer to buffer where parameter would be read
3302 * @param_size: sizeof(param_read_buf)
3303 *
3304 * Return 0 in case of success, non-zero otherwise
3305 */
3306static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3307 int lun,
3308 enum unit_desc_param param_offset,
3309 u8 *param_read_buf,
3310 u32 param_size)
3311{
3312 /*
3313 * Unit descriptors are only available for general purpose LUs (LUN id
3314 * from 0 to 7) and RPMB Well known LU.
3315 */
1baa8011 3316 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
da461cec
SJ
3317 return -EOPNOTSUPP;
3318
3319 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3320 param_offset, param_read_buf, param_size);
3321}
3322
09f17791
CG
3323static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3324{
3325 int err = 0;
3326 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3327
3328 if (hba->dev_info.wspecversion >= 0x300) {
3329 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3330 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3331 &gating_wait);
3332 if (err)
3333 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3334 err, gating_wait);
3335
3336 if (gating_wait == 0) {
3337 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3338 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3339 gating_wait);
3340 }
3341
3342 hba->dev_info.clk_gating_wait_us = gating_wait;
3343 }
3344
3345 return err;
3346}
3347
7a3e97b0
SY
3348/**
3349 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3350 * @hba: per adapter instance
3351 *
3352 * 1. Allocate DMA memory for Command Descriptor array
 3353 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3354 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3355 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3356 * (UTMRDL)
3357 * 4. Allocate memory for local reference block(lrb).
3358 *
3359 * Returns 0 for success, non-zero in case of failure
3360 */
3361static int ufshcd_memory_alloc(struct ufs_hba *hba)
3362{
3363 size_t utmrdl_size, utrdl_size, ucdl_size;
3364
3365 /* Allocate memory for UTP command descriptors */
3366 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2953f850
SJ
3367 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3368 ucdl_size,
3369 &hba->ucdl_dma_addr,
3370 GFP_KERNEL);
7a3e97b0
SY
3371
3372 /*
3373 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
 3374 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
 3375 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
 3376 * be aligned to 128 bytes as well.
3377 */
3378 if (!hba->ucdl_base_addr ||
3379 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3380 dev_err(hba->dev,
7a3e97b0
SY
3381 "Command Descriptor Memory allocation failed\n");
3382 goto out;
3383 }
3384
3385 /*
3386 * Allocate memory for UTP Transfer descriptors
3387 * UFSHCI requires 1024 byte alignment of UTRD
3388 */
3389 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2953f850
SJ
3390 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3391 utrdl_size,
3392 &hba->utrdl_dma_addr,
3393 GFP_KERNEL);
7a3e97b0
SY
3394 if (!hba->utrdl_base_addr ||
3395 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3396 dev_err(hba->dev,
7a3e97b0
SY
3397 "Transfer Descriptor Memory allocation failed\n");
3398 goto out;
3399 }
3400
3401 /*
3402 * Allocate memory for UTP Task Management descriptors
3403 * UFSHCI requires 1024 byte alignment of UTMRD
3404 */
3405 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2953f850
SJ
3406 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3407 utmrdl_size,
3408 &hba->utmrdl_dma_addr,
3409 GFP_KERNEL);
7a3e97b0
SY
3410 if (!hba->utmrdl_base_addr ||
3411 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3412 dev_err(hba->dev,
7a3e97b0
SY
3413 "Task Management Descriptor Memory allocation failed\n");
3414 goto out;
3415 }
3416
3417 /* Allocate memory for local reference block */
a86854d0
KC
3418 hba->lrb = devm_kcalloc(hba->dev,
3419 hba->nutrs, sizeof(struct ufshcd_lrb),
2953f850 3420 GFP_KERNEL);
7a3e97b0 3421 if (!hba->lrb) {
3b1d0580 3422 dev_err(hba->dev, "LRB Memory allocation failed\n");
7a3e97b0
SY
3423 goto out;
3424 }
3425 return 0;
3426out:
7a3e97b0
SY
3427 return -ENOMEM;
3428}
3429
3430/**
3431 * ufshcd_host_memory_configure - configure local reference block with
3432 * memory offsets
3433 * @hba: per adapter instance
3434 *
3435 * Configure Host memory space
3436 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3437 * address.
3438 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3439 * and PRDT offset.
3440 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3441 * into local reference block.
3442 */
3443static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3444{
7a3e97b0
SY
3445 struct utp_transfer_req_desc *utrdlp;
3446 dma_addr_t cmd_desc_dma_addr;
3447 dma_addr_t cmd_desc_element_addr;
3448 u16 response_offset;
3449 u16 prdt_offset;
3450 int cmd_desc_size;
3451 int i;
3452
3453 utrdlp = hba->utrdl_base_addr;
7a3e97b0
SY
3454
3455 response_offset =
3456 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3457 prdt_offset =
3458 offsetof(struct utp_transfer_cmd_desc, prd_table);
3459
3460 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3461 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3462
3463 for (i = 0; i < hba->nutrs; i++) {
3464 /* Configure UTRD with command descriptor base address */
3465 cmd_desc_element_addr =
3466 (cmd_desc_dma_addr + (cmd_desc_size * i));
3467 utrdlp[i].command_desc_base_addr_lo =
3468 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3469 utrdlp[i].command_desc_base_addr_hi =
3470 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3471
3472 /* Response upiu and prdt offset should be in double words */
49200199
CH
3473 utrdlp[i].response_upiu_offset =
3474 cpu_to_le16(response_offset >> 2);
3475 utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
3476 utrdlp[i].response_upiu_length =
3477 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
7a3e97b0 3478
4d2b8d40 3479 ufshcd_init_lrb(hba, &hba->lrb[i], i);
7a3e97b0
SY
3480 }
3481}
3482
3483/**
3484 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3485 * @hba: per adapter instance
3486 *
3487 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3488 * in order to initialize the Unipro link startup procedure.
3489 * Once the Unipro links are up, the device connected to the controller
3490 * is detected.
3491 *
3492 * Returns 0 on success, non-zero value on failure
3493 */
3494static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3495{
6ccf44fe
SJ
3496 struct uic_command uic_cmd = {0};
3497 int ret;
7a3e97b0 3498
6ccf44fe 3499 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
7a3e97b0 3500
6ccf44fe
SJ
3501 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3502 if (ret)
ff8e20c6 3503 dev_dbg(hba->dev,
6ccf44fe
SJ
3504 "dme-link-startup: error code %d\n", ret);
3505 return ret;
7a3e97b0
SY
3506}
3507
cad2e03d
YG
3508static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3509{
3510 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3511 unsigned long min_sleep_time_us;
3512
3513 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3514 return;
3515
3516 /*
3517 * last_dme_cmd_tstamp will be 0 only for 1st call to
3518 * this function
3519 */
3520 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3521 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3522 } else {
3523 unsigned long delta =
3524 (unsigned long) ktime_to_us(
3525 ktime_sub(ktime_get(),
3526 hba->last_dme_cmd_tstamp));
3527
3528 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3529 min_sleep_time_us =
3530 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3531 else
3532 return; /* no more delay required */
3533 }
3534
3535 /* allow sleep for extra 50us if needed */
3536 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3537}
3538
12b4fdb4
SJ
3539/**
3540 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3541 * @hba: per adapter instance
3542 * @attr_sel: uic command argument1
3543 * @attr_set: attribute set type as uic command argument2
3544 * @mib_val: setting value as uic command argument3
3545 * @peer: indicate whether peer or local
3546 *
3547 * Returns 0 on success, non-zero value on failure
3548 */
3549int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3550 u8 attr_set, u32 mib_val, u8 peer)
3551{
3552 struct uic_command uic_cmd = {0};
3553 static const char *const action[] = {
3554 "dme-set",
3555 "dme-peer-set"
3556 };
3557 const char *set = action[!!peer];
3558 int ret;
64238fbd 3559 int retries = UFS_UIC_COMMAND_RETRIES;
12b4fdb4
SJ
3560
3561 uic_cmd.command = peer ?
3562 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3563 uic_cmd.argument1 = attr_sel;
3564 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3565 uic_cmd.argument3 = mib_val;
3566
64238fbd
YG
3567 do {
3568 /* for peer attributes we retry upon failure */
3569 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3570 if (ret)
3571 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3572 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3573 } while (ret && peer && --retries);
3574
f37e9f8c 3575 if (ret)
64238fbd 3576 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
f37e9f8c
YG
3577 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3578 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4
SJ
3579
3580 return ret;
3581}
3582EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
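/*
 * Editor's note (illustrative sketch, not part of the original source):
 * callers normally do not invoke ufshcd_dme_set_attr() directly; they are
 * assumed to use the ufshcd_dme_set()/ufshcd_dme_peer_set() convenience
 * wrappers from ufshcd.h, which fill in the attribute-set type and the
 * local/peer selection. Mirroring the power mode code later in this file:
 *
 *	int ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR),
 *				 hba->pwr_info.gear_tx);
 *	if (ret)
 *		dev_err(hba->dev, "setting PA_TXGEAR failed: %d\n", ret);
 */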
3583
3584/**
3585 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3586 * @hba: per adapter instance
3587 * @attr_sel: uic command argument1
3588 * @mib_val: the value of the attribute as returned by the UIC command
3589 * @peer: indicate whether peer or local
3590 *
3591 * Returns 0 on success, non-zero value on failure
3592 */
3593int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3594 u32 *mib_val, u8 peer)
3595{
3596 struct uic_command uic_cmd = {0};
3597 static const char *const action[] = {
3598 "dme-get",
3599 "dme-peer-get"
3600 };
3601 const char *get = action[!!peer];
3602 int ret;
64238fbd 3603 int retries = UFS_UIC_COMMAND_RETRIES;
874237f7
YG
3604 struct ufs_pa_layer_attr orig_pwr_info;
3605 struct ufs_pa_layer_attr temp_pwr_info;
3606 bool pwr_mode_change = false;
3607
3608 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3609 orig_pwr_info = hba->pwr_info;
3610 temp_pwr_info = orig_pwr_info;
3611
3612 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3613 orig_pwr_info.pwr_rx == FAST_MODE) {
3614 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3615 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3616 pwr_mode_change = true;
3617 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3618 orig_pwr_info.pwr_rx == SLOW_MODE) {
3619 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3620 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3621 pwr_mode_change = true;
3622 }
3623 if (pwr_mode_change) {
3624 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3625 if (ret)
3626 goto out;
3627 }
3628 }
12b4fdb4
SJ
3629
3630 uic_cmd.command = peer ?
3631 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3632 uic_cmd.argument1 = attr_sel;
3633
64238fbd
YG
3634 do {
3635 /* for peer attributes we retry upon failure */
3636 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3637 if (ret)
3638 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3639 get, UIC_GET_ATTR_ID(attr_sel), ret);
3640 } while (ret && peer && --retries);
3641
f37e9f8c 3642 if (ret)
64238fbd 3643 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
f37e9f8c
YG
3644 get, UIC_GET_ATTR_ID(attr_sel),
3645 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4 3646
64238fbd 3647 if (mib_val && !ret)
12b4fdb4 3648 *mib_val = uic_cmd.argument3;
874237f7
YG
3649
3650 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3651 && pwr_mode_change)
3652 ufshcd_change_power_mode(hba, &orig_pwr_info);
12b4fdb4
SJ
3653out:
3654 return ret;
3655}
3656EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
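/*
 * Editor's note (illustrative sketch, not part of the original source): as
 * with the set path, reads are assumed to go through the ufshcd_dme_get()/
 * ufshcd_dme_peer_get() wrappers. Mirroring ufshcd_get_max_pwr_mode() below,
 * which reads the connected lane counts:
 *
 *	u32 lanes = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes);
 */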
3657
53b3d9c3 3658/**
57d104c1
SJ
3659	 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
3660	 * state) and waits for them to take effect.
3661 *
53b3d9c3 3662 * @hba: per adapter instance
57d104c1
SJ
3663 * @cmd: UIC command to execute
3664 *
3665 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3666	 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3667	 * and device UniPro link, and hence their final completion is indicated by
3668 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3669 * addition to normal UIC command completion Status (UCCS). This function only
3670 * returns after the relevant status bits indicate the completion.
53b3d9c3
SJ
3671 *
3672 * Returns 0 on success, non-zero value on failure
3673 */
57d104c1 3674static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
53b3d9c3 3675{
57d104c1 3676 struct completion uic_async_done;
53b3d9c3
SJ
3677 unsigned long flags;
3678 u8 status;
3679 int ret;
d75f7fe4 3680 bool reenable_intr = false;
53b3d9c3 3681
53b3d9c3 3682 mutex_lock(&hba->uic_cmd_mutex);
57d104c1 3683 init_completion(&uic_async_done);
cad2e03d 3684 ufshcd_add_delay_before_dme_cmd(hba);
53b3d9c3
SJ
3685
3686 spin_lock_irqsave(hba->host->host_lock, flags);
57d104c1 3687 hba->uic_async_done = &uic_async_done;
d75f7fe4
YG
3688 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3689 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3690 /*
3691 * Make sure UIC command completion interrupt is disabled before
3692 * issuing UIC command.
3693 */
3694 wmb();
3695 reenable_intr = true;
57d104c1 3696 }
d75f7fe4
YG
3697 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3698 spin_unlock_irqrestore(hba->host->host_lock, flags);
57d104c1
SJ
3699 if (ret) {
3700 dev_err(hba->dev,
3701 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3702 cmd->command, cmd->argument3, ret);
53b3d9c3
SJ
3703 goto out;
3704 }
3705
57d104c1 3706 if (!wait_for_completion_timeout(hba->uic_async_done,
53b3d9c3
SJ
3707 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3708 dev_err(hba->dev,
57d104c1
SJ
3709 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3710 cmd->command, cmd->argument3);
53b3d9c3
SJ
3711 ret = -ETIMEDOUT;
3712 goto out;
3713 }
3714
3715 status = ufshcd_get_upmcrs(hba);
3716 if (status != PWR_LOCAL) {
3717 dev_err(hba->dev,
479da360 3718 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
57d104c1 3719 cmd->command, status);
53b3d9c3
SJ
3720 ret = (status != PWR_OK) ? status : -1;
3721 }
3722out:
7942f7b5
VG
3723 if (ret) {
3724 ufshcd_print_host_state(hba);
3725 ufshcd_print_pwr_info(hba);
3726 ufshcd_print_host_regs(hba);
3727 }
3728
53b3d9c3 3729 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 3730 hba->active_uic_cmd = NULL;
57d104c1 3731 hba->uic_async_done = NULL;
d75f7fe4
YG
3732 if (reenable_intr)
3733 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
53b3d9c3
SJ
3734 spin_unlock_irqrestore(hba->host->host_lock, flags);
3735 mutex_unlock(&hba->uic_cmd_mutex);
1ab27c9c 3736
53b3d9c3
SJ
3737 return ret;
3738}
3739
57d104c1
SJ
3740/**
3741	 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3742	 * using DME_SET primitives.
3743	 * @hba: per adapter instance
3744	 * @mode: power mode value
3745 *
3746 * Returns 0 on success, non-zero value on failure
3747 */
3748static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3749{
3750 struct uic_command uic_cmd = {0};
1ab27c9c 3751 int ret;
57d104c1 3752
c3a2f9ee
YG
3753 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3754 ret = ufshcd_dme_set(hba,
3755 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3756 if (ret) {
3757 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3758 __func__, ret);
3759 goto out;
3760 }
3761 }
3762
57d104c1
SJ
3763 uic_cmd.command = UIC_CMD_DME_SET;
3764 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3765 uic_cmd.argument3 = mode;
1ab27c9c
ST
3766 ufshcd_hold(hba, false);
3767 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3768 ufshcd_release(hba);
57d104c1 3769
c3a2f9ee 3770out:
1ab27c9c 3771 return ret;
57d104c1
SJ
3772}
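/*
 * Editor's note (illustrative, not part of the original source): the @mode
 * byte packs the RX power mode in the upper nibble and the TX power mode in
 * the lower nibble, exactly as ufshcd_change_power_mode() builds it below, so
 *
 *	ufshcd_uic_change_pwr_mode(hba, (FAST_MODE << 4) | FAST_MODE);
 *
 * would request FAST_MODE in both directions.
 */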
3773
53c12d0e
YG
3774static int ufshcd_link_recovery(struct ufs_hba *hba)
3775{
3776 int ret;
3777 unsigned long flags;
3778
3779 spin_lock_irqsave(hba->host->host_lock, flags);
3780 hba->ufshcd_state = UFSHCD_STATE_RESET;
3781 ufshcd_set_eh_in_progress(hba);
3782 spin_unlock_irqrestore(hba->host->host_lock, flags);
3783
ebdd1dfd
CG
3784 /* Reset the attached device */
3785 ufshcd_vops_device_reset(hba);
3786
53c12d0e
YG
3787 ret = ufshcd_host_reset_and_restore(hba);
3788
3789 spin_lock_irqsave(hba->host->host_lock, flags);
3790 if (ret)
3791 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3792 ufshcd_clear_eh_in_progress(hba);
3793 spin_unlock_irqrestore(hba->host->host_lock, flags);
3794
3795 if (ret)
3796 dev_err(hba->dev, "%s: link recovery failed, err %d",
3797 __func__, ret);
3798
3799 return ret;
3800}
3801
87d0b4a6 3802static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
57d104c1 3803{
87d0b4a6 3804 int ret;
57d104c1 3805 struct uic_command uic_cmd = {0};
911a0771 3806 ktime_t start = ktime_get();
57d104c1 3807
ee32c909
KK
3808 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3809
57d104c1 3810 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
87d0b4a6 3811 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771 3812 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3813 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
87d0b4a6 3814
53c12d0e 3815 if (ret) {
6d303e4b
SJ
3816 int err;
3817
87d0b4a6
YG
3818 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3819 __func__, ret);
3820
53c12d0e 3821 /*
6d303e4b
SJ
3822	 * If link recovery fails then return the error code returned from
3823	 * ufshcd_link_recovery().
3824	 * If link recovery succeeds then return -EAGAIN so that the
3825	 * hibern8 enter can be retried.
53c12d0e 3826 */
6d303e4b
SJ
3827 err = ufshcd_link_recovery(hba);
3828 if (err) {
3829 dev_err(hba->dev, "%s: link recovery failed", __func__);
3830 ret = err;
3831 } else {
3832 ret = -EAGAIN;
3833 }
ee32c909
KK
3834 } else
3835 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3836 POST_CHANGE);
53c12d0e 3837
87d0b4a6
YG
3838 return ret;
3839}
3840
3841static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3842{
3843 int ret = 0, retries;
57d104c1 3844
87d0b4a6
YG
3845 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3846 ret = __ufshcd_uic_hibern8_enter(hba);
6d303e4b 3847 if (!ret)
87d0b4a6
YG
3848 goto out;
3849 }
3850out:
3851 return ret;
57d104c1
SJ
3852}
3853
9d19bf7a 3854int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
57d104c1
SJ
3855{
3856 struct uic_command uic_cmd = {0};
3857 int ret;
911a0771 3858 ktime_t start = ktime_get();
57d104c1 3859
ee32c909
KK
3860 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3861
57d104c1
SJ
3862 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3863 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771 3864 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3865 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3866
57d104c1 3867 if (ret) {
53c12d0e
YG
3868 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3869 __func__, ret);
3870 ret = ufshcd_link_recovery(hba);
ff8e20c6 3871 } else {
ee32c909
KK
3872 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3873 POST_CHANGE);
ff8e20c6
DR
3874 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3875 hba->ufs_stats.hibern8_exit_cnt++;
3876 }
57d104c1
SJ
3877
3878 return ret;
3879}
9d19bf7a 3880EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
57d104c1 3881
ba7af5ec
SC
3882void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3883{
3884 unsigned long flags;
3885
3886 if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
3887 return;
3888
3889 spin_lock_irqsave(hba->host->host_lock, flags);
3890 if (hba->ahit == ahit)
3891 goto out_unlock;
3892 hba->ahit = ahit;
3893 if (!pm_runtime_suspended(hba->dev))
3894 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3895out_unlock:
3896 spin_unlock_irqrestore(hba->host->host_lock, flags);
3897}
3898EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
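/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * @ahit argument is the raw AUTO_HIBERNATE_IDLE_TIMER register image. Assuming
 * the UFSHCI_AHIBERN8_TIMER_MASK and UFSHCI_AHIBERN8_SCALE_MASK field
 * definitions from ufshci.h, a caller (e.g. the sysfs interface) would build
 * it roughly as below, where "timer" and "scale" are placeholder variables
 * holding the desired idle time and its time-unit encoding:
 *
 *	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
 *		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
 *
 *	ufshcd_auto_hibern8_update(hba, ahit);
 */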
3899
71d848b8 3900void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
ad448378
AH
3901{
3902 unsigned long flags;
3903
ee5f1042 3904 if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
ad448378
AH
3905 return;
3906
3907 spin_lock_irqsave(hba->host->host_lock, flags);
3908 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3909 spin_unlock_irqrestore(hba->host->host_lock, flags);
3910}
3911
5064636c
YG
3912 /**
3913 * ufshcd_init_pwr_info - setting the POR (power on reset)
3914 * values in hba power info
3915 * @hba: per-adapter instance
3916 */
3917static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3918{
3919 hba->pwr_info.gear_rx = UFS_PWM_G1;
3920 hba->pwr_info.gear_tx = UFS_PWM_G1;
3921 hba->pwr_info.lane_rx = 1;
3922 hba->pwr_info.lane_tx = 1;
3923 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3924 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3925 hba->pwr_info.hs_rate = 0;
3926}
3927
d3e89bac 3928/**
7eb584db
DR
3929 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3930 * @hba: per-adapter instance
d3e89bac 3931 */
7eb584db 3932static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
d3e89bac 3933{
7eb584db
DR
3934 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3935
3936 if (hba->max_pwr_info.is_valid)
3937 return 0;
3938
2349b533 3939 pwr_info->pwr_tx = FAST_MODE;
3940 pwr_info->pwr_rx = FAST_MODE;
7eb584db 3941 pwr_info->hs_rate = PA_HS_MODE_B;
d3e89bac
SJ
3942
3943 /* Get the connected lane count */
7eb584db
DR
3944 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3945 &pwr_info->lane_rx);
3946 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3947 &pwr_info->lane_tx);
3948
3949 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3950 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3951 __func__,
3952 pwr_info->lane_rx,
3953 pwr_info->lane_tx);
3954 return -EINVAL;
3955 }
d3e89bac
SJ
3956
3957 /*
3958 * First, get the maximum gears of HS speed.
3959 * If a zero value, it means there is no HSGEAR capability.
3960 * Then, get the maximum gears of PWM speed.
3961 */
7eb584db
DR
3962 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3963 if (!pwr_info->gear_rx) {
3964 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3965 &pwr_info->gear_rx);
3966 if (!pwr_info->gear_rx) {
3967 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3968 __func__, pwr_info->gear_rx);
3969 return -EINVAL;
3970 }
2349b533 3971 pwr_info->pwr_rx = SLOW_MODE;
d3e89bac
SJ
3972 }
3973
7eb584db
DR
3974 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3975 &pwr_info->gear_tx);
3976 if (!pwr_info->gear_tx) {
d3e89bac 3977 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
7eb584db
DR
3978 &pwr_info->gear_tx);
3979 if (!pwr_info->gear_tx) {
3980 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3981 __func__, pwr_info->gear_tx);
3982 return -EINVAL;
3983 }
2349b533 3984 pwr_info->pwr_tx = SLOW_MODE;
7eb584db
DR
3985 }
3986
3987 hba->max_pwr_info.is_valid = true;
3988 return 0;
3989}
3990
3991static int ufshcd_change_power_mode(struct ufs_hba *hba,
3992 struct ufs_pa_layer_attr *pwr_mode)
3993{
3994 int ret;
3995
3996 /* if already configured to the requested pwr_mode */
3997 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3998 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3999 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4000 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4001 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4002 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4003 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4004 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4005 return 0;
d3e89bac
SJ
4006 }
4007
4008 /*
4009 * Configure attributes for power mode change with below.
4010 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4011 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4012 * - PA_HSSERIES
4013 */
7eb584db
DR
4014 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4015 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4016 pwr_mode->lane_rx);
4017 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4018 pwr_mode->pwr_rx == FAST_MODE)
d3e89bac 4019 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
7eb584db
DR
4020 else
4021 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
d3e89bac 4022
7eb584db
DR
4023 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4024 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4025 pwr_mode->lane_tx);
4026 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4027 pwr_mode->pwr_tx == FAST_MODE)
d3e89bac 4028 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
7eb584db
DR
4029 else
4030 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
d3e89bac 4031
7eb584db
DR
4032 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4033 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4034 pwr_mode->pwr_rx == FAST_MODE ||
4035 pwr_mode->pwr_tx == FAST_MODE)
4036 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4037 pwr_mode->hs_rate);
d3e89bac 4038
08342537
CG
4039 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4040 DL_FC0ProtectionTimeOutVal_Default);
4041 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4042 DL_TC0ReplayTimeOutVal_Default);
4043 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4044 DL_AFC0ReqTimeOutVal_Default);
4045 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4046 DL_FC1ProtectionTimeOutVal_Default);
4047 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4048 DL_TC1ReplayTimeOutVal_Default);
4049 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4050 DL_AFC1ReqTimeOutVal_Default);
4051
4052 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4053 DL_FC0ProtectionTimeOutVal_Default);
4054 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4055 DL_TC0ReplayTimeOutVal_Default);
4056 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4057 DL_AFC0ReqTimeOutVal_Default);
4058
7eb584db
DR
4059 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4060 | pwr_mode->pwr_tx);
4061
4062 if (ret) {
d3e89bac 4063 dev_err(hba->dev,
7eb584db
DR
4064 "%s: power mode change failed %d\n", __func__, ret);
4065 } else {
0263bcd0
YG
4066 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4067 pwr_mode);
7eb584db
DR
4068
4069 memcpy(&hba->pwr_info, pwr_mode,
4070 sizeof(struct ufs_pa_layer_attr));
4071 }
4072
4073 return ret;
4074}
4075
4076/**
4077 * ufshcd_config_pwr_mode - configure a new power mode
4078 * @hba: per-adapter instance
4079 * @desired_pwr_mode: desired power configuration
4080 */
0d846e70 4081int ufshcd_config_pwr_mode(struct ufs_hba *hba,
7eb584db
DR
4082 struct ufs_pa_layer_attr *desired_pwr_mode)
4083{
4084 struct ufs_pa_layer_attr final_params = { 0 };
4085 int ret;
4086
0263bcd0
YG
4087 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4088 desired_pwr_mode, &final_params);
4089
4090 if (ret)
7eb584db
DR
4091 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4092
4093 ret = ufshcd_change_power_mode(hba, &final_params);
a3cd5ec5 4094 if (!ret)
4095 ufshcd_print_pwr_info(hba);
d3e89bac
SJ
4096
4097 return ret;
4098}
0d846e70 4099EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
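/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * typical caller is the probe/init path, which is assumed to first fill
 * hba->max_pwr_info via ufshcd_get_max_pwr_mode() and then switch to the
 * negotiated maximum:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */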
d3e89bac 4100
68078d5c
DR
4101/**
4102 * ufshcd_complete_dev_init() - checks device readiness
8aa29f19 4103 * @hba: per-adapter instance
68078d5c
DR
4104 *
4105 * Set fDeviceInit flag and poll until device toggles it.
4106 */
4107static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4108{
dc3c8d3a
YG
4109 int i;
4110 int err;
68078d5c
DR
4111 bool flag_res = 1;
4112
dc3c8d3a
YG
4113 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4114 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
68078d5c
DR
4115 if (err) {
4116 dev_err(hba->dev,
4117 "%s setting fDeviceInit flag failed with error %d\n",
4118 __func__, err);
4119 goto out;
4120 }
4121
dc3c8d3a
YG
4122 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4123 for (i = 0; i < 1000 && !err && flag_res; i++)
4124 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4125 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4126
68078d5c
DR
4127 if (err)
4128 dev_err(hba->dev,
4129 "%s reading fDeviceInit flag failed with error %d\n",
4130 __func__, err);
4131 else if (flag_res)
4132 dev_err(hba->dev,
4133 "%s fDeviceInit was not cleared by the device\n",
4134 __func__);
4135
4136out:
4137 return err;
4138}
4139
7a3e97b0
SY
4140/**
4141 * ufshcd_make_hba_operational - Make UFS controller operational
4142 * @hba: per adapter instance
4143 *
4144 * To bring UFS host controller to operational state,
5c0c28a8
SRT
4145 * 1. Enable required interrupts
4146 * 2. Configure interrupt aggregation
897efe62 4147 * 3. Program UTRL and UTMRL base address
5c0c28a8 4148 * 4. Configure run-stop-registers
7a3e97b0
SY
4149 *
4150 * Returns 0 on success, non-zero value on failure
4151 */
9d19bf7a 4152int ufshcd_make_hba_operational(struct ufs_hba *hba)
7a3e97b0
SY
4153{
4154 int err = 0;
4155 u32 reg;
4156
6ccf44fe
SJ
4157 /* Enable required interrupts */
4158 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4159
4160 /* Configure interrupt aggregation */
b852190e
YG
4161 if (ufshcd_is_intr_aggr_allowed(hba))
4162 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4163 else
4164 ufshcd_disable_intr_aggr(hba);
6ccf44fe
SJ
4165
4166 /* Configure UTRL and UTMRL base address registers */
4167 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4168 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4169 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4170 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4171 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4172 REG_UTP_TASK_REQ_LIST_BASE_L);
4173 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4174 REG_UTP_TASK_REQ_LIST_BASE_H);
4175
897efe62
YG
4176 /*
4177 * Make sure base address and interrupt setup are updated before
4178 * enabling the run/stop registers below.
4179 */
4180 wmb();
4181
7a3e97b0
SY
4182 /*
4183 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
7a3e97b0 4184 */
5c0c28a8 4185 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
7a3e97b0
SY
4186 if (!(ufshcd_get_lists_status(reg))) {
4187 ufshcd_enable_run_stop_reg(hba);
4188 } else {
3b1d0580 4189 dev_err(hba->dev,
7a3e97b0
SY
4190 "Host controller not ready to process requests");
4191 err = -EIO;
4192 goto out;
4193 }
4194
7a3e97b0
SY
4195out:
4196 return err;
4197}
9d19bf7a 4198EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
7a3e97b0 4199
596585a2
YG
4200/**
4201 * ufshcd_hba_stop - Send controller to reset state
4202 * @hba: per adapter instance
4203 * @can_sleep: perform sleep or just spin
4204 */
4205static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4206{
4207 int err;
4208
4209 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4210 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4211 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4212 10, 1, can_sleep);
4213 if (err)
4214 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4215}
4216
7a3e97b0 4217/**
49200199 4218 * ufshcd_hba_enable - initialize the controller
7a3e97b0
SY
4219 * @hba: per adapter instance
4220 *
4221 * The controller resets itself and controller firmware initialization
4222 * sequence kicks off. When controller is ready it will set
4223 * the Host Controller Enable bit to 1.
4224 *
4225 * Returns 0 on success, non-zero value on failure
4226 */
49200199 4227int ufshcd_hba_enable(struct ufs_hba *hba)
7a3e97b0
SY
4228{
4229 int retry;
4230
596585a2 4231 if (!ufshcd_is_hba_active(hba))
7a3e97b0 4232 /* change controller state to "reset state" */
596585a2 4233 ufshcd_hba_stop(hba, true);
7a3e97b0 4234
57d104c1
SJ
4235 /* UniPro link is disabled at this point */
4236 ufshcd_set_link_off(hba);
4237
0263bcd0 4238 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
5c0c28a8 4239
7a3e97b0
SY
4240 /* start controller initialization sequence */
4241 ufshcd_hba_start(hba);
4242
4243 /*
4244	 * To initialize a UFS host controller the HCE bit must be set to 1.
4245	 * During initialization the HCE bit value changes from 1->0->1.
4246	 * When the host controller completes the initialization sequence
4247	 * it sets the value of the HCE bit to 1. The same HCE bit is read back
4248	 * to check if the controller has completed the initialization sequence.
4249	 * So without this delay, the value HCE = 1 set by the previous
4250	 * instruction might be read back before the controller clears it.
4251 * This delay can be changed based on the controller.
4252 */
b9dc8aca 4253 ufshcd_delay_us(hba->hba_enable_delay_us, 100);
7a3e97b0
SY
4254
4255 /* wait for the host controller to complete initialization */
9fc305ef 4256 retry = 50;
7a3e97b0
SY
4257 while (ufshcd_is_hba_active(hba)) {
4258 if (retry) {
4259 retry--;
4260 } else {
3b1d0580 4261 dev_err(hba->dev,
7a3e97b0
SY
4262 "Controller enable failed\n");
4263 return -EIO;
4264 }
9fc305ef 4265 usleep_range(1000, 1100);
7a3e97b0 4266 }
5c0c28a8 4267
1d337ec2 4268 /* enable UIC related interrupts */
57d104c1 4269 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
1d337ec2 4270
0263bcd0 4271 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
5c0c28a8 4272
7a3e97b0
SY
4273 return 0;
4274}
9d19bf7a
SC
4275EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4276
7ca38cf3
YG
4277static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4278{
ba0320fb 4279 int tx_lanes = 0, i, err = 0;
7ca38cf3
YG
4280
4281 if (!peer)
4282 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4283 &tx_lanes);
4284 else
4285 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4286 &tx_lanes);
4287 for (i = 0; i < tx_lanes; i++) {
4288 if (!peer)
4289 err = ufshcd_dme_set(hba,
4290 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4291 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4292 0);
4293 else
4294 err = ufshcd_dme_peer_set(hba,
4295 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4296 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4297 0);
4298 if (err) {
4299 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4300 __func__, peer, i, err);
4301 break;
4302 }
4303 }
4304
4305 return err;
4306}
4307
4308static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4309{
4310 return ufshcd_disable_tx_lcc(hba, true);
4311}
4312
a5fe372d
SC
4313void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4314 u32 reg)
8808b4e9
SC
4315{
4316 reg_hist->reg[reg_hist->pos] = reg;
4317 reg_hist->tstamp[reg_hist->pos] = ktime_get();
4318 reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4319}
a5fe372d 4320EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
8808b4e9 4321
7a3e97b0 4322/**
6ccf44fe 4323 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
4324 * @hba: per adapter instance
4325 *
6ccf44fe 4326 * Returns 0 for success, non-zero in case of failure
7a3e97b0 4327 */
6ccf44fe 4328static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 4329{
6ccf44fe 4330 int ret;
1d337ec2 4331 int retries = DME_LINKSTARTUP_RETRIES;
7caf489b 4332 bool link_startup_again = false;
7a3e97b0 4333
7caf489b 4334 /*
4335	 * If the UFS device isn't active then we will have to issue link startup
4336	 * twice to make sure the device state moves to active.
4337 */
4338 if (!ufshcd_is_ufs_dev_active(hba))
4339 link_startup_again = true;
7a3e97b0 4340
7caf489b 4341link_startup:
1d337ec2 4342 do {
0263bcd0 4343 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
6ccf44fe 4344
1d337ec2 4345 ret = ufshcd_dme_link_startup(hba);
5c0c28a8 4346
1d337ec2
SRT
4347 /* check if device is detected by inter-connect layer */
4348 if (!ret && !ufshcd_is_device_present(hba)) {
8808b4e9
SC
4349 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4350 0);
1d337ec2
SRT
4351 dev_err(hba->dev, "%s: Device not present\n", __func__);
4352 ret = -ENXIO;
4353 goto out;
4354 }
6ccf44fe 4355
1d337ec2
SRT
4356 /*
4357 * DME link lost indication is only received when link is up,
4358 * but we can't be sure if the link is up until link startup
4359 * succeeds. So reset the local Uni-Pro and try again.
4360 */
8808b4e9
SC
4361 if (ret && ufshcd_hba_enable(hba)) {
4362 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4363 (u32)ret);
1d337ec2 4364 goto out;
8808b4e9 4365 }
1d337ec2
SRT
4366 } while (ret && retries--);
4367
8808b4e9 4368 if (ret) {
1d337ec2 4369 /* failed to get the link up... retire */
8808b4e9
SC
4370 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4371 (u32)ret);
5c0c28a8 4372 goto out;
8808b4e9 4373 }
5c0c28a8 4374
7caf489b 4375 if (link_startup_again) {
4376 link_startup_again = false;
4377 retries = DME_LINKSTARTUP_RETRIES;
4378 goto link_startup;
4379 }
4380
d2aebb9b 4381 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4382 ufshcd_init_pwr_info(hba);
4383 ufshcd_print_pwr_info(hba);
4384
7ca38cf3
YG
4385 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4386 ret = ufshcd_disable_device_tx_lcc(hba);
4387 if (ret)
4388 goto out;
4389 }
4390
5c0c28a8 4391 /* Include any host controller configuration via UIC commands */
0263bcd0
YG
4392 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4393 if (ret)
4394 goto out;
7a3e97b0 4395
5c0c28a8 4396 ret = ufshcd_make_hba_operational(hba);
6ccf44fe 4397out:
7942f7b5 4398 if (ret) {
6ccf44fe 4399 dev_err(hba->dev, "link startup failed %d\n", ret);
7942f7b5
VG
4400 ufshcd_print_host_state(hba);
4401 ufshcd_print_pwr_info(hba);
4402 ufshcd_print_host_regs(hba);
4403 }
6ccf44fe 4404 return ret;
7a3e97b0
SY
4405}
4406
5a0b0cb9
SRT
4407/**
4408 * ufshcd_verify_dev_init() - Verify device initialization
4409 * @hba: per-adapter instance
4410 *
4411 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4412 * device Transport Protocol (UTP) layer is ready after a reset.
4413 * If the UTP layer at the device side is not initialized, it may
4414 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4415 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4416 */
4417static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4418{
4419 int err = 0;
4420 int retries;
4421
1ab27c9c 4422 ufshcd_hold(hba, false);
5a0b0cb9
SRT
4423 mutex_lock(&hba->dev_cmd.lock);
4424 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4425 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4426 NOP_OUT_TIMEOUT);
4427
4428 if (!err || err == -ETIMEDOUT)
4429 break;
4430
4431 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4432 }
4433 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 4434 ufshcd_release(hba);
5a0b0cb9
SRT
4435
4436 if (err)
4437 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4438 return err;
4439}
4440
0ce147d4
SJ
4441/**
4442 * ufshcd_set_queue_depth - set lun queue depth
4443 * @sdev: pointer to SCSI device
4444 *
4445 * Read bLUQueueDepth value and activate scsi tagged command
4446 * queueing. For WLUN, queue depth is set to 1. For best-effort
4447	 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4448	 * value that the host can queue.
4449 */
4450static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4451{
4452 int ret = 0;
4453 u8 lun_qdepth;
4454 struct ufs_hba *hba;
4455
4456 hba = shost_priv(sdev->host);
4457
4458 lun_qdepth = hba->nutrs;
dbd34a61
SM
4459 ret = ufshcd_read_unit_desc_param(hba,
4460 ufshcd_scsi_to_upiu_lun(sdev->lun),
4461 UNIT_DESC_PARAM_LU_Q_DEPTH,
4462 &lun_qdepth,
4463 sizeof(lun_qdepth));
0ce147d4
SJ
4464
4465 /* Some WLUN doesn't support unit descriptor */
4466 if (ret == -EOPNOTSUPP)
4467 lun_qdepth = 1;
4468 else if (!lun_qdepth)
4469 /* eventually, we can figure out the real queue depth */
4470 lun_qdepth = hba->nutrs;
4471 else
4472 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4473
4474 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4475 __func__, lun_qdepth);
db5ed4df 4476 scsi_change_queue_depth(sdev, lun_qdepth);
0ce147d4
SJ
4477}
4478
57d104c1
SJ
4479/*
4480 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4481 * @hba: per-adapter instance
4482 * @lun: UFS device lun id
4483 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4484 *
4485	 * Returns 0 in case of success and the b_lu_write_protect status is returned
4486	 * in the @b_lu_write_protect parameter.
4487 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4488 * Returns -EINVAL in case of invalid parameters passed to this function.
4489 */
4490static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4491 u8 lun,
4492 u8 *b_lu_write_protect)
4493{
4494 int ret;
4495
4496 if (!b_lu_write_protect)
4497 ret = -EINVAL;
4498 /*
4499 * According to UFS device spec, RPMB LU can't be write
4500 * protected so skip reading bLUWriteProtect parameter for
4501 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4502 */
1baa8011 4503 else if (lun >= hba->dev_info.max_lu_supported)
57d104c1
SJ
4504 ret = -ENOTSUPP;
4505 else
4506 ret = ufshcd_read_unit_desc_param(hba,
4507 lun,
4508 UNIT_DESC_PARAM_LU_WR_PROTECT,
4509 b_lu_write_protect,
4510 sizeof(*b_lu_write_protect));
4511 return ret;
4512}
4513
4514/**
4515 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4516 * status
4517 * @hba: per-adapter instance
4518 * @sdev: pointer to SCSI device
4519 *
4520 */
4521static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4522 struct scsi_device *sdev)
4523{
4524 if (hba->dev_info.f_power_on_wp_en &&
4525 !hba->dev_info.is_lu_power_on_wp) {
4526 u8 b_lu_write_protect;
4527
4528 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4529 &b_lu_write_protect) &&
4530 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4531 hba->dev_info.is_lu_power_on_wp = true;
4532 }
4533}
4534
7a3e97b0
SY
4535/**
4536 * ufshcd_slave_alloc - handle initial SCSI device configurations
4537 * @sdev: pointer to SCSI device
4538 *
4539 * Returns success
4540 */
4541static int ufshcd_slave_alloc(struct scsi_device *sdev)
4542{
4543 struct ufs_hba *hba;
4544
4545 hba = shost_priv(sdev->host);
7a3e97b0
SY
4546
4547 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4548 sdev->use_10_for_ms = 1;
a3a76391
CG
4549
4550 /* DBD field should be set to 1 in mode sense(10) */
4551 sdev->set_dbd_for_ms = 1;
7a3e97b0 4552
e8e7f271
SRT
4553 /* allow SCSI layer to restart the device in case of errors */
4554 sdev->allow_restart = 1;
4264fd61 4555
b2a6c522
SRT
4556 /* REPORT SUPPORTED OPERATION CODES is not supported */
4557 sdev->no_report_opcodes = 1;
4558
84af7e8b
SRT
4559 /* WRITE_SAME command is not supported */
4560 sdev->no_write_same = 1;
e8e7f271 4561
0ce147d4 4562 ufshcd_set_queue_depth(sdev);
4264fd61 4563
57d104c1
SJ
4564 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4565
7a3e97b0
SY
4566 return 0;
4567}
4568
4264fd61
SRT
4569/**
4570 * ufshcd_change_queue_depth - change queue depth
4571 * @sdev: pointer to SCSI device
4572 * @depth: required depth to set
4264fd61 4573 *
db5ed4df 4574 * Change queue depth and make sure the max. limits are not crossed.
4264fd61 4575 */
db5ed4df 4576static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4264fd61
SRT
4577{
4578 struct ufs_hba *hba = shost_priv(sdev->host);
4579
4580 if (depth > hba->nutrs)
4581 depth = hba->nutrs;
db5ed4df 4582 return scsi_change_queue_depth(sdev, depth);
4264fd61
SRT
4583}
4584
eeda4749
AM
4585/**
4586 * ufshcd_slave_configure - adjust SCSI device configurations
4587 * @sdev: pointer to SCSI device
4588 */
4589static int ufshcd_slave_configure(struct scsi_device *sdev)
4590{
49615ba1 4591 struct ufs_hba *hba = shost_priv(sdev->host);
eeda4749
AM
4592 struct request_queue *q = sdev->request_queue;
4593
4594 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
49615ba1
SC
4595
4596 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4597 sdev->rpm_autosuspend = 1;
4598
eeda4749
AM
4599 return 0;
4600}
4601
7a3e97b0
SY
4602/**
4603 * ufshcd_slave_destroy - remove SCSI device configurations
4604 * @sdev: pointer to SCSI device
4605 */
4606static void ufshcd_slave_destroy(struct scsi_device *sdev)
4607{
4608 struct ufs_hba *hba;
4609
4610 hba = shost_priv(sdev->host);
0ce147d4 4611 /* Drop the reference as it won't be needed anymore */
7c48bfd0
AM
4612 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4613 unsigned long flags;
4614
4615 spin_lock_irqsave(hba->host->host_lock, flags);
0ce147d4 4616 hba->sdev_ufs_device = NULL;
7c48bfd0
AM
4617 spin_unlock_irqrestore(hba->host->host_lock, flags);
4618 }
7a3e97b0
SY
4619}
4620
7a3e97b0
SY
4621/**
4622 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
8aa29f19 4623 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
4624 * @scsi_status: SCSI command status
4625 *
4626 * Returns value base on SCSI command status
4627 */
4628static inline int
4629ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4630{
4631 int result = 0;
4632
4633 switch (scsi_status) {
7a3e97b0 4634 case SAM_STAT_CHECK_CONDITION:
1c2623c5 4635 ufshcd_copy_sense_data(lrbp);
30eb2e4c 4636 /* fallthrough */
1c2623c5 4637 case SAM_STAT_GOOD:
7a3e97b0
SY
4638 result |= DID_OK << 16 |
4639 COMMAND_COMPLETE << 8 |
1c2623c5 4640 scsi_status;
7a3e97b0
SY
4641 break;
4642 case SAM_STAT_TASK_SET_FULL:
1c2623c5 4643 case SAM_STAT_BUSY:
7a3e97b0 4644 case SAM_STAT_TASK_ABORTED:
1c2623c5
SJ
4645 ufshcd_copy_sense_data(lrbp);
4646 result |= scsi_status;
7a3e97b0
SY
4647 break;
4648 default:
4649 result |= DID_ERROR << 16;
4650 break;
4651 } /* end of switch */
4652
4653 return result;
4654}
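/*
 * Editor's note (illustrative, not part of the original source): the value
 * returned above follows the standard SCSI midlayer result layout - host byte
 * in bits 23:16, message byte in bits 15:8 and SCSI status in the low byte -
 * so it can be decoded with the usual helpers:
 *
 *	bool ok = host_byte(result) == DID_OK &&
 *		  (result & MASK_SCSI_STATUS) == SAM_STAT_GOOD;
 */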
4655
4656/**
4657 * ufshcd_transfer_rsp_status - Get overall status of the response
4658 * @hba: per adapter instance
8aa29f19 4659 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
4660 *
4661 * Returns result of the command to notify SCSI midlayer
4662 */
4663static inline int
4664ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4665{
4666 int result = 0;
4667 int scsi_status;
4668 int ocs;
4669
4670 /* overall command status of utrd */
4671 ocs = ufshcd_get_tr_ocs(lrbp);
4672
4673 switch (ocs) {
4674 case OCS_SUCCESS:
5a0b0cb9 4675 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
ff8e20c6 4676 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
4677 switch (result) {
4678 case UPIU_TRANSACTION_RESPONSE:
4679 /*
4680 * get the response UPIU result to extract
4681 * the SCSI command status
4682 */
4683 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4684
4685 /*
4686 * get the result based on SCSI status response
4687 * to notify the SCSI midlayer of the command status
4688 */
4689 scsi_status = result & MASK_SCSI_STATUS;
4690 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
66ec6d59 4691
f05ac2e5
YG
4692 /*
4693 * Currently we are only supporting BKOPs exception
4694 * events hence we can ignore BKOPs exception event
4695 * during power management callbacks. BKOPs exception
4696 * event is not expected to be raised in runtime suspend
4697 * callback as it allows the urgent bkops.
4698 * During system suspend, we are anyway forcefully
4699 * disabling the bkops and if urgent bkops is needed
4700 * it will be enabled on system resume. Long term
4701 * solution could be to abort the system suspend if
4702 * UFS device needs urgent BKOPs.
4703 */
4704 if (!hba->pm_op_in_progress &&
2824ec9f
SL
4705 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4706 schedule_work(&hba->eeh_work)) {
4707 /*
4708 * Prevent suspend once eeh_work is scheduled
4709 * to avoid deadlock between ufshcd_suspend
4710 * and exception event handler.
4711 */
4712 pm_runtime_get_noresume(hba->dev);
4713 }
5a0b0cb9
SRT
4714 break;
4715 case UPIU_TRANSACTION_REJECT_UPIU:
4716 /* TODO: handle Reject UPIU Response */
4717 result = DID_ERROR << 16;
3b1d0580 4718 dev_err(hba->dev,
5a0b0cb9
SRT
4719 "Reject UPIU not fully implemented\n");
4720 break;
4721 default:
5a0b0cb9
SRT
4722 dev_err(hba->dev,
4723 "Unexpected request response code = %x\n",
4724 result);
e0347d89 4725 result = DID_ERROR << 16;
7a3e97b0
SY
4726 break;
4727 }
7a3e97b0
SY
4728 break;
4729 case OCS_ABORTED:
4730 result |= DID_ABORT << 16;
4731 break;
e8e7f271
SRT
4732 case OCS_INVALID_COMMAND_STATUS:
4733 result |= DID_REQUEUE << 16;
4734 break;
7a3e97b0
SY
4735 case OCS_INVALID_CMD_TABLE_ATTR:
4736 case OCS_INVALID_PRDT_ATTR:
4737 case OCS_MISMATCH_DATA_BUF_SIZE:
4738 case OCS_MISMATCH_RESP_UPIU_SIZE:
4739 case OCS_PEER_COMM_FAILURE:
4740 case OCS_FATAL_ERROR:
4741 default:
4742 result |= DID_ERROR << 16;
3b1d0580 4743 dev_err(hba->dev,
ff8e20c6
DR
4744 "OCS error from controller = %x for tag %d\n",
4745 ocs, lrbp->task_tag);
4746 ufshcd_print_host_regs(hba);
6ba65588 4747 ufshcd_print_host_state(hba);
7a3e97b0
SY
4748 break;
4749 } /* end of switch */
4750
2df74b69 4751 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
66cc820f 4752 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
7a3e97b0
SY
4753 return result;
4754}
4755
6ccf44fe
SJ
4756/**
4757 * ufshcd_uic_cmd_compl - handle completion of uic command
4758 * @hba: per adapter instance
53b3d9c3 4759 * @intr_status: interrupt status generated by the controller
9333d775
VG
4760 *
4761 * Returns
4762 * IRQ_HANDLED - If interrupt is valid
4763 * IRQ_NONE - If invalid interrupt
6ccf44fe 4764 */
9333d775 4765static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 4766{
9333d775
VG
4767 irqreturn_t retval = IRQ_NONE;
4768
53b3d9c3 4769 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
4770 hba->active_uic_cmd->argument2 |=
4771 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
4772 hba->active_uic_cmd->argument3 =
4773 ufshcd_get_dme_attr_val(hba);
6ccf44fe 4774 complete(&hba->active_uic_cmd->done);
9333d775 4775 retval = IRQ_HANDLED;
6ccf44fe 4776 }
53b3d9c3 4777
9333d775 4778 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
57d104c1 4779 complete(hba->uic_async_done);
9333d775
VG
4780 retval = IRQ_HANDLED;
4781 }
4782 return retval;
6ccf44fe
SJ
4783}
4784
7a3e97b0 4785/**
9a47ec7c 4786 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
7a3e97b0 4787 * @hba: per adapter instance
9a47ec7c 4788 * @completed_reqs: requests to complete
7a3e97b0 4789 */
9a47ec7c
YG
4790static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4791 unsigned long completed_reqs)
7a3e97b0 4792{
5a0b0cb9
SRT
4793 struct ufshcd_lrb *lrbp;
4794 struct scsi_cmnd *cmd;
7a3e97b0
SY
4795 int result;
4796 int index;
e9d501b1 4797
e9d501b1
DR
4798 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4799 lrbp = &hba->lrb[index];
4800 cmd = lrbp->cmd;
4801 if (cmd) {
1a07f2d9 4802 ufshcd_add_command_trace(hba, index, "complete");
e9d501b1
DR
4803 result = ufshcd_transfer_rsp_status(hba, lrbp);
4804 scsi_dma_unmap(cmd);
4805 cmd->result = result;
4806 /* Mark completed command as NULL in LRB */
4807 lrbp->cmd = NULL;
74a527a2 4808 lrbp->compl_time_stamp = ktime_get();
e9d501b1
DR
4809 /* Do not touch lrbp after scsi done */
4810 cmd->scsi_done(cmd);
1ab27c9c 4811 __ufshcd_release(hba);
300bb13f
JP
4812 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4813 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
74a527a2 4814 lrbp->compl_time_stamp = ktime_get();
1a07f2d9
LS
4815 if (hba->dev_cmd.complete) {
4816 ufshcd_add_command_trace(hba, index,
4817 "dev_complete");
e9d501b1 4818 complete(hba->dev_cmd.complete);
1a07f2d9 4819 }
e9d501b1 4820 }
401f1e44 4821 if (ufshcd_is_clkscaling_supported(hba))
4822 hba->clk_scaling.active_reqs--;
e9d501b1 4823 }
7a3e97b0
SY
4824
4825 /* clear corresponding bits of completed commands */
4826 hba->outstanding_reqs ^= completed_reqs;
4827
856b3483 4828 ufshcd_clk_scaling_update_busy(hba);
7a3e97b0
SY
4829}
4830
9a47ec7c
YG
4831/**
4832 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4833 * @hba: per adapter instance
9333d775
VG
4834 *
4835 * Returns
4836 * IRQ_HANDLED - If interrupt is valid
4837 * IRQ_NONE - If invalid interrupt
9a47ec7c 4838 */
9333d775 4839static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
9a47ec7c
YG
4840{
4841 unsigned long completed_reqs;
4842 u32 tr_doorbell;
4843
4844 /* Resetting interrupt aggregation counters first and reading the
4845 * DOOR_BELL afterward allows us to handle all the completed requests.
4846	 * In order to prevent starvation of other interrupts, the DB is read once
4847	 * after reset. The downside of this solution is the possibility of a
4848	 * false interrupt if the device completes another request after resetting
4849 * aggregation and before reading the DB.
4850 */
49200199 4851 if (ufshcd_is_intr_aggr_allowed(hba))
9a47ec7c
YG
4852 ufshcd_reset_intr_aggr(hba);
4853
4854 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4855 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4856
9333d775
VG
4857 if (completed_reqs) {
4858 __ufshcd_transfer_req_compl(hba, completed_reqs);
4859 return IRQ_HANDLED;
4860 } else {
4861 return IRQ_NONE;
4862 }
9a47ec7c
YG
4863}
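/*
 * Editor's note (worked example, not part of the original source): a doorbell
 * bit is cleared by the controller when the corresponding request completes,
 * so bits that are still set in outstanding_reqs but already clear in the
 * doorbell belong to completed requests - exactly what the XOR above extracts:
 *
 *	outstanding_reqs = 0x7;			tags 0, 1 and 2 in flight
 *	tr_doorbell      = 0x5;			controller has completed tag 1
 *	completed_reqs   = 0x7 ^ 0x5 = 0x2;	only tag 1 is completed
 */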
4864
66ec6d59
SRT
4865/**
4866 * ufshcd_disable_ee - disable exception event
4867 * @hba: per-adapter instance
4868 * @mask: exception event to disable
4869 *
4870 * Disables exception event in the device so that the EVENT_ALERT
4871 * bit is not set.
4872 *
4873 * Returns zero on success, non-zero error value on failure.
4874 */
4875static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4876{
4877 int err = 0;
4878 u32 val;
4879
4880 if (!(hba->ee_ctrl_mask & mask))
4881 goto out;
4882
4883 val = hba->ee_ctrl_mask & ~mask;
d7e2ddd5 4884 val &= MASK_EE_STATUS;
5e86ae44 4885 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
66ec6d59
SRT
4886 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4887 if (!err)
4888 hba->ee_ctrl_mask &= ~mask;
4889out:
4890 return err;
4891}
4892
4893/**
4894 * ufshcd_enable_ee - enable exception event
4895 * @hba: per-adapter instance
4896 * @mask: exception event to enable
4897 *
4898 * Enable corresponding exception event in the device to allow
4899 * device to alert host in critical scenarios.
4900 *
4901 * Returns zero on success, non-zero error value on failure.
4902 */
4903static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4904{
4905 int err = 0;
4906 u32 val;
4907
4908 if (hba->ee_ctrl_mask & mask)
4909 goto out;
4910
4911 val = hba->ee_ctrl_mask | mask;
d7e2ddd5 4912 val &= MASK_EE_STATUS;
5e86ae44 4913 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
66ec6d59
SRT
4914 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4915 if (!err)
4916 hba->ee_ctrl_mask |= mask;
4917out:
4918 return err;
4919}
4920
4921/**
4922 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4923 * @hba: per-adapter instance
4924 *
4925 * Allow device to manage background operations on its own. Enabling
4926 * this might lead to inconsistent latencies during normal data transfers
4927 * as the device is allowed to manage its own way of handling background
4928 * operations.
4929 *
4930 * Returns zero on success, non-zero on failure.
4931 */
4932static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4933{
4934 int err = 0;
4935
4936 if (hba->auto_bkops_enabled)
4937 goto out;
4938
dc3c8d3a 4939 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
66ec6d59
SRT
4940 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4941 if (err) {
4942 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4943 __func__, err);
4944 goto out;
4945 }
4946
4947 hba->auto_bkops_enabled = true;
7ff5ab47 4948 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
66ec6d59
SRT
4949
4950 /* No need of URGENT_BKOPS exception from the device */
4951 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4952 if (err)
4953 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4954 __func__, err);
4955out:
4956 return err;
4957}
4958
4959/**
4960 * ufshcd_disable_auto_bkops - block device in doing background operations
4961 * @hba: per-adapter instance
4962 *
4963 * Disabling background operations improves command response latency but
4964	 * has the drawback of the device moving into a critical state where it is
4965	 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4966 * host is idle so that BKOPS are managed effectively without any negative
4967 * impacts.
4968 *
4969 * Returns zero on success, non-zero on failure.
4970 */
4971static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4972{
4973 int err = 0;
4974
4975 if (!hba->auto_bkops_enabled)
4976 goto out;
4977
4978 /*
4979 * If host assisted BKOPs is to be enabled, make sure
4980 * urgent bkops exception is allowed.
4981 */
4982 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4983 if (err) {
4984 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4985 __func__, err);
4986 goto out;
4987 }
4988
dc3c8d3a 4989 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
66ec6d59
SRT
4990 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4991 if (err) {
4992 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4993 __func__, err);
4994 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4995 goto out;
4996 }
4997
4998 hba->auto_bkops_enabled = false;
7ff5ab47 4999 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
24366c2a 5000 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5001out:
5002 return err;
5003}
5004
5005/**
4e768e76 5006 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
66ec6d59
SRT
5007 * @hba: per adapter instance
5008 *
5009 * After a device reset the device may toggle the BKOPS_EN flag
5010	 * to its default value. The s/w tracking variables should be updated
5011	 * as well. This function changes the auto-bkops state based on
5012 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
66ec6d59 5013 */
4e768e76 5014static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
66ec6d59 5015{
4e768e76 5016 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5017 hba->auto_bkops_enabled = false;
5018 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5019 ufshcd_enable_auto_bkops(hba);
5020 } else {
5021 hba->auto_bkops_enabled = true;
5022 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5023 ufshcd_disable_auto_bkops(hba);
5024 }
24366c2a 5025 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5026}
5027
5028static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5029{
5e86ae44 5030 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5031 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5032}
5033
5034/**
57d104c1 5035 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
66ec6d59 5036 * @hba: per-adapter instance
57d104c1 5037 * @status: bkops_status value
66ec6d59 5038 *
57d104c1
SJ
5039 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5040 * flag in the device to permit background operations if the device
5041 * bkops_status is greater than or equal to "status" argument passed to
5042 * this function, disable otherwise.
5043 *
5044 * Returns 0 for success, non-zero in case of failure.
5045 *
5046 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5047 * to know whether auto bkops is enabled or disabled after this function
5048 * returns control to it.
66ec6d59 5049 */
57d104c1
SJ
5050static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5051 enum bkops_status status)
66ec6d59
SRT
5052{
5053 int err;
57d104c1 5054 u32 curr_status = 0;
66ec6d59 5055
57d104c1 5056 err = ufshcd_get_bkops_status(hba, &curr_status);
66ec6d59
SRT
5057 if (err) {
5058 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5059 __func__, err);
5060 goto out;
57d104c1
SJ
5061 } else if (curr_status > BKOPS_STATUS_MAX) {
5062 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5063 __func__, curr_status);
5064 err = -EINVAL;
5065 goto out;
66ec6d59
SRT
5066 }
5067
57d104c1 5068 if (curr_status >= status)
66ec6d59 5069 err = ufshcd_enable_auto_bkops(hba);
57d104c1
SJ
5070 else
5071 err = ufshcd_disable_auto_bkops(hba);
24366c2a 5072 hba->urgent_bkops_lvl = curr_status;
66ec6d59
SRT
5073out:
5074 return err;
5075}
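/*
 * Editor's note (illustrative, not part of the original source): @status acts
 * as a threshold against the bkops_status attribute values, e.g.:
 *
 *	ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *
 * enables auto-bkops only if the device reports at least a performance
 * impact and disables it otherwise.
 */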
5076
57d104c1
SJ
5077/**
5078 * ufshcd_urgent_bkops - handle urgent bkops exception event
5079 * @hba: per-adapter instance
5080 *
5081 * Enable fBackgroundOpsEn flag in the device to permit background
5082 * operations.
5083 *
5084	 * If BKOPS is enabled, this function returns 0, 1 if bkops is not enabled,
5085	 * and a negative error value for any other failure.
5086 */
5087static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5088{
afdfff59 5089 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
57d104c1
SJ
5090}
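/*
 * Illustrative sketch, not part of the driver: a caller could use
 * ufshcd_bkops_ctrl() directly to let the device run background operations
 * only once it reports at least a performance impact. The wrapper name below
 * is hypothetical; BKOPS_STATUS_PERF_IMPACT is the same enum value used by
 * ufshcd_bkops_exception_event_handler() further down in this file.
 */
static int example_allow_bkops_on_perf_impact(struct ufs_hba *hba)
{
	/* Enables fBackgroundOpsEn only if the device's bkops status
	 * is at or above BKOPS_STATUS_PERF_IMPACT, disables it otherwise.
	 */
	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
}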
5091
66ec6d59
SRT
5092static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5093{
5e86ae44 5094 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5095 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5096}
5097
afdfff59
YG
5098static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5099{
5100 int err;
5101 u32 curr_status = 0;
5102
5103 if (hba->is_urgent_bkops_lvl_checked)
5104 goto enable_auto_bkops;
5105
5106 err = ufshcd_get_bkops_status(hba, &curr_status);
5107 if (err) {
5108 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5109 __func__, err);
5110 goto out;
5111 }
5112
5113 /*
5114 * We are seeing that some devices raise the urgent bkops exception
5115 * event even when the BKOPS status doesn't indicate performance
5116 * impacted or critical. Handle such devices by determining their
5117 * urgent bkops status at runtime.
5118 */
5119 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5120 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5121 __func__, curr_status);
5122 /* update the current status as the urgent bkops level */
5123 hba->urgent_bkops_lvl = curr_status;
5124 hba->is_urgent_bkops_lvl_checked = true;
5125 }
5126
5127enable_auto_bkops:
5128 err = ufshcd_enable_auto_bkops(hba);
5129out:
5130 if (err < 0)
5131 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5132 __func__, err);
5133}
5134
66ec6d59
SRT
5135/**
5136 * ufshcd_exception_event_handler - handle exceptions raised by device
5137 * @work: pointer to work data
5138 *
5139 * Read bExceptionEventStatus attribute from the device and handle the
5140 * exception event accordingly.
5141 */
5142static void ufshcd_exception_event_handler(struct work_struct *work)
5143{
5144 struct ufs_hba *hba;
5145 int err;
5146 u32 status = 0;
5147 hba = container_of(work, struct ufs_hba, eeh_work);
5148
62694735 5149 pm_runtime_get_sync(hba->dev);
03e1d28e 5150 ufshcd_scsi_block_requests(hba);
66ec6d59
SRT
5151 err = ufshcd_get_ee_status(hba, &status);
5152 if (err) {
5153 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5154 __func__, err);
5155 goto out;
5156 }
5157
5158 status &= hba->ee_ctrl_mask;
afdfff59
YG
5159
5160 if (status & MASK_EE_URGENT_BKOPS)
5161 ufshcd_bkops_exception_event_handler(hba);
5162
66ec6d59 5163out:
03e1d28e 5164 ufshcd_scsi_unblock_requests(hba);
2824ec9f
SL
5165 /*
5166 * pm_runtime_get_noresume is called while scheduling
5167 * eeh_work to avoid suspend racing with exception work.
5168 * Hence decrement usage counter using pm_runtime_put_noidle
5169 * to allow suspend on completion of exception event handler.
5170 */
5171 pm_runtime_put_noidle(hba->dev);
5172 pm_runtime_put(hba->dev);
66ec6d59
SRT
5173 return;
5174}
5175
9a47ec7c
YG
5176/* Complete requests that have door-bell cleared */
5177static void ufshcd_complete_requests(struct ufs_hba *hba)
5178{
5179 ufshcd_transfer_req_compl(hba);
5180 ufshcd_tmc_handler(hba);
5181}
5182
583fa62d
YG
5183/**
5184 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
5185 * is required to recover from the DL NAC errors.
5186 * @hba: per-adapter instance
5187 *
5188 * Returns true if error handling is required, false otherwise
5189 */
5190static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5191{
5192 unsigned long flags;
5193 bool err_handling = true;
5194
5195 spin_lock_irqsave(hba->host->host_lock, flags);
5196 /*
5197 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5198 * device fatal error and/or DL NAC & REPLAY timeout errors.
5199 */
5200 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5201 goto out;
5202
5203 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5204 ((hba->saved_err & UIC_ERROR) &&
5205 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5206 goto out;
5207
5208 if ((hba->saved_err & UIC_ERROR) &&
5209 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5210 int err;
5211 /*
5212 * wait for 50ms to see whether any other errors are reported.
5213 */
5214 spin_unlock_irqrestore(hba->host->host_lock, flags);
5215 msleep(50);
5216 spin_lock_irqsave(hba->host->host_lock, flags);
5217
5218 /*
5219 * Now check whether we have received any other severe errors
5220 * besides the DL NAC error.
5221 */
5222 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5223 ((hba->saved_err & UIC_ERROR) &&
5224 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5225 goto out;
5226
5227 /*
5228 * As DL NAC is the only error received so far, send out NOP
5229 * command to confirm if link is still active or not.
5230 * - If we don't get any response then do error recovery.
5231 * - If we get response then clear the DL NAC error bit.
5232 */
5233
5234 spin_unlock_irqrestore(hba->host->host_lock, flags);
5235 err = ufshcd_verify_dev_init(hba);
5236 spin_lock_irqsave(hba->host->host_lock, flags);
5237
5238 if (err)
5239 goto out;
5240
5241 /* Link seems to be alive hence ignore the DL NAC errors */
5242 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5243 hba->saved_err &= ~UIC_ERROR;
5244 /* clear NAC error */
5245 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5246 if (!hba->saved_uic_err) {
5247 err_handling = false;
5248 goto out;
5249 }
5250 }
5251out:
5252 spin_unlock_irqrestore(hba->host->host_lock, flags);
5253 return err_handling;
5254}
5255
7a3e97b0 5256/**
e8e7f271
SRT
5257 * ufshcd_err_handler - handle UFS errors that require s/w attention
5258 * @work: pointer to work structure
7a3e97b0 5259 */
e8e7f271 5260static void ufshcd_err_handler(struct work_struct *work)
7a3e97b0
SY
5261{
5262 struct ufs_hba *hba;
e8e7f271
SRT
5263 unsigned long flags;
5264 u32 err_xfer = 0;
5265 u32 err_tm = 0;
5266 int err = 0;
5267 int tag;
9a47ec7c 5268 bool needs_reset = false;
e8e7f271
SRT
5269
5270 hba = container_of(work, struct ufs_hba, eh_work);
7a3e97b0 5271
62694735 5272 pm_runtime_get_sync(hba->dev);
1ab27c9c 5273 ufshcd_hold(hba, false);
e8e7f271
SRT
5274
5275 spin_lock_irqsave(hba->host->host_lock, flags);
9a47ec7c 5276 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
e8e7f271 5277 goto out;
e8e7f271
SRT
5278
5279 hba->ufshcd_state = UFSHCD_STATE_RESET;
5280 ufshcd_set_eh_in_progress(hba);
5281
5282 /* Complete requests that have door-bell cleared by h/w */
9a47ec7c 5283 ufshcd_complete_requests(hba);
583fa62d
YG
5284
5285 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5286 bool ret;
5287
5288 spin_unlock_irqrestore(hba->host->host_lock, flags);
5289 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5290 ret = ufshcd_quirk_dl_nac_errors(hba);
5291 spin_lock_irqsave(hba->host->host_lock, flags);
5292 if (!ret)
5293 goto skip_err_handling;
5294 }
9a47ec7c 5295 if ((hba->saved_err & INT_FATAL_ERRORS) ||
82174440 5296 (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
9a47ec7c
YG
5297 ((hba->saved_err & UIC_ERROR) &&
5298 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5299 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5300 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5301 needs_reset = true;
e8e7f271 5302
9a47ec7c
YG
5303 /*
5304 * if host reset is required then skip clearing the pending
2df74b69
CG
5305 * transfers forcefully because they will get cleared during
5306 * host reset and restore
9a47ec7c
YG
5307 */
5308 if (needs_reset)
5309 goto skip_pending_xfer_clear;
5310
5311 /* release lock as clear command might sleep */
5312 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 5313 /* Clear pending transfer requests */
9a47ec7c
YG
5314 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5315 if (ufshcd_clear_cmd(hba, tag)) {
5316 err_xfer = true;
5317 goto lock_skip_pending_xfer_clear;
5318 }
5319 }
e8e7f271
SRT
5320
5321 /* Clear pending task management requests */
9a47ec7c
YG
5322 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5323 if (ufshcd_clear_tm_cmd(hba, tag)) {
5324 err_tm = true;
5325 goto lock_skip_pending_xfer_clear;
5326 }
5327 }
e8e7f271 5328
9a47ec7c 5329lock_skip_pending_xfer_clear:
e8e7f271 5330 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 5331
9a47ec7c
YG
5332 /* Complete the requests that are cleared by s/w */
5333 ufshcd_complete_requests(hba);
5334
5335 if (err_xfer || err_tm)
5336 needs_reset = true;
5337
5338skip_pending_xfer_clear:
e8e7f271 5339 /* Fatal errors need reset */
9a47ec7c
YG
5340 if (needs_reset) {
5341 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5342
5343 /*
5344 * ufshcd_reset_and_restore() does the link reinitialization
5345 * which will need at least one empty doorbell slot to send the
5346 * device management commands (NOP and query commands).
5347 * If there is no empty slot at this moment then free up the last
5348 * slot forcefully.
5349 */
5350 if (hba->outstanding_reqs == max_doorbells)
5351 __ufshcd_transfer_req_compl(hba,
5352 (1UL << (hba->nutrs - 1)));
5353
5354 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 5355 err = ufshcd_reset_and_restore(hba);
9a47ec7c 5356 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271
SRT
5357 if (err) {
5358 dev_err(hba->dev, "%s: reset and restore failed\n",
5359 __func__);
5360 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5361 }
5362 /*
5363 * Inform scsi mid-layer that we did reset and allow to handle
5364 * Unit Attention properly.
5365 */
5366 scsi_report_bus_reset(hba->host, 0);
5367 hba->saved_err = 0;
5368 hba->saved_uic_err = 0;
5369 }
9a47ec7c 5370
583fa62d 5371skip_err_handling:
9a47ec7c
YG
5372 if (!needs_reset) {
5373 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5374 if (hba->saved_err || hba->saved_uic_err)
5375 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5376 __func__, hba->saved_err, hba->saved_uic_err);
5377 }
5378
e8e7f271
SRT
5379 ufshcd_clear_eh_in_progress(hba);
5380
5381out:
9a47ec7c 5382 spin_unlock_irqrestore(hba->host->host_lock, flags);
38135535 5383 ufshcd_scsi_unblock_requests(hba);
1ab27c9c 5384 ufshcd_release(hba);
62694735 5385 pm_runtime_put_sync(hba->dev);
7a3e97b0
SY
5386}
5387
5388/**
e8e7f271
SRT
5389 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5390 * @hba: per-adapter instance
9333d775
VG
5391 *
5392 * Returns
5393 * IRQ_HANDLED - If interrupt is valid
5394 * IRQ_NONE - If invalid interrupt
7a3e97b0 5395 */
9333d775 5396static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
7a3e97b0
SY
5397{
5398 u32 reg;
9333d775 5399 irqreturn_t retval = IRQ_NONE;
7a3e97b0 5400
fb7b45f0
DR
5401 /* PHY layer lane error */
5402 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5403 /* Ignore LINERESET indication, as this is not an error */
5404 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
9333d775 5405 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
fb7b45f0
DR
5406 /*
5407 * To know whether this error is fatal or not, DB timeout
5408 * must be checked but this error is handled separately.
5409 */
5410 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
48d5b973 5411 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
9333d775 5412 retval |= IRQ_HANDLED;
ff8e20c6 5413 }
fb7b45f0 5414
e8e7f271
SRT
5415 /* PA_INIT_ERROR is fatal and needs UIC reset */
5416 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
9333d775
VG
5417 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5418 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
48d5b973 5419 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
ff8e20c6 5420
9333d775
VG
5421 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5422 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5423 else if (hba->dev_quirks &
5424 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5425 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5426 hba->uic_error |=
5427 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5428 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5429 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5430 }
5431 retval |= IRQ_HANDLED;
583fa62d 5432 }
e8e7f271
SRT
5433
5434 /* UIC NL/TL/DME errors needs software retry */
5435 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
9333d775
VG
5436 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5437 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
48d5b973 5438 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
e8e7f271 5439 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
9333d775 5440 retval |= IRQ_HANDLED;
ff8e20c6 5441 }
e8e7f271
SRT
5442
5443 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
9333d775
VG
5444 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5445 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
48d5b973 5446 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
e8e7f271 5447 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
9333d775 5448 retval |= IRQ_HANDLED;
ff8e20c6 5449 }
e8e7f271
SRT
5450
5451 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
9333d775
VG
5452 if ((reg & UIC_DME_ERROR) &&
5453 (reg & UIC_DME_ERROR_CODE_MASK)) {
48d5b973 5454 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
e8e7f271 5455 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
9333d775 5456 retval |= IRQ_HANDLED;
ff8e20c6 5457 }
e8e7f271
SRT
5458
5459 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5460 __func__, hba->uic_error);
9333d775 5461 return retval;
e8e7f271
SRT
5462}
5463
82174440
SC
5464static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5465 u32 intr_mask)
5466{
5a244e0e
SC
5467 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5468 !ufshcd_is_auto_hibern8_enabled(hba))
82174440
SC
5469 return false;
5470
5471 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5472 return false;
5473
5474 if (hba->active_uic_cmd &&
5475 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5476 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5477 return false;
5478
5479 return true;
5480}
5481
e8e7f271
SRT
5482/**
5483 * ufshcd_check_errors - Check for errors that need s/w attention
5484 * @hba: per-adapter instance
9333d775
VG
5485 *
5486 * Returns
5487 * IRQ_HANDLED - If interrupt is valid
5488 * IRQ_NONE - If invalid interrupt
e8e7f271 5489 */
9333d775 5490static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
e8e7f271
SRT
5491{
5492 bool queue_eh_work = false;
9333d775 5493 irqreturn_t retval = IRQ_NONE;
e8e7f271 5494
d3c615bf
SC
5495 if (hba->errors & INT_FATAL_ERRORS) {
5496 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
e8e7f271 5497 queue_eh_work = true;
d3c615bf 5498 }
7a3e97b0
SY
5499
5500 if (hba->errors & UIC_ERROR) {
e8e7f271 5501 hba->uic_error = 0;
9333d775 5502 retval = ufshcd_update_uic_error(hba);
e8e7f271
SRT
5503 if (hba->uic_error)
5504 queue_eh_work = true;
7a3e97b0 5505 }
e8e7f271 5506
82174440
SC
5507 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5508 dev_err(hba->dev,
5509 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5510 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5511 "Enter" : "Exit",
5512 hba->errors, ufshcd_get_upmcrs(hba));
d3c615bf
SC
5513 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5514 hba->errors);
82174440
SC
5515 queue_eh_work = true;
5516 }
5517
e8e7f271 5518 if (queue_eh_work) {
9a47ec7c
YG
5519 /*
5520 * update the transfer error masks to sticky bits, let's do this
5521 * irrespective of current ufshcd_state.
5522 */
5523 hba->saved_err |= hba->errors;
5524 hba->saved_uic_err |= hba->uic_error;
5525
e8e7f271
SRT
5526 /* handle fatal errors only when link is functional */
5527 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5528 /* block commands from scsi mid-layer */
38135535 5529 ufshcd_scsi_block_requests(hba);
e8e7f271 5530
141f8165 5531 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
66cc820f
DR
5532
5533 /* dump controller state before resetting */
5534 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5535 bool pr_prdt = !!(hba->saved_err &
5536 SYSTEM_BUS_FATAL_ERROR);
5537
5538 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5539 __func__, hba->saved_err,
5540 hba->saved_uic_err);
5541
5542 ufshcd_print_host_regs(hba);
5543 ufshcd_print_pwr_info(hba);
5544 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5545 ufshcd_print_trs(hba, hba->outstanding_reqs,
5546 pr_prdt);
5547 }
e8e7f271
SRT
5548 schedule_work(&hba->eh_work);
5549 }
9333d775 5550 retval |= IRQ_HANDLED;
3441da7d 5551 }
e8e7f271
SRT
5552 /*
5553 * if (!queue_eh_work) -
5554 * Other errors are either non-fatal where host recovers
5555 * itself without s/w intervention or errors that will be
5556 * handled by the SCSI core layer.
5557 */
9333d775 5558 return retval;
7a3e97b0
SY
5559}
5560
69a6c269
BVA
5561struct ctm_info {
5562 struct ufs_hba *hba;
5563 unsigned long pending;
5564 unsigned int ncpl;
5565};
5566
5567static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5568{
5569 struct ctm_info *const ci = priv;
5570 struct completion *c;
5571
5572 WARN_ON_ONCE(reserved);
5573 if (test_bit(req->tag, &ci->pending))
5574 return true;
5575 ci->ncpl++;
5576 c = req->end_io_data;
5577 if (c)
5578 complete(c);
5579 return true;
5580}
5581
7a3e97b0
SY
5582/**
5583 * ufshcd_tmc_handler - handle task management function completion
5584 * @hba: per adapter instance
9333d775
VG
5585 *
5586 * Returns
5587 * IRQ_HANDLED - If interrupt is valid
5588 * IRQ_NONE - If invalid interrupt
7a3e97b0 5589 */
9333d775 5590static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
7a3e97b0 5591{
69a6c269
BVA
5592 struct request_queue *q = hba->tmf_queue;
5593 struct ctm_info ci = {
5594 .hba = hba,
5595 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5596 };
7a3e97b0 5597
69a6c269
BVA
5598 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5599 return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
7a3e97b0
SY
5600}
5601
5602/**
5603 * ufshcd_sl_intr - Interrupt service routine
5604 * @hba: per adapter instance
5605 * @intr_status: contains interrupts generated by the controller
9333d775
VG
5606 *
5607 * Returns
5608 * IRQ_HANDLED - If interrupt is valid
5609 * IRQ_NONE - If invalid interrupt
7a3e97b0 5610 */
9333d775 5611static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
7a3e97b0 5612{
9333d775
VG
5613 irqreturn_t retval = IRQ_NONE;
5614
7a3e97b0 5615 hba->errors = UFSHCD_ERROR_MASK & intr_status;
82174440
SC
5616
5617 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5618 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5619
7a3e97b0 5620 if (hba->errors)
9333d775 5621 retval |= ufshcd_check_errors(hba);
7a3e97b0 5622
53b3d9c3 5623 if (intr_status & UFSHCD_UIC_MASK)
9333d775 5624 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
7a3e97b0
SY
5625
5626 if (intr_status & UTP_TASK_REQ_COMPL)
9333d775 5627 retval |= ufshcd_tmc_handler(hba);
7a3e97b0
SY
5628
5629 if (intr_status & UTP_TRANSFER_REQ_COMPL)
9333d775
VG
5630 retval |= ufshcd_transfer_req_compl(hba);
5631
5632 return retval;
7a3e97b0
SY
5633}
5634
5635/**
5636 * ufshcd_intr - Main interrupt service routine
5637 * @irq: irq number
5638 * @__hba: pointer to adapter instance
5639 *
9333d775
VG
5640 * Returns
5641 * IRQ_HANDLED - If interrupt is valid
5642 * IRQ_NONE - If invalid interrupt
7a3e97b0
SY
5643 */
5644static irqreturn_t ufshcd_intr(int irq, void *__hba)
5645{
d75f7fe4 5646 u32 intr_status, enabled_intr_status;
7a3e97b0
SY
5647 irqreturn_t retval = IRQ_NONE;
5648 struct ufs_hba *hba = __hba;
7f6ba4f1 5649 int retries = hba->nutrs;
7a3e97b0
SY
5650
5651 spin_lock(hba->host->host_lock);
b873a275 5652 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7a3e97b0 5653
7f6ba4f1
VG
5654 /*
5655 * There can be at most hba->nutrs reqs in flight and, in the worst
5656 * case, they get finished 1 by 1 after the interrupt status is
5657 * read. Make sure we handle them by checking the interrupt status
5658 * again in a loop until we process all of the reqs before returning.
5659 */
5660 do {
5661 enabled_intr_status =
5662 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5663 if (intr_status)
5664 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
9333d775
VG
5665 if (enabled_intr_status)
5666 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7f6ba4f1
VG
5667
5668 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5669 } while (intr_status && --retries);
d75f7fe4 5670
9333d775
VG
5671 if (retval == IRQ_NONE) {
5672 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
5673 __func__, intr_status);
5674 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
5675 }
5676
7a3e97b0
SY
5677 spin_unlock(hba->host->host_lock);
5678 return retval;
5679}
5680
e2933132
SRT
5681static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5682{
5683 int err = 0;
5684 u32 mask = 1 << tag;
5685 unsigned long flags;
5686
5687 if (!test_bit(tag, &hba->outstanding_tasks))
5688 goto out;
5689
5690 spin_lock_irqsave(hba->host->host_lock, flags);
1399c5b0 5691 ufshcd_utmrl_clear(hba, tag);
e2933132
SRT
5692 spin_unlock_irqrestore(hba->host->host_lock, flags);
5693
5694 /* poll for max. 1 sec to clear door bell register by h/w */
5695 err = ufshcd_wait_for_register(hba,
5696 REG_UTP_TASK_REQ_DOOR_BELL,
596585a2 5697 mask, 0, 1000, 1000, true);
e2933132
SRT
5698out:
5699 return err;
5700}
5701
c6049cd9
CH
5702static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5703 struct utp_task_req_desc *treq, u8 tm_function)
7a3e97b0 5704{
69a6c269 5705 struct request_queue *q = hba->tmf_queue;
c6049cd9 5706 struct Scsi_Host *host = hba->host;
69a6c269
BVA
5707 DECLARE_COMPLETION_ONSTACK(wait);
5708 struct request *req;
7a3e97b0 5709 unsigned long flags;
c6049cd9 5710 int free_slot, task_tag, err;
7a3e97b0 5711
e2933132
SRT
5712 /*
5713 * Get free slot, sleep if slots are unavailable.
5714 * Even though we use wait_event() which sleeps indefinitely,
5715 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5716 */
69a6c269
BVA
5717 req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
5718 req->end_io_data = &wait;
5719 free_slot = req->tag;
5720 WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
1ab27c9c 5721 ufshcd_hold(hba, false);
7a3e97b0 5722
e2933132 5723 spin_lock_irqsave(host->host_lock, flags);
e2933132 5724 task_tag = hba->nutrs + free_slot;
7a3e97b0 5725
c6049cd9
CH
5726 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5727
5728 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
d2877be4
KK
5729 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5730
7a3e97b0
SY
5731 /* send command to the controller */
5732 __set_bit(free_slot, &hba->outstanding_tasks);
897efe62
YG
5733
5734 /* Make sure descriptors are ready before ringing the task doorbell */
5735 wmb();
5736
b873a275 5737 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
ad1a1b9c
GB
5738 /* Make sure that doorbell is committed immediately */
5739 wmb();
7a3e97b0
SY
5740
5741 spin_unlock_irqrestore(host->host_lock, flags);
5742
6667e6d9
OS
5743 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5744
7a3e97b0 5745 /* wait until the task management command is completed */
69a6c269 5746 err = wait_for_completion_io_timeout(&wait,
e2933132 5747 msecs_to_jiffies(TM_CMD_TIMEOUT));
7a3e97b0 5748 if (!err) {
69a6c269
BVA
5749 /*
5750 * Make sure that ufshcd_compl_tm() does not trigger a
5751 * use-after-free.
5752 */
5753 req->end_io_data = NULL;
6667e6d9 5754 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
e2933132
SRT
5755 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5756 __func__, tm_function);
5757 if (ufshcd_clear_tm_cmd(hba, free_slot))
5758 dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
5759 __func__, free_slot);
5760 err = -ETIMEDOUT;
5761 } else {
c6049cd9
CH
5762 err = 0;
5763 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5764
6667e6d9 5765 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
7a3e97b0 5766 }
e2933132 5767
b557217c
SC
5768 spin_lock_irqsave(hba->host->host_lock, flags);
5769 __clear_bit(free_slot, &hba->outstanding_tasks);
5770 spin_unlock_irqrestore(hba->host->host_lock, flags);
5771
69a6c269 5772 blk_put_request(req);
e2933132 5773
1ab27c9c 5774 ufshcd_release(hba);
7a3e97b0
SY
5775 return err;
5776}
5777
c6049cd9
CH
5778/**
5779 * ufshcd_issue_tm_cmd - issues task management commands to controller
5780 * @hba: per adapter instance
5781 * @lun_id: LUN ID to which TM command is sent
5782 * @task_id: task ID to which the TM command is applicable
5783 * @tm_function: task management function opcode
5784 * @tm_response: task management service response return value
5785 *
5786 * Returns non-zero value on error, zero on success.
5787 */
5788static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5789 u8 tm_function, u8 *tm_response)
5790{
5791 struct utp_task_req_desc treq = { { 0 }, };
5792 int ocs_value, err;
5793
5794 /* Configure task request descriptor */
5795 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5796 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5797
5798 /* Configure task request UPIU */
5799 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
5800 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
5801 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
5802
5803 /*
5804 * The host shall provide the same value for LUN field in the basic
5805 * header and for Input Parameter.
5806 */
5807 treq.input_param1 = cpu_to_be32(lun_id);
5808 treq.input_param2 = cpu_to_be32(task_id);
5809
5810 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
5811 if (err == -ETIMEDOUT)
5812 return err;
5813
5814 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5815 if (ocs_value != OCS_SUCCESS)
5816 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5817 __func__, ocs_value);
5818 else if (tm_response)
5819 *tm_response = be32_to_cpu(treq.output_param1) &
5820 MASK_TM_SERVICE_RESP;
5821 return err;
5822}
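/*
 * Illustrative sketch, not part of the driver: querying the device for a
 * pending command with ufshcd_issue_tm_cmd(), the same pattern that
 * ufshcd_abort() uses further below. Only the wrapper name is hypothetical;
 * the identifiers come from this file.
 */
static int example_query_task(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				  UFS_QUERY_TASK, &resp);
	if (err)
		return err;

	/* SUCCEEDED means the command is still pending in the device */
	return resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED;
}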
5823
5e0a86ee
AA
5824/**
5825 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
5826 * @hba: per-adapter instance
5827 * @req_upiu: upiu request
5828 * @rsp_upiu: upiu reply
5e0a86ee
AA
5829 * @desc_buff: pointer to descriptor buffer, NULL if NA
5830 * @buff_len: descriptor size, 0 if NA
d0e9760d 5831 * @cmd_type: specifies the type (NOP, Query...)
5e0a86ee
AA
5832 * @desc_op: descriptor operation
5833 *
5834 * These types of requests use the UTP Transfer Request Descriptor (utrd).
5835 * Therefore, they "ride" the device management infrastructure: they use its
5836 * tag and its work queues.
5837 *
5838 * Since there is only one available tag for device management commands,
5839 * the caller is expected to hold the hba->dev_cmd.lock mutex.
5840 */
5841static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5842 struct utp_upiu_req *req_upiu,
5843 struct utp_upiu_req *rsp_upiu,
5844 u8 *desc_buff, int *buff_len,
7f674c38 5845 enum dev_cmd_type cmd_type,
5e0a86ee
AA
5846 enum query_opcode desc_op)
5847{
7252a360
BVA
5848 struct request_queue *q = hba->cmd_queue;
5849 struct request *req;
5e0a86ee
AA
5850 struct ufshcd_lrb *lrbp;
5851 int err = 0;
5852 int tag;
5853 struct completion wait;
5854 unsigned long flags;
5855 u32 upiu_flags;
5856
5857 down_read(&hba->clk_scaling_lock);
5858
7252a360 5859 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
bb14dd15
DC
5860 if (IS_ERR(req)) {
5861 err = PTR_ERR(req);
5862 goto out_unlock;
5863 }
7252a360
BVA
5864 tag = req->tag;
5865 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5e0a86ee
AA
5866
5867 init_completion(&wait);
5868 lrbp = &hba->lrb[tag];
5869 WARN_ON(lrbp->cmd);
5870
5871 lrbp->cmd = NULL;
5872 lrbp->sense_bufflen = 0;
5873 lrbp->sense_buffer = NULL;
5874 lrbp->task_tag = tag;
5875 lrbp->lun = 0;
5876 lrbp->intr_cmd = true;
5877 hba->dev_cmd.type = cmd_type;
5878
5879 switch (hba->ufs_version) {
5880 case UFSHCI_VERSION_10:
5881 case UFSHCI_VERSION_11:
5882 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
5883 break;
5884 default:
5885 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5886 break;
5887 }
5888
5889 /* update the task tag in the request upiu */
5890 req_upiu->header.dword_0 |= cpu_to_be32(tag);
5891
5892 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
5893
5894 /* just copy the upiu request as it is */
5895 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
5896 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
5897 /* The Data Segment Area is optional depending upon the query
5898 * function value. For WRITE DESCRIPTOR, the data segment
5899 * follows right after the tsf.
5900 */
5901 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
5902 *buff_len = 0;
5903 }
5904
5905 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5906
5907 hba->dev_cmd.complete = &wait;
5908
5909 /* Make sure descriptors are ready before ringing the doorbell */
5910 wmb();
5911 spin_lock_irqsave(hba->host->host_lock, flags);
5912 ufshcd_send_command(hba, tag);
5913 spin_unlock_irqrestore(hba->host->host_lock, flags);
5914
5915 /*
5916 * Ignore the return value here - ufshcd_check_query_response() is
5917 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
5918 * Read the response directly, ignoring all errors.
5919 */
5920 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
5921
5922 /* just copy the upiu response as it is */
5923 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
4bbbe242
AA
5924 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
5925 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
5926 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
5927 MASK_QUERY_DATA_SEG_LEN;
5928
5929 if (*buff_len >= resp_len) {
5930 memcpy(desc_buff, descp, resp_len);
5931 *buff_len = resp_len;
5932 } else {
3d4881d1
BH
5933 dev_warn(hba->dev,
5934 "%s: rsp size %d is bigger than buffer size %d",
5935 __func__, resp_len, *buff_len);
4bbbe242
AA
5936 *buff_len = 0;
5937 err = -EINVAL;
5938 }
5939 }
5e0a86ee 5940
7252a360 5941 blk_put_request(req);
bb14dd15 5942out_unlock:
5e0a86ee
AA
5943 up_read(&hba->clk_scaling_lock);
5944 return err;
5945}
5946
5947/**
5948 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
5949 * @hba: per-adapter instance
5950 * @req_upiu: upiu request
5951 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
5952 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
5953 * @desc_buff: pointer to descriptor buffer, NULL if NA
5954 * @buff_len: descriptor size, 0 if NA
5955 * @desc_op: descriptor operation
5956 *
5957 * Supports UTP Transfer requests (nop and query), and UTP Task
5958 * Management requests.
5959 * It is up to the caller to fill the upiu content properly, as it will
5960 * be copied without any further input validation.
5961 */
5962int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
5963 struct utp_upiu_req *req_upiu,
5964 struct utp_upiu_req *rsp_upiu,
5965 int msgcode,
5966 u8 *desc_buff, int *buff_len,
5967 enum query_opcode desc_op)
5968{
5969 int err;
7f674c38 5970 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
5e0a86ee
AA
5971 struct utp_task_req_desc treq = { { 0 }, };
5972 int ocs_value;
5973 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
5974
5e0a86ee
AA
5975 switch (msgcode) {
5976 case UPIU_TRANSACTION_NOP_OUT:
5977 cmd_type = DEV_CMD_TYPE_NOP;
5978 /* fall through */
5979 case UPIU_TRANSACTION_QUERY_REQ:
5980 ufshcd_hold(hba, false);
5981 mutex_lock(&hba->dev_cmd.lock);
5982 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
5983 desc_buff, buff_len,
5984 cmd_type, desc_op);
5985 mutex_unlock(&hba->dev_cmd.lock);
5986 ufshcd_release(hba);
5987
5988 break;
5989 case UPIU_TRANSACTION_TASK_REQ:
5990 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5991 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5992
5993 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
5994
5995 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
5996 if (err == -ETIMEDOUT)
5997 break;
5998
5999 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6000 if (ocs_value != OCS_SUCCESS) {
6001 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6002 ocs_value);
6003 break;
6004 }
6005
6006 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6007
6008 break;
6009 default:
6010 err = -EINVAL;
6011
6012 break;
6013 }
6014
5e0a86ee
AA
6015 return err;
6016}
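/*
 * Illustrative sketch, not part of the driver: sending a raw NOP OUT UPIU
 * through ufshcd_exec_raw_upiu_cmd(), roughly what the BSG layer does on
 * behalf of user space. The header layout (transaction code in the upper
 * byte of dword_0) follows the task-request construction in
 * ufshcd_issue_tm_cmd() above; UPIU_QUERY_OPCODE_NOP is assumed here as the
 * "no descriptor" opcode since no data segment is involved.
 */
static int example_send_raw_nop(struct ufs_hba *hba)
{
	struct utp_upiu_req req_upiu = {};
	struct utp_upiu_req rsp_upiu = {};
	int buff_len = 0;

	/* Transaction type goes in the most significant byte of dword_0 */
	req_upiu.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);

	return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, &buff_len,
					UPIU_QUERY_OPCODE_NOP);
}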
6017
7a3e97b0 6018/**
3441da7d
SRT
6019 * ufshcd_eh_device_reset_handler - device reset handler registered to
6020 * scsi layer.
7a3e97b0
SY
6021 * @cmd: SCSI command pointer
6022 *
6023 * Returns SUCCESS/FAILED
6024 */
3441da7d 6025static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7a3e97b0
SY
6026{
6027 struct Scsi_Host *host;
6028 struct ufs_hba *hba;
6029 unsigned int tag;
6030 u32 pos;
6031 int err;
e2933132
SRT
6032 u8 resp = 0xF;
6033 struct ufshcd_lrb *lrbp;
3441da7d 6034 unsigned long flags;
7a3e97b0
SY
6035
6036 host = cmd->device->host;
6037 hba = shost_priv(host);
6038 tag = cmd->request->tag;
6039
e2933132
SRT
6040 lrbp = &hba->lrb[tag];
6041 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6042 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3441da7d
SRT
6043 if (!err)
6044 err = resp;
7a3e97b0 6045 goto out;
e2933132 6046 }
7a3e97b0 6047
3441da7d
SRT
6048 /* clear the commands that were pending for corresponding LUN */
6049 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6050 if (hba->lrb[pos].lun == lrbp->lun) {
6051 err = ufshcd_clear_cmd(hba, pos);
6052 if (err)
6053 break;
7a3e97b0 6054 }
3441da7d
SRT
6055 }
6056 spin_lock_irqsave(host->host_lock, flags);
6057 ufshcd_transfer_req_compl(hba);
6058 spin_unlock_irqrestore(host->host_lock, flags);
7fabb77b 6059
7a3e97b0 6060out:
7fabb77b 6061 hba->req_abort_count = 0;
8808b4e9 6062 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
3441da7d
SRT
6063 if (!err) {
6064 err = SUCCESS;
6065 } else {
6066 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6067 err = FAILED;
6068 }
7a3e97b0
SY
6069 return err;
6070}
6071
e0b299e3
GB
6072static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6073{
6074 struct ufshcd_lrb *lrbp;
6075 int tag;
6076
6077 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6078 lrbp = &hba->lrb[tag];
6079 lrbp->req_abort_skip = true;
6080 }
6081}
6082
7a3e97b0
SY
6083/**
6084 * ufshcd_abort - abort a specific command
6085 * @cmd: SCSI command pointer
6086 *
f20810d8
SRT
6087 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6088 * management command, and in the host controller by clearing the door-bell
6089 * register. There can be a race between the controller sending the command to
6090 * the device and the abort being issued. To avoid that, first issue
6091 * UFS_QUERY_TASK to check whether the command was really issued, then abort it.
6092 *
7a3e97b0
SY
6093 * Returns SUCCESS/FAILED
6094 */
6095static int ufshcd_abort(struct scsi_cmnd *cmd)
6096{
6097 struct Scsi_Host *host;
6098 struct ufs_hba *hba;
6099 unsigned long flags;
6100 unsigned int tag;
f20810d8
SRT
6101 int err = 0;
6102 int poll_cnt;
e2933132
SRT
6103 u8 resp = 0xF;
6104 struct ufshcd_lrb *lrbp;
e9d501b1 6105 u32 reg;
7a3e97b0
SY
6106
6107 host = cmd->device->host;
6108 hba = shost_priv(host);
6109 tag = cmd->request->tag;
e7d38257 6110 lrbp = &hba->lrb[tag];
14497328
YG
6111 if (!ufshcd_valid_tag(hba, tag)) {
6112 dev_err(hba->dev,
6113 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6114 __func__, tag, cmd, cmd->request);
6115 BUG();
6116 }
7a3e97b0 6117
e7d38257
DR
6118 /*
6119 * Task abort to the device W-LUN is illegal. When this command
6120 * fails due to the spec violation, the SCSI error handler's next step
6121 * will be to send a LU reset which, again, is a spec violation.
6122 * To avoid these unnecessary/illegal steps we skip to the last error
6123 * handling stage: reset and restore.
6124 */
6125 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6126 return ufshcd_eh_host_reset_handler(cmd);
6127
1ab27c9c 6128 ufshcd_hold(hba, false);
14497328 6129 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
f20810d8 6130 /* If command is already aborted/completed, return SUCCESS */
14497328
YG
6131 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6132 dev_err(hba->dev,
6133 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6134 __func__, tag, hba->outstanding_reqs, reg);
f20810d8 6135 goto out;
14497328 6136 }
7a3e97b0 6137
e9d501b1
DR
6138 if (!(reg & (1 << tag))) {
6139 dev_err(hba->dev,
6140 "%s: cmd was completed, but without a notifying intr, tag = %d",
6141 __func__, tag);
6142 }
6143
66cc820f
DR
6144 /* Print Transfer Request of aborted task */
6145 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
66cc820f 6146
7fabb77b
GB
6147 /*
6148 * Print detailed info about aborted request.
6149 * As more than one request might get aborted at the same time,
6150 * print full information only for the first aborted request in order
6151 * to reduce repeated printouts. For other aborted requests only print
6152 * basic details.
6153 */
6154 scsi_print_command(hba->lrb[tag].cmd);
6155 if (!hba->req_abort_count) {
8808b4e9 6156 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
7fabb77b 6157 ufshcd_print_host_regs(hba);
6ba65588 6158 ufshcd_print_host_state(hba);
7fabb77b
GB
6159 ufshcd_print_pwr_info(hba);
6160 ufshcd_print_trs(hba, 1 << tag, true);
6161 } else {
6162 ufshcd_print_trs(hba, 1 << tag, false);
6163 }
6164 hba->req_abort_count++;
e0b299e3
GB
6165
6166 /* Skip task abort in case previous aborts failed and report failure */
6167 if (lrbp->req_abort_skip) {
6168 err = -EIO;
6169 goto out;
6170 }
6171
f20810d8
SRT
6172 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6173 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6174 UFS_QUERY_TASK, &resp);
6175 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6176 /* cmd pending in the device */
ff8e20c6
DR
6177 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6178 __func__, tag);
f20810d8
SRT
6179 break;
6180 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
f20810d8
SRT
6181 /*
6182 * cmd not pending in the device, check if it is
6183 * in transition.
6184 */
ff8e20c6
DR
6185 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6186 __func__, tag);
f20810d8
SRT
6187 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6188 if (reg & (1 << tag)) {
6189 /* sleep for max. 200us to stabilize */
6190 usleep_range(100, 200);
6191 continue;
6192 }
6193 /* command completed already */
ff8e20c6
DR
6194 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6195 __func__, tag);
f20810d8
SRT
6196 goto out;
6197 } else {
ff8e20c6
DR
6198 dev_err(hba->dev,
6199 "%s: no response from device. tag = %d, err %d\n",
6200 __func__, tag, err);
f20810d8
SRT
6201 if (!err)
6202 err = resp; /* service response error */
6203 goto out;
6204 }
6205 }
6206
6207 if (!poll_cnt) {
6208 err = -EBUSY;
7a3e97b0
SY
6209 goto out;
6210 }
7a3e97b0 6211
e2933132
SRT
6212 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6213 UFS_ABORT_TASK, &resp);
6214 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
ff8e20c6 6215 if (!err) {
f20810d8 6216 err = resp; /* service response error */
ff8e20c6
DR
6217 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6218 __func__, tag, err);
6219 }
7a3e97b0 6220 goto out;
e2933132 6221 }
7a3e97b0 6222
f20810d8 6223 err = ufshcd_clear_cmd(hba, tag);
ff8e20c6
DR
6224 if (err) {
6225 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6226 __func__, tag, err);
f20810d8 6227 goto out;
ff8e20c6 6228 }
f20810d8 6229
7a3e97b0
SY
6230 scsi_dma_unmap(cmd);
6231
6232 spin_lock_irqsave(host->host_lock, flags);
a48353f6 6233 ufshcd_outstanding_req_clear(hba, tag);
7a3e97b0
SY
6234 hba->lrb[tag].cmd = NULL;
6235 spin_unlock_irqrestore(host->host_lock, flags);
5a0b0cb9 6236
7a3e97b0 6237out:
f20810d8
SRT
6238 if (!err) {
6239 err = SUCCESS;
6240 } else {
6241 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
e0b299e3 6242 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
f20810d8
SRT
6243 err = FAILED;
6244 }
6245
1ab27c9c
ST
6246 /*
6247 * This ufshcd_release() corresponds to the original scsi cmd that got
6248 * aborted here (as we won't get any IRQ for it).
6249 */
6250 ufshcd_release(hba);
7a3e97b0
SY
6251 return err;
6252}
6253
3441da7d
SRT
6254/**
6255 * ufshcd_host_reset_and_restore - reset and restore host controller
6256 * @hba: per-adapter instance
6257 *
6258 * Note that host controller reset may issue DME_RESET to
6259 * local and remote (device) Uni-Pro stack and the attributes
6260 * are reset to default state.
6261 *
6262 * Returns zero on success, non-zero on failure
6263 */
6264static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6265{
6266 int err;
3441da7d
SRT
6267 unsigned long flags;
6268
2df74b69
CG
6269 /*
6270 * Stop the host controller and complete the requests
6271 * cleared by h/w
6272 */
3441da7d 6273 spin_lock_irqsave(hba->host->host_lock, flags);
596585a2 6274 ufshcd_hba_stop(hba, false);
2df74b69
CG
6275 hba->silence_err_logs = true;
6276 ufshcd_complete_requests(hba);
6277 hba->silence_err_logs = false;
3441da7d
SRT
6278 spin_unlock_irqrestore(hba->host->host_lock, flags);
6279
a3cd5ec5 6280 /* scale up clocks to max frequency before full reinitialization */
6281 ufshcd_scale_clks(hba, true);
6282
3441da7d
SRT
6283 err = ufshcd_hba_enable(hba);
6284 if (err)
6285 goto out;
6286
6287 /* Establish the link again and restore the device */
1b9e2141 6288 err = ufshcd_probe_hba(hba, false);
1d337ec2
SRT
6289
6290 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3441da7d
SRT
6291 err = -EIO;
6292out:
6293 if (err)
6294 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
8808b4e9 6295 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
3441da7d
SRT
6296 return err;
6297}
6298
6299/**
6300 * ufshcd_reset_and_restore - reset and re-initialize host/device
6301 * @hba: per-adapter instance
6302 *
6303 * Reset and recover device, host and re-establish link. This
6304 * is helpful to recover the communication in fatal error conditions.
6305 *
6306 * Returns zero on success, non-zero on failure
6307 */
6308static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6309{
6310 int err = 0;
1d337ec2 6311 int retries = MAX_HOST_RESET_RETRIES;
3441da7d 6312
1d337ec2 6313 do {
d8d9f793
BA
6314 /* Reset the attached device */
6315 ufshcd_vops_device_reset(hba);
6316
1d337ec2
SRT
6317 err = ufshcd_host_reset_and_restore(hba);
6318 } while (err && --retries);
3441da7d 6319
3441da7d
SRT
6320 return err;
6321}
6322
6323/**
6324 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
8aa29f19 6325 * @cmd: SCSI command pointer
3441da7d
SRT
6326 *
6327 * Returns SUCCESS/FAILED
6328 */
6329static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6330{
6331 int err;
6332 unsigned long flags;
6333 struct ufs_hba *hba;
6334
6335 hba = shost_priv(cmd->device->host);
6336
1ab27c9c 6337 ufshcd_hold(hba, false);
3441da7d
SRT
6338 /*
6339 * Check if there is any race with fatal error handling.
6340 * If so, wait for it to complete. Even though fatal error
6341 * handling does reset and restore in some cases, don't assume
6342 * anything out of it. We are just avoiding race here.
6343 */
6344 do {
6345 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 6346 if (!(work_pending(&hba->eh_work) ||
8dc0da79
ZL
6347 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6348 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
3441da7d
SRT
6349 break;
6350 spin_unlock_irqrestore(hba->host->host_lock, flags);
6351 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
e8e7f271 6352 flush_work(&hba->eh_work);
3441da7d
SRT
6353 } while (1);
6354
6355 hba->ufshcd_state = UFSHCD_STATE_RESET;
6356 ufshcd_set_eh_in_progress(hba);
6357 spin_unlock_irqrestore(hba->host->host_lock, flags);
6358
6359 err = ufshcd_reset_and_restore(hba);
6360
6361 spin_lock_irqsave(hba->host->host_lock, flags);
6362 if (!err) {
6363 err = SUCCESS;
6364 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6365 } else {
6366 err = FAILED;
6367 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6368 }
6369 ufshcd_clear_eh_in_progress(hba);
6370 spin_unlock_irqrestore(hba->host->host_lock, flags);
6371
1ab27c9c 6372 ufshcd_release(hba);
3441da7d
SRT
6373 return err;
6374}
6375
3a4bf06d
YG
6376/**
6377 * ufshcd_get_max_icc_level - calculate the ICC level
6378 * @sup_curr_uA: max. current supported by the regulator
6379 * @start_scan: row at the desc table to start scan from
6380 * @buff: power descriptor buffer
6381 *
6382 * Returns calculated max ICC level for specific regulator
6383 */
6384static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6385{
6386 int i;
6387 int curr_uA;
6388 u16 data;
6389 u16 unit;
6390
6391 for (i = start_scan; i >= 0; i--) {
d79713f9 6392 data = be16_to_cpup((__be16 *)&buff[2 * i]);
3a4bf06d
YG
6393 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6394 ATTR_ICC_LVL_UNIT_OFFSET;
6395 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6396 switch (unit) {
6397 case UFSHCD_NANO_AMP:
6398 curr_uA = curr_uA / 1000;
6399 break;
6400 case UFSHCD_MILI_AMP:
6401 curr_uA = curr_uA * 1000;
6402 break;
6403 case UFSHCD_AMP:
6404 curr_uA = curr_uA * 1000 * 1000;
6405 break;
6406 case UFSHCD_MICRO_AMP:
6407 default:
6408 break;
6409 }
6410 if (sup_curr_uA >= curr_uA)
6411 break;
6412 }
6413 if (i < 0) {
6414 i = 0;
6415 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6416 }
6417
6418 return (u32)i;
6419}
6420
6421/**
6422 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
6423 * In case regulators are not initialized we'll return 0
6424 * @hba: per-adapter instance
6425 * @desc_buf: power descriptor buffer to extract ICC levels from.
6426 * @len: length of desc_buf
6427 *
6428 * Returns calculated ICC level
6429 */
6430static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6431 u8 *desc_buf, int len)
6432{
6433 u32 icc_level = 0;
6434
6435 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6436 !hba->vreg_info.vccq2) {
6437 dev_err(hba->dev,
6438 "%s: Regulator capability was not set, actvIccLevel=%d",
6439 __func__, icc_level);
6440 goto out;
6441 }
6442
0487fff7 6443 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
3a4bf06d
YG
6444 icc_level = ufshcd_get_max_icc_level(
6445 hba->vreg_info.vcc->max_uA,
6446 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6447 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6448
0487fff7 6449 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
3a4bf06d
YG
6450 icc_level = ufshcd_get_max_icc_level(
6451 hba->vreg_info.vccq->max_uA,
6452 icc_level,
6453 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6454
0487fff7 6455 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
3a4bf06d
YG
6456 icc_level = ufshcd_get_max_icc_level(
6457 hba->vreg_info.vccq2->max_uA,
6458 icc_level,
6459 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6460out:
6461 return icc_level;
6462}
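/*
 * Worked example for the scan above (figures invented for illustration):
 * with vcc->max_uA = 300000 (300 mA) and power descriptor rows that decode,
 * in scan order from the highest level down, to 500 mA, 350 mA and 250 mA,
 * ufshcd_get_max_icc_level() converts each row to micro-amps
 * (UFSHCD_MILI_AMP: 250 * 1000 = 250000 uA) and stops at the first row whose
 * current fits under the regulator limit, so the index of the 250 mA row is
 * returned as the supported ICC level.
 */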
6463
6464static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6465{
6466 int ret;
a4b0e8a4 6467 int buff_len = hba->desc_size.pwr_desc;
bbe21d7a
KC
6468 u8 *desc_buf;
6469
6470 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6471 if (!desc_buf)
6472 return;
3a4bf06d 6473
8c9a51b0
BH
6474 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
6475 desc_buf, buff_len);
3a4bf06d
YG
6476 if (ret) {
6477 dev_err(hba->dev,
6478 "%s: Failed reading power descriptor.len = %d ret = %d",
6479 __func__, buff_len, ret);
bbe21d7a 6480 goto out;
3a4bf06d
YG
6481 }
6482
6483 hba->init_prefetch_data.icc_level =
6484 ufshcd_find_max_sup_active_icc_level(hba,
6485 desc_buf, buff_len);
6486 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6487 __func__, hba->init_prefetch_data.icc_level);
6488
dbd34a61
SM
6489 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6490 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6491 &hba->init_prefetch_data.icc_level);
3a4bf06d
YG
6492
6493 if (ret)
6494 dev_err(hba->dev,
6495 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6496 __func__, hba->init_prefetch_data.icc_level, ret);
6497
bbe21d7a
KC
6498out:
6499 kfree(desc_buf);
3a4bf06d
YG
6500}
6501
2a8fa600
SJ
6502/**
6503 * ufshcd_scsi_add_wlus - Adds required W-LUs
6504 * @hba: per-adapter instance
6505 *
6506 * UFS device specification requires the UFS devices to support 4 well known
6507 * logical units:
6508 * "REPORT_LUNS" (address: 01h)
6509 * "UFS Device" (address: 50h)
6510 * "RPMB" (address: 44h)
6511 * "BOOT" (address: 30h)
6512 * UFS device's power management needs to be controlled by "POWER CONDITION"
6513 * field of SSU (START STOP UNIT) command. But this "power condition" field
6514 * will take effect only when it is sent to the "UFS device" well known logical unit,
6515 * hence we require the scsi_device instance to represent this logical unit in
6516 * order for the UFS host driver to send the SSU command for power management.
8aa29f19 6517 *
2a8fa600
SJ
6518 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6519 * Block) LU so user space process can control this LU. User space may also
6520 * want to have access to BOOT LU.
8aa29f19 6521 *
2a8fa600
SJ
6522 * This function adds scsi device instances for each of the well known LUs
6523 * (except "REPORT LUNS" LU).
6524 *
6525 * Returns zero on success (all required W-LUs are added successfully),
6526 * non-zero error value on failure (if failed to add any of the required W-LU).
6527 */
6528static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6529{
6530 int ret = 0;
7c48bfd0
AM
6531 struct scsi_device *sdev_rpmb;
6532 struct scsi_device *sdev_boot;
2a8fa600
SJ
6533
6534 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6535 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6536 if (IS_ERR(hba->sdev_ufs_device)) {
6537 ret = PTR_ERR(hba->sdev_ufs_device);
6538 hba->sdev_ufs_device = NULL;
6539 goto out;
6540 }
7c48bfd0 6541 scsi_device_put(hba->sdev_ufs_device);
2a8fa600 6542
7c48bfd0 6543 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
2a8fa600 6544 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7c48bfd0
AM
6545 if (IS_ERR(sdev_rpmb)) {
6546 ret = PTR_ERR(sdev_rpmb);
3d21fbde 6547 goto remove_sdev_ufs_device;
2a8fa600 6548 }
7c48bfd0 6549 scsi_device_put(sdev_rpmb);
3d21fbde
HK
6550
6551 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6552 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6553 if (IS_ERR(sdev_boot))
6554 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6555 else
6556 scsi_device_put(sdev_boot);
2a8fa600
SJ
6557 goto out;
6558
2a8fa600
SJ
6559remove_sdev_ufs_device:
6560 scsi_remove_device(hba->sdev_ufs_device);
6561out:
6562 return ret;
6563}
6564
09750066 6565static int ufs_get_device_desc(struct ufs_hba *hba)
c58ab7aa
YG
6566{
6567 int err;
bbe21d7a 6568 size_t buff_len;
c58ab7aa 6569 u8 model_index;
bbe21d7a 6570 u8 *desc_buf;
09750066 6571 struct ufs_dev_info *dev_info = &hba->dev_info;
4b828fe1 6572
bbe21d7a
KC
6573 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6574 QUERY_DESC_MAX_SIZE + 1);
6575 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6576 if (!desc_buf) {
6577 err = -ENOMEM;
6578 goto out;
6579 }
c58ab7aa 6580
8c9a51b0
BH
6581 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
6582 hba->desc_size.dev_desc);
c58ab7aa
YG
6583 if (err) {
6584 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6585 __func__, err);
6586 goto out;
6587 }
6588
6589 /*
6590 * getting vendor (manufacturerID) and Bank Index in big endian
6591 * format
6592 */
09750066 6593 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
c58ab7aa
YG
6594 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6595
09f17791
CG
6596 /* getting Specification Version in big endian format */
6597 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
6598 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
6599
c58ab7aa 6600 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
4b828fe1 6601 err = ufshcd_read_string_desc(hba, model_index,
09750066 6602 &dev_info->model, SD_ASCII_STD);
4b828fe1 6603 if (err < 0) {
c58ab7aa
YG
6604 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6605 __func__, err);
6606 goto out;
6607 }
6608
4b828fe1
TW
6609 /*
6610 * ufshcd_read_string_desc returns the size of the string;
6611 * reset the error value.
6612 */
6613 err = 0;
c58ab7aa
YG
6614
6615out:
bbe21d7a 6616 kfree(desc_buf);
c58ab7aa
YG
6617 return err;
6618}
6619
09750066 6620static void ufs_put_device_desc(struct ufs_hba *hba)
4b828fe1 6621{
09750066
BH
6622 struct ufs_dev_info *dev_info = &hba->dev_info;
6623
6624 kfree(dev_info->model);
6625 dev_info->model = NULL;
4b828fe1
TW
6626}
6627
09750066 6628static void ufs_fixup_device_setup(struct ufs_hba *hba)
c58ab7aa 6629{
c58ab7aa 6630 struct ufs_dev_fix *f;
09750066 6631 struct ufs_dev_info *dev_info = &hba->dev_info;
c58ab7aa
YG
6632
6633 for (f = ufs_fixups; f->quirk; f++) {
09750066
BH
6634 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
6635 f->wmanufacturerid == UFS_ANY_VENDOR) &&
6636 ((dev_info->model &&
6637 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
6638 !strcmp(f->model, UFS_ANY_MODEL)))
c58ab7aa
YG
6639 hba->dev_quirks |= f->quirk;
6640 }
6641}
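/*
 * Illustrative sketch, not part of the driver: the shape of an entry in the
 * ufs_fixups[] table that the loop above matches against. The field names
 * are inferred from the accesses in ufs_fixup_device_setup(); the values
 * below form a hypothetical catch-all entry using a quirk flag that is
 * already referenced elsewhere in this file.
 */
static struct ufs_dev_fix example_fixup = {
	.wmanufacturerid = UFS_ANY_VENDOR,
	.model = UFS_ANY_MODEL,
	.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE,
};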
6642
37113106
YG
6643/**
6644 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6645 * @hba: per-adapter instance
6646 *
6647 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6648 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6649 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6650 * the hibern8 exit latency.
6651 *
6652 * Returns zero on success, non-zero error value on failure.
6653 */
6654static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6655{
6656 int ret = 0;
6657 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6658
6659 ret = ufshcd_dme_peer_get(hba,
6660 UIC_ARG_MIB_SEL(
6661 RX_MIN_ACTIVATETIME_CAPABILITY,
6662 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6663 &peer_rx_min_activatetime);
6664 if (ret)
6665 goto out;
6666
6667 /* make sure proper unit conversion is applied */
6668 tuned_pa_tactivate =
6669 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6670 / PA_TACTIVATE_TIME_UNIT_US);
6671 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6672 tuned_pa_tactivate);
6673
6674out:
6675 return ret;
6676}
6677
6678/**
6679 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6680 * @hba: per-adapter instance
6681 *
6682 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6683 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6684 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6685 * This optimal value can help reduce the hibern8 exit latency.
6686 *
6687 * Returns zero on success, non-zero error value on failure.
6688 */
6689static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6690{
6691 int ret = 0;
6692 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6693 u32 max_hibern8_time, tuned_pa_hibern8time;
6694
6695 ret = ufshcd_dme_get(hba,
6696 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6697 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6698 &local_tx_hibern8_time_cap);
6699 if (ret)
6700 goto out;
6701
6702 ret = ufshcd_dme_peer_get(hba,
6703 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6704 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6705 &peer_rx_hibern8_time_cap);
6706 if (ret)
6707 goto out;
6708
6709 max_hibern8_time = max(local_tx_hibern8_time_cap,
6710 peer_rx_hibern8_time_cap);
6711 /* make sure proper unit conversion is applied */
6712 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6713 / PA_HIBERN8_TIME_UNIT_US);
6714 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6715 tuned_pa_hibern8time);
6716out:
6717 return ret;
6718}
6719
c6a6db43 6720/**
6721 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6722 * less than device PA_TACTIVATE time.
6723 * @hba: per-adapter instance
6724 *
6725 * Some UFS devices require host PA_TACTIVATE to be lower than device
6726 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
6727 * for such devices.
6728 *
6729 * Returns zero on success, non-zero error value on failure.
6730 */
6731static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6732{
6733 int ret = 0;
6734 u32 granularity, peer_granularity;
6735 u32 pa_tactivate, peer_pa_tactivate;
6736 u32 pa_tactivate_us, peer_pa_tactivate_us;
6737 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6738
6739 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6740 &granularity);
6741 if (ret)
6742 goto out;
6743
6744 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6745 &peer_granularity);
6746 if (ret)
6747 goto out;
6748
6749 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6750 (granularity > PA_GRANULARITY_MAX_VAL)) {
6751 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6752 __func__, granularity);
6753 return -EINVAL;
6754 }
6755
6756 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6757 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6758 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d\n",
6759 __func__, peer_granularity);
6760 return -EINVAL;
6761 }
6762
6763 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6764 if (ret)
6765 goto out;
6766
6767 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6768 &peer_pa_tactivate);
6769 if (ret)
6770 goto out;
6771
6772 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6773 peer_pa_tactivate_us = peer_pa_tactivate *
6774 gran_to_us_table[peer_granularity - 1];
6775
6776 if (pa_tactivate_us > peer_pa_tactivate_us) {
6777 u32 new_peer_pa_tactivate;
6778
6779 new_peer_pa_tactivate = pa_tactivate_us /
6780 gran_to_us_table[peer_granularity - 1];
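	/* round up so the device PA_TACTIVATE ends up above the host value */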
6781 new_peer_pa_tactivate++;
6782 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6783 new_peer_pa_tactivate);
6784 }
6785
6786out:
6787 return ret;
6788}
6789
09750066 6790static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
37113106
YG
6791{
6792 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6793 ufshcd_tune_pa_tactivate(hba);
6794 ufshcd_tune_pa_hibern8time(hba);
6795 }
6796
e91ed9e0
CG
6797 ufshcd_vops_apply_dev_quirks(hba);
6798
37113106
YG
6799 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6800 /* set 1ms timeout for PA_TACTIVATE */
6801 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
c6a6db43 6802
6803 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6804 ufshcd_quirk_tune_host_pa_tactivate(hba);
37113106
YG
6805}
6806
ff8e20c6
DR
6807static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6808{
ff8e20c6
DR
6809 hba->ufs_stats.hibern8_exit_cnt = 0;
6810 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7fabb77b 6811 hba->req_abort_count = 0;
ff8e20c6
DR
6812}
6813
a4b0e8a4
PM
6814static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6815{
6816 int err;
6817
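	/* read each descriptor length; fall back to the spec default size on failure */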
6818 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6819 &hba->desc_size.dev_desc);
6820 if (err)
6821 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6822
6823 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6824 &hba->desc_size.pwr_desc);
6825 if (err)
6826 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6827
6828 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6829 &hba->desc_size.interc_desc);
6830 if (err)
6831 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6832
6833 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6834 &hba->desc_size.conf_desc);
6835 if (err)
6836 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6837
6838 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6839 &hba->desc_size.unit_desc);
6840 if (err)
6841 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6842
6843 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6844 &hba->desc_size.geom_desc);
6845 if (err)
6846 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
059efd84 6847
c648c2d2
SN
6848 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6849 &hba->desc_size.hlth_desc);
6850 if (err)
6851 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
a4b0e8a4
PM
6852}
6853
731f0621
BH
6854static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
6855{
6856 int err;
6857 size_t buff_len;
6858 u8 *desc_buf;
6859
6860 buff_len = hba->desc_size.geom_desc;
6861 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6862 if (!desc_buf) {
6863 err = -ENOMEM;
6864 goto out;
6865 }
6866
6867 err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6868 desc_buf, buff_len);
6869 if (err) {
6870 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
6871 __func__, err);
6872 goto out;
6873 }
6874
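	/* bMaxNumberLU in the geometry descriptor: 0 => 8 LUs supported, 1 => 32 LUs */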
6875 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
6876 hba->dev_info.max_lu_supported = 32;
6877 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
6878 hba->dev_info.max_lu_supported = 8;
6879
6880out:
6881 kfree(desc_buf);
6882 return err;
6883}
6884
9e1e8a75
SJ
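/* map host reference clock rates (Hz) to bRefClkFreq attribute values */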
6885static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
6886 {19200000, REF_CLK_FREQ_19_2_MHZ},
6887 {26000000, REF_CLK_FREQ_26_MHZ},
6888 {38400000, REF_CLK_FREQ_38_4_MHZ},
6889 {52000000, REF_CLK_FREQ_52_MHZ},
6890 {0, REF_CLK_FREQ_INVAL},
6891};
6892
6893static enum ufs_ref_clk_freq
6894ufs_get_bref_clk_from_hz(unsigned long freq)
6895{
6896 int i;
6897
6898 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
6899 if (ufs_ref_clk_freqs[i].freq_hz == freq)
6900 return ufs_ref_clk_freqs[i].val;
6901
6902 return REF_CLK_FREQ_INVAL;
6903}
6904
6905void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
6906{
6907 unsigned long freq;
6908
6909 freq = clk_get_rate(refclk);
6910
6911 hba->dev_ref_clk_freq =
6912 ufs_get_bref_clk_from_hz(freq);
6913
6914 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
6915 dev_err(hba->dev,
6916 "invalid ref_clk setting = %ld\n", freq);
6917}
6918
6919static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6920{
6921 int err;
6922 u32 ref_clk;
6923 u32 freq = hba->dev_ref_clk_freq;
6924
6925 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6926 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6927
6928 if (err) {
6929 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
6930 err);
6931 goto out;
6932 }
6933
6934 if (ref_clk == freq)
6935 goto out; /* nothing to update */
6936
6937 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6938 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
6939
6940 if (err) {
6941 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
6942 ufs_ref_clk_freqs[freq].freq_hz);
6943 goto out;
6944 }
6945
6946 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
6947 ufs_ref_clk_freqs[freq].freq_hz);
6948
6949out:
6950 return err;
6951}
6952
1b9e2141
BH
6953static int ufshcd_device_params_init(struct ufs_hba *hba)
6954{
6955 bool flag;
6956 int ret;
6957
731f0621
BH
6958 /* Clear any previous UFS device information */
6959 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6960
1b9e2141
BH
6961 /* Init check for device descriptor sizes */
6962 ufshcd_init_desc_sizes(hba);
6963
731f0621
BH
6964 /* Init UFS geometry descriptor related parameters */
6965 ret = ufshcd_device_geo_params_init(hba);
6966 if (ret)
6967 goto out;
6968
1b9e2141
BH
6969 /* Check and apply UFS device quirks */
6970 ret = ufs_get_device_desc(hba);
6971 if (ret) {
6972 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6973 __func__, ret);
6974 goto out;
6975 }
6976
09f17791
CG
6977 ufshcd_get_ref_clk_gating_wait(hba);
6978
1b9e2141
BH
6979 ufs_fixup_device_setup(hba);
6980
1b9e2141
BH
6981 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6982 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6983 hba->dev_info.f_power_on_wp_en = flag;
6984
2b35b2ad
BH
6985 /* Probe maximum power mode co-supported by both UFS host and device */
6986 if (ufshcd_get_max_pwr_mode(hba))
6987 dev_err(hba->dev,
6988 "%s: Failed getting max supported power mode\n",
6989 __func__);
1b9e2141
BH
6990out:
6991 return ret;
6992}
6993
6994/**
6995 * ufshcd_add_lus - probe and add UFS logical units
6996 * @hba: per-adapter instance
6997 */
6998static int ufshcd_add_lus(struct ufs_hba *hba)
6999{
7000 int ret;
7001
046c1e6f 7002 ufshcd_init_icc_levels(hba);
1b9e2141
BH
7003
7004 /* Add required well known logical units to scsi mid layer */
7005 ret = ufshcd_scsi_add_wlus(hba);
7006 if (ret)
7007 goto out;
7008
7009 /* Initialize devfreq after UFS device is detected */
7010 if (ufshcd_is_clkscaling_supported(hba)) {
7011 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7012 &hba->pwr_info,
7013 sizeof(struct ufs_pa_layer_attr));
7014 hba->clk_scaling.saved_pwr_info.is_valid = true;
7015 if (!hba->devfreq) {
7016 ret = ufshcd_devfreq_init(hba);
7017 if (ret)
7018 goto out;
7019 }
7020
7021 hba->clk_scaling.is_allowed = true;
7022 }
7023
7024 ufs_bsg_probe(hba);
7025 scsi_scan_host(hba->host);
7026 pm_runtime_put_sync(hba->dev);
7027
1b9e2141
BH
7028out:
7029 return ret;
7030}
7031
6ccf44fe 7032/**
1d337ec2
SRT
7033 * ufshcd_probe_hba - probe hba to detect device and initialize
7034 * @hba: per-adapter instance
1b9e2141 7035 * @async: asynchronous execution or not
1d337ec2
SRT
7036 *
7037 * Execute link-startup and verify device initialization
6ccf44fe 7038 */
1b9e2141 7039static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
6ccf44fe 7040{
6ccf44fe 7041 int ret;
7ff5ab47 7042 ktime_t start = ktime_get();
6ccf44fe
SJ
7043
7044 ret = ufshcd_link_startup(hba);
5a0b0cb9
SRT
7045 if (ret)
7046 goto out;
7047
afdfff59
YG
7048 /* set the default level for urgent bkops */
7049 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
7050 hba->is_urgent_bkops_lvl_checked = false;
7051
ff8e20c6
DR
7052 /* Debug counters initialization */
7053 ufshcd_clear_dbg_ufs_stats(hba);
7054
57d104c1
SJ
7055 /* UniPro link is active now */
7056 ufshcd_set_link_active(hba);
d3e89bac 7057
1b9e2141 7058 /* Verify device initialization by sending NOP OUT UPIU */
5a0b0cb9
SRT
7059 ret = ufshcd_verify_dev_init(hba);
7060 if (ret)
7061 goto out;
68078d5c 7062
1b9e2141 7063 /* Initiate UFS initialization and wait until it completes */
68078d5c
DR
7064 ret = ufshcd_complete_dev_init(hba);
7065 if (ret)
7066 goto out;
5a0b0cb9 7067
1b9e2141
BH
7068 /*
7069 * Initialize UFS device parameters used by the driver; these
7070 * parameters are associated with UFS descriptors.
7071 */
7072 if (async) {
7073 ret = ufshcd_device_params_init(hba);
7074 if (ret)
7075 goto out;
93fdd5ac
TW
7076 }
7077
09750066 7078 ufshcd_tune_unipro_params(hba);
4b828fe1 7079
57d104c1
SJ
7080 /* UFS device is also active now */
7081 ufshcd_set_ufs_dev_active(hba);
66ec6d59 7082 ufshcd_force_reset_auto_bkops(hba);
57d104c1
SJ
7083 hba->wlun_dev_clr_ua = true;
7084
2b35b2ad
BH
7085 /* Gear up to HS gear if supported */
7086 if (hba->max_pwr_info.is_valid) {
9e1e8a75
SJ
7087 /*
7088 * Set the right value to bRefClkFreq before attempting to
7089 * switch to HS gears.
7090 */
7091 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7092 ufshcd_set_dev_ref_clk(hba);
7eb584db 7093 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8643ae66 7094 if (ret) {
7eb584db
DR
7095 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7096 __func__, ret);
8643ae66
DL
7097 goto out;
7098 }
7eb584db 7099 }
57d104c1 7100
53c12d0e
YG
7101 /* set the state as operational after switching to desired gear */
7102 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
a4b0e8a4 7103
71d848b8
CG
7104 /* Enable Auto-Hibernate if configured */
7105 ufshcd_auto_hibern8_enable(hba);
7106
5a0b0cb9 7107out:
1d337ec2 7108
7ff5ab47 7109 trace_ufshcd_init(dev_name(hba->dev), ret,
7110 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 7111 hba->curr_dev_pwr_mode, hba->uic_link_state);
1d337ec2
SRT
7112 return ret;
7113}
7114
7115/**
7116 * ufshcd_async_scan - asynchronous execution for probing hba
7117 * @data: data pointer to pass to this function
7118 * @cookie: cookie data
7119 */
7120static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7121{
7122 struct ufs_hba *hba = (struct ufs_hba *)data;
1b9e2141 7123 int ret;
1d337ec2 7124
1b9e2141
BH
7125 /* Initialize hba, detect and initialize UFS device */
7126 ret = ufshcd_probe_hba(hba, true);
7127 if (ret)
7128 goto out;
7129
7130 /* Probe and add UFS logical units */
7131 ret = ufshcd_add_lus(hba);
7132out:
7133 /*
7134 * If we failed to initialize the device or the device is not
7135 * present, turn off the power/clocks etc.
7136 */
7137 if (ret) {
7138 pm_runtime_put_sync(hba->dev);
7139 ufshcd_exit_clk_scaling(hba);
7140 ufshcd_hba_exit(hba);
7141 }
6ccf44fe
SJ
7142}
7143
d829fc8a
SN
7144static const struct attribute_group *ufshcd_driver_groups[] = {
7145 &ufs_sysfs_unit_descriptor_group,
ec92b59c 7146 &ufs_sysfs_lun_attributes_group,
d829fc8a
SN
7147 NULL,
7148};
7149
7a3e97b0
SY
7150static struct scsi_host_template ufshcd_driver_template = {
7151 .module = THIS_MODULE,
7152 .name = UFSHCD,
7153 .proc_name = UFSHCD,
7154 .queuecommand = ufshcd_queuecommand,
7155 .slave_alloc = ufshcd_slave_alloc,
eeda4749 7156 .slave_configure = ufshcd_slave_configure,
7a3e97b0 7157 .slave_destroy = ufshcd_slave_destroy,
4264fd61 7158 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 7159 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
7160 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7161 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7a3e97b0
SY
7162 .this_id = -1,
7163 .sg_tablesize = SG_ALL,
7164 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7165 .can_queue = UFSHCD_CAN_QUEUE,
552a990c 7166 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
1ab27c9c 7167 .max_host_blocked = 1,
c40ecc12 7168 .track_queue_depth = 1,
d829fc8a 7169 .sdev_groups = ufshcd_driver_groups,
4af14d11 7170 .dma_boundary = PAGE_SIZE - 1,
49615ba1 7171 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
7a3e97b0
SY
7172};
7173
57d104c1
SJ
7174static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7175 int ua)
7176{
7b16a07c 7177 int ret;
57d104c1 7178
7b16a07c
BA
7179 if (!vreg)
7180 return 0;
57d104c1 7181
0487fff7
SC
7182 /*
7183 * "set_load" operation shall be required on those regulators
7184 * which specifically configured current limitation. Otherwise
7185 * zero max_uA may cause unexpected behavior when regulator is
7186 * enabled or set as high power mode.
7187 */
7188 if (!vreg->max_uA)
7189 return 0;
7190
7b16a07c
BA
7191 ret = regulator_set_load(vreg->reg, ua);
7192 if (ret < 0) {
7193 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7194 __func__, vreg->name, ua, ret);
57d104c1
SJ
7195 }
7196
7197 return ret;
7198}
7199
7200static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7201 struct ufs_vreg *vreg)
7202{
73067981 7203 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
57d104c1
SJ
7204}
7205
7206static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7207 struct ufs_vreg *vreg)
7208{
7c7cfdcf
AH
7209 if (!vreg)
7210 return 0;
7211
73067981 7212 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
57d104c1
SJ
7213}
7214
aa497613
SRT
7215static int ufshcd_config_vreg(struct device *dev,
7216 struct ufs_vreg *vreg, bool on)
7217{
7218 int ret = 0;
72753590
GS
7219 struct regulator *reg;
7220 const char *name;
aa497613
SRT
7221 int min_uV, uA_load;
7222
7223 BUG_ON(!vreg);
7224
72753590
GS
7225 reg = vreg->reg;
7226 name = vreg->name;
7227
aa497613 7228 if (regulator_count_voltages(reg) > 0) {
90d88f47
AD
7229 uA_load = on ? vreg->max_uA : 0;
7230 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7231 if (ret)
7232 goto out;
7233
3b141e8c
SC
7234 if (vreg->min_uV && vreg->max_uV) {
7235 min_uV = on ? vreg->min_uV : 0;
7236 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7237 if (ret) {
7238 dev_err(dev,
7239 "%s: %s set voltage failed, err=%d\n",
aa497613 7240 __func__, name, ret);
3b141e8c
SC
7241 goto out;
7242 }
aa497613 7243 }
aa497613
SRT
7244 }
7245out:
7246 return ret;
7247}
7248
7249static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7250{
7251 int ret = 0;
7252
73067981 7253 if (!vreg || vreg->enabled)
aa497613
SRT
7254 goto out;
7255
7256 ret = ufshcd_config_vreg(dev, vreg, true);
7257 if (!ret)
7258 ret = regulator_enable(vreg->reg);
7259
7260 if (!ret)
7261 vreg->enabled = true;
7262 else
7263 dev_err(dev, "%s: %s enable failed, err=%d\n",
7264 __func__, vreg->name, ret);
7265out:
7266 return ret;
7267}
7268
7269static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7270{
7271 int ret = 0;
7272
73067981 7273 if (!vreg || !vreg->enabled)
aa497613
SRT
7274 goto out;
7275
7276 ret = regulator_disable(vreg->reg);
7277
7278 if (!ret) {
7279 /* ignore errors on applying disable config */
7280 ufshcd_config_vreg(dev, vreg, false);
7281 vreg->enabled = false;
7282 } else {
7283 dev_err(dev, "%s: %s disable failed, err=%d\n",
7284 __func__, vreg->name, ret);
7285 }
7286out:
7287 return ret;
7288}
7289
7290static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7291{
7292 int ret = 0;
7293 struct device *dev = hba->dev;
7294 struct ufs_vreg_info *info = &hba->vreg_info;
7295
aa497613
SRT
7296 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7297 if (ret)
7298 goto out;
7299
7300 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7301 if (ret)
7302 goto out;
7303
7304 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7305 if (ret)
7306 goto out;
7307
7308out:
7309 if (ret) {
7310 ufshcd_toggle_vreg(dev, info->vccq2, false);
7311 ufshcd_toggle_vreg(dev, info->vccq, false);
7312 ufshcd_toggle_vreg(dev, info->vcc, false);
7313 }
7314 return ret;
7315}
7316
6a771a65
RS
7317static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7318{
7319 struct ufs_vreg_info *info = &hba->vreg_info;
7320
60b7b823 7321 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6a771a65
RS
7322}
7323
aa497613
SRT
7324static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7325{
7326 int ret = 0;
7327
7328 if (!vreg)
7329 goto out;
7330
7331 vreg->reg = devm_regulator_get(dev, vreg->name);
7332 if (IS_ERR(vreg->reg)) {
7333 ret = PTR_ERR(vreg->reg);
7334 dev_err(dev, "%s: %s get failed, err=%d\n",
7335 __func__, vreg->name, ret);
7336 }
7337out:
7338 return ret;
7339}
7340
7341static int ufshcd_init_vreg(struct ufs_hba *hba)
7342{
7343 int ret = 0;
7344 struct device *dev = hba->dev;
7345 struct ufs_vreg_info *info = &hba->vreg_info;
7346
aa497613
SRT
7347 ret = ufshcd_get_vreg(dev, info->vcc);
7348 if (ret)
7349 goto out;
7350
7351 ret = ufshcd_get_vreg(dev, info->vccq);
7352 if (ret)
7353 goto out;
7354
7355 ret = ufshcd_get_vreg(dev, info->vccq2);
7356out:
7357 return ret;
7358}
7359
6a771a65
RS
7360static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7361{
7362 struct ufs_vreg_info *info = &hba->vreg_info;
7363
7364 if (info)
7365 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7366
7367 return 0;
7368}
7369
57d104c1
SJ
7370static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7371 bool skip_ref_clk)
c6e79dac
SRT
7372{
7373 int ret = 0;
7374 struct ufs_clk_info *clki;
7375 struct list_head *head = &hba->clk_list_head;
1ab27c9c 7376 unsigned long flags;
911a0771 7377 ktime_t start = ktime_get();
7378 bool clk_state_changed = false;
c6e79dac 7379
566ec9ad 7380 if (list_empty(head))
c6e79dac
SRT
7381 goto out;
7382
38f3242e
CG
7383 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7384 if (ret)
7385 return ret;
1e879e8f 7386
c6e79dac
SRT
7387 list_for_each_entry(clki, head, list) {
7388 if (!IS_ERR_OR_NULL(clki->clk)) {
57d104c1
SJ
7389 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7390 continue;
7391
911a0771 7392 clk_state_changed = on ^ clki->enabled;
c6e79dac
SRT
7393 if (on && !clki->enabled) {
7394 ret = clk_prepare_enable(clki->clk);
7395 if (ret) {
7396 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7397 __func__, clki->name, ret);
7398 goto out;
7399 }
7400 } else if (!on && clki->enabled) {
7401 clk_disable_unprepare(clki->clk);
7402 }
7403 clki->enabled = on;
7404 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7405 clki->name, on ? "en" : "dis");
7406 }
7407 }
1ab27c9c 7408
38f3242e
CG
7409 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7410 if (ret)
7411 return ret;
1e879e8f 7412
c6e79dac
SRT
7413out:
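	/* on failure, disable all clocks that are currently marked enabled */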
7414 if (ret) {
7415 list_for_each_entry(clki, head, list) {
7416 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7417 clk_disable_unprepare(clki->clk);
7418 }
7ff5ab47 7419 } else if (!ret && on) {
1ab27c9c
ST
7420 spin_lock_irqsave(hba->host->host_lock, flags);
7421 hba->clk_gating.state = CLKS_ON;
7ff5ab47 7422 trace_ufshcd_clk_gating(dev_name(hba->dev),
7423 hba->clk_gating.state);
1ab27c9c 7424 spin_unlock_irqrestore(hba->host->host_lock, flags);
c6e79dac 7425 }
7ff5ab47 7426
911a0771 7427 if (clk_state_changed)
7428 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7429 (on ? "on" : "off"),
7430 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
c6e79dac
SRT
7431 return ret;
7432}
7433
57d104c1
SJ
7434static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7435{
7436 return __ufshcd_setup_clocks(hba, on, false);
7437}
7438
c6e79dac
SRT
7439static int ufshcd_init_clocks(struct ufs_hba *hba)
7440{
7441 int ret = 0;
7442 struct ufs_clk_info *clki;
7443 struct device *dev = hba->dev;
7444 struct list_head *head = &hba->clk_list_head;
7445
566ec9ad 7446 if (list_empty(head))
c6e79dac
SRT
7447 goto out;
7448
7449 list_for_each_entry(clki, head, list) {
7450 if (!clki->name)
7451 continue;
7452
7453 clki->clk = devm_clk_get(dev, clki->name);
7454 if (IS_ERR(clki->clk)) {
7455 ret = PTR_ERR(clki->clk);
7456 dev_err(dev, "%s: %s clk get failed, %d\n",
7457 __func__, clki->name, ret);
7458 goto out;
7459 }
7460
9e1e8a75
SJ
7461 /*
7462 * Parse device ref clk freq as per device tree "ref_clk".
7463 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7464 * in ufshcd_alloc_host().
7465 */
7466 if (!strcmp(clki->name, "ref_clk"))
7467 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7468
c6e79dac
SRT
7469 if (clki->max_freq) {
7470 ret = clk_set_rate(clki->clk, clki->max_freq);
7471 if (ret) {
7472 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7473 __func__, clki->name,
7474 clki->max_freq, ret);
7475 goto out;
7476 }
856b3483 7477 clki->curr_freq = clki->max_freq;
c6e79dac
SRT
7478 }
7479 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7480 clki->name, clk_get_rate(clki->clk));
7481 }
7482out:
7483 return ret;
7484}
7485
5c0c28a8
SRT
7486static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7487{
7488 int err = 0;
7489
7490 if (!hba->vops)
7491 goto out;
7492
0263bcd0
YG
7493 err = ufshcd_vops_init(hba);
7494 if (err)
7495 goto out;
5c0c28a8 7496
0263bcd0
YG
7497 err = ufshcd_vops_setup_regulators(hba, true);
7498 if (err)
7499 goto out_exit;
5c0c28a8
SRT
7500
7501 goto out;
7502
5c0c28a8 7503out_exit:
0263bcd0 7504 ufshcd_vops_exit(hba);
5c0c28a8
SRT
7505out:
7506 if (err)
7507 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
0263bcd0 7508 __func__, ufshcd_get_var_name(hba), err);
5c0c28a8
SRT
7509 return err;
7510}
7511
7512static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7513{
7514 if (!hba->vops)
7515 return;
7516
0263bcd0 7517 ufshcd_vops_setup_regulators(hba, false);
5c0c28a8 7518
0263bcd0 7519 ufshcd_vops_exit(hba);
5c0c28a8
SRT
7520}
7521
aa497613
SRT
7522static int ufshcd_hba_init(struct ufs_hba *hba)
7523{
7524 int err;
7525
6a771a65
RS
7526 /*
7527 * Handle host controller power separately from the UFS device power
7528 * rails, as it makes it easier to control UFS host controller power
7529 * collapse, which is different from UFS device power collapse.
7530 * Also, enable the host controller power before we go ahead with the rest
7531 * of the initialization here.
7532 */
7533 err = ufshcd_init_hba_vreg(hba);
aa497613
SRT
7534 if (err)
7535 goto out;
7536
6a771a65 7537 err = ufshcd_setup_hba_vreg(hba, true);
aa497613
SRT
7538 if (err)
7539 goto out;
7540
6a771a65
RS
7541 err = ufshcd_init_clocks(hba);
7542 if (err)
7543 goto out_disable_hba_vreg;
7544
7545 err = ufshcd_setup_clocks(hba, true);
7546 if (err)
7547 goto out_disable_hba_vreg;
7548
c6e79dac
SRT
7549 err = ufshcd_init_vreg(hba);
7550 if (err)
7551 goto out_disable_clks;
7552
7553 err = ufshcd_setup_vreg(hba, true);
7554 if (err)
7555 goto out_disable_clks;
7556
aa497613
SRT
7557 err = ufshcd_variant_hba_init(hba);
7558 if (err)
7559 goto out_disable_vreg;
7560
1d337ec2 7561 hba->is_powered = true;
aa497613
SRT
7562 goto out;
7563
7564out_disable_vreg:
7565 ufshcd_setup_vreg(hba, false);
c6e79dac
SRT
7566out_disable_clks:
7567 ufshcd_setup_clocks(hba, false);
6a771a65
RS
7568out_disable_hba_vreg:
7569 ufshcd_setup_hba_vreg(hba, false);
aa497613
SRT
7570out:
7571 return err;
7572}
7573
7574static void ufshcd_hba_exit(struct ufs_hba *hba)
7575{
1d337ec2
SRT
7576 if (hba->is_powered) {
7577 ufshcd_variant_hba_exit(hba);
7578 ufshcd_setup_vreg(hba, false);
a508253d 7579 ufshcd_suspend_clkscaling(hba);
eebcc196 7580 if (ufshcd_is_clkscaling_supported(hba))
0701e49d 7581 if (hba->devfreq)
7582 ufshcd_suspend_clkscaling(hba);
1d337ec2
SRT
7583 ufshcd_setup_clocks(hba, false);
7584 ufshcd_setup_hba_vreg(hba, false);
7585 hba->is_powered = false;
09750066 7586 ufs_put_device_desc(hba);
1d337ec2 7587 }
aa497613
SRT
7588}
7589
57d104c1
SJ
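/* issue REQUEST SENSE to clear a pending unit attention on the given device */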
7590static int
7591ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7592{
7593 unsigned char cmd[6] = {REQUEST_SENSE,
7594 0,
7595 0,
7596 0,
09a5a24f 7597 UFS_SENSE_SIZE,
57d104c1
SJ
7598 0};
7599 char *buffer;
7600 int ret;
7601
09a5a24f 7602 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
57d104c1
SJ
7603 if (!buffer) {
7604 ret = -ENOMEM;
7605 goto out;
7606 }
7607
fcbfffe2 7608 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
09a5a24f 7609 UFS_SENSE_SIZE, NULL, NULL,
fcbfffe2 7610 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
57d104c1
SJ
7611 if (ret)
7612 pr_err("%s: failed with err %d\n", __func__, ret);
7613
7614 kfree(buffer);
7615out:
7616 return ret;
7617}
7618
7619/**
7620 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7621 * power mode
7622 * @hba: per adapter instance
7623 * @pwr_mode: device power mode to set
7624 *
7625 * Returns 0 if requested power mode is set successfully
7626 * Returns non-zero if failed to set the requested power mode
7627 */
7628static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7629 enum ufs_dev_pwr_mode pwr_mode)
7630{
7631 unsigned char cmd[6] = { START_STOP };
7632 struct scsi_sense_hdr sshdr;
7c48bfd0
AM
7633 struct scsi_device *sdp;
7634 unsigned long flags;
57d104c1
SJ
7635 int ret;
7636
7c48bfd0
AM
7637 spin_lock_irqsave(hba->host->host_lock, flags);
7638 sdp = hba->sdev_ufs_device;
7639 if (sdp) {
7640 ret = scsi_device_get(sdp);
7641 if (!ret && !scsi_device_online(sdp)) {
7642 ret = -ENODEV;
7643 scsi_device_put(sdp);
7644 }
7645 } else {
7646 ret = -ENODEV;
7647 }
7648 spin_unlock_irqrestore(hba->host->host_lock, flags);
7649
7650 if (ret)
7651 return ret;
57d104c1
SJ
7652
7653 /*
7654 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7655 * handling, which would wait for host to be resumed. Since we know
7656 * we are functional while we are here, skip host resume in error
7657 * handling context.
7658 */
7659 hba->host->eh_noresume = 1;
7660 if (hba->wlun_dev_clr_ua) {
7661 ret = ufshcd_send_request_sense(hba, sdp);
7662 if (ret)
7663 goto out;
7664 /* Unit attention condition is cleared now */
7665 hba->wlun_dev_clr_ua = false;
7666 }
7667
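	/* START STOP UNIT: the power condition field is in CDB byte 4, bits 7:4 */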
7668 cmd[4] = pwr_mode << 4;
7669
7670 /*
7671 * This function is generally called from the power management
e8064021 7672 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
57d104c1
SJ
7673 * already suspended children.
7674 */
fcbfffe2
CH
7675 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7676 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
57d104c1
SJ
7677 if (ret) {
7678 sdev_printk(KERN_WARNING, sdp,
ef61329d
HR
7679 "START_STOP failed for power mode: %d, result %x\n",
7680 pwr_mode, ret);
c65be1a6 7681 if (driver_byte(ret) == DRIVER_SENSE)
21045519 7682 scsi_print_sense_hdr(sdp, NULL, &sshdr);
57d104c1
SJ
7683 }
7684
7685 if (!ret)
7686 hba->curr_dev_pwr_mode = pwr_mode;
7687out:
7c48bfd0 7688 scsi_device_put(sdp);
57d104c1
SJ
7689 hba->host->eh_noresume = 0;
7690 return ret;
7691}
7692
7693static int ufshcd_link_state_transition(struct ufs_hba *hba,
7694 enum uic_link_state req_link_state,
7695 int check_for_bkops)
7696{
7697 int ret = 0;
7698
7699 if (req_link_state == hba->uic_link_state)
7700 return 0;
7701
7702 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7703 ret = ufshcd_uic_hibern8_enter(hba);
7704 if (!ret)
7705 ufshcd_set_link_hibern8(hba);
7706 else
7707 goto out;
7708 }
7709 /*
7710 * If autobkops is enabled, link can't be turned off because
7711 * turning off the link would also turn off the device.
7712 */
7713 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
dc30c9e6 7714 (!check_for_bkops || !hba->auto_bkops_enabled)) {
f3099fbd
YG
7715 /*
7716 * Let's make sure that the link is in low power mode; we currently
7717 * do this by putting the link in Hibern8. Another way to put the
7718 * link in low power mode is to send the DME end point to the device
7719 * and then send the DME reset command to the local UniPro. But
7720 * putting the link in Hibern8 is much faster.
7721 */
7722 ret = ufshcd_uic_hibern8_enter(hba);
7723 if (ret)
7724 goto out;
57d104c1
SJ
7725 /*
7726 * Change controller state to "reset state" which
7727 * should also put the link in off/reset state
7728 */
596585a2 7729 ufshcd_hba_stop(hba, true);
57d104c1
SJ
7730 /*
7731 * TODO: Check if we need any delay to make sure that
7732 * controller is reset
7733 */
7734 ufshcd_set_link_off(hba);
7735 }
7736
7737out:
7738 return ret;
7739}
7740
7741static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7742{
b799fdf7
YG
7743 /*
7744 * It seems some UFS devices may keep drawing more than sleep current
7745 * (at least for 500us) from UFS rails (especially from VCCQ rail).
7746 * To avoid this situation, add 2ms delay before putting these UFS
7747 * rails in LPM mode.
7748 */
7749 if (!ufshcd_is_link_active(hba) &&
7750 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7751 usleep_range(2000, 2100);
7752
57d104c1
SJ
7753 /*
7754 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7755 * save some power.
7756 *
7757 * If UFS device and link is in OFF state, all power supplies (VCC,
7758 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7759 * required. If UFS link is inactive (Hibern8 or OFF state) and device
7760 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7761 *
7762 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
7763 * in low power state which would save some power.
7764 */
7765 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7766 !hba->dev_info.is_lu_power_on_wp) {
7767 ufshcd_setup_vreg(hba, false);
7768 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7769 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7770 if (!ufshcd_is_link_active(hba)) {
7771 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7772 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7773 }
7774 }
7775}
7776
7777static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7778{
7779 int ret = 0;
7780
7781 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7782 !hba->dev_info.is_lu_power_on_wp) {
7783 ret = ufshcd_setup_vreg(hba, true);
7784 } else if (!ufshcd_is_ufs_dev_active(hba)) {
57d104c1
SJ
7785 if (!ret && !ufshcd_is_link_active(hba)) {
7786 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7787 if (ret)
7788 goto vcc_disable;
7789 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7790 if (ret)
7791 goto vccq_lpm;
7792 }
69d72ac8 7793 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
57d104c1
SJ
7794 }
7795 goto out;
7796
7797vccq_lpm:
7798 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7799vcc_disable:
7800 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7801out:
7802 return ret;
7803}
7804
7805static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7806{
7807 if (ufshcd_is_link_off(hba))
7808 ufshcd_setup_hba_vreg(hba, false);
7809}
7810
7811static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7812{
7813 if (ufshcd_is_link_off(hba))
7814 ufshcd_setup_hba_vreg(hba, true);
7815}
7816
7a3e97b0 7817/**
57d104c1 7818 * ufshcd_suspend - helper function for suspend operations
3b1d0580 7819 * @hba: per adapter instance
57d104c1
SJ
7820 * @pm_op: desired low power operation type
7821 *
7822 * This function will try to put the UFS device and link into low power
7823 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7824 * (System PM level).
7825 *
7826 * If this function is called during shutdown, it will make sure that
7827 * both the UFS device and UFS link are powered off.
7a3e97b0 7828 *
57d104c1
SJ
7829 * NOTE: UFS device & link must be active before we enter this function.
7830 *
7831 * Returns 0 for success and non-zero for failure
7a3e97b0 7832 */
57d104c1 7833static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 7834{
57d104c1
SJ
7835 int ret = 0;
7836 enum ufs_pm_level pm_lvl;
7837 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7838 enum uic_link_state req_link_state;
7839
7840 hba->pm_op_in_progress = 1;
7841 if (!ufshcd_is_shutdown_pm(pm_op)) {
7842 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7843 hba->rpm_lvl : hba->spm_lvl;
7844 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7845 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7846 } else {
7847 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7848 req_link_state = UIC_LINK_OFF_STATE;
7849 }
7850
7a3e97b0 7851 /*
57d104c1
SJ
7852 * If we can't transition into any of the low power modes
7853 * just gate the clocks.
7a3e97b0 7854 */
1ab27c9c
ST
7855 ufshcd_hold(hba, false);
7856 hba->clk_gating.is_suspended = true;
7857
401f1e44 7858 if (hba->clk_scaling.is_allowed) {
7859 cancel_work_sync(&hba->clk_scaling.suspend_work);
7860 cancel_work_sync(&hba->clk_scaling.resume_work);
7861 ufshcd_suspend_clkscaling(hba);
7862 }
d6fcf81a 7863
57d104c1
SJ
7864 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7865 req_link_state == UIC_LINK_ACTIVE_STATE) {
7866 goto disable_clks;
7867 }
7a3e97b0 7868
57d104c1
SJ
7869 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7870 (req_link_state == hba->uic_link_state))
d6fcf81a 7871 goto enable_gating;
57d104c1
SJ
7872
7873 /* UFS device & link must be active before we enter in this function */
7874 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7875 ret = -EINVAL;
d6fcf81a 7876 goto enable_gating;
57d104c1
SJ
7877 }
7878
7879 if (ufshcd_is_runtime_pm(pm_op)) {
374a246e
SJ
7880 if (ufshcd_can_autobkops_during_suspend(hba)) {
7881 /*
7882 * The device is idle with no requests in the queue,
7883 * allow background operations if bkops status shows
7884 * that performance might be impacted.
7885 */
7886 ret = ufshcd_urgent_bkops(hba);
7887 if (ret)
7888 goto enable_gating;
7889 } else {
7890 /* make sure that auto bkops is disabled */
7891 ufshcd_disable_auto_bkops(hba);
7892 }
57d104c1
SJ
7893 }
7894
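	/* switch the device power mode unless this is runtime PM with auto-bkops still enabled */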
7895 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7896 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7897 !ufshcd_is_runtime_pm(pm_op))) {
7898 /* ensure that bkops is disabled */
7899 ufshcd_disable_auto_bkops(hba);
7900 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7901 if (ret)
1ab27c9c 7902 goto enable_gating;
57d104c1
SJ
7903 }
7904
2824ec9f 7905 flush_work(&hba->eeh_work);
57d104c1
SJ
7906 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7907 if (ret)
7908 goto set_dev_active;
7909
7910 ufshcd_vreg_set_lpm(hba);
7911
7912disable_clks:
7913 /*
7914 * Call vendor specific suspend callback. As these callbacks may access
7915 * vendor specific host controller register space, call them while the
7916 * host clocks are still ON.
7917 */
0263bcd0
YG
7918 ret = ufshcd_vops_suspend(hba, pm_op);
7919 if (ret)
7920 goto set_link_active;
dcb6cec5
SC
7921 /*
7922 * Disable the host irq as there won't be any host controller
7923 * transaction expected till resume.
7924 */
7925 ufshcd_disable_irq(hba);
57d104c1 7926
57d104c1
SJ
7927 if (!ufshcd_is_link_active(hba))
7928 ufshcd_setup_clocks(hba, false);
7929 else
7930 /* If link is active, device ref_clk can't be switched off */
7931 __ufshcd_setup_clocks(hba, false, true);
7932
1ab27c9c 7933 hba->clk_gating.state = CLKS_OFF;
7ff5ab47 7934 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
dcb6cec5 7935
57d104c1
SJ
7936 /* Put the host controller in low power mode if possible */
7937 ufshcd_hba_vreg_set_lpm(hba);
7938 goto out;
7939
57d104c1 7940set_link_active:
401f1e44 7941 if (hba->clk_scaling.is_allowed)
7942 ufshcd_resume_clkscaling(hba);
57d104c1
SJ
7943 ufshcd_vreg_set_hpm(hba);
7944 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7945 ufshcd_set_link_active(hba);
7946 else if (ufshcd_is_link_off(hba))
7947 ufshcd_host_reset_and_restore(hba);
7948set_dev_active:
7949 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7950 ufshcd_disable_auto_bkops(hba);
1ab27c9c 7951enable_gating:
401f1e44 7952 if (hba->clk_scaling.is_allowed)
7953 ufshcd_resume_clkscaling(hba);
1ab27c9c
ST
7954 hba->clk_gating.is_suspended = false;
7955 ufshcd_release(hba);
57d104c1
SJ
7956out:
7957 hba->pm_op_in_progress = 0;
8808b4e9
SC
7958 if (ret)
7959 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
57d104c1 7960 return ret;
7a3e97b0
SY
7961}
7962
7963/**
57d104c1 7964 * ufshcd_resume - helper function for resume operations
3b1d0580 7965 * @hba: per adapter instance
57d104c1 7966 * @pm_op: runtime PM or system PM
7a3e97b0 7967 *
57d104c1
SJ
7968 * This function basically brings the UFS device, UniPro link and controller
7969 * to active state.
7970 *
7971 * Returns 0 for success and non-zero for failure
7a3e97b0 7972 */
57d104c1 7973static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 7974{
57d104c1
SJ
7975 int ret;
7976 enum uic_link_state old_link_state;
7977
7978 hba->pm_op_in_progress = 1;
7979 old_link_state = hba->uic_link_state;
7980
7981 ufshcd_hba_vreg_set_hpm(hba);
7982 /* Make sure clocks are enabled before accessing controller */
7983 ret = ufshcd_setup_clocks(hba, true);
7984 if (ret)
7985 goto out;
7986
57d104c1 7987 /* enable the host irq as host controller would be active soon */
5231d38c 7988 ufshcd_enable_irq(hba);
57d104c1
SJ
7989
7990 ret = ufshcd_vreg_set_hpm(hba);
7991 if (ret)
7992 goto disable_irq_and_vops_clks;
7993
7a3e97b0 7994 /*
57d104c1
SJ
7995 * Call vendor specific resume callback. As these callbacks may access
7996 * vendor specific host controller register space call them when the
7997 * host clocks are ON.
7a3e97b0 7998 */
0263bcd0
YG
7999 ret = ufshcd_vops_resume(hba, pm_op);
8000 if (ret)
8001 goto disable_vreg;
57d104c1
SJ
8002
8003 if (ufshcd_is_link_hibern8(hba)) {
8004 ret = ufshcd_uic_hibern8_exit(hba);
8005 if (!ret)
8006 ufshcd_set_link_active(hba);
8007 else
8008 goto vendor_suspend;
8009 } else if (ufshcd_is_link_off(hba)) {
8010 ret = ufshcd_host_reset_and_restore(hba);
8011 /*
8012 * ufshcd_host_reset_and_restore() should have already
8013 * set the link state as active
8014 */
8015 if (ret || !ufshcd_is_link_active(hba))
8016 goto vendor_suspend;
8017 }
8018
8019 if (!ufshcd_is_ufs_dev_active(hba)) {
8020 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8021 if (ret)
8022 goto set_old_link_state;
8023 }
8024
4e768e76 8025 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8026 ufshcd_enable_auto_bkops(hba);
8027 else
8028 /*
8029 * If BKOPs operations are urgently needed at this moment then
8030 * keep auto-bkops enabled or else disable it.
8031 */
8032 ufshcd_urgent_bkops(hba);
8033
1ab27c9c
ST
8034 hba->clk_gating.is_suspended = false;
8035
fcb0c4b0
ST
8036 if (hba->clk_scaling.is_allowed)
8037 ufshcd_resume_clkscaling(hba);
856b3483 8038
ad448378
AH
8039 /* Enable Auto-Hibernate if configured */
8040 ufshcd_auto_hibern8_enable(hba);
8041
71d848b8
CG
8042 /* Schedule clock gating in case of no access to UFS device yet */
8043 ufshcd_release(hba);
8044
57d104c1
SJ
8045 goto out;
8046
8047set_old_link_state:
8048 ufshcd_link_state_transition(hba, old_link_state, 0);
8049vendor_suspend:
0263bcd0 8050 ufshcd_vops_suspend(hba, pm_op);
57d104c1
SJ
8051disable_vreg:
8052 ufshcd_vreg_set_lpm(hba);
8053disable_irq_and_vops_clks:
8054 ufshcd_disable_irq(hba);
401f1e44 8055 if (hba->clk_scaling.is_allowed)
8056 ufshcd_suspend_clkscaling(hba);
57d104c1
SJ
8057 ufshcd_setup_clocks(hba, false);
8058out:
8059 hba->pm_op_in_progress = 0;
8808b4e9
SC
8060 if (ret)
8061 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
57d104c1
SJ
8062 return ret;
8063}
8064
8065/**
8066 * ufshcd_system_suspend - system suspend routine
8067 * @hba: per adapter instance
57d104c1
SJ
8068 *
8069 * Check the description of ufshcd_suspend() function for more details.
8070 *
8071 * Returns 0 for success and non-zero for failure
8072 */
8073int ufshcd_system_suspend(struct ufs_hba *hba)
8074{
8075 int ret = 0;
7ff5ab47 8076 ktime_t start = ktime_get();
57d104c1
SJ
8077
8078 if (!hba || !hba->is_powered)
233b594b 8079 return 0;
57d104c1 8080
0b257734 8081 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8082 hba->curr_dev_pwr_mode) &&
8083 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8084 hba->uic_link_state))
8085 goto out;
57d104c1 8086
0b257734 8087 if (pm_runtime_suspended(hba->dev)) {
57d104c1
SJ
8088 /*
8089 * UFS device and/or UFS link low power states during runtime
8090 * suspend seem to be different from what is expected during
8091 * system suspend. Hence runtime resume the device & link and
8092 * let the system suspend low power states take effect.
8093 * TODO: If resume takes a longer time, we might optimize
8094 * it in the future by not resuming everything if possible.
8095 */
8096 ret = ufshcd_runtime_resume(hba);
8097 if (ret)
8098 goto out;
8099 }
8100
8101 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8102out:
7ff5ab47 8103 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8104 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8105 hba->curr_dev_pwr_mode, hba->uic_link_state);
e785060e
DR
8106 if (!ret)
8107 hba->is_sys_suspended = true;
57d104c1
SJ
8108 return ret;
8109}
8110EXPORT_SYMBOL(ufshcd_system_suspend);
8111
8112/**
8113 * ufshcd_system_resume - system resume routine
8114 * @hba: per adapter instance
8115 *
8116 * Returns 0 for success and non-zero for failure
8117 */
7a3e97b0 8118
57d104c1
SJ
8119int ufshcd_system_resume(struct ufs_hba *hba)
8120{
7ff5ab47 8121 int ret = 0;
8122 ktime_t start = ktime_get();
8123
e3ce73d6
YG
8124 if (!hba)
8125 return -EINVAL;
8126
8127 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
57d104c1
SJ
8128 /*
8129 * Let the runtime resume take care of resuming
8130 * if runtime suspended.
8131 */
7ff5ab47 8132 goto out;
8133 else
8134 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8135out:
8136 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8137 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8138 hba->curr_dev_pwr_mode, hba->uic_link_state);
ce9e7bce
SC
8139 if (!ret)
8140 hba->is_sys_suspended = false;
7ff5ab47 8141 return ret;
7a3e97b0 8142}
57d104c1 8143EXPORT_SYMBOL(ufshcd_system_resume);
3b1d0580 8144
57d104c1
SJ
8145/**
8146 * ufshcd_runtime_suspend - runtime suspend routine
8147 * @hba: per adapter instance
8148 *
8149 * Check the description of ufshcd_suspend() function for more details.
8150 *
8151 * Returns 0 for success and non-zero for failure
8152 */
66ec6d59
SRT
8153int ufshcd_runtime_suspend(struct ufs_hba *hba)
8154{
7ff5ab47 8155 int ret = 0;
8156 ktime_t start = ktime_get();
8157
e3ce73d6
YG
8158 if (!hba)
8159 return -EINVAL;
8160
8161 if (!hba->is_powered)
7ff5ab47 8162 goto out;
8163 else
8164 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8165out:
8166 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8167 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8168 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 8169 return ret;
66ec6d59
SRT
8170}
8171EXPORT_SYMBOL(ufshcd_runtime_suspend);
8172
57d104c1
SJ
8173/**
8174 * ufshcd_runtime_resume - runtime resume routine
8175 * @hba: per adapter instance
8176 *
8177 * This function basically brings the UFS device, UniPro link and controller
8178 * to active state. Following operations are done in this function:
8179 *
8180 * 1. Turn on all the controller related clocks
8181 * 2. Bring the UniPro link out of Hibernate state
8182 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8183 * to active state.
8184 * 4. If auto-bkops is enabled on the device, disable it.
8185 *
8186 * The following would be the possible power state after this function returns
8187 * successfully:
8188 * S1: UFS device in Active state with VCC rail ON
8189 * UniPro link in Active state
8190 * All the UFS/UniPro controller clocks are ON
8191 *
8192 * Returns 0 for success and non-zero for failure
8193 */
66ec6d59
SRT
8194int ufshcd_runtime_resume(struct ufs_hba *hba)
8195{
7ff5ab47 8196 int ret = 0;
8197 ktime_t start = ktime_get();
8198
e3ce73d6
YG
8199 if (!hba)
8200 return -EINVAL;
8201
8202 if (!hba->is_powered)
7ff5ab47 8203 goto out;
8204 else
8205 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8206out:
8207 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8208 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8209 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 8210 return ret;
66ec6d59
SRT
8211}
8212EXPORT_SYMBOL(ufshcd_runtime_resume);
8213
8214int ufshcd_runtime_idle(struct ufs_hba *hba)
8215{
8216 return 0;
8217}
8218EXPORT_SYMBOL(ufshcd_runtime_idle);
8219
57d104c1
SJ
8220/**
8221 * ufshcd_shutdown - shutdown routine
8222 * @hba: per adapter instance
8223 *
8224 * This function would power off both UFS device and UFS link.
8225 *
8226 * Returns 0 always to allow force shutdown even in case of errors.
8227 */
8228int ufshcd_shutdown(struct ufs_hba *hba)
8229{
8230 int ret = 0;
8231
f51913ee
SC
8232 if (!hba->is_powered)
8233 goto out;
8234
57d104c1
SJ
8235 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8236 goto out;
8237
8238 if (pm_runtime_suspended(hba->dev)) {
8239 ret = ufshcd_runtime_resume(hba);
8240 if (ret)
8241 goto out;
8242 }
8243
8244 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8245out:
8246 if (ret)
8247 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8248 /* allow force shutdown even in case of errors */
8249 return 0;
8250}
8251EXPORT_SYMBOL(ufshcd_shutdown);
8252
7a3e97b0 8253/**
3b1d0580 8254 * ufshcd_remove - de-allocate SCSI host and host memory space
7a3e97b0 8255 * data structure memory
8aa29f19 8256 * @hba: per adapter instance
7a3e97b0 8257 */
3b1d0580 8258void ufshcd_remove(struct ufs_hba *hba)
7a3e97b0 8259{
df032bf2 8260 ufs_bsg_remove(hba);
cbb6813e 8261 ufs_sysfs_remove_nodes(hba->dev);
69a6c269
BVA
8262 blk_cleanup_queue(hba->tmf_queue);
8263 blk_mq_free_tag_set(&hba->tmf_tag_set);
7252a360 8264 blk_cleanup_queue(hba->cmd_queue);
cfdf9c91 8265 scsi_remove_host(hba->host);
7a3e97b0 8266 /* disable interrupts */
2fbd009b 8267 ufshcd_disable_intr(hba, hba->intr_mask);
596585a2 8268 ufshcd_hba_stop(hba, true);
7a3e97b0 8269
eebcc196 8270 ufshcd_exit_clk_scaling(hba);
1ab27c9c 8271 ufshcd_exit_clk_gating(hba);
fcb0c4b0
ST
8272 if (ufshcd_is_clkscaling_supported(hba))
8273 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
aa497613 8274 ufshcd_hba_exit(hba);
3b1d0580
VH
8275}
8276EXPORT_SYMBOL_GPL(ufshcd_remove);
8277
47555a5c
YG
8278/**
8279 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8280 * @hba: pointer to Host Bus Adapter (HBA)
8281 */
8282void ufshcd_dealloc_host(struct ufs_hba *hba)
8283{
8284 scsi_host_put(hba->host);
8285}
8286EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8287
ca3d7bf9
AM
8288/**
8289 * ufshcd_set_dma_mask - Set dma mask based on the controller
8290 * addressing capability
8291 * @hba: per adapter instance
8292 *
8293 * Returns 0 for success, non-zero for failure
8294 */
8295static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8296{
8297 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8298 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8299 return 0;
8300 }
8301 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8302}
8303
7a3e97b0 8304/**
5c0c28a8 8305 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3b1d0580
VH
8306 * @dev: pointer to device handle
8307 * @hba_handle: driver private handle
7a3e97b0
SY
8308 * Returns 0 on success, non-zero value on failure
8309 */
5c0c28a8 8310int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7a3e97b0
SY
8311{
8312 struct Scsi_Host *host;
8313 struct ufs_hba *hba;
5c0c28a8 8314 int err = 0;
7a3e97b0 8315
3b1d0580
VH
8316 if (!dev) {
8317 dev_err(dev,
8318 "Invalid memory reference for dev is NULL\n");
8319 err = -ENODEV;
7a3e97b0
SY
8320 goto out_error;
8321 }
8322
7a3e97b0
SY
8323 host = scsi_host_alloc(&ufshcd_driver_template,
8324 sizeof(struct ufs_hba));
8325 if (!host) {
3b1d0580 8326 dev_err(dev, "scsi_host_alloc failed\n");
7a3e97b0 8327 err = -ENOMEM;
3b1d0580 8328 goto out_error;
7a3e97b0
SY
8329 }
8330 hba = shost_priv(host);
7a3e97b0 8331 hba->host = host;
3b1d0580 8332 hba->dev = dev;
5c0c28a8 8333 *hba_handle = hba;
9e1e8a75 8334 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
5c0c28a8 8335
566ec9ad
SM
8336 INIT_LIST_HEAD(&hba->clk_list_head);
8337
5c0c28a8
SRT
8338out_error:
8339 return err;
8340}
8341EXPORT_SYMBOL(ufshcd_alloc_host);
8342
69a6c269
BVA
8343/* This function exists because blk_mq_alloc_tag_set() requires this. */
8344static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8345 const struct blk_mq_queue_data *qd)
8346{
8347 WARN_ON_ONCE(true);
8348 return BLK_STS_NOTSUPP;
8349}
8350
8351static const struct blk_mq_ops ufshcd_tmf_ops = {
8352 .queue_rq = ufshcd_queue_tmf,
8353};
8354
5c0c28a8
SRT
8355/**
8356 * ufshcd_init - Driver initialization routine
8357 * @hba: per-adapter instance
8358 * @mmio_base: base register address
8359 * @irq: Interrupt line of device
8360 * Returns 0 on success, non-zero value on failure
8361 */
8362int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8363{
8364 int err;
8365 struct Scsi_Host *host = hba->host;
8366 struct device *dev = hba->dev;
8367
8368 if (!mmio_base) {
8369 dev_err(hba->dev,
8370 "Invalid memory reference for mmio_base is NULL\n");
8371 err = -ENODEV;
8372 goto out_error;
8373 }
8374
3b1d0580
VH
8375 hba->mmio_base = mmio_base;
8376 hba->irq = irq;
b9dc8aca 8377 hba->hba_enable_delay_us = 1000;
7a3e97b0 8378
aa497613 8379 err = ufshcd_hba_init(hba);
5c0c28a8
SRT
8380 if (err)
8381 goto out_error;
8382
7a3e97b0
SY
8383 /* Read capabilities registers */
8384 ufshcd_hba_capabilities(hba);
8385
8386 /* Get UFS version supported by the controller */
8387 hba->ufs_version = ufshcd_get_ufs_version(hba);
8388
c01848c6
YG
8389 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8390 (hba->ufs_version != UFSHCI_VERSION_11) &&
8391 (hba->ufs_version != UFSHCI_VERSION_20) &&
8392 (hba->ufs_version != UFSHCI_VERSION_21))
8393 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8394 hba->ufs_version);
8395
2fbd009b
SJ
8396 /* Get Interrupt bit mask per version */
8397 hba->intr_mask = ufshcd_get_intr_mask(hba);
8398
ca3d7bf9
AM
8399 err = ufshcd_set_dma_mask(hba);
8400 if (err) {
8401 dev_err(hba->dev, "set dma mask failed\n");
8402 goto out_disable;
8403 }
8404
7a3e97b0
SY
8405 /* Allocate memory for host memory space */
8406 err = ufshcd_memory_alloc(hba);
8407 if (err) {
3b1d0580
VH
8408 dev_err(hba->dev, "Memory allocation failed\n");
8409 goto out_disable;
7a3e97b0
SY
8410 }
8411
8412 /* Configure LRB */
8413 ufshcd_host_memory_configure(hba);
8414
8415 host->can_queue = hba->nutrs;
8416 host->cmd_per_lun = hba->nutrs;
8417 host->max_id = UFSHCD_MAX_ID;
0ce147d4 8418 host->max_lun = UFS_MAX_LUNS;
7a3e97b0
SY
8419 host->max_channel = UFSHCD_MAX_CHANNEL;
8420 host->unique_id = host->host_no;
a851b2bd 8421 host->max_cmd_len = UFS_CDB_SIZE;
7a3e97b0 8422
7eb584db
DR
8423 hba->max_pwr_info.is_valid = false;
8424
7a3e97b0 8425 /* Initialize work queues */
e8e7f271 8426 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
66ec6d59 8427 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7a3e97b0 8428
6ccf44fe
SJ
8429 /* Initialize UIC command mutex */
8430 mutex_init(&hba->uic_cmd_mutex);
8431
5a0b0cb9
SRT
8432 /* Initialize mutex for device management commands */
8433 mutex_init(&hba->dev_cmd.lock);
8434
a3cd5ec5 8435 init_rwsem(&hba->clk_scaling_lock);
8436
1ab27c9c 8437 ufshcd_init_clk_gating(hba);
199ef13c 8438
eebcc196
VG
8439 ufshcd_init_clk_scaling(hba);
8440
199ef13c
YG
8441 /*
8442 * In order to avoid any spurious interrupt immediately after
8443 * registering UFS controller interrupt handler, clear any pending UFS
8444 * interrupt status and disable all the UFS interrupts.
8445 */
8446 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8447 REG_INTERRUPT_STATUS);
8448 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8449 /*
8450 * Make sure that UFS interrupts are disabled and any pending interrupt
8451 * status is cleared before registering UFS interrupt handler.
8452 */
8453 mb();
8454
7a3e97b0 8455 /* IRQ registration */
2953f850 8456 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7a3e97b0 8457 if (err) {
3b1d0580 8458 dev_err(hba->dev, "request irq failed\n");
1ab27c9c 8459 goto exit_gating;
57d104c1
SJ
8460 } else {
8461 hba->is_irq_enabled = true;
7a3e97b0
SY
8462 }
8463
3b1d0580 8464 err = scsi_add_host(host, hba->dev);
7a3e97b0 8465 if (err) {
3b1d0580 8466 dev_err(hba->dev, "scsi_add_host failed\n");
1ab27c9c 8467 goto exit_gating;
7a3e97b0
SY
8468 }
8469
7252a360
BVA
8470 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8471 if (IS_ERR(hba->cmd_queue)) {
8472 err = PTR_ERR(hba->cmd_queue);
8473 goto out_remove_scsi_host;
8474 }
8475
69a6c269
BVA
8476 hba->tmf_tag_set = (struct blk_mq_tag_set) {
8477 .nr_hw_queues = 1,
8478 .queue_depth = hba->nutmrs,
8479 .ops = &ufshcd_tmf_ops,
8480 .flags = BLK_MQ_F_NO_SCHED,
8481 };
8482 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8483 if (err < 0)
8484 goto free_cmd_queue;
8485 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8486 if (IS_ERR(hba->tmf_queue)) {
8487 err = PTR_ERR(hba->tmf_queue);
8488 goto free_tmf_tag_set;
8489 }
8490
d8d9f793
BA
8491 /* Reset the attached device */
8492 ufshcd_vops_device_reset(hba);
8493
6ccf44fe
SJ
8494 /* Host controller enable */
8495 err = ufshcd_hba_enable(hba);
7a3e97b0 8496 if (err) {
6ccf44fe 8497 dev_err(hba->dev, "Host controller enable failed\n");
66cc820f 8498 ufshcd_print_host_regs(hba);
6ba65588 8499 ufshcd_print_host_state(hba);
69a6c269 8500 goto free_tmf_queue;
7a3e97b0 8501 }
6ccf44fe 8502
0c8f7586 8503 /*
8504 * Set the default power management level for runtime and system PM.
8505 * Default power saving mode is to keep UFS link in Hibern8 state
8506 * and UFS device in sleep state.
8507 */
8508 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8509 UFS_SLEEP_PWR_MODE,
8510 UIC_LINK_HIBERN8_STATE);
8511 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8512 UFS_SLEEP_PWR_MODE,
8513 UIC_LINK_HIBERN8_STATE);
8514
ad448378 8515 /* Set the default auto-hibernate idle timer value to 150 ms */
f571b377 8516 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
ad448378
AH
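	/* timer scale value 3 selects 1 ms units, i.e. 150 * 1 ms = 150 ms */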
8517 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8518 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8519 }
8520
62694735
SRT
8521 /* Hold auto suspend until async scan completes */
8522 pm_runtime_get_sync(dev);
38135535 8523 atomic_set(&hba->scsi_block_reqs_cnt, 0);
57d104c1 8524 /*
7caf489b 8525 * We are assuming that the device wasn't put in sleep/power-down
8526 * state exclusively during the boot stage before the kernel.
8527 * This assumption helps avoid doing link startup twice during
8528 * ufshcd_probe_hba().
57d104c1 8529 */
7caf489b 8530 ufshcd_set_ufs_dev_active(hba);
57d104c1 8531
6ccf44fe 8532 async_schedule(ufshcd_async_scan, hba);
cbb6813e 8533 ufs_sysfs_add_nodes(hba->dev);
6ccf44fe 8534
7a3e97b0
SY
8535 return 0;
8536
69a6c269
BVA
8537free_tmf_queue:
8538 blk_cleanup_queue(hba->tmf_queue);
8539free_tmf_tag_set:
8540 blk_mq_free_tag_set(&hba->tmf_tag_set);
7252a360
BVA
8541free_cmd_queue:
8542 blk_cleanup_queue(hba->cmd_queue);
3b1d0580
VH
8543out_remove_scsi_host:
8544 scsi_remove_host(hba->host);
1ab27c9c 8545exit_gating:
eebcc196 8546 ufshcd_exit_clk_scaling(hba);
1ab27c9c 8547 ufshcd_exit_clk_gating(hba);
3b1d0580 8548out_disable:
57d104c1 8549 hba->is_irq_enabled = false;
aa497613 8550 ufshcd_hba_exit(hba);
3b1d0580
VH
8551out_error:
8552 return err;
8553}
8554EXPORT_SYMBOL_GPL(ufshcd_init);
8555
3b1d0580
VH
8556MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8557MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
e0eca63e 8558MODULE_DESCRIPTION("Generic UFS host controller driver Core");
7a3e97b0
SY
8559MODULE_LICENSE("GPL");
8560MODULE_VERSION(UFSHCD_DRIVER_VERSION);