1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller driver Core
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include "ufshcd.h"
20 #include "ufs_quirks.h"
21 #include "unipro.h"
22 #include "ufs-sysfs.h"
23 #include "ufs-debugfs.h"
24 #include "ufs_bsg.h"
25 #include "ufshcd-crypto.h"
26 #include <asm/unaligned.h>
28
29 #define CREATE_TRACE_POINTS
30 #include <trace/events/ufs.h>
31
32 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
33                                  UTP_TASK_REQ_COMPL |\
34                                  UFSHCD_ERROR_MASK)
35 /* UIC command timeout, unit: ms */
36 #define UIC_CMD_TIMEOUT 500
37
38 /* NOP OUT retries waiting for NOP IN response */
39 #define NOP_OUT_RETRIES    10
40 /* Timeout after 50 msecs if NOP OUT hangs without response */
41 #define NOP_OUT_TIMEOUT    50 /* msecs */
42
43 /* Query request retries */
44 #define QUERY_REQ_RETRIES 3
45 /* Query request timeout */
46 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
47
48 /* Task management command timeout */
49 #define TM_CMD_TIMEOUT  100 /* msecs */
50
51 /* maximum number of retries for a general UIC command  */
52 #define UFS_UIC_COMMAND_RETRIES 3
53
54 /* maximum number of link-startup retries */
55 #define DME_LINKSTARTUP_RETRIES 3
56
57 /* Maximum retries for Hibern8 enter */
58 #define UIC_HIBERN8_ENTER_RETRIES 3
59
60 /* maximum number of reset retries before giving up */
61 #define MAX_HOST_RESET_RETRIES 5
62
63 /* Expose the flag value from utp_upiu_query.value */
64 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
65
66 /* Interrupt aggregation default timeout, unit: 40us */
67 #define INT_AGGR_DEF_TO 0x02
68
69 /* default delay of autosuspend: 2000 ms */
70 #define RPM_AUTOSUSPEND_DELAY_MS 2000
71
72 /* Default delay of RPM device flush delayed work */
73 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
74
75 /* Default value of wait time before gating device ref clock */
76 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
77
78 /* Polling time to wait for fDeviceInit */
79 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
80
81 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
82         ({                                                              \
83                 int _ret;                                               \
84                 if (_on)                                                \
85                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
86                 else                                                    \
87                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
88                 _ret;                                                   \
89         })
90
91 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
92         size_t __len = (len);                                            \
93         print_hex_dump(KERN_ERR, prefix_str,                             \
94                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
95                        16, 4, buf, __len, false);                        \
96 } while (0)
97
98 static bool early_suspend;
99
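/**
 * ufshcd_dump_regs - dump a range of host controller registers
 * @hba: per-adapter instance
 * @offset: byte offset of the first register to dump, must be 4-byte aligned
 * @len: number of bytes to dump, must be a multiple of 4
 * @prefix: prefix string prepended to the hex dump
 *
 * Returns 0 on success, -EINVAL for unaligned arguments or -ENOMEM if the
 * temporary dump buffer cannot be allocated.
 */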
100 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
101                      const char *prefix)
102 {
103         u32 *regs;
104         size_t pos;
105
106         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
107                 return -EINVAL;
108
109         regs = kzalloc(len, GFP_ATOMIC);
110         if (!regs)
111                 return -ENOMEM;
112
113         for (pos = 0; pos < len; pos += 4)
114                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
115
116         ufshcd_hex_dump(prefix, regs, len);
117         kfree(regs);
118
119         return 0;
120 }
121 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
122
123 enum {
124         UFSHCD_MAX_CHANNEL      = 0,
125         UFSHCD_MAX_ID           = 1,
126         UFSHCD_CMD_PER_LUN      = 32,
127         UFSHCD_CAN_QUEUE        = 32,
128 };
129
130 /* UFSHCD states */
131 enum {
132         UFSHCD_STATE_RESET,
133         UFSHCD_STATE_ERROR,
134         UFSHCD_STATE_OPERATIONAL,
135         UFSHCD_STATE_EH_SCHEDULED_FATAL,
136         UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
137 };
138
139 /* UFSHCD error handling flags */
140 enum {
141         UFSHCD_EH_IN_PROGRESS = (1 << 0),
142 };
143
144 /* UFSHCD UIC layer error flags */
145 enum {
146         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
147         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
148         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
149         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
150         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
151         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
152         UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
153 };
154
155 #define ufshcd_set_eh_in_progress(h) \
156         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
157 #define ufshcd_eh_in_progress(h) \
158         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
159 #define ufshcd_clear_eh_in_progress(h) \
160         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
161
162 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
163         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
164         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
165         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
166         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
167         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
168         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
169         /*
170          * For DeepSleep, the link is first put in hibern8 and then off.
171          * Leaving the link in hibern8 is not supported.
172          */
173         {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
174 };
175
176 static inline enum ufs_dev_pwr_mode
177 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
178 {
179         return ufs_pm_lvl_states[lvl].dev_state;
180 }
181
182 static inline enum uic_link_state
183 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
184 {
185         return ufs_pm_lvl_states[lvl].link_state;
186 }
187
188 static inline enum ufs_pm_level
189 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
190                                         enum uic_link_state link_state)
191 {
192         enum ufs_pm_level lvl;
193
194         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
195                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
196                         (ufs_pm_lvl_states[lvl].link_state == link_state))
197                         return lvl;
198         }
199
200         /* if no match is found, return level 0 */
201         return UFS_PM_LVL_0;
202 }
203
204 static struct ufs_dev_fix ufs_fixups[] = {
205         /* UFS cards deviations table */
206         UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
207                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
208         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
209                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
210                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
211                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
212         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
213                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
214         UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
215                 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
216         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
217                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
218         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
219                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
220         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
221                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
222         END_FIX
223 };
224
225 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
226 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
227 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
228 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
229 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
230 static void ufshcd_hba_exit(struct ufs_hba *hba);
231 static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
232 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
233 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
234 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
235 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
236 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
237 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
238 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
239 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
240 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
241 static irqreturn_t ufshcd_intr(int irq, void *__hba);
242 static int ufshcd_change_power_mode(struct ufs_hba *hba,
243                              struct ufs_pa_layer_attr *pwr_mode);
244 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
245 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
246 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
247 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
248                                          struct ufs_vreg *vreg);
249 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
250 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
251 static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
252 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
253 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
254
255 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
256 {
257         return tag >= 0 && tag < hba->nutrs;
258 }
259
260 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
261 {
262         if (!hba->is_irq_enabled) {
263                 enable_irq(hba->irq);
264                 hba->is_irq_enabled = true;
265         }
266 }
267
268 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
269 {
270         if (hba->is_irq_enabled) {
271                 disable_irq(hba->irq);
272                 hba->is_irq_enabled = false;
273         }
274 }
275
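/*
 * ufshcd_wb_config - configure WriteBooster for the attached device
 * @hba: per-adapter instance
 *
 * Enables WriteBooster and flushing during Hibern8 if the device allows it,
 * and also enables manual buffer flushing unless the host advertises the
 * UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL quirk.
 */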
276 static inline void ufshcd_wb_config(struct ufs_hba *hba)
277 {
278         int ret;
279
280         if (!ufshcd_is_wb_allowed(hba))
281                 return;
282
283         ret = ufshcd_wb_ctrl(hba, true);
284         if (ret)
285                 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
286         else
287                 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
288         ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
289         if (ret)
290                 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
291                         __func__, ret);
292         if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
293                 ufshcd_wb_toggle_flush(hba, true);
294 }
295
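/*
 * ufshcd_scsi_unblock_requests()/ufshcd_scsi_block_requests() - reference
 * counted wrappers around scsi_unblock_requests()/scsi_block_requests().
 * The SCSI midlayer is blocked on the first block request and unblocked
 * again only when the last blocker drops its reference.
 */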
296 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
297 {
298         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
299                 scsi_unblock_requests(hba->host);
300 }
301
302 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
303 {
304         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
305                 scsi_block_requests(hba->host);
306 }
307
308 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
309                                       enum ufs_trace_str_t str_t)
310 {
311         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
312
313         if (!trace_ufshcd_upiu_enabled())
314                 return;
315
316         trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
317                           UFS_TSF_CDB);
318 }
319
320 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
321                                         enum ufs_trace_str_t str_t,
322                                         struct utp_upiu_req *rq_rsp)
323 {
324         if (!trace_ufshcd_upiu_enabled())
325                 return;
326
327         trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
328                           &rq_rsp->qr, UFS_TSF_OSF);
329 }
330
331 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
332                                      enum ufs_trace_str_t str_t)
333 {
334         int off = (int)tag - hba->nutrs;
335         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
336
337         if (!trace_ufshcd_upiu_enabled())
338                 return;
339
340         if (str_t == UFS_TM_SEND)
341                 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
342                                   &descp->input_param1, UFS_TSF_TM_INPUT);
343         else
344                 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
345                                   &descp->output_param1, UFS_TSF_TM_OUTPUT);
346 }
347
348 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
349                                          struct uic_command *ucmd,
350                                          enum ufs_trace_str_t str_t)
351 {
352         u32 cmd;
353
354         if (!trace_ufshcd_uic_command_enabled())
355                 return;
356
357         if (str_t == UFS_CMD_SEND)
358                 cmd = ucmd->command;
359         else
360                 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
361
362         trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
363                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
364                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
365                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
366 }
367
368 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
369                                      enum ufs_trace_str_t str_t)
370 {
371         sector_t lba = -1;
372         u8 opcode = 0, group_id = 0;
373         u32 intr, doorbell;
374         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
375         struct scsi_cmnd *cmd = lrbp->cmd;
376         int transfer_len = -1;
377
378         if (!trace_ufshcd_command_enabled()) {
379                 /* trace the UPIU without tracing the SCSI command */
380                 if (cmd)
381                         ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
382                 return;
383         }
384
385         if (cmd) { /* data phase exists */
386                 /* trace UPIU also */
387                 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
388                 opcode = cmd->cmnd[0];
389                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
390                         /*
391                          * Currently we only fully trace read(10) and write(10)
392                          * commands
393                          */
394                         if (cmd->request && cmd->request->bio)
395                                 lba = cmd->request->bio->bi_iter.bi_sector;
396                         transfer_len = be32_to_cpu(
397                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
398                         if (opcode == WRITE_10)
399                                 group_id = lrbp->cmd->cmnd[6];
400                 } else if (opcode == UNMAP) {
401                         if (cmd->request) {
402                                 lba = scsi_get_lba(cmd);
403                                 transfer_len = blk_rq_bytes(cmd->request);
404                         }
405                 }
406         }
407
408         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
409         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
410         trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
411                         doorbell, transfer_len, intr, lba, opcode, group_id);
412 }
413
414 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
415 {
416         struct ufs_clk_info *clki;
417         struct list_head *head = &hba->clk_list_head;
418
419         if (list_empty(head))
420                 return;
421
422         list_for_each_entry(clki, head, list) {
423                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
424                                 clki->max_freq)
425                         dev_err(hba->dev, "clk: %s, rate: %u\n",
426                                         clki->name, clki->curr_freq);
427         }
428 }
429
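/*
 * ufshcd_print_evt - dump the recorded history of one event type
 * @hba: per-adapter instance
 * @id: event index into hba->ufs_stats.event[]
 * @err_name: human readable name used in the log messages
 */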
430 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
431                              char *err_name)
432 {
433         int i;
434         bool found = false;
435         struct ufs_event_hist *e;
436
437         if (id >= UFS_EVT_CNT)
438                 return;
439
440         e = &hba->ufs_stats.event[id];
441
442         for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
443                 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
444
445                 if (e->tstamp[p] == 0)
446                         continue;
447                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
448                         e->val[p], ktime_to_us(e->tstamp[p]));
449                 found = true;
450         }
451
452         if (!found)
453                 dev_err(hba->dev, "No record of %s\n", err_name);
454         else
455                 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
456 }
457
458 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
459 {
460         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
461
462         ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
463         ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
464         ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
465         ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
466         ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
467         ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
468                          "auto_hibern8_err");
469         ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
470         ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
471                          "link_startup_fail");
472         ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
473         ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
474                          "suspend_fail");
475         ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
476         ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
477         ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
478
479         ufshcd_vops_dbg_register_dump(hba);
480 }
481
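/*
 * ufshcd_print_trs - dump the transfer request descriptor, the request and
 * response UPIUs and, if @pr_prdt is set, the PRDT for every tag set in
 * @bitmap.
 */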
482 static
483 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
484 {
485         struct ufshcd_lrb *lrbp;
486         int prdt_length;
487         int tag;
488
489         for_each_set_bit(tag, &bitmap, hba->nutrs) {
490                 lrbp = &hba->lrb[tag];
491
492                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
493                                 tag, ktime_to_us(lrbp->issue_time_stamp));
494                 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
495                                 tag, ktime_to_us(lrbp->compl_time_stamp));
496                 dev_err(hba->dev,
497                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
498                         tag, (u64)lrbp->utrd_dma_addr);
499
500                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
501                                 sizeof(struct utp_transfer_req_desc));
502                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
503                         (u64)lrbp->ucd_req_dma_addr);
504                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
505                                 sizeof(struct utp_upiu_req));
506                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
507                         (u64)lrbp->ucd_rsp_dma_addr);
508                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
509                                 sizeof(struct utp_upiu_rsp));
510
511                 prdt_length = le16_to_cpu(
512                         lrbp->utr_descriptor_ptr->prd_table_length);
513                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
514                         prdt_length /= sizeof(struct ufshcd_sg_entry);
515
516                 dev_err(hba->dev,
517                         "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
518                         tag, prdt_length,
519                         (u64)lrbp->ucd_prdt_dma_addr);
520
521                 if (pr_prdt)
522                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
523                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
524         }
525 }
526
527 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
528 {
529         int tag;
530
531         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
532                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
533
534                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
535                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
536         }
537 }
538
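/*
 * ufshcd_print_host_state - dump a snapshot of the host state (outstanding
 * requests/tasks, saved errors, power and link state, quirks and clock
 * frequencies) for debugging.
 */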
539 static void ufshcd_print_host_state(struct ufs_hba *hba)
540 {
541         struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
542
543         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
544         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
545                 hba->outstanding_reqs, hba->outstanding_tasks);
546         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
547                 hba->saved_err, hba->saved_uic_err);
548         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
549                 hba->curr_dev_pwr_mode, hba->uic_link_state);
550         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
551                 hba->pm_op_in_progress, hba->is_sys_suspended);
552         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
553                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
554         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
555         dev_err(hba->dev,
556                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
557                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
558                 hba->ufs_stats.hibern8_exit_cnt);
559         dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
560                 ktime_to_us(hba->ufs_stats.last_intr_ts),
561                 hba->ufs_stats.last_intr_status);
562         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
563                 hba->eh_flags, hba->req_abort_count);
564         dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
565                 hba->ufs_version, hba->capabilities, hba->caps);
566         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
567                 hba->dev_quirks);
568         if (sdev_ufs)
569                 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
570                         sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
571
572         ufshcd_print_clk_freqs(hba);
573 }
574
575 /**
576  * ufshcd_print_pwr_info - print the power parameters saved in hba->pwr_info
578  * @hba: per-adapter instance
579  */
580 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
581 {
582         static const char * const names[] = {
583                 "INVALID MODE",
584                 "FAST MODE",
585                 "SLOW_MODE",
586                 "INVALID MODE",
587                 "FASTAUTO_MODE",
588                 "SLOWAUTO_MODE",
589                 "INVALID MODE",
590         };
591
592         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
593                  __func__,
594                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
595                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
596                  names[hba->pwr_info.pwr_rx],
597                  names[hba->pwr_info.pwr_tx],
598                  hba->pwr_info.hs_rate);
599 }
600
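/*
 * ufshcd_device_reset - reset the UFS device via the vendor specific vop
 * @hba: per-adapter instance
 *
 * On success the device is marked active and its WriteBooster state is
 * cleared. The outcome is recorded in the event history unless the vop is
 * not implemented (-EOPNOTSUPP).
 */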
601 static void ufshcd_device_reset(struct ufs_hba *hba)
602 {
603         int err;
604
605         err = ufshcd_vops_device_reset(hba);
606
607         if (!err) {
608                 ufshcd_set_ufs_dev_active(hba);
609                 if (ufshcd_is_wb_allowed(hba)) {
610                         hba->dev_info.wb_enabled = false;
611                         hba->dev_info.wb_buf_flush_enabled = false;
612                 }
613         }
614         if (err != -EOPNOTSUPP)
615                 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
616 }
617
618 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
619 {
620         if (!us)
621                 return;
622
623         if (us < 10)
624                 udelay(us);
625         else
626                 usleep_range(us, us + tolerance);
627 }
628 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
629
630 /**
631  * ufshcd_wait_for_register - wait for register value to change
632  * @hba: per-adapter instance
633  * @reg: mmio register offset
634  * @mask: mask to apply to the read register value
635  * @val: value to wait for
636  * @interval_us: polling interval in microseconds
637  * @timeout_ms: timeout in milliseconds
638  *
639  * Return:
640  * -ETIMEDOUT on error, zero on success.
641  */
642 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
643                                 u32 val, unsigned long interval_us,
644                                 unsigned long timeout_ms)
645 {
646         int err = 0;
647         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
648
649         /* ignore bits that we don't intend to wait on */
650         val = val & mask;
651
652         while ((ufshcd_readl(hba, reg) & mask) != val) {
653                 usleep_range(interval_us, interval_us + 50);
654                 if (time_after(jiffies, timeout)) {
655                         if ((ufshcd_readl(hba, reg) & mask) != val)
656                                 err = -ETIMEDOUT;
657                         break;
658                 }
659         }
660
661         return err;
662 }
663
664 /**
665  * ufshcd_get_intr_mask - Get the interrupt bit mask
666  * @hba: Pointer to adapter instance
667  *
668  * Returns interrupt bit mask per version
669  */
670 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
671 {
672         u32 intr_mask = 0;
673
674         switch (hba->ufs_version) {
675         case UFSHCI_VERSION_10:
676                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
677                 break;
678         case UFSHCI_VERSION_11:
679         case UFSHCI_VERSION_20:
680                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
681                 break;
682         case UFSHCI_VERSION_21:
683         default:
684                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
685                 break;
686         }
687
688         return intr_mask;
689 }
690
691 /**
692  * ufshcd_get_ufs_version - Get the UFSHCI version supported by the HBA
693  * @hba: Pointer to adapter instance
694  *
695  * Returns UFSHCI version supported by the controller
696  */
697 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
698 {
699         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
700                 return ufshcd_vops_get_ufs_hci_version(hba);
701
702         return ufshcd_readl(hba, REG_UFS_VERSION);
703 }
704
705 /**
706  * ufshcd_is_device_present - Check if any device is connected to
707  *                            the host controller
708  * @hba: pointer to adapter instance
709  *
710  * Returns true if device present, false if no device detected
711  */
712 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
713 {
714         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
715                                                 DEVICE_PRESENT) ? true : false;
716 }
717
718 /**
719  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
720  * @lrbp: pointer to local command reference block
721  *
722  * This function is used to get the OCS field from UTRD
723  * Returns the OCS field in the UTRD
724  */
725 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
726 {
727         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
728 }
729
730 /**
731  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
732  * @hba: per adapter instance
733  * @pos: position of the bit to be cleared
734  */
735 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
736 {
737         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
738                 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
739         else
740                 ufshcd_writel(hba, ~(1 << pos),
741                                 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
742 }
743
744 /**
745  * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
746  * @hba: per adapter instance
747  * @pos: position of the bit to be cleared
748  */
749 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
750 {
751         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
752                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
753         else
754                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
755 }
756
757 /**
758  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
759  * @hba: per adapter instance
760  * @tag: position of the bit to be cleared
761  */
762 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
763 {
764         __clear_bit(tag, &hba->outstanding_reqs);
765 }
766
767 /**
768  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
769  * @reg: Register value of host controller status
770  *
771  * Returns 0 on success and a positive value on failure
772  */
773 static inline int ufshcd_get_lists_status(u32 reg)
774 {
775         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
776 }
777
778 /**
779  * ufshcd_get_uic_cmd_result - Get the UIC command result
780  * @hba: Pointer to adapter instance
781  *
782  * This function gets the result of UIC command completion
783  * Returns 0 on success, non zero value on error
784  */
785 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
786 {
787         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
788                MASK_UIC_COMMAND_RESULT;
789 }
790
791 /**
792  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
793  * @hba: Pointer to adapter instance
794  *
795  * This function reads UIC command argument 3
796  * Returns the attribute value carried in UIC command argument 3
797  */
798 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
799 {
800         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
801 }
802
803 /**
804  * ufshcd_get_req_rsp - returns the TR response transaction type
805  * @ucd_rsp_ptr: pointer to response UPIU
806  */
807 static inline int
808 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
809 {
810         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
811 }
812
813 /**
814  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
815  * @ucd_rsp_ptr: pointer to response UPIU
816  *
817  * This function gets the response status and scsi_status from response UPIU
818  * Returns the response result code.
819  */
820 static inline int
821 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
822 {
823         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
824 }
825
826 /*
827  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
828  *                              from response UPIU
829  * @ucd_rsp_ptr: pointer to response UPIU
830  *
831  * Return the data segment length.
832  */
833 static inline unsigned int
834 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
835 {
836         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
837                 MASK_RSP_UPIU_DATA_SEG_LEN;
838 }
839
840 /**
841  * ufshcd_is_exception_event - Check if the device raised an exception event
842  * @ucd_rsp_ptr: pointer to response UPIU
843  *
844  * The function checks if the device raised an exception event indicated in
845  * the Device Information field of response UPIU.
846  *
847  * Returns true if exception is raised, false otherwise.
848  */
849 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
850 {
851         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
852                         MASK_RSP_EXCEPTION_EVENT ? true : false;
853 }
854
855 /**
856  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
857  * @hba: per adapter instance
858  */
859 static inline void
860 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
861 {
862         ufshcd_writel(hba, INT_AGGR_ENABLE |
863                       INT_AGGR_COUNTER_AND_TIMER_RESET,
864                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
865 }
866
867 /**
868  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
869  * @hba: per adapter instance
870  * @cnt: Interrupt aggregation counter threshold
871  * @tmout: Interrupt aggregation timeout value
872  */
873 static inline void
874 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
875 {
876         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
877                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
878                       INT_AGGR_TIMEOUT_VAL(tmout),
879                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
880 }
881
882 /**
883  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
884  * @hba: per adapter instance
885  */
886 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
887 {
888         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
889 }
890
891 /**
892  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
893  *                      Setting the run-stop registers to 1 indicates to the
894  *                      host controller that it can process requests.
895  * @hba: per adapter instance
896  */
897 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
898 {
899         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
900                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
901         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
902                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
903 }
904
905 /**
906  * ufshcd_hba_start - Start controller initialization sequence
907  * @hba: per adapter instance
908  */
909 static inline void ufshcd_hba_start(struct ufs_hba *hba)
910 {
911         u32 val = CONTROLLER_ENABLE;
912
913         if (ufshcd_crypto_enable(hba))
914                 val |= CRYPTO_GENERAL_ENABLE;
915
916         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
917 }
918
919 /**
920  * ufshcd_is_hba_active - Get controller state
921  * @hba: per adapter instance
922  *
923  * Returns false if controller is active, true otherwise
924  */
925 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
926 {
927         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
928                 ? false : true;
929 }
930
931 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
932 {
933         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
934         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
935             (hba->ufs_version == UFSHCI_VERSION_11))
936                 return UFS_UNIPRO_VER_1_41;
937         else
938                 return UFS_UNIPRO_VER_1_6;
939 }
940 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
941
942 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
943 {
944         /*
945          * If both host and device support UniPro ver1.6 or later, PA layer
946          * parameters tuning happens during link startup itself.
947          *
948          * We can manually tune PA layer parameters if either host or device
949          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
950          * logic simple, we will only do manual tuning if local unipro version
951          * doesn't support ver1.6 or later.
952          */
953         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
954                 return true;
955         else
956                 return false;
957 }
958
959 /**
960  * ufshcd_set_clk_freq - set UFS controller clock frequencies
961  * @hba: per adapter instance
962  * @scale_up: If true, set the maximum possible frequency, otherwise set the low frequency
963  *
964  * Returns 0 if successful
965  * Returns < 0 for any other errors
966  */
967 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
968 {
969         int ret = 0;
970         struct ufs_clk_info *clki;
971         struct list_head *head = &hba->clk_list_head;
972
973         if (list_empty(head))
974                 goto out;
975
976         list_for_each_entry(clki, head, list) {
977                 if (!IS_ERR_OR_NULL(clki->clk)) {
978                         if (scale_up && clki->max_freq) {
979                                 if (clki->curr_freq == clki->max_freq)
980                                         continue;
981
982                                 ret = clk_set_rate(clki->clk, clki->max_freq);
983                                 if (ret) {
984                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
985                                                 __func__, clki->name,
986                                                 clki->max_freq, ret);
987                                         break;
988                                 }
989                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
990                                                 "scaled up", clki->name,
991                                                 clki->curr_freq,
992                                                 clki->max_freq);
993
994                                 clki->curr_freq = clki->max_freq;
995
996                         } else if (!scale_up && clki->min_freq) {
997                                 if (clki->curr_freq == clki->min_freq)
998                                         continue;
999
1000                                 ret = clk_set_rate(clki->clk, clki->min_freq);
1001                                 if (ret) {
1002                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1003                                                 __func__, clki->name,
1004                                                 clki->min_freq, ret);
1005                                         break;
1006                                 }
1007                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1008                                                 "scaled down", clki->name,
1009                                                 clki->curr_freq,
1010                                                 clki->min_freq);
1011                                 clki->curr_freq = clki->min_freq;
1012                         }
1013                 }
1014                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1015                                 clki->name, clk_get_rate(clki->clk));
1016         }
1017
1018 out:
1019         return ret;
1020 }
1021
1022 /**
1023  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1024  * @hba: per adapter instance
1025  * @scale_up: True if scaling up and false if scaling down
1026  *
1027  * Returns 0 if successful
1028  * Returns < 0 for any other errors
1029  */
1030 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1031 {
1032         int ret = 0;
1033         ktime_t start = ktime_get();
1034
1035         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1036         if (ret)
1037                 goto out;
1038
1039         ret = ufshcd_set_clk_freq(hba, scale_up);
1040         if (ret)
1041                 goto out;
1042
1043         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1044         if (ret)
1045                 ufshcd_set_clk_freq(hba, !scale_up);
1046
1047 out:
1048         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1049                         (scale_up ? "up" : "down"),
1050                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1051         return ret;
1052 }
1053
1054 /**
1055  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1056  * @hba: per adapter instance
1057  * @scale_up: True if scaling up and false if scaling down
1058  *
1059  * Returns true if scaling is required, false otherwise.
1060  */
1061 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1062                                                bool scale_up)
1063 {
1064         struct ufs_clk_info *clki;
1065         struct list_head *head = &hba->clk_list_head;
1066
1067         if (list_empty(head))
1068                 return false;
1069
1070         list_for_each_entry(clki, head, list) {
1071                 if (!IS_ERR_OR_NULL(clki->clk)) {
1072                         if (scale_up && clki->max_freq) {
1073                                 if (clki->curr_freq == clki->max_freq)
1074                                         continue;
1075                                 return true;
1076                         } else if (!scale_up && clki->min_freq) {
1077                                 if (clki->curr_freq == clki->min_freq)
1078                                         continue;
1079                                 return true;
1080                         }
1081                 }
1082         }
1083
1084         return false;
1085 }
1086
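/*
 * ufshcd_wait_for_doorbell_clr - wait for outstanding requests to complete
 * @hba: per-adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Polls the task management and transfer request doorbell registers until
 * both are clear. Returns 0 on success, or -EBUSY on timeout or if the host
 * leaves the operational state.
 */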
1087 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1088                                         u64 wait_timeout_us)
1089 {
1090         unsigned long flags;
1091         int ret = 0;
1092         u32 tm_doorbell;
1093         u32 tr_doorbell;
1094         bool timeout = false, do_last_check = false;
1095         ktime_t start;
1096
1097         ufshcd_hold(hba, false);
1098         spin_lock_irqsave(hba->host->host_lock, flags);
1099         /*
1100          * Wait for all the outstanding tasks/transfer requests.
1101          * Verify by checking the doorbell registers are clear.
1102          */
1103         start = ktime_get();
1104         do {
1105                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1106                         ret = -EBUSY;
1107                         goto out;
1108                 }
1109
1110                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1111                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1112                 if (!tm_doorbell && !tr_doorbell) {
1113                         timeout = false;
1114                         break;
1115                 } else if (do_last_check) {
1116                         break;
1117                 }
1118
1119                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1120                 schedule();
1121                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1122                     wait_timeout_us) {
1123                         timeout = true;
1124                         /*
1125                          * We might have been scheduled out for a long time, so
1126                          * make sure to check whether the doorbells have cleared
1127                          * by now.
1128                          */
1129                         do_last_check = true;
1130                 }
1131                 spin_lock_irqsave(hba->host->host_lock, flags);
1132         } while (tm_doorbell || tr_doorbell);
1133
1134         if (timeout) {
1135                 dev_err(hba->dev,
1136                         "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1137                         __func__, tm_doorbell, tr_doorbell);
1138                 ret = -EBUSY;
1139         }
1140 out:
1141         spin_unlock_irqrestore(hba->host->host_lock, flags);
1142         ufshcd_release(hba);
1143         return ret;
1144 }
1145
1146 /**
1147  * ufshcd_scale_gear - scale up/down UFS gear
1148  * @hba: per adapter instance
1149  * @scale_up: True for scaling up gear and false for scaling down
1150  *
1151  * Returns 0 for success,
1152  * Returns -EBUSY if scaling can't happen at this time
1153  * Returns non-zero for any other errors
1154  */
1155 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1156 {
1157         int ret = 0;
1158         struct ufs_pa_layer_attr new_pwr_info;
1159
1160         if (scale_up) {
1161                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1162                        sizeof(struct ufs_pa_layer_attr));
1163         } else {
1164                 memcpy(&new_pwr_info, &hba->pwr_info,
1165                        sizeof(struct ufs_pa_layer_attr));
1166
1167                 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1168                     hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1169                         /* save the current power mode */
1170                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
1171                                 &hba->pwr_info,
1172                                 sizeof(struct ufs_pa_layer_attr));
1173
1174                         /* scale down gear */
1175                         new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1176                         new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1177                 }
1178         }
1179
1180         /* check if the power mode needs to be changed */
1181         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1182         if (ret)
1183                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1184                         __func__, ret,
1185                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1186                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1187
1188         return ret;
1189 }
1190
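/*
 * ufshcd_clock_scaling_prepare - quiesce the host before scaling clocks
 * @hba: per-adapter instance
 *
 * Blocks SCSI requests, takes clk_scaling_lock for writing and waits up to
 * one second for the doorbell registers to clear. Returns 0 on success or
 * -EBUSY if scaling is not allowed or outstanding requests do not drain.
 */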
1191 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1192 {
1193         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
1194         int ret = 0;
1195         /*
1196          * make sure that there are no outstanding requests when
1197          * clock scaling is in progress
1198          */
1199         ufshcd_scsi_block_requests(hba);
1200         down_write(&hba->clk_scaling_lock);
1201
1202         if (!hba->clk_scaling.is_allowed ||
1203             ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1204                 ret = -EBUSY;
1205                 up_write(&hba->clk_scaling_lock);
1206                 ufshcd_scsi_unblock_requests(hba);
1207                 goto out;
1208         }
1209
1210         /* let's not get into low power until clock scaling is completed */
1211         ufshcd_hold(hba, false);
1212
1213 out:
1214         return ret;
1215 }
1216
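/*
 * ufshcd_clock_scaling_unprepare - undo ufshcd_clock_scaling_prepare()
 * @hba: per-adapter instance
 * @writelock: true if clk_scaling_lock is still held for writing
 */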
1217 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
1218 {
1219         if (writelock)
1220                 up_write(&hba->clk_scaling_lock);
1221         else
1222                 up_read(&hba->clk_scaling_lock);
1223         ufshcd_scsi_unblock_requests(hba);
1224         ufshcd_release(hba);
1225 }
1226
1227 /**
1228  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1229  * @hba: per adapter instance
1230  * @scale_up: True for scaling up and false for scaling down
1231  *
1232  * Returns 0 for success,
1233  * Returns -EBUSY if scaling can't happen at this time
1234  * Returns non-zero for any other errors
1235  */
1236 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1237 {
1238         int ret = 0;
1239         bool is_writelock = true;
1240
1241         ret = ufshcd_clock_scaling_prepare(hba);
1242         if (ret)
1243                 return ret;
1244
1245         /* scale down the gear before scaling down clocks */
1246         if (!scale_up) {
1247                 ret = ufshcd_scale_gear(hba, false);
1248                 if (ret)
1249                         goto out_unprepare;
1250         }
1251
1252         ret = ufshcd_scale_clks(hba, scale_up);
1253         if (ret) {
1254                 if (!scale_up)
1255                         ufshcd_scale_gear(hba, true);
1256                 goto out_unprepare;
1257         }
1258
1259         /* scale up the gear after scaling up clocks */
1260         if (scale_up) {
1261                 ret = ufshcd_scale_gear(hba, true);
1262                 if (ret) {
1263                         ufshcd_scale_clks(hba, false);
1264                         goto out_unprepare;
1265                 }
1266         }
1267
1268         /* Enable Write Booster if we have scaled up else disable it */
1269         downgrade_write(&hba->clk_scaling_lock);
1270         is_writelock = false;
1271         ufshcd_wb_ctrl(hba, scale_up);
1272
1273 out_unprepare:
1274         ufshcd_clock_scaling_unprepare(hba, is_writelock);
1275         return ret;
1276 }
1277
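/*
 * ufshcd_clk_scaling_suspend_work()/ufshcd_clk_scaling_resume_work() -
 * workqueue handlers that suspend or resume devfreq-driven clock scaling,
 * tracking the state in clk_scaling.is_suspended.
 */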
1278 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1279 {
1280         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1281                                            clk_scaling.suspend_work);
1282         unsigned long irq_flags;
1283
1284         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1285         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1286                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1287                 return;
1288         }
1289         hba->clk_scaling.is_suspended = true;
1290         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1291
1292         __ufshcd_suspend_clkscaling(hba);
1293 }
1294
1295 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1296 {
1297         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1298                                            clk_scaling.resume_work);
1299         unsigned long irq_flags;
1300
1301         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1302         if (!hba->clk_scaling.is_suspended) {
1303                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1304                 return;
1305         }
1306         hba->clk_scaling.is_suspended = false;
1307         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1308
1309         devfreq_resume_device(hba->devfreq);
1310 }
1311
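/*
 * ufshcd_devfreq_target - devfreq target callback
 * @dev: the UFS host controller device
 * @freq: requested frequency, rounded to the closest supported clock rate
 * @flags: devfreq flags (unused)
 *
 * Chooses between the minimum and maximum clock rates and scales the clocks
 * and gear accordingly. May also queue the clock-scaling suspend work when
 * there are no active requests.
 */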
1312 static int ufshcd_devfreq_target(struct device *dev,
1313                                 unsigned long *freq, u32 flags)
1314 {
1315         int ret = 0;
1316         struct ufs_hba *hba = dev_get_drvdata(dev);
1317         ktime_t start;
1318         bool scale_up, sched_clk_scaling_suspend_work = false;
1319         struct list_head *clk_list = &hba->clk_list_head;
1320         struct ufs_clk_info *clki;
1321         unsigned long irq_flags;
1322
1323         if (!ufshcd_is_clkscaling_supported(hba))
1324                 return -EINVAL;
1325
1326         clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1327         /* Override with the closest supported frequency */
1328         *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1329         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1330         if (ufshcd_eh_in_progress(hba)) {
1331                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1332                 return 0;
1333         }
1334
1335         if (!hba->clk_scaling.active_reqs)
1336                 sched_clk_scaling_suspend_work = true;
1337
1338         if (list_empty(clk_list)) {
1339                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1340                 goto out;
1341         }
1342
1343         /* Decide based on the rounded-off frequency and update */
1344         scale_up = (*freq == clki->max_freq) ? true : false;
1345         if (!scale_up)
1346                 *freq = clki->min_freq;
1347         /* Update the frequency */
1348         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1349                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1350                 ret = 0;
1351                 goto out; /* no state change required */
1352         }
1353         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1354
1355         start = ktime_get();
1356         ret = ufshcd_devfreq_scale(hba, scale_up);
1357
1358         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1359                 (scale_up ? "up" : "down"),
1360                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1361
1362 out:
1363         if (sched_clk_scaling_suspend_work)
1364                 queue_work(hba->clk_scaling.workq,
1365                            &hba->clk_scaling.suspend_work);
1366
1367         return ret;
1368 }
1369
1370 static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1371 {
1372         int *busy = priv;
1373
1374         WARN_ON_ONCE(reserved);
1375         (*busy)++;
1376         return false;
1377 }
1378
1379 /* Whether or not any tag is in use by a request that is in progress. */
1380 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1381 {
1382         struct request_queue *q = hba->cmd_queue;
1383         int busy = 0;
1384
1385         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1386         return busy;
1387 }
1388
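/*
 * ufshcd_devfreq_get_dev_status - devfreq get_dev_status callback
 *
 * Reports the current clock frequency and the busy time accumulated in the
 * current monitoring window, then starts a new window.
 */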
1389 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1390                 struct devfreq_dev_status *stat)
1391 {
1392         struct ufs_hba *hba = dev_get_drvdata(dev);
1393         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1394         unsigned long flags;
1395         struct list_head *clk_list = &hba->clk_list_head;
1396         struct ufs_clk_info *clki;
1397         ktime_t curr_t;
1398
1399         if (!ufshcd_is_clkscaling_supported(hba))
1400                 return -EINVAL;
1401
1402         memset(stat, 0, sizeof(*stat));
1403
1404         spin_lock_irqsave(hba->host->host_lock, flags);
1405         curr_t = ktime_get();
1406         if (!scaling->window_start_t)
1407                 goto start_window;
1408
1409         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1410         /*
1411          * If the current frequency is 0, the ondemand governor assumes that
1412          * no initial frequency has been set and always requests the maximum
1413          * frequency.
1414          */
1415         stat->current_frequency = clki->curr_freq;
1416         if (scaling->is_busy_started)
1417                 scaling->tot_busy_t += ktime_us_delta(curr_t,
1418                                 scaling->busy_start_t);
1419
1420         stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1421         stat->busy_time = scaling->tot_busy_t;
1422 start_window:
1423         scaling->window_start_t = curr_t;
1424         scaling->tot_busy_t = 0;
1425
1426         if (hba->outstanding_reqs) {
1427                 scaling->busy_start_t = curr_t;
1428                 scaling->is_busy_started = true;
1429         } else {
1430                 scaling->busy_start_t = 0;
1431                 scaling->is_busy_started = false;
1432         }
1433         spin_unlock_irqrestore(hba->host->host_lock, flags);
1434         return 0;
1435 }
1436
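/*
 * ufshcd_devfreq_init - register the host controller with devfreq
 *
 * Adds OPPs for the min/max frequency of the first clock in the list and
 * registers a devfreq device using the simple_ondemand governor. Returns 0
 * on success (or if there are no clocks), otherwise a negative error code.
 */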
1437 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1438 {
1439         struct list_head *clk_list = &hba->clk_list_head;
1440         struct ufs_clk_info *clki;
1441         struct devfreq *devfreq;
1442         int ret;
1443
1444         /* Skip devfreq if we don't have any clocks in the list */
1445         if (list_empty(clk_list))
1446                 return 0;
1447
1448         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1449         dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1450         dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1451
1452         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1453                                          &hba->vps->ondemand_data);
1454         devfreq = devfreq_add_device(hba->dev,
1455                         &hba->vps->devfreq_profile,
1456                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1457                         &hba->vps->ondemand_data);
1458         if (IS_ERR(devfreq)) {
1459                 ret = PTR_ERR(devfreq);
1460                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1461
1462                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1463                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1464                 return ret;
1465         }
1466
1467         hba->devfreq = devfreq;
1468
1469         return 0;
1470 }
1471
1472 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1473 {
1474         struct list_head *clk_list = &hba->clk_list_head;
1475         struct ufs_clk_info *clki;
1476
1477         if (!hba->devfreq)
1478                 return;
1479
1480         devfreq_remove_device(hba->devfreq);
1481         hba->devfreq = NULL;
1482
1483         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1484         dev_pm_opp_remove(hba->dev, clki->min_freq);
1485         dev_pm_opp_remove(hba->dev, clki->max_freq);
1486 }
1487
1488 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1489 {
1490         unsigned long flags;
1491
1492         devfreq_suspend_device(hba->devfreq);
1493         spin_lock_irqsave(hba->host->host_lock, flags);
1494         hba->clk_scaling.window_start_t = 0;
1495         spin_unlock_irqrestore(hba->host->host_lock, flags);
1496 }
1497
1498 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1499 {
1500         unsigned long flags;
1501         bool suspend = false;
1502
1503         cancel_work_sync(&hba->clk_scaling.suspend_work);
1504         cancel_work_sync(&hba->clk_scaling.resume_work);
1505
1506         spin_lock_irqsave(hba->host->host_lock, flags);
1507         if (!hba->clk_scaling.is_suspended) {
1508                 suspend = true;
1509                 hba->clk_scaling.is_suspended = true;
1510         }
1511         spin_unlock_irqrestore(hba->host->host_lock, flags);
1512
1513         if (suspend)
1514                 __ufshcd_suspend_clkscaling(hba);
1515 }
1516
1517 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1518 {
1519         unsigned long flags;
1520         bool resume = false;
1521
1522         spin_lock_irqsave(hba->host->host_lock, flags);
1523         if (hba->clk_scaling.is_suspended) {
1524                 resume = true;
1525                 hba->clk_scaling.is_suspended = false;
1526         }
1527         spin_unlock_irqrestore(hba->host->host_lock, flags);
1528
1529         if (resume)
1530                 devfreq_resume_device(hba->devfreq);
1531 }
1532
1533 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1534                 struct device_attribute *attr, char *buf)
1535 {
1536         struct ufs_hba *hba = dev_get_drvdata(dev);
1537
1538         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
1539 }
1540
1541 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1542                 struct device_attribute *attr, const char *buf, size_t count)
1543 {
1544         struct ufs_hba *hba = dev_get_drvdata(dev);
1545         u32 value;
1546         int err = 0;
1547
1548         if (kstrtou32(buf, 0, &value))
1549                 return -EINVAL;
1550
1551         down(&hba->host_sem);
1552         if (!ufshcd_is_user_access_allowed(hba)) {
1553                 err = -EBUSY;
1554                 goto out;
1555         }
1556
1557         value = !!value;
1558         if (value == hba->clk_scaling.is_enabled)
1559                 goto out;
1560
1561         pm_runtime_get_sync(hba->dev);
1562         ufshcd_hold(hba, false);
1563
1564         hba->clk_scaling.is_enabled = value;
1565
1566         if (value) {
1567                 ufshcd_resume_clkscaling(hba);
1568         } else {
1569                 ufshcd_suspend_clkscaling(hba);
1570                 err = ufshcd_devfreq_scale(hba, true);
1571                 if (err)
1572                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1573                                         __func__, err);
1574         }
1575
1576         ufshcd_release(hba);
1577         pm_runtime_put_sync(hba->dev);
1578 out:
1579         up(&hba->host_sem);
1580         return err ? err : count;
1581 }
1582
1583 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1584 {
1585         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1586         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1587         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1588         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1589         hba->clk_scaling.enable_attr.attr.mode = 0644;
1590         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1591                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1592 }
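
/*
 * The attribute registered above sits in the controller device's sysfs
 * directory (the exact path is platform specific), so clock scaling can be
 * toggled from user space along the lines of:
 *
 *	echo 0 > /sys/devices/.../<ufs controller>/clkscale_enable
 */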
1593
1594 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1595 {
1596         if (hba->clk_scaling.enable_attr.attr.name)
1597                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1598 }
1599
1600 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1601 {
1602         char wq_name[sizeof("ufs_clkscaling_00")];
1603
1604         if (!ufshcd_is_clkscaling_supported(hba))
1605                 return;
1606
1607         if (!hba->clk_scaling.min_gear)
1608                 hba->clk_scaling.min_gear = UFS_HS_G1;
1609
1610         INIT_WORK(&hba->clk_scaling.suspend_work,
1611                   ufshcd_clk_scaling_suspend_work);
1612         INIT_WORK(&hba->clk_scaling.resume_work,
1613                   ufshcd_clk_scaling_resume_work);
1614
1615         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1616                  hba->host->host_no);
1617         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1618
1619         hba->clk_scaling.is_initialized = true;
1620 }
1621
1622 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1623 {
1624         if (!hba->clk_scaling.is_initialized)
1625                 return;
1626
1627         ufshcd_remove_clk_scaling_sysfs(hba);
1628         destroy_workqueue(hba->clk_scaling.workq);
1629         ufshcd_devfreq_remove(hba);
1630         hba->clk_scaling.is_initialized = false;
1631 }
1632
1633 static void ufshcd_ungate_work(struct work_struct *work)
1634 {
1635         int ret;
1636         unsigned long flags;
1637         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1638                         clk_gating.ungate_work);
1639
1640         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1641
1642         spin_lock_irqsave(hba->host->host_lock, flags);
1643         if (hba->clk_gating.state == CLKS_ON) {
1644                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1645                 goto unblock_reqs;
1646         }
1647
1648         spin_unlock_irqrestore(hba->host->host_lock, flags);
1649         ufshcd_hba_vreg_set_hpm(hba);
1650         ufshcd_setup_clocks(hba, true);
1651
1652         ufshcd_enable_irq(hba);
1653
1654         /* Exit from hibern8 */
1655         if (ufshcd_can_hibern8_during_gating(hba)) {
1656                 /* Prevent gating in this path */
1657                 hba->clk_gating.is_suspended = true;
1658                 if (ufshcd_is_link_hibern8(hba)) {
1659                         ret = ufshcd_uic_hibern8_exit(hba);
1660                         if (ret)
1661                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1662                                         __func__, ret);
1663                         else
1664                                 ufshcd_set_link_active(hba);
1665                 }
1666                 hba->clk_gating.is_suspended = false;
1667         }
1668 unblock_reqs:
1669         ufshcd_scsi_unblock_requests(hba);
1670 }
1671
1672 /**
1673  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1674  * Also, exit from hibern8 mode and set the link as active.
1675  * @hba: per adapter instance
1676  * @async: This indicates whether caller should ungate clocks asynchronously.
1677  */
1678 int ufshcd_hold(struct ufs_hba *hba, bool async)
1679 {
1680         int rc = 0;
1681         bool flush_result;
1682         unsigned long flags;
1683
1684         if (!ufshcd_is_clkgating_allowed(hba))
1685                 goto out;
1686         spin_lock_irqsave(hba->host->host_lock, flags);
1687         hba->clk_gating.active_reqs++;
1688
1689 start:
1690         switch (hba->clk_gating.state) {
1691         case CLKS_ON:
1692                 /*
1693                  * Wait for the ungate work to complete if in progress.
1694                  * Though the clocks may be in ON state, the link could
1695                  * still be in hibern8 state if hibern8 is allowed
1696                  * during clock gating.
1697                  * Make sure we exit hibern8 state also in addition to
1698                  * clocks being ON.
1699                  */
1700                 if (ufshcd_can_hibern8_during_gating(hba) &&
1701                     ufshcd_is_link_hibern8(hba)) {
1702                         if (async) {
1703                                 rc = -EAGAIN;
1704                                 hba->clk_gating.active_reqs--;
1705                                 break;
1706                         }
1707                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1708                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1709                         if (hba->clk_gating.is_suspended && !flush_result)
1710                                 goto out;
1711                         spin_lock_irqsave(hba->host->host_lock, flags);
1712                         goto start;
1713                 }
1714                 break;
1715         case REQ_CLKS_OFF:
1716                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1717                         hba->clk_gating.state = CLKS_ON;
1718                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1719                                                 hba->clk_gating.state);
1720                         break;
1721                 }
1722                 /*
1723                  * If we are here, it means gating work is either done or
1724                  * currently running. Hence, fall through to cancel gating
1725                  * work and to enable clocks.
1726                  */
1727                 fallthrough;
1728         case CLKS_OFF:
1729                 hba->clk_gating.state = REQ_CLKS_ON;
1730                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1731                                         hba->clk_gating.state);
1732                 if (queue_work(hba->clk_gating.clk_gating_workq,
1733                                &hba->clk_gating.ungate_work))
1734                         ufshcd_scsi_block_requests(hba);
1735                 /*
1736                  * fall through to check if we should wait for this
1737                  * work to be done or not.
1738                  */
1739                 fallthrough;
1740         case REQ_CLKS_ON:
1741                 if (async) {
1742                         rc = -EAGAIN;
1743                         hba->clk_gating.active_reqs--;
1744                         break;
1745                 }
1746
1747                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1748                 flush_work(&hba->clk_gating.ungate_work);
1749                 /* Make sure state is CLKS_ON before returning */
1750                 spin_lock_irqsave(hba->host->host_lock, flags);
1751                 goto start;
1752         default:
1753                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1754                                 __func__, hba->clk_gating.state);
1755                 break;
1756         }
1757         spin_unlock_irqrestore(hba->host->host_lock, flags);
1758 out:
1759         return rc;
1760 }
1761 EXPORT_SYMBOL_GPL(ufshcd_hold);
1762
1763 static void ufshcd_gate_work(struct work_struct *work)
1764 {
1765         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1766                         clk_gating.gate_work.work);
1767         unsigned long flags;
1768         int ret;
1769
1770         spin_lock_irqsave(hba->host->host_lock, flags);
1771         /*
1772          * If this work is being cancelled, the gating state will have
1773          * been marked REQ_CLKS_ON. In that case save time by skipping
1774          * the gating work and exit after changing the clock state to
1775          * CLKS_ON.
1776          */
1777         if (hba->clk_gating.is_suspended ||
1778                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1779                 hba->clk_gating.state = CLKS_ON;
1780                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1781                                         hba->clk_gating.state);
1782                 goto rel_lock;
1783         }
1784
1785         if (hba->clk_gating.active_reqs
1786                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1787                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1788                 || hba->active_uic_cmd || hba->uic_async_done)
1789                 goto rel_lock;
1790
1791         spin_unlock_irqrestore(hba->host->host_lock, flags);
1792
1793         /* put the link into hibern8 mode before turning off clocks */
1794         if (ufshcd_can_hibern8_during_gating(hba)) {
1795                 ret = ufshcd_uic_hibern8_enter(hba);
1796                 if (ret) {
1797                         hba->clk_gating.state = CLKS_ON;
1798                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1799                                         __func__, ret);
1800                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1801                                                 hba->clk_gating.state);
1802                         goto out;
1803                 }
1804                 ufshcd_set_link_hibern8(hba);
1805         }
1806
1807         ufshcd_disable_irq(hba);
1808
1809         ufshcd_setup_clocks(hba, false);
1810
1811         /* Put the host controller in low power mode if possible */
1812         ufshcd_hba_vreg_set_lpm(hba);
1813         /*
1814          * If this work is being cancelled, the gating state will have been
1815          * marked REQ_CLKS_ON. In that case keep the state as REQ_CLKS_ON,
1816          * which still implies that the clocks are off and that a request
1817          * to turn them on is pending. This keeps the state machine intact
1818          * and ultimately prevents the cancel work from running multiple
1819          * times when new requests arrive before the current cancel work
1820          * is done.
1821          */
1822         spin_lock_irqsave(hba->host->host_lock, flags);
1823         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1824                 hba->clk_gating.state = CLKS_OFF;
1825                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1826                                         hba->clk_gating.state);
1827         }
1828 rel_lock:
1829         spin_unlock_irqrestore(hba->host->host_lock, flags);
1830 out:
1831         return;
1832 }
1833
1834 /* host lock must be held before calling this variant */
1835 static void __ufshcd_release(struct ufs_hba *hba)
1836 {
1837         if (!ufshcd_is_clkgating_allowed(hba))
1838                 return;
1839
1840         hba->clk_gating.active_reqs--;
1841
1842         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1843             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1844             hba->outstanding_tasks ||
1845             hba->active_uic_cmd || hba->uic_async_done ||
1846             hba->clk_gating.state == CLKS_OFF)
1847                 return;
1848
1849         hba->clk_gating.state = REQ_CLKS_OFF;
1850         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1851         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1852                            &hba->clk_gating.gate_work,
1853                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1854 }
1855
1856 void ufshcd_release(struct ufs_hba *hba)
1857 {
1858         unsigned long flags;
1859
1860         spin_lock_irqsave(hba->host->host_lock, flags);
1861         __ufshcd_release(hba);
1862         spin_unlock_irqrestore(hba->host->host_lock, flags);
1863 }
1864 EXPORT_SYMBOL_GPL(ufshcd_release);
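
/*
 * A typical hold/release pairing, as used by the UIC and SCSI paths in this
 * file (sketch):
 *
 *	ufshcd_hold(hba, false);
 *	... access host registers or issue commands ...
 *	ufshcd_release(hba);
 *
 * Every successful ufshcd_hold() must be balanced by a ufshcd_release() so
 * that active_reqs can drop to zero and the gate work can be queued.
 */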
1865
1866 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1867                 struct device_attribute *attr, char *buf)
1868 {
1869         struct ufs_hba *hba = dev_get_drvdata(dev);
1870
1871         return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
1872 }
1873
1874 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1875                 struct device_attribute *attr, const char *buf, size_t count)
1876 {
1877         struct ufs_hba *hba = dev_get_drvdata(dev);
1878         unsigned long flags, value;
1879
1880         if (kstrtoul(buf, 0, &value))
1881                 return -EINVAL;
1882
1883         spin_lock_irqsave(hba->host->host_lock, flags);
1884         hba->clk_gating.delay_ms = value;
1885         spin_unlock_irqrestore(hba->host->host_lock, flags);
1886         return count;
1887 }
1888
1889 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1890                 struct device_attribute *attr, char *buf)
1891 {
1892         struct ufs_hba *hba = dev_get_drvdata(dev);
1893
1894         return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
1895 }
1896
1897 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1898                 struct device_attribute *attr, const char *buf, size_t count)
1899 {
1900         struct ufs_hba *hba = dev_get_drvdata(dev);
1901         unsigned long flags;
1902         u32 value;
1903
1904         if (kstrtou32(buf, 0, &value))
1905                 return -EINVAL;
1906
1907         value = !!value;
1908
1909         spin_lock_irqsave(hba->host->host_lock, flags);
1910         if (value == hba->clk_gating.is_enabled)
1911                 goto out;
1912
1913         if (value)
1914                 __ufshcd_release(hba);
1915         else
1916                 hba->clk_gating.active_reqs++;
1917
1918         hba->clk_gating.is_enabled = value;
1919 out:
1920         spin_unlock_irqrestore(hba->host->host_lock, flags);
1921         return count;
1922 }
1923
1924 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
1925 {
1926         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1927         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1928         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1929         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1930         hba->clk_gating.delay_attr.attr.mode = 0644;
1931         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1932                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1933
1934         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1935         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1936         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1937         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1938         hba->clk_gating.enable_attr.attr.mode = 0644;
1939         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1940                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1941 }
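
/*
 * Both attributes live under the controller device's sysfs directory, so the
 * idle time before gating can be tuned from user space, for example:
 *
 *	echo 200 > /sys/devices/.../<ufs controller>/clkgate_delay_ms
 */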
1942
1943 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
1944 {
1945         if (hba->clk_gating.delay_attr.attr.name)
1946                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1947         if (hba->clk_gating.enable_attr.attr.name)
1948                 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1949 }
1950
1951 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1952 {
1953         char wq_name[sizeof("ufs_clk_gating_00")];
1954
1955         if (!ufshcd_is_clkgating_allowed(hba))
1956                 return;
1957
1958         hba->clk_gating.state = CLKS_ON;
1959
1960         hba->clk_gating.delay_ms = 150;
1961         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1962         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1963
1964         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1965                  hba->host->host_no);
1966         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1967                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
1968
1969         ufshcd_init_clk_gating_sysfs(hba);
1970
1971         hba->clk_gating.is_enabled = true;
1972         hba->clk_gating.is_initialized = true;
1973 }
1974
1975 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1976 {
1977         if (!hba->clk_gating.is_initialized)
1978                 return;
1979         ufshcd_remove_clk_gating_sysfs(hba);
1980         cancel_work_sync(&hba->clk_gating.ungate_work);
1981         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1982         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1983         hba->clk_gating.is_initialized = false;
1984 }
1985
1986 /* Must be called with host lock acquired */
1987 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1988 {
1989         bool queue_resume_work = false;
1990         ktime_t curr_t = ktime_get();
1991
1992         if (!ufshcd_is_clkscaling_supported(hba))
1993                 return;
1994
1995         if (!hba->clk_scaling.active_reqs++)
1996                 queue_resume_work = true;
1997
1998         if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
1999                 return;
2000
2001         if (queue_resume_work)
2002                 queue_work(hba->clk_scaling.workq,
2003                            &hba->clk_scaling.resume_work);
2004
2005         if (!hba->clk_scaling.window_start_t) {
2006                 hba->clk_scaling.window_start_t = curr_t;
2007                 hba->clk_scaling.tot_busy_t = 0;
2008                 hba->clk_scaling.is_busy_started = false;
2009         }
2010
2011         if (!hba->clk_scaling.is_busy_started) {
2012                 hba->clk_scaling.busy_start_t = curr_t;
2013                 hba->clk_scaling.is_busy_started = true;
2014         }
2015 }
2016
2017 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2018 {
2019         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2020
2021         if (!ufshcd_is_clkscaling_supported(hba))
2022                 return;
2023
2024         if (!hba->outstanding_reqs && scaling->is_busy_started) {
2025                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2026                                         scaling->busy_start_t));
2027                 scaling->busy_start_t = 0;
2028                 scaling->is_busy_started = false;
2029         }
2030 }
2031 /**
2032  * ufshcd_send_command - Send SCSI or device management commands
2033  * @hba: per adapter instance
2034  * @task_tag: Task tag of the command
2035  */
2036 static inline
2037 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
2038 {
2039         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2040
2041         lrbp->issue_time_stamp = ktime_get();
2042         lrbp->compl_time_stamp = ktime_set(0, 0);
2043         ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
2044         ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2045         ufshcd_clk_scaling_start_busy(hba);
2046         __set_bit(task_tag, &hba->outstanding_reqs);
2047         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2048         /* Make sure that doorbell is committed immediately */
2049         wmb();
2050 }
2051
2052 /**
2053  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2054  * @lrbp: pointer to local reference block
2055  */
2056 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2057 {
2058         int len;
2059         if (lrbp->sense_buffer &&
2060             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
2061                 int len_to_copy;
2062
2063                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2064                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2065
2066                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2067                        len_to_copy);
2068         }
2069 }
2070
2071 /**
2072  * ufshcd_copy_query_response() - Copy the Query Response and the data
2073  * descriptor
2074  * @hba: per adapter instance
2075  * @lrbp: pointer to local reference block
2076  */
2077 static
2078 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2079 {
2080         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2081
2082         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2083
2084         /* Get the descriptor */
2085         if (hba->dev_cmd.query.descriptor &&
2086             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2087                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2088                                 GENERAL_UPIU_REQUEST_SIZE;
2089                 u16 resp_len;
2090                 u16 buf_len;
2091
2092                 /* data segment length */
2093                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
2094                                                 MASK_QUERY_DATA_SEG_LEN;
2095                 buf_len = be16_to_cpu(
2096                                 hba->dev_cmd.query.request.upiu_req.length);
2097                 if (likely(buf_len >= resp_len)) {
2098                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2099                 } else {
2100                         dev_warn(hba->dev,
2101                                  "%s: rsp size %d is bigger than buffer size %d",
2102                                  __func__, resp_len, buf_len);
2103                         return -EINVAL;
2104                 }
2105         }
2106
2107         return 0;
2108 }
2109
2110 /**
2111  * ufshcd_hba_capabilities - Read controller capabilities
2112  * @hba: per adapter instance
2113  *
2114  * Return: 0 on success, negative on error.
2115  */
2116 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2117 {
2118         int err;
2119
2120         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2121
2122         /* nutrs and nutmrs are 0 based values */
2123         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2124         hba->nutmrs =
2125         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2126
2127         /* Read crypto capabilities */
2128         err = ufshcd_hba_init_crypto_capabilities(hba);
2129         if (err)
2130                 dev_err(hba->dev, "crypto setup failed\n");
2131
2132         return err;
2133 }
2134
2135 /**
2136  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2137  *                            to accept UIC commands
2138  * @hba: per adapter instance
2139  * Return true if the controller can accept a UIC command, else false
2140  */
2141 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2142 {
2143         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
2147 }
2148
2149 /**
2150  * ufshcd_get_upmcrs - Get the power mode change request status
2151  * @hba: Pointer to adapter instance
2152  *
2153  * This function gets the UPMCRS field of HCS register
2154  * Returns value of UPMCRS field
2155  */
2156 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2157 {
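        /* UPMCRS occupies bits 10:8 of the host controller status register */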
2158         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2159 }
2160
2161 /**
2162  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2163  * @hba: per adapter instance
2164  * @uic_cmd: UIC command
2165  *
2166  * Mutex must be held.
2167  */
2168 static inline void
2169 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2170 {
2171         WARN_ON(hba->active_uic_cmd);
2172
2173         hba->active_uic_cmd = uic_cmd;
2174
2175         /* Write Args */
2176         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2177         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2178         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2179
2180         ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2181
2182         /* Write UIC Cmd */
2183         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2184                       REG_UIC_COMMAND);
2185 }
2186
2187 /**
2188  * ufshcd_wait_for_uic_cmd - Wait for the completion of a UIC command
2189  * @hba: per adapter instance
2190  * @uic_cmd: UIC command
2191  *
2192  * Must be called with mutex held.
2193  * Returns 0 only if success.
2194  */
2195 static int
2196 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2197 {
2198         int ret;
2199         unsigned long flags;
2200
2201         if (wait_for_completion_timeout(&uic_cmd->done,
2202                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2203                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2204         } else {
2205                 ret = -ETIMEDOUT;
2206                 dev_err(hba->dev,
2207                         "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2208                         uic_cmd->command, uic_cmd->argument3);
2209
2210                 if (!uic_cmd->cmd_active) {
2211                         dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2212                                 __func__);
2213                         ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2214                 }
2215         }
2216
2217         spin_lock_irqsave(hba->host->host_lock, flags);
2218         hba->active_uic_cmd = NULL;
2219         spin_unlock_irqrestore(hba->host->host_lock, flags);
2220
2221         return ret;
2222 }
2223
2224 /**
2225  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2226  * @hba: per adapter instance
2227  * @uic_cmd: UIC command
2228  * @completion: initialize the completion only if this is set to true
2229  *
2230  * Identical to ufshcd_send_uic_cmd() except that it does not acquire the
2231  * mutex itself. Must be called with the mutex held and host_lock locked.
2232  * Returns 0 only if success.
2233  */
2234 static int
2235 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2236                       bool completion)
2237 {
2238         if (!ufshcd_ready_for_uic_cmd(hba)) {
2239                 dev_err(hba->dev,
2240                         "Controller not ready to accept UIC commands\n");
2241                 return -EIO;
2242         }
2243
2244         if (completion)
2245                 init_completion(&uic_cmd->done);
2246
2247         uic_cmd->cmd_active = 1;
2248         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2249
2250         return 0;
2251 }
2252
2253 /**
2254  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2255  * @hba: per adapter instance
2256  * @uic_cmd: UIC command
2257  *
2258  * Returns 0 only if success.
2259  */
2260 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2261 {
2262         int ret;
2263         unsigned long flags;
2264
2265         ufshcd_hold(hba, false);
2266         mutex_lock(&hba->uic_cmd_mutex);
2267         ufshcd_add_delay_before_dme_cmd(hba);
2268
2269         spin_lock_irqsave(hba->host->host_lock, flags);
2270         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2271         spin_unlock_irqrestore(hba->host->host_lock, flags);
2272         if (!ret)
2273                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2274
2275         mutex_unlock(&hba->uic_cmd_mutex);
2276
2277         ufshcd_release(hba);
2278         return ret;
2279 }
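
/*
 * A sketch of how a raw UIC command is built by callers (the dme_* helpers
 * elsewhere in this file wrap this pattern); attr_sel here stands for any
 * UniPro attribute selector:
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = attr_sel,
 *	};
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		mib_val = uic_cmd.argument3;
 */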
2280
2281 /**
2282  * ufshcd_map_sg - Map scatter-gather list to prdt
2283  * @hba: per adapter instance
2284  * @lrbp: pointer to local reference block
2285  *
2286  * Returns 0 in case of success, non-zero value in case of failure
2287  */
2288 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2289 {
2290         struct ufshcd_sg_entry *prd_table;
2291         struct scatterlist *sg;
2292         struct scsi_cmnd *cmd;
2293         int sg_segments;
2294         int i;
2295
2296         cmd = lrbp->cmd;
2297         sg_segments = scsi_dma_map(cmd);
2298         if (sg_segments < 0)
2299                 return sg_segments;
2300
2301         if (sg_segments) {
2302
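                /*
                 * Some hosts expect the PRDT length in bytes rather than in
                 * number of entries; the quirk below selects the unit used.
                 */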
2303                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2304                         lrbp->utr_descriptor_ptr->prd_table_length =
2305                                 cpu_to_le16((sg_segments *
2306                                         sizeof(struct ufshcd_sg_entry)));
2307                 else
2308                         lrbp->utr_descriptor_ptr->prd_table_length =
2309                                 cpu_to_le16((u16) (sg_segments));
2310
2311                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2312
2313                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2314                         prd_table[i].size  =
2315                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2316                         prd_table[i].base_addr =
2317                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2318                         prd_table[i].upper_addr =
2319                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2320                         prd_table[i].reserved = 0;
2321                 }
2322         } else {
2323                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2324         }
2325
2326         return 0;
2327 }
2328
2329 /**
2330  * ufshcd_enable_intr - enable interrupts
2331  * @hba: per adapter instance
2332  * @intrs: interrupt bits
2333  */
2334 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2335 {
2336         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2337
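        /*
         * UFSHCI 1.0 hosts need special handling: the bits covered by
         * INTERRUPT_MASK_RW_VER_10 are carried over unchanged and only the
         * requested interrupts that are not already enabled are set.
         */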
2338         if (hba->ufs_version == UFSHCI_VERSION_10) {
2339                 u32 rw;
2340                 rw = set & INTERRUPT_MASK_RW_VER_10;
2341                 set = rw | ((set ^ intrs) & intrs);
2342         } else {
2343                 set |= intrs;
2344         }
2345
2346         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2347 }
2348
2349 /**
2350  * ufshcd_disable_intr - disable interrupts
2351  * @hba: per adapter instance
2352  * @intrs: interrupt bits
2353  */
2354 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2355 {
2356         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2357
2358         if (hba->ufs_version == UFSHCI_VERSION_10) {
2359                 u32 rw;
2360                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2361                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2362                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2363
2364         } else {
2365                 set &= ~intrs;
2366         }
2367
2368         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2369 }
2370
2371 /**
2372  * ufshcd_prepare_req_desc_hdr() - Fill the request's header
2373  * descriptor according to the request
2374  * @lrbp: pointer to local reference block
2375  * @upiu_flags: flags required in the header
2376  * @cmd_dir: request's data direction
2377  */
2378 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2379                         u8 *upiu_flags, enum dma_data_direction cmd_dir)
2380 {
2381         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2382         u32 data_direction;
2383         u32 dword_0;
2384         u32 dword_1 = 0;
2385         u32 dword_3 = 0;
2386
2387         if (cmd_dir == DMA_FROM_DEVICE) {
2388                 data_direction = UTP_DEVICE_TO_HOST;
2389                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2390         } else if (cmd_dir == DMA_TO_DEVICE) {
2391                 data_direction = UTP_HOST_TO_DEVICE;
2392                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2393         } else {
2394                 data_direction = UTP_NO_DATA_TRANSFER;
2395                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2396         }
2397
2398         dword_0 = data_direction | (lrbp->command_type
2399                                 << UPIU_COMMAND_TYPE_OFFSET);
2400         if (lrbp->intr_cmd)
2401                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2402
2403         /* Prepare crypto related dwords */
2404         ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2405
2406         /* Transfer request descriptor header fields */
2407         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2408         req_desc->header.dword_1 = cpu_to_le32(dword_1);
2409         /*
2410          * Assign an invalid value to the command status. The controller
2411          * updates the OCS field with the actual command status upon
2412          * command completion.
2413          */
2414         req_desc->header.dword_2 =
2415                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2416         req_desc->header.dword_3 = cpu_to_le32(dword_3);
2417
2418         req_desc->prd_table_length = 0;
2419 }
2420
2421 /**
2422  * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
2423  * for SCSI commands
2424  * @lrbp: local reference block pointer
2425  * @upiu_flags: flags
2426  */
2427 static
2428 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2429 {
2430         struct scsi_cmnd *cmd = lrbp->cmd;
2431         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2432         unsigned short cdb_len;
2433
2434         /* command descriptor fields */
2435         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2436                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2437                                 lrbp->lun, lrbp->task_tag);
2438         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2439                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2440
2441         /* Total EHS length and Data segment length will be zero */
2442         ucd_req_ptr->header.dword_2 = 0;
2443
2444         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2445
2446         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2447         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2448         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2449
2450         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2451 }
2452
2453 /**
2454  * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc
2455  * for query requests
2456  * @hba: UFS hba
2457  * @lrbp: local reference block pointer
2458  * @upiu_flags: flags
2459  */
2460 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2461                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2462 {
2463         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2464         struct ufs_query *query = &hba->dev_cmd.query;
2465         u16 len = be16_to_cpu(query->request.upiu_req.length);
2466
2467         /* Query request header */
2468         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2469                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2470                         lrbp->lun, lrbp->task_tag);
2471         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2472                         0, query->request.query_func, 0, 0);
2473
2474         /* The data segment length is only needed for WRITE_DESC */
2475         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2476                 ucd_req_ptr->header.dword_2 =
2477                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2478         else
2479                 ucd_req_ptr->header.dword_2 = 0;
2480
2481         /* Copy the Query Request buffer as is */
2482         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2483                         QUERY_OSF_SIZE);
2484
2485         /* Copy the Descriptor */
2486         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2487                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2488
2489         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2490 }
2491
2492 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2493 {
2494         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2495
2496         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2497
2498         /* command descriptor fields */
2499         ucd_req_ptr->header.dword_0 =
2500                 UPIU_HEADER_DWORD(
2501                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2502         /* clear rest of the fields of basic header */
2503         ucd_req_ptr->header.dword_1 = 0;
2504         ucd_req_ptr->header.dword_2 = 0;
2505
2506         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2507 }
2508
2509 /**
2510  * ufshcd_compose_devman_upiu - compose a UFS Protocol Information Unit
2511  *                              (UPIU) for device management purposes
2512  * @hba: per adapter instance
2513  * @lrbp: pointer to local reference block
2514  */
2515 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2516                                       struct ufshcd_lrb *lrbp)
2517 {
2518         u8 upiu_flags;
2519         int ret = 0;
2520
2521         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2522             (hba->ufs_version == UFSHCI_VERSION_11))
2523                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2524         else
2525                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2526
2527         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2528         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2529                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2530         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2531                 ufshcd_prepare_utp_nop_upiu(lrbp);
2532         else
2533                 ret = -EINVAL;
2534
2535         return ret;
2536 }
2537
2538 /**
2539  * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
2540  *                         for SCSI purposes
2541  * @hba: per adapter instance
2542  * @lrbp: pointer to local reference block
2543  */
2544 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2545 {
2546         u8 upiu_flags;
2547         int ret = 0;
2548
2549         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2550             (hba->ufs_version == UFSHCI_VERSION_11))
2551                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2552         else
2553                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2554
2555         if (likely(lrbp->cmd)) {
2556                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2557                                                 lrbp->cmd->sc_data_direction);
2558                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2559         } else {
2560                 ret = -EINVAL;
2561         }
2562
2563         return ret;
2564 }
2565
2566 /**
2567  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2568  * @upiu_wlun_id: UPIU W-LUN id
2569  *
2570  * Returns SCSI W-LUN id
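 *
 * For example, the UFS Device well-known LUN (0xD0 in UPIU encoding) maps to
 * SCSI W-LUN 0xC150, assuming SCSI_W_LUN_BASE is 0xc100 and UFS_UPIU_WLUN_ID
 * is bit 7.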
2571  */
2572 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2573 {
2574         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2575 }
2576
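/*
 * Point one local reference block at its slice of the UTRD list and of the
 * per-tag command descriptor (request UPIU, response UPIU and PRDT), caching
 * both the kernel virtual and the DMA addresses of each piece.
 */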
2577 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2578 {
2579         struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2580         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2581         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2582                 i * sizeof(struct utp_transfer_cmd_desc);
2583         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2584                                        response_upiu);
2585         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2586
2587         lrb->utr_descriptor_ptr = utrdlp + i;
2588         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2589                 i * sizeof(struct utp_transfer_req_desc);
2590         lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2591         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2592         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2593         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2594         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2595         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2596 }
2597
2598 /**
2599  * ufshcd_queuecommand - main entry point for SCSI requests
2600  * @host: SCSI host pointer
2601  * @cmd: command from SCSI Midlayer
2602  *
2603  * Returns 0 for success, non-zero in case of failure
2604  */
2605 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2606 {
2607         struct ufshcd_lrb *lrbp;
2608         struct ufs_hba *hba;
2609         unsigned long flags;
2610         int tag;
2611         int err = 0;
2612
2613         hba = shost_priv(host);
2614
2615         tag = cmd->request->tag;
2616         if (!ufshcd_valid_tag(hba, tag)) {
2617                 dev_err(hba->dev,
2618                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2619                         __func__, tag, cmd, cmd->request);
2620                 BUG();
2621         }
2622
2623         if (!down_read_trylock(&hba->clk_scaling_lock))
2624                 return SCSI_MLQUEUE_HOST_BUSY;
2625
2626         hba->req_abort_count = 0;
2627
2628         err = ufshcd_hold(hba, true);
2629         if (err) {
2630                 err = SCSI_MLQUEUE_HOST_BUSY;
2631                 goto out;
2632         }
2633         WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2634                 (hba->clk_gating.state != CLKS_ON));
2635
2636         lrbp = &hba->lrb[tag];
2637         if (unlikely(lrbp->in_use)) {
2638                 if (hba->pm_op_in_progress)
2639                         set_host_byte(cmd, DID_BAD_TARGET);
2640                 else
2641                         err = SCSI_MLQUEUE_HOST_BUSY;
2642                 ufshcd_release(hba);
2643                 goto out;
2644         }
2645
2646         WARN_ON(lrbp->cmd);
2647         lrbp->cmd = cmd;
2648         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2649         lrbp->sense_buffer = cmd->sense_buffer;
2650         lrbp->task_tag = tag;
2651         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2652         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2653
2654         ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2655
2656         lrbp->req_abort_skip = false;
2657
2658         ufshcd_comp_scsi_upiu(hba, lrbp);
2659
2660         err = ufshcd_map_sg(hba, lrbp);
2661         if (err) {
2662                 lrbp->cmd = NULL;
2663                 ufshcd_release(hba);
2664                 goto out;
2665         }
2666         /* Make sure descriptors are ready before ringing the doorbell */
2667         wmb();
2668
2669         spin_lock_irqsave(hba->host->host_lock, flags);
2670         switch (hba->ufshcd_state) {
2671         case UFSHCD_STATE_OPERATIONAL:
2672         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2673                 break;
2674         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2675                 /*
2676                  * pm_runtime_get_sync() is used at error handling preparation
2677                  * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2678                  * PM ops, it can never be finished if we let SCSI layer keep
2679                  * retrying it, which gets err handler stuck forever. Neither
2680                  * can we let the scsi cmd pass through, because UFS is in bad
2681                  * state, the scsi cmd may eventually time out, which will get
2682                  * err handler blocked for too long. So, just fail the scsi cmd
2683                  * sent from PM ops, err handler can recover PM error anyways.
2684                  */
2685                 if (hba->pm_op_in_progress) {
2686                         hba->force_reset = true;
2687                         set_host_byte(cmd, DID_BAD_TARGET);
2688                         goto out_compl_cmd;
2689                 }
2690                 fallthrough;
2691         case UFSHCD_STATE_RESET:
2692                 err = SCSI_MLQUEUE_HOST_BUSY;
2693                 goto out_compl_cmd;
2694         case UFSHCD_STATE_ERROR:
2695                 set_host_byte(cmd, DID_ERROR);
2696                 goto out_compl_cmd;
2697         default:
2698                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2699                                 __func__, hba->ufshcd_state);
2700                 set_host_byte(cmd, DID_BAD_TARGET);
2701                 goto out_compl_cmd;
2702         }
2703         ufshcd_send_command(hba, tag);
2704         spin_unlock_irqrestore(hba->host->host_lock, flags);
2705         goto out;
2706
2707 out_compl_cmd:
2708         scsi_dma_unmap(lrbp->cmd);
2709         lrbp->cmd = NULL;
2710         spin_unlock_irqrestore(hba->host->host_lock, flags);
2711         ufshcd_release(hba);
2712         if (!err)
2713                 cmd->scsi_done(cmd);
2714 out:
2715         up_read(&hba->clk_scaling_lock);
2716         return err;
2717 }
2718
2719 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2720                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2721 {
2722         lrbp->cmd = NULL;
2723         lrbp->sense_bufflen = 0;
2724         lrbp->sense_buffer = NULL;
2725         lrbp->task_tag = tag;
2726         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2727         lrbp->intr_cmd = true; /* No interrupt aggregation */
2728         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
2729         hba->dev_cmd.type = cmd_type;
2730
2731         return ufshcd_compose_devman_upiu(hba, lrbp);
2732 }
2733
2734 static int
2735 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2736 {
2737         int err = 0;
2738         unsigned long flags;
2739         u32 mask = 1 << tag;
2740
2741         /* clear outstanding transaction before retry */
2742         spin_lock_irqsave(hba->host->host_lock, flags);
2743         ufshcd_utrl_clear(hba, tag);
2744         spin_unlock_irqrestore(hba->host->host_lock, flags);
2745
2746         /*
2747          * Wait for the hardware to clear the corresponding bit in the
2748          * doorbell register. The maximum wait is 1 second.
2749          */
2750         err = ufshcd_wait_for_register(hba,
2751                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2752                         mask, ~mask, 1000, 1000);
2753
2754         return err;
2755 }
2756
2757 static int
2758 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2759 {
2760         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2761
2762         /* Get the UPIU response */
2763         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2764                                 UPIU_RSP_CODE_OFFSET;
2765         return query_res->response;
2766 }
2767
2768 /**
2769  * ufshcd_dev_cmd_completion() - handles device management command responses
2770  * @hba: per adapter instance
2771  * @lrbp: pointer to local reference block
2772  */
2773 static int
2774 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2775 {
2776         int resp;
2777         int err = 0;
2778
2779         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2780         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2781
2782         switch (resp) {
2783         case UPIU_TRANSACTION_NOP_IN:
2784                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2785                         err = -EINVAL;
2786                         dev_err(hba->dev, "%s: unexpected response %x\n",
2787                                         __func__, resp);
2788                 }
2789                 break;
2790         case UPIU_TRANSACTION_QUERY_RSP:
2791                 err = ufshcd_check_query_response(hba, lrbp);
2792                 if (!err)
2793                         err = ufshcd_copy_query_response(hba, lrbp);
2794                 break;
2795         case UPIU_TRANSACTION_REJECT_UPIU:
2796                 /* TODO: handle Reject UPIU Response */
2797                 err = -EPERM;
2798                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2799                                 __func__);
2800                 break;
2801         default:
2802                 err = -EINVAL;
2803                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2804                                 __func__, resp);
2805                 break;
2806         }
2807
2808         return err;
2809 }
2810
2811 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2812                 struct ufshcd_lrb *lrbp, int max_timeout)
2813 {
2814         int err = 0;
2815         unsigned long time_left;
2816         unsigned long flags;
2817
2818         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2819                         msecs_to_jiffies(max_timeout));
2820
2821         /* Make sure descriptors are ready before ringing the doorbell */
2822         wmb();
2823         spin_lock_irqsave(hba->host->host_lock, flags);
2824         hba->dev_cmd.complete = NULL;
2825         if (likely(time_left)) {
2826                 err = ufshcd_get_tr_ocs(lrbp);
2827                 if (!err)
2828                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2829         }
2830         spin_unlock_irqrestore(hba->host->host_lock, flags);
2831
2832         if (!time_left) {
2833                 err = -ETIMEDOUT;
2834                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2835                         __func__, lrbp->task_tag);
2836                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2837                         /* successfully cleared the command, retry if needed */
2838                         err = -EAGAIN;
2839                 /*
2840                  * In case of an error, after clearing the doorbell, we
2841                  * also need to clear the corresponding bit in
2842                  * hba->outstanding_reqs.
2843                  */
2844                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2845         }
2846
2847         return err;
2848 }
2849
2850 /**
2851  * ufshcd_exec_dev_cmd - API for sending device management requests
2852  * @hba: UFS hba
2853  * @cmd_type: specifies the type (NOP, Query...)
2854  * @timeout: timeout in milliseconds
2855  *
2856  * NOTE: Since there is only one available tag for device management commands,
2857  * it is expected you hold the hba->dev_cmd.lock mutex.
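 *
 * A caller therefore looks roughly like:
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);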
2858  */
2859 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2860                 enum dev_cmd_type cmd_type, int timeout)
2861 {
2862         struct request_queue *q = hba->cmd_queue;
2863         struct request *req;
2864         struct ufshcd_lrb *lrbp;
2865         int err;
2866         int tag;
2867         struct completion wait;
2868         unsigned long flags;
2869
2870         down_read(&hba->clk_scaling_lock);
2871
2872         /*
2873          * Get a free slot; blk_get_request() sleeps if no tag is available.
2874          * Even though the sleep is potentially indefinite, the maximum wait
2875          * time is bounded by the SCSI request timeout.
2876          */
2877         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2878         if (IS_ERR(req)) {
2879                 err = PTR_ERR(req);
2880                 goto out_unlock;
2881         }
2882         tag = req->tag;
2883         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2884
2885         init_completion(&wait);
2886         lrbp = &hba->lrb[tag];
2887         if (unlikely(lrbp->in_use)) {
2888                 err = -EBUSY;
2889                 goto out;
2890         }
2891
2892         WARN_ON(lrbp->cmd);
2893         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2894         if (unlikely(err))
2895                 goto out_put_tag;
2896
2897         hba->dev_cmd.complete = &wait;
2898
2899         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
2900         /* Make sure descriptors are ready before ringing the doorbell */
2901         wmb();
2902         spin_lock_irqsave(hba->host->host_lock, flags);
2903         ufshcd_send_command(hba, tag);
2904         spin_unlock_irqrestore(hba->host->host_lock, flags);
2905
2906         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2907
2908 out:
2909         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
2910                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
2911
2912 out_put_tag:
2913         blk_put_request(req);
2914 out_unlock:
2915         up_read(&hba->clk_scaling_lock);
2916         return err;
2917 }
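
/*
 * Illustrative sketch only (not wired into the driver): issuing a NOP OUT
 * device management command through ufshcd_exec_dev_cmd().  As noted above,
 * the caller must hold hba->dev_cmd.lock; the function name below is
 * hypothetical.
 */
static int __maybe_unused ufshcd_example_send_nop_out(struct ufs_hba *hba)
{
        int err;

        ufshcd_hold(hba, false);
        mutex_lock(&hba->dev_cmd.lock);
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
        mutex_unlock(&hba->dev_cmd.lock);
        ufshcd_release(hba);

        return err;
}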
2918
2919 /**
2920  * ufshcd_init_query() - init the query response and request parameters
2921  * @hba: per-adapter instance
2922  * @request: address of the request pointer to be initialized
2923  * @response: address of the response pointer to be initialized
2924  * @opcode: operation to perform
2925  * @idn: flag idn to access
2926  * @index: LU number to access
2927  * @selector: query/flag/descriptor further identification
2928  */
2929 static inline void ufshcd_init_query(struct ufs_hba *hba,
2930                 struct ufs_query_req **request, struct ufs_query_res **response,
2931                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2932 {
2933         *request = &hba->dev_cmd.query.request;
2934         *response = &hba->dev_cmd.query.response;
2935         memset(*request, 0, sizeof(struct ufs_query_req));
2936         memset(*response, 0, sizeof(struct ufs_query_res));
2937         (*request)->upiu_req.opcode = opcode;
2938         (*request)->upiu_req.idn = idn;
2939         (*request)->upiu_req.index = index;
2940         (*request)->upiu_req.selector = selector;
2941 }
2942
2943 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2944         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
2945 {
2946         int ret;
2947         int retries;
2948
2949         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2950                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2951                 if (ret)
2952                         dev_dbg(hba->dev,
2953                                 "%s: failed with error %d, retries %d\n",
2954                                 __func__, ret, retries);
2955                 else
2956                         break;
2957         }
2958
2959         if (ret)
2960                 dev_err(hba->dev,
2961                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2962                         __func__, opcode, idn, ret, retries);
2963         return ret;
2964 }
2965
2966 /**
2967  * ufshcd_query_flag() - API function for sending flag query requests
2968  * @hba: per-adapter instance
2969  * @opcode: flag query to perform
2970  * @idn: flag idn to access
2971  * @index: flag index to access
2972  * @flag_res: the flag value after the query request completes
2973  *
2974  * Returns 0 for success, non-zero in case of failure
2975  */
2976 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2977                         enum flag_idn idn, u8 index, bool *flag_res)
2978 {
2979         struct ufs_query_req *request = NULL;
2980         struct ufs_query_res *response = NULL;
2981         int err, selector = 0;
2982         int timeout = QUERY_REQ_TIMEOUT;
2983
2984         BUG_ON(!hba);
2985
2986         ufshcd_hold(hba, false);
2987         mutex_lock(&hba->dev_cmd.lock);
2988         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2989                         selector);
2990
2991         switch (opcode) {
2992         case UPIU_QUERY_OPCODE_SET_FLAG:
2993         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2994         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2995                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2996                 break;
2997         case UPIU_QUERY_OPCODE_READ_FLAG:
2998                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2999                 if (!flag_res) {
3000                         /* No dummy reads */
3001                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3002                                         __func__);
3003                         err = -EINVAL;
3004                         goto out_unlock;
3005                 }
3006                 break;
3007         default:
3008                 dev_err(hba->dev,
3009                         "%s: Expected query flag opcode but got = %d\n",
3010                         __func__, opcode);
3011                 err = -EINVAL;
3012                 goto out_unlock;
3013         }
3014
3015         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3016
3017         if (err) {
3018                 dev_err(hba->dev,
3019                         "%s: Sending flag query for idn %d failed, err = %d\n",
3020                         __func__, idn, err);
3021                 goto out_unlock;
3022         }
3023
3024         if (flag_res)
3025                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3026                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3027
3028 out_unlock:
3029         mutex_unlock(&hba->dev_cmd.lock);
3030         ufshcd_release(hba);
3031         return err;
3032 }
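
/*
 * Illustrative sketch only (not wired into the driver): reading the
 * fDeviceInit flag with ufshcd_query_flag().  The flag IDN comes from
 * ufs.h; the wrapper name below is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_fdeviceinit(struct ufs_hba *hba,
                                                          bool *flag_res)
{
        return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                 QUERY_FLAG_IDN_FDEVICEINIT, 0, flag_res);
}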
3033
3034 /**
3035  * ufshcd_query_attr - API function for sending attribute requests
3036  * @hba: per-adapter instance
3037  * @opcode: attribute opcode
3038  * @idn: attribute idn to access
3039  * @index: index field
3040  * @selector: selector field
3041  * @attr_val: the attribute value after the query request completes
3042  *
3043  * Returns 0 for success, non-zero in case of failure
3044  */
3045 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3046                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3047 {
3048         struct ufs_query_req *request = NULL;
3049         struct ufs_query_res *response = NULL;
3050         int err;
3051
3052         BUG_ON(!hba);
3053
3054         if (!attr_val) {
3055                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3056                                 __func__, opcode);
3057                 return -EINVAL;
3058         }
3059
3060         ufshcd_hold(hba, false);
3061
3062         mutex_lock(&hba->dev_cmd.lock);
3063         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3064                         selector);
3065
3066         switch (opcode) {
3067         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3068                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3069                 request->upiu_req.value = cpu_to_be32(*attr_val);
3070                 break;
3071         case UPIU_QUERY_OPCODE_READ_ATTR:
3072                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3073                 break;
3074         default:
3075                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3076                                 __func__, opcode);
3077                 err = -EINVAL;
3078                 goto out_unlock;
3079         }
3080
3081         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3082
3083         if (err) {
3084                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3085                                 __func__, opcode, idn, index, err);
3086                 goto out_unlock;
3087         }
3088
3089         *attr_val = be32_to_cpu(response->upiu_res.value);
3090
3091 out_unlock:
3092         mutex_unlock(&hba->dev_cmd.lock);
3093         ufshcd_release(hba);
3094         return err;
3095 }
3096
3097 /**
3098  * ufshcd_query_attr_retry() - API function for sending a query
3099  * attribute request with retries
3100  * @hba: per-adapter instance
3101  * @opcode: attribute opcode
3102  * @idn: attribute idn to access
3103  * @index: index field
3104  * @selector: selector field
3105  * @attr_val: the attribute value after the query request
3106  * completes
3107  *
3108  * Returns 0 for success, non-zero in case of failure
3109  */
3110 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3111         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3112         u32 *attr_val)
3113 {
3114         int ret = 0;
3115         u32 retries;
3116
3117         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3118                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3119                                                 selector, attr_val);
3120                 if (ret)
3121                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3122                                 __func__, ret, retries);
3123                 else
3124                         break;
3125         }
3126
3127         if (ret)
3128                 dev_err(hba->dev,
3129                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3130                         __func__, idn, ret, QUERY_REQ_RETRIES);
3131         return ret;
3132 }
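
/*
 * Illustrative sketch only (not wired into the driver): reading the
 * bBootLunEn attribute with the retry helper above.  The attribute IDN is
 * assumed to be QUERY_ATTR_IDN_BOOT_LU_EN from ufs.h; the wrapper name is
 * hypothetical.
 */
static int __maybe_unused ufshcd_example_read_boot_lun_en(struct ufs_hba *hba,
                                                          u32 *boot_lun_en)
{
        return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                                       QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0,
                                       boot_lun_en);
}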
3133
3134 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3135                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3136                         u8 selector, u8 *desc_buf, int *buf_len)
3137 {
3138         struct ufs_query_req *request = NULL;
3139         struct ufs_query_res *response = NULL;
3140         int err;
3141
3142         BUG_ON(!hba);
3143
3144         if (!desc_buf) {
3145                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3146                                 __func__, opcode);
3147                 return -EINVAL;
3148         }
3149
3150         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3151                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3152                                 __func__, *buf_len);
3153                 return -EINVAL;
3154         }
3155
3156         ufshcd_hold(hba, false);
3157
3158         mutex_lock(&hba->dev_cmd.lock);
3159         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3160                         selector);
3161         hba->dev_cmd.query.descriptor = desc_buf;
3162         request->upiu_req.length = cpu_to_be16(*buf_len);
3163
3164         switch (opcode) {
3165         case UPIU_QUERY_OPCODE_WRITE_DESC:
3166                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3167                 break;
3168         case UPIU_QUERY_OPCODE_READ_DESC:
3169                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3170                 break;
3171         default:
3172                 dev_err(hba->dev,
3173                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3174                                 __func__, opcode);
3175                 err = -EINVAL;
3176                 goto out_unlock;
3177         }
3178
3179         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3180
3181         if (err) {
3182                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3183                                 __func__, opcode, idn, index, err);
3184                 goto out_unlock;
3185         }
3186
3187         *buf_len = be16_to_cpu(response->upiu_res.length);
3188
3189 out_unlock:
3190         hba->dev_cmd.query.descriptor = NULL;
3191         mutex_unlock(&hba->dev_cmd.lock);
3192         ufshcd_release(hba);
3193         return err;
3194 }
3195
3196 /**
3197  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3198  * @hba: per-adapter instance
3199  * @opcode: attribute opcode
3200  * @idn: attribute idn to access
3201  * @index: index field
3202  * @selector: selector field
3203  * @desc_buf: the buffer that contains the descriptor
3204  * @buf_len: length parameter passed to the device
3205  *
3206  * Returns 0 for success, non-zero in case of failure.
3207  * The buf_len parameter will contain, on return, the length parameter
3208  * received in the response.
3209  */
3210 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3211                                   enum query_opcode opcode,
3212                                   enum desc_idn idn, u8 index,
3213                                   u8 selector,
3214                                   u8 *desc_buf, int *buf_len)
3215 {
3216         int err;
3217         int retries;
3218
3219         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3220                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3221                                                 selector, desc_buf, buf_len);
3222                 if (!err || err == -EINVAL)
3223                         break;
3224         }
3225
3226         return err;
3227 }
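
/*
 * Illustrative sketch only (not wired into the driver): reading the raw
 * Device descriptor with ufshcd_query_descriptor_retry().  On entry *buf_len
 * must be between QUERY_DESC_MIN_SIZE and QUERY_DESC_MAX_SIZE; on return it
 * holds the length reported by the device.  The wrapper name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_device_desc(struct ufs_hba *hba,
                                                          u8 *desc_buf,
                                                          int *buf_len)
{
        return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
                                             QUERY_DESC_IDN_DEVICE, 0, 0,
                                             desc_buf, buf_len);
}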
3228
3229 /**
3230  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3231  * @hba: Pointer to adapter instance
3232  * @desc_id: descriptor idn value
3233  * @desc_len: mapped desc length (out)
3234  */
3235 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3236                                   int *desc_len)
3237 {
3238         if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3239             desc_id == QUERY_DESC_IDN_RFU_1)
3240                 *desc_len = 0;
3241         else
3242                 *desc_len = hba->desc_size[desc_id];
3243 }
3244 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3245
3246 static void ufshcd_update_desc_length(struct ufs_hba *hba,
3247                                       enum desc_idn desc_id, int desc_index,
3248                                       unsigned char desc_len)
3249 {
3250         if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3251             desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3252                 /* For UFS 3.1, the normal unit descriptor is 10 bytes
3253                  * larger than the RPMB unit descriptor. Both share the
3254                  * same desc_idn, so desc_index is used to pick the normal
3255                  * unit descriptor length for both.
3256                  */
3257                 hba->desc_size[desc_id] = desc_len;
3258 }
3259
3260 /**
3261  * ufshcd_read_desc_param - read the specified descriptor parameter
3262  * @hba: Pointer to adapter instance
3263  * @desc_id: descriptor idn value
3264  * @desc_index: descriptor index
3265  * @param_offset: offset of the parameter to read
3266  * @param_read_buf: pointer to buffer where parameter would be read
3267  * @param_size: sizeof(param_read_buf)
3268  *
3269  * Return 0 in case of success, non-zero otherwise
3270  */
3271 int ufshcd_read_desc_param(struct ufs_hba *hba,
3272                            enum desc_idn desc_id,
3273                            int desc_index,
3274                            u8 param_offset,
3275                            u8 *param_read_buf,
3276                            u8 param_size)
3277 {
3278         int ret;
3279         u8 *desc_buf;
3280         int buff_len;
3281         bool is_kmalloc = true;
3282
3283         /* Safety check */
3284         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3285                 return -EINVAL;
3286
3287         /* Get the length of descriptor */
3288         ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3289         if (!buff_len) {
3290                 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3291                 return -EINVAL;
3292         }
3293
3294         if (param_offset >= buff_len) {
3295                 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3296                         __func__, param_offset, desc_id, buff_len);
3297                 return -EINVAL;
3298         }
3299
3300         /* Check whether we need temp memory */
3301         if (param_offset != 0 || param_size < buff_len) {
3302                 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3303                 if (!desc_buf)
3304                         return -ENOMEM;
3305         } else {
3306                 desc_buf = param_read_buf;
3307                 is_kmalloc = false;
3308         }
3309
3310         /* Request for full descriptor */
3311         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3312                                         desc_id, desc_index, 0,
3313                                         desc_buf, &buff_len);
3314
3315         if (ret) {
3316                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3317                         __func__, desc_id, desc_index, param_offset, ret);
3318                 goto out;
3319         }
3320
3321         /* Sanity check */
3322         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3323                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3324                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3325                 ret = -EINVAL;
3326                 goto out;
3327         }
3328
3329         /* Update descriptor length */
3330         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3331         ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
3332
3333         if (is_kmalloc) {
3334                 /* Make sure we don't copy more data than available */
3335                 if (param_offset + param_size > buff_len)
3336                         param_size = buff_len - param_offset;
3337                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3338         }
3339 out:
3340         if (is_kmalloc)
3341                 kfree(desc_buf);
3342         return ret;
3343 }
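
/*
 * Illustrative sketch only (not wired into the driver): reading the
 * wSpecVersion field of the Device descriptor via ufshcd_read_desc_param().
 * DEVICE_DESC_PARAM_SPEC_VER is assumed to come from ufs.h; the wrapper name
 * is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_spec_version(struct ufs_hba *hba,
                                                           u16 *spec_version)
{
        u8 ver[2];
        int err;

        err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
                                     DEVICE_DESC_PARAM_SPEC_VER,
                                     ver, sizeof(ver));
        if (!err)
                *spec_version = get_unaligned_be16(ver);

        return err;
}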
3344
3345 /**
3346  * struct uc_string_id - unicode string
3347  *
3348  * @len: size of this descriptor inclusive
3349  * @type: descriptor type
3350  * @uc: unicode string character
3351  */
3352 struct uc_string_id {
3353         u8 len;
3354         u8 type;
3355         wchar_t uc[];
3356 } __packed;
3357
3358 /* replace non-printable or non-ASCII characters with spaces */
3359 static inline char ufshcd_remove_non_printable(u8 ch)
3360 {
3361         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3362 }
3363
3364 /**
3365  * ufshcd_read_string_desc - read string descriptor
3366  * @hba: pointer to adapter instance
3367  * @desc_index: descriptor index
3368  * @buf: pointer to buffer where descriptor would be read,
3369  *       the caller should free the memory.
3370  * @ascii: if true convert from unicode to ascii characters
3371  *         null terminated string.
3372  *
3373  * Return:
3374  * *      string size on success.
3375  * *      -ENOMEM: on allocation failure
3376  * *      -EINVAL: on a wrong parameter
3377  */
3378 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3379                             u8 **buf, bool ascii)
3380 {
3381         struct uc_string_id *uc_str;
3382         u8 *str;
3383         int ret;
3384
3385         if (!buf)
3386                 return -EINVAL;
3387
3388         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3389         if (!uc_str)
3390                 return -ENOMEM;
3391
3392         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3393                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3394         if (ret < 0) {
3395                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3396                         QUERY_REQ_RETRIES, ret);
3397                 str = NULL;
3398                 goto out;
3399         }
3400
3401         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3402                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3403                 str = NULL;
3404                 ret = 0;
3405                 goto out;
3406         }
3407
3408         if (ascii) {
3409                 ssize_t ascii_len;
3410                 int i;
3411                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3412                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3413                 str = kzalloc(ascii_len, GFP_KERNEL);
3414                 if (!str) {
3415                         ret = -ENOMEM;
3416                         goto out;
3417                 }
3418
3419                 /*
3420                  * The descriptor contains the string in UTF-16 format.
3421                  * Convert it to UTF-8 so it can be displayed.
3422                  */
3423                 ret = utf16s_to_utf8s(uc_str->uc,
3424                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3425                                       UTF16_BIG_ENDIAN, str, ascii_len);
3426
3427                 /* replace non-printable or non-ASCII characters with spaces */
3428                 for (i = 0; i < ret; i++)
3429                         str[i] = ufshcd_remove_non_printable(str[i]);
3430
3431                 str[ret++] = '\0';
3432
3433         } else {
3434                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3435                 if (!str) {
3436                         ret = -ENOMEM;
3437                         goto out;
3438                 }
3439                 ret = uc_str->len;
3440         }
3441 out:
3442         *buf = str;
3443         kfree(uc_str);
3444         return ret;
3445 }
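
/*
 * Illustrative sketch only (not wired into the driver): reading a string
 * descriptor in ASCII form and logging it.  The string index would normally
 * come from another descriptor (e.g. iProductName in the Device descriptor);
 * the wrapper name is hypothetical.
 */
static void __maybe_unused ufshcd_example_log_string_desc(struct ufs_hba *hba,
                                                          u8 desc_index)
{
        u8 *str = NULL;
        int ret;

        ret = ufshcd_read_string_desc(hba, desc_index, &str, true);
        if (ret > 0)
                dev_info(hba->dev, "string descriptor %d: %s\n",
                         desc_index, str);
        kfree(str);
}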
3446
3447 /**
3448  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3449  * @hba: Pointer to adapter instance
3450  * @lun: lun id
3451  * @param_offset: offset of the parameter to read
3452  * @param_read_buf: pointer to buffer where parameter would be read
3453  * @param_size: sizeof(param_read_buf)
3454  *
3455  * Return 0 in case of success, non-zero otherwise
3456  */
3457 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3458                                               int lun,
3459                                               enum unit_desc_param param_offset,
3460                                               u8 *param_read_buf,
3461                                               u32 param_size)
3462 {
3463         /*
3464          * Unit descriptors are only available for general purpose LUs (LUN id
3465          * from 0 to 7) and RPMB Well known LU.
3466          */
3467         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
3468                 return -EOPNOTSUPP;
3469
3470         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3471                                       param_offset, param_read_buf, param_size);
3472 }
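
/*
 * Illustrative sketch only (not wired into the driver): reading the
 * bLUQueueDepth field of a unit descriptor for a given LUN.
 * UNIT_DESC_PARAM_LU_Q_DEPTH is assumed to come from ufs.h; the wrapper
 * name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_lu_queue_depth(struct ufs_hba *hba,
                                                             int lun, u8 *qdepth)
{
        return ufshcd_read_unit_desc_param(hba, lun,
                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
                                           qdepth, sizeof(*qdepth));
}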
3473
3474 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3475 {
3476         int err = 0;
3477         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3478
3479         if (hba->dev_info.wspecversion >= 0x300) {
3480                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3481                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3482                                 &gating_wait);
3483                 if (err)
3484                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3485                                          err, gating_wait);
3486
3487                 if (gating_wait == 0) {
3488                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3489                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3490                                          gating_wait);
3491                 }
3492
3493                 hba->dev_info.clk_gating_wait_us = gating_wait;
3494         }
3495
3496         return err;
3497 }
3498
3499 /**
3500  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3501  * @hba: per adapter instance
3502  *
3503  * 1. Allocate DMA memory for Command Descriptor array
3504  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3505  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3506  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3507  *      (UTMRDL)
3508  * 4. Allocate memory for local reference block(lrb).
3509  *
3510  * Returns 0 for success, non-zero in case of failure
3511  */
3512 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3513 {
3514         size_t utmrdl_size, utrdl_size, ucdl_size;
3515
3516         /* Allocate memory for UTP command descriptors */
3517         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3518         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3519                                                   ucdl_size,
3520                                                   &hba->ucdl_dma_addr,
3521                                                   GFP_KERNEL);
3522
3523         /*
3524          * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3525          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
3526          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3527          * be aligned to 128 bytes as well.
3528          */
3529         if (!hba->ucdl_base_addr ||
3530             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3531                 dev_err(hba->dev,
3532                         "Command Descriptor Memory allocation failed\n");
3533                 goto out;
3534         }
3535
3536         /*
3537          * Allocate memory for UTP Transfer descriptors
3538          * UFSHCI requires 1024 byte alignment of UTRD
3539          */
3540         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3541         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3542                                                    utrdl_size,
3543                                                    &hba->utrdl_dma_addr,
3544                                                    GFP_KERNEL);
3545         if (!hba->utrdl_base_addr ||
3546             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3547                 dev_err(hba->dev,
3548                         "Transfer Descriptor Memory allocation failed\n");
3549                 goto out;
3550         }
3551
3552         /*
3553          * Allocate memory for UTP Task Management descriptors
3554          * UFSHCI requires 1024 byte alignment of UTMRD
3555          */
3556         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3557         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3558                                                     utmrdl_size,
3559                                                     &hba->utmrdl_dma_addr,
3560                                                     GFP_KERNEL);
3561         if (!hba->utmrdl_base_addr ||
3562             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3563                 dev_err(hba->dev,
3564                 "Task Management Descriptor Memory allocation failed\n");
3565                 goto out;
3566         }
3567
3568         /* Allocate memory for local reference block */
3569         hba->lrb = devm_kcalloc(hba->dev,
3570                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3571                                 GFP_KERNEL);
3572         if (!hba->lrb) {
3573                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3574                 goto out;
3575         }
3576         return 0;
3577 out:
3578         return -ENOMEM;
3579 }
3580
3581 /**
3582  * ufshcd_host_memory_configure - configure local reference block with
3583  *                              memory offsets
3584  * @hba: per adapter instance
3585  *
3586  * Configure Host memory space
3587  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3588  * address.
3589  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3590  * and PRDT offset.
3591  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3592  * into local reference block.
3593  */
3594 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3595 {
3596         struct utp_transfer_req_desc *utrdlp;
3597         dma_addr_t cmd_desc_dma_addr;
3598         dma_addr_t cmd_desc_element_addr;
3599         u16 response_offset;
3600         u16 prdt_offset;
3601         int cmd_desc_size;
3602         int i;
3603
3604         utrdlp = hba->utrdl_base_addr;
3605
3606         response_offset =
3607                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3608         prdt_offset =
3609                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3610
3611         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3612         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3613
3614         for (i = 0; i < hba->nutrs; i++) {
3615                 /* Configure UTRD with command descriptor base address */
3616                 cmd_desc_element_addr =
3617                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3618                 utrdlp[i].command_desc_base_addr_lo =
3619                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3620                 utrdlp[i].command_desc_base_addr_hi =
3621                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3622
3623                 /* Response upiu and prdt offset should be in double words */
3624                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3625                         utrdlp[i].response_upiu_offset =
3626                                 cpu_to_le16(response_offset);
3627                         utrdlp[i].prd_table_offset =
3628                                 cpu_to_le16(prdt_offset);
3629                         utrdlp[i].response_upiu_length =
3630                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3631                 } else {
3632                         utrdlp[i].response_upiu_offset =
3633                                 cpu_to_le16(response_offset >> 2);
3634                         utrdlp[i].prd_table_offset =
3635                                 cpu_to_le16(prdt_offset >> 2);
3636                         utrdlp[i].response_upiu_length =
3637                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3638                 }
3639
3640                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3641         }
3642 }
3643
3644 /**
3645  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3646  * @hba: per adapter instance
3647  *
3648  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3649  * in order to initialize the Unipro link startup procedure.
3650  * Once the Unipro links are up, the device connected to the controller
3651  * is detected.
3652  *
3653  * Returns 0 on success, non-zero value on failure
3654  */
3655 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3656 {
3657         struct uic_command uic_cmd = {0};
3658         int ret;
3659
3660         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3661
3662         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3663         if (ret)
3664                 dev_dbg(hba->dev,
3665                         "dme-link-startup: error code %d\n", ret);
3666         return ret;
3667 }
3668 /**
3669  * ufshcd_dme_reset - UIC command for DME_RESET
3670  * @hba: per adapter instance
3671  *
3672  * DME_RESET command is issued in order to reset UniPro stack.
3673  * This function now deals with cold reset.
3674  *
3675  * Returns 0 on success, non-zero value on failure
3676  */
3677 static int ufshcd_dme_reset(struct ufs_hba *hba)
3678 {
3679         struct uic_command uic_cmd = {0};
3680         int ret;
3681
3682         uic_cmd.command = UIC_CMD_DME_RESET;
3683
3684         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3685         if (ret)
3686                 dev_err(hba->dev,
3687                         "dme-reset: error code %d\n", ret);
3688
3689         return ret;
3690 }
3691
3692 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
3693                                int agreed_gear,
3694                                int adapt_val)
3695 {
3696         int ret;
3697
3698         if (agreed_gear != UFS_HS_G4)
3699                 adapt_val = PA_NO_ADAPT;
3700
3701         ret = ufshcd_dme_set(hba,
3702                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
3703                              adapt_val);
3704         return ret;
3705 }
3706 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
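
/*
 * Illustrative sketch only (not wired into the driver): a host controller
 * driver's pwr_change_notify() hook could program the HS-G4 ADAPT setting
 * like this.  PA_INITIAL_ADAPT is assumed to come from unipro.h (alongside
 * PA_NO_ADAPT used above); the wrapper name is hypothetical.
 */
static int __maybe_unused ufshcd_example_setup_adapt(struct ufs_hba *hba,
                                                     int agreed_gear)
{
        return ufshcd_dme_configure_adapt(hba, agreed_gear, PA_INITIAL_ADAPT);
}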
3707
3708 /**
3709  * ufshcd_dme_enable - UIC command for DME_ENABLE
3710  * @hba: per adapter instance
3711  *
3712  * DME_ENABLE command is issued in order to enable UniPro stack.
3713  *
3714  * Returns 0 on success, non-zero value on failure
3715  */
3716 static int ufshcd_dme_enable(struct ufs_hba *hba)
3717 {
3718         struct uic_command uic_cmd = {0};
3719         int ret;
3720
3721         uic_cmd.command = UIC_CMD_DME_ENABLE;
3722
3723         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3724         if (ret)
3725                 dev_err(hba->dev,
3726                         "dme-enable: error code %d\n", ret);
3727
3728         return ret;
3729 }
3730
3731 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3732 {
3733         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3734         unsigned long min_sleep_time_us;
3735
3736         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3737                 return;
3738
3739         /*
3740          * last_dme_cmd_tstamp will be 0 only for 1st call to
3741          * this function
3742          */
3743         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3744                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3745         } else {
3746                 unsigned long delta =
3747                         (unsigned long) ktime_to_us(
3748                                 ktime_sub(ktime_get(),
3749                                 hba->last_dme_cmd_tstamp));
3750
3751                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3752                         min_sleep_time_us =
3753                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3754                 else
3755                         return; /* no more delay required */
3756         }
3757
3758         /* allow sleep for extra 50us if needed */
3759         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3760 }
3761
3762 /**
3763  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3764  * @hba: per adapter instance
3765  * @attr_sel: uic command argument1
3766  * @attr_set: attribute set type as uic command argument2
3767  * @mib_val: setting value as uic command argument3
3768  * @peer: indicate whether peer or local
3769  *
3770  * Returns 0 on success, non-zero value on failure
3771  */
3772 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3773                         u8 attr_set, u32 mib_val, u8 peer)
3774 {
3775         struct uic_command uic_cmd = {0};
3776         static const char *const action[] = {
3777                 "dme-set",
3778                 "dme-peer-set"
3779         };
3780         const char *set = action[!!peer];
3781         int ret;
3782         int retries = UFS_UIC_COMMAND_RETRIES;
3783
3784         uic_cmd.command = peer ?
3785                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3786         uic_cmd.argument1 = attr_sel;
3787         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3788         uic_cmd.argument3 = mib_val;
3789
3790         do {
3791                 /* for peer attributes we retry upon failure */
3792                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3793                 if (ret)
3794                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3795                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3796         } while (ret && peer && --retries);
3797
3798         if (ret)
3799                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3800                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3801                         UFS_UIC_COMMAND_RETRIES - retries);
3802
3803         return ret;
3804 }
3805 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3806
3807 /**
3808  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3809  * @hba: per adapter instance
3810  * @attr_sel: uic command argument1
3811  * @mib_val: the value of the attribute as returned by the UIC command
3812  * @peer: indicate whether peer or local
3813  *
3814  * Returns 0 on success, non-zero value on failure
3815  */
3816 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3817                         u32 *mib_val, u8 peer)
3818 {
3819         struct uic_command uic_cmd = {0};
3820         static const char *const action[] = {
3821                 "dme-get",
3822                 "dme-peer-get"
3823         };
3824         const char *get = action[!!peer];
3825         int ret;
3826         int retries = UFS_UIC_COMMAND_RETRIES;
3827         struct ufs_pa_layer_attr orig_pwr_info;
3828         struct ufs_pa_layer_attr temp_pwr_info;
3829         bool pwr_mode_change = false;
3830
3831         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3832                 orig_pwr_info = hba->pwr_info;
3833                 temp_pwr_info = orig_pwr_info;
3834
3835                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3836                     orig_pwr_info.pwr_rx == FAST_MODE) {
3837                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3838                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3839                         pwr_mode_change = true;
3840                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3841                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3842                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3843                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3844                         pwr_mode_change = true;
3845                 }
3846                 if (pwr_mode_change) {
3847                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3848                         if (ret)
3849                                 goto out;
3850                 }
3851         }
3852
3853         uic_cmd.command = peer ?
3854                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3855         uic_cmd.argument1 = attr_sel;
3856
3857         do {
3858                 /* for peer attributes we retry upon failure */
3859                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3860                 if (ret)
3861                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3862                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3863         } while (ret && peer && --retries);
3864
3865         if (ret)
3866                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3867                         get, UIC_GET_ATTR_ID(attr_sel),
3868                         UFS_UIC_COMMAND_RETRIES - retries);
3869
3870         if (mib_val && !ret)
3871                 *mib_val = uic_cmd.argument3;
3872
3873         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3874             && pwr_mode_change)
3875                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3876 out:
3877         return ret;
3878 }
3879 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
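
/*
 * Illustrative sketch only (not wired into the driver): reading the same
 * UniPro attribute from the local and the peer side using the
 * ufshcd_dme_get() and ufshcd_dme_peer_get() wrappers, which build on the
 * functions above.  The wrapper name is hypothetical.
 */
static int __maybe_unused ufshcd_example_read_max_hs_gear(struct ufs_hba *hba,
                                                          u32 *local_gear,
                                                          u32 *peer_gear)
{
        int ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), local_gear);
        if (ret)
                return ret;

        return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), peer_gear);
}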
3880
3881 /**
3882  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3883  * state) and waits for it to take effect.
3884  *
3885  * @hba: per adapter instance
3886  * @cmd: UIC command to execute
3887  *
3888  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3889  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
3890  * the device UniPro link, so their final completion is indicated by
3891  * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
3892  * in addition to the normal UIC command completion status (UCCS). This
3893  * function only returns after the relevant status bits indicate completion.
3894  *
3895  * Returns 0 on success, non-zero value on failure
3896  */
3897 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3898 {
3899         struct completion uic_async_done;
3900         unsigned long flags;
3901         u8 status;
3902         int ret;
3903         bool reenable_intr = false;
3904
3905         mutex_lock(&hba->uic_cmd_mutex);
3906         init_completion(&uic_async_done);
3907         ufshcd_add_delay_before_dme_cmd(hba);
3908
3909         spin_lock_irqsave(hba->host->host_lock, flags);
3910         if (ufshcd_is_link_broken(hba)) {
3911                 ret = -ENOLINK;
3912                 goto out_unlock;
3913         }
3914         hba->uic_async_done = &uic_async_done;
3915         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3916                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3917                 /*
3918                  * Make sure UIC command completion interrupt is disabled before
3919                  * issuing UIC command.
3920                  */
3921                 wmb();
3922                 reenable_intr = true;
3923         }
3924         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3925         spin_unlock_irqrestore(hba->host->host_lock, flags);
3926         if (ret) {
3927                 dev_err(hba->dev,
3928                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3929                         cmd->command, cmd->argument3, ret);
3930                 goto out;
3931         }
3932
3933         if (!wait_for_completion_timeout(hba->uic_async_done,
3934                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3935                 dev_err(hba->dev,
3936                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3937                         cmd->command, cmd->argument3);
3938
3939                 if (!cmd->cmd_active) {
3940                         dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
3941                                 __func__);
3942                         goto check_upmcrs;
3943                 }
3944
3945                 ret = -ETIMEDOUT;
3946                 goto out;
3947         }
3948
3949 check_upmcrs:
3950         status = ufshcd_get_upmcrs(hba);
3951         if (status != PWR_LOCAL) {
3952                 dev_err(hba->dev,
3953                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3954                         cmd->command, status);
3955                 ret = (status != PWR_OK) ? status : -1;
3956         }
3957 out:
3958         if (ret) {
3959                 ufshcd_print_host_state(hba);
3960                 ufshcd_print_pwr_info(hba);
3961                 ufshcd_print_evt_hist(hba);
3962         }
3963
3964         spin_lock_irqsave(hba->host->host_lock, flags);
3965         hba->active_uic_cmd = NULL;
3966         hba->uic_async_done = NULL;
3967         if (reenable_intr)
3968                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3969         if (ret) {
3970                 ufshcd_set_link_broken(hba);
3971                 ufshcd_schedule_eh_work(hba);
3972         }
3973 out_unlock:
3974         spin_unlock_irqrestore(hba->host->host_lock, flags);
3975         mutex_unlock(&hba->uic_cmd_mutex);
3976
3977         return ret;
3978 }
3979
3980 /**
3981  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3982  *                              using DME_SET primitives.
3983  * @hba: per adapter instance
3984  * @mode: power mode value
3985  *
3986  * Returns 0 on success, non-zero value on failure
3987  */
3988 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3989 {
3990         struct uic_command uic_cmd = {0};
3991         int ret;
3992
3993         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3994                 ret = ufshcd_dme_set(hba,
3995                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3996                 if (ret) {
3997                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3998                                                 __func__, ret);
3999                         goto out;
4000                 }
4001         }
4002
4003         uic_cmd.command = UIC_CMD_DME_SET;
4004         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4005         uic_cmd.argument3 = mode;
4006         ufshcd_hold(hba, false);
4007         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4008         ufshcd_release(hba);
4009
4010 out:
4011         return ret;
4012 }
4013
4014 int ufshcd_link_recovery(struct ufs_hba *hba)
4015 {
4016         int ret;
4017         unsigned long flags;
4018
4019         spin_lock_irqsave(hba->host->host_lock, flags);
4020         hba->ufshcd_state = UFSHCD_STATE_RESET;
4021         ufshcd_set_eh_in_progress(hba);
4022         spin_unlock_irqrestore(hba->host->host_lock, flags);
4023
4024         /* Reset the attached device */
4025         ufshcd_device_reset(hba);
4026
4027         ret = ufshcd_host_reset_and_restore(hba);
4028
4029         spin_lock_irqsave(hba->host->host_lock, flags);
4030         if (ret)
4031                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4032         ufshcd_clear_eh_in_progress(hba);
4033         spin_unlock_irqrestore(hba->host->host_lock, flags);
4034
4035         if (ret)
4036                 dev_err(hba->dev, "%s: link recovery failed, err %d\n",
4037                         __func__, ret);
4038         else
4039                 ufshcd_clear_ua_wluns(hba);
4040
4041         return ret;
4042 }
4043 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4044
4045 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4046 {
4047         int ret;
4048         struct uic_command uic_cmd = {0};
4049         ktime_t start = ktime_get();
4050
4051         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4052
4053         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4054         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4055         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4056                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4057
4058         if (ret)
4059                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4060                         __func__, ret);
4061         else
4062                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4063                                                                 POST_CHANGE);
4064
4065         return ret;
4066 }
4067
4068 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4069 {
4070         struct uic_command uic_cmd = {0};
4071         int ret;
4072         ktime_t start = ktime_get();
4073
4074         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4075
4076         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4077         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4078         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4079                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4080
4081         if (ret) {
4082                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4083                         __func__, ret);
4084         } else {
4085                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4086                                                                 POST_CHANGE);
4087                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
4088                 hba->ufs_stats.hibern8_exit_cnt++;
4089         }
4090
4091         return ret;
4092 }
4093 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4094
4095 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4096 {
4097         unsigned long flags;
4098         bool update = false;
4099
4100         if (!ufshcd_is_auto_hibern8_supported(hba))
4101                 return;
4102
4103         spin_lock_irqsave(hba->host->host_lock, flags);
4104         if (hba->ahit != ahit) {
4105                 hba->ahit = ahit;
4106                 update = true;
4107         }
4108         spin_unlock_irqrestore(hba->host->host_lock, flags);
4109
4110         if (update && !pm_runtime_suspended(hba->dev)) {
4111                 pm_runtime_get_sync(hba->dev);
4112                 ufshcd_hold(hba, false);
4113                 ufshcd_auto_hibern8_enable(hba);
4114                 ufshcd_release(hba);
4115                 pm_runtime_put(hba->dev);
4116         }
4117 }
4118 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
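
/*
 * Illustrative sketch only (not wired into the driver): requesting an
 * Auto-Hibernate idle timer of about 150 ms.  This assumes the
 * UFSHCI_AHIBERN8_TIMER_MASK / UFSHCI_AHIBERN8_SCALE_MASK definitions from
 * ufshci.h and that a scale value of 3 selects 1 ms units; the wrapper name
 * is hypothetical.
 */
static void __maybe_unused ufshcd_example_set_ahit_150ms(struct ufs_hba *hba)
{
        u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
                   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

        ufshcd_auto_hibern8_update(hba, ahit);
}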
4119
4120 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4121 {
4122         unsigned long flags;
4123
4124         if (!ufshcd_is_auto_hibern8_supported(hba))
4125                 return;
4126
4127         spin_lock_irqsave(hba->host->host_lock, flags);
4128         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4129         spin_unlock_irqrestore(hba->host->host_lock, flags);
4130 }
4131
4132 /**
4133  * ufshcd_init_pwr_info - setting the POR (power on reset)
4134  * values in hba power info
4135  * @hba: per-adapter instance
4136  */
4137 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4138 {
4139         hba->pwr_info.gear_rx = UFS_PWM_G1;
4140         hba->pwr_info.gear_tx = UFS_PWM_G1;
4141         hba->pwr_info.lane_rx = 1;
4142         hba->pwr_info.lane_tx = 1;
4143         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4144         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4145         hba->pwr_info.hs_rate = 0;
4146 }
4147
4148 /**
4149  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4150  * @hba: per-adapter instance
4151  */
4152 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4153 {
4154         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4155
4156         if (hba->max_pwr_info.is_valid)
4157                 return 0;
4158
4159         pwr_info->pwr_tx = FAST_MODE;
4160         pwr_info->pwr_rx = FAST_MODE;
4161         pwr_info->hs_rate = PA_HS_MODE_B;
4162
4163         /* Get the connected lane count */
4164         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4165                         &pwr_info->lane_rx);
4166         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4167                         &pwr_info->lane_tx);
4168
4169         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4170                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4171                                 __func__,
4172                                 pwr_info->lane_rx,
4173                                 pwr_info->lane_tx);
4174                 return -EINVAL;
4175         }
4176
4177         /*
4178          * First, get the maximum HS gear. If it is zero, the link has
4179          * no HS gear capability, so fall back to the maximum PWM gear
4180          * instead.
4181          */
4182         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4183         if (!pwr_info->gear_rx) {
4184                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4185                                 &pwr_info->gear_rx);
4186                 if (!pwr_info->gear_rx) {
4187                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4188                                 __func__, pwr_info->gear_rx);
4189                         return -EINVAL;
4190                 }
4191                 pwr_info->pwr_rx = SLOW_MODE;
4192         }
4193
4194         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4195                         &pwr_info->gear_tx);
4196         if (!pwr_info->gear_tx) {
4197                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4198                                 &pwr_info->gear_tx);
4199                 if (!pwr_info->gear_tx) {
4200                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4201                                 __func__, pwr_info->gear_tx);
4202                         return -EINVAL;
4203                 }
4204                 pwr_info->pwr_tx = SLOW_MODE;
4205         }
4206
4207         hba->max_pwr_info.is_valid = true;
4208         return 0;
4209 }
4210
4211 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4212                              struct ufs_pa_layer_attr *pwr_mode)
4213 {
4214         int ret;
4215
4216         /* if already configured to the requested pwr_mode */
4217         if (!hba->force_pmc &&
4218             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4219             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4220             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4221             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4222             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4223             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4224             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4225                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4226                 return 0;
4227         }
4228
4229         /*
4230          * Configure the following attributes for the power mode change:
4231          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4232          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4233          * - PA_HSSERIES
4234          */
4235         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4236         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4237                         pwr_mode->lane_rx);
4238         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4239                         pwr_mode->pwr_rx == FAST_MODE)
4240                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4241         else
4242                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4243
4244         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4245         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4246                         pwr_mode->lane_tx);
4247         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4248                         pwr_mode->pwr_tx == FAST_MODE)
4249                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4250         else
4251                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4252
4253         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4254             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4255             pwr_mode->pwr_rx == FAST_MODE ||
4256             pwr_mode->pwr_tx == FAST_MODE)
4257                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4258                                                 pwr_mode->hs_rate);
4259
4260         if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4261                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4262                                 DL_FC0ProtectionTimeOutVal_Default);
4263                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4264                                 DL_TC0ReplayTimeOutVal_Default);
4265                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4266                                 DL_AFC0ReqTimeOutVal_Default);
4267                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4268                                 DL_FC1ProtectionTimeOutVal_Default);
4269                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4270                                 DL_TC1ReplayTimeOutVal_Default);
4271                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4272                                 DL_AFC1ReqTimeOutVal_Default);
4273
4274                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4275                                 DL_FC0ProtectionTimeOutVal_Default);
4276                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4277                                 DL_TC0ReplayTimeOutVal_Default);
4278                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4279                                 DL_AFC0ReqTimeOutVal_Default);
4280         }
4281
4282         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4283                         | pwr_mode->pwr_tx);
4284
4285         if (ret) {
4286                 dev_err(hba->dev,
4287                         "%s: power mode change failed %d\n", __func__, ret);
4288         } else {
4289                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4290                                                                 pwr_mode);
4291
4292                 memcpy(&hba->pwr_info, pwr_mode,
4293                         sizeof(struct ufs_pa_layer_attr));
4294         }
4295
4296         return ret;
4297 }
4298
4299 /**
4300  * ufshcd_config_pwr_mode - configure a new power mode
4301  * @hba: per-adapter instance
4302  * @desired_pwr_mode: desired power configuration
4303  */
4304 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4305                 struct ufs_pa_layer_attr *desired_pwr_mode)
4306 {
4307         struct ufs_pa_layer_attr final_params = { 0 };
4308         int ret;
4309
4310         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4311                                         desired_pwr_mode, &final_params);
4312
4313         if (ret)
4314                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4315
4316         ret = ufshcd_change_power_mode(hba, &final_params);
4317
4318         return ret;
4319 }
4320 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4321
4322 /**
4323  * ufshcd_complete_dev_init() - checks device readiness
4324  * @hba: per-adapter instance
4325  *
4326  * Set fDeviceInit flag and poll until device toggles it.
4327  */
4328 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4329 {
4330         int err;
4331         bool flag_res = true;
4332         ktime_t timeout;
4333
4334         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4335                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4336         if (err) {
4337                 dev_err(hba->dev,
4338                         "%s setting fDeviceInit flag failed with error %d\n",
4339                         __func__, err);
4340                 goto out;
4341         }
4342
4343         /* Poll fDeviceInit flag to be cleared */
4344         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4345         do {
4346                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4347                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4348                 if (!flag_res)
4349                         break;
4350                 usleep_range(5000, 10000);
4351         } while (ktime_before(ktime_get(), timeout));
4352
4353         if (err) {
4354                 dev_err(hba->dev,
4355                                 "%s reading fDeviceInit flag failed with error %d\n",
4356                                 __func__, err);
4357         } else if (flag_res) {
4358                 dev_err(hba->dev,
4359                                 "%s fDeviceInit was not cleared by the device\n",
4360                                 __func__);
4361                 err = -EBUSY;
4362         }
4363 out:
4364         return err;
4365 }
4366
4367 /**
4368  * ufshcd_make_hba_operational - Make UFS controller operational
4369  * @hba: per adapter instance
4370  *
4371  * To bring UFS host controller to operational state,
4372  * 1. Enable required interrupts
4373  * 2. Configure interrupt aggregation
4374  * 3. Program UTRL and UTMRL base address
4375  * 4. Configure run-stop-registers
4376  *
4377  * Returns 0 on success, non-zero value on failure
4378  */
4379 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4380 {
4381         int err = 0;
4382         u32 reg;
4383
4384         /* Enable required interrupts */
4385         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4386
4387         /* Configure interrupt aggregation */
4388         if (ufshcd_is_intr_aggr_allowed(hba))
4389                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4390         else
4391                 ufshcd_disable_intr_aggr(hba);
4392
4393         /* Configure UTRL and UTMRL base address registers */
4394         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4395                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4396         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4397                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4398         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4399                         REG_UTP_TASK_REQ_LIST_BASE_L);
4400         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4401                         REG_UTP_TASK_REQ_LIST_BASE_H);
4402
4403         /*
4404          * Make sure base address and interrupt setup are updated before
4405          * enabling the run/stop registers below.
4406          */
4407         wmb();
4408
4409         /*
4410          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4411          */
4412         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4413         if (!(ufshcd_get_lists_status(reg))) {
4414                 ufshcd_enable_run_stop_reg(hba);
4415         } else {
4416                 dev_err(hba->dev,
4417                         "Host controller not ready to process requests");
4418                 err = -EIO;
4419         }
4420
4421         return err;
4422 }
4423 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4424
4425 /**
4426  * ufshcd_hba_stop - Send controller to reset state
4427  * @hba: per adapter instance
4428  */
4429 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4430 {
4431         unsigned long flags;
4432         int err;
4433
4434         /*
4435          * Obtain the host lock to prevent the controller from being disabled
4436          * while the UFS interrupt handler is active on another CPU.
4437          */
4438         spin_lock_irqsave(hba->host->host_lock, flags);
4439         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4440         spin_unlock_irqrestore(hba->host->host_lock, flags);
4441
4442         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4443                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4444                                         10, 1);
4445         if (err)
4446                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4447 }
4448
4449 /**
4450  * ufshcd_hba_execute_hce - initialize the controller
4451  * @hba: per adapter instance
4452  *
4453  * The controller resets itself and controller firmware initialization
4454  * sequence kicks off. When controller is ready it will set
4455  * the Host Controller Enable bit to 1.
4456  *
4457  * Returns 0 on success, non-zero value on failure
4458  */
4459 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4460 {
4461         int retry_outer = 3;
4462         int retry_inner;
4463
4464 start:
4465         if (!ufshcd_is_hba_active(hba))
4466                 /* change controller state to "reset state" */
4467                 ufshcd_hba_stop(hba);
4468
4469         /* UniPro link is disabled at this point */
4470         ufshcd_set_link_off(hba);
4471
4472         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4473
4474         /* start controller initialization sequence */
4475         ufshcd_hba_start(hba);
4476
4477         /*
4478          * To initialize a UFS host controller, the HCE bit must be set to 1.
4479          * During initialization the HCE bit value changes from 1->0->1.
4480          * When the host controller completes the initialization sequence,
4481          * it sets the HCE bit back to 1. The same HCE bit is read back
4482          * to check whether the controller has completed initialization,
4483          * so without this delay the HCE = 1 written by the previous
4484          * instruction might be read back prematurely.
4485          * This delay can be changed based on the controller.
4486          */
4487         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4488
4489         /* wait for the host controller to complete initialization */
4490         retry_inner = 50;
4491         while (ufshcd_is_hba_active(hba)) {
4492                 if (retry_inner) {
4493                         retry_inner--;
4494                 } else {
4495                         dev_err(hba->dev,
4496                                 "Controller enable failed\n");
4497                         if (retry_outer) {
4498                                 retry_outer--;
4499                                 goto start;
4500                         }
4501                         return -EIO;
4502                 }
4503                 usleep_range(1000, 1100);
4504         }
4505
4506         /* enable UIC related interrupts */
4507         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4508
4509         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4510
4511         return 0;
4512 }
4513
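/**
 * ufshcd_hba_enable - enable the UFS host controller
 * @hba: per adapter instance
 *
 * Controllers with UFSHCI_QUIRK_BROKEN_HCE are enabled through
 * DME_RESET/DME_ENABLE instead of the standard HCE sequence; all other
 * controllers go through ufshcd_hba_execute_hce().
 *
 * Returns 0 on success, non-zero value on failure
 */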
4514 int ufshcd_hba_enable(struct ufs_hba *hba)
4515 {
4516         int ret;
4517
4518         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4519                 ufshcd_set_link_off(hba);
4520                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4521
4522                 /* enable UIC related interrupts */
4523                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4524                 ret = ufshcd_dme_reset(hba);
4525                 if (!ret) {
4526                         ret = ufshcd_dme_enable(hba);
4527                         if (!ret)
4528                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4529                         if (ret)
4530                                 dev_err(hba->dev,
4531                                         "Host controller enable failed with non-hce\n");
4532                 }
4533         } else {
4534                 ret = ufshcd_hba_execute_hce(hba);
4535         }
4536
4537         return ret;
4538 }
4539 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4540
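/*
 * ufshcd_disable_tx_lcc - clear TX_LCC_ENABLE on every connected TX data lane
 * @hba: per adapter instance
 * @peer: true to operate on the peer (device) side, false for the local side
 */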
4541 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4542 {
4543         int tx_lanes = 0, i, err = 0;
4544
4545         if (!peer)
4546                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4547                                &tx_lanes);
4548         else
4549                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4550                                     &tx_lanes);
4551         for (i = 0; i < tx_lanes; i++) {
4552                 if (!peer)
4553                         err = ufshcd_dme_set(hba,
4554                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4555                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4556                                         0);
4557                 else
4558                         err = ufshcd_dme_peer_set(hba,
4559                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4560                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4561                                         0);
4562                 if (err) {
4563                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4564                                 __func__, peer, i, err);
4565                         break;
4566                 }
4567         }
4568
4569         return err;
4570 }
4571
4572 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4573 {
4574         return ufshcd_disable_tx_lcc(hba, true);
4575 }
4576
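/**
 * ufshcd_update_evt_hist - record a value in the event history
 * @hba: per adapter instance
 * @id: event id (one of UFS_EVT_*)
 * @val: value to record for the event
 *
 * Stores @val with a timestamp in the per-event circular history buffer
 * and notifies the vendor ops about the event.
 */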
4577 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4578 {
4579         struct ufs_event_hist *e;
4580
4581         if (id >= UFS_EVT_CNT)
4582                 return;
4583
4584         e = &hba->ufs_stats.event[id];
4585         e->val[e->pos] = val;
4586         e->tstamp[e->pos] = ktime_get();
4587         e->cnt += 1;
4588         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4589
4590         ufshcd_vops_event_notify(hba, id, &val);
4591 }
4592 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4593
4594 /**
4595  * ufshcd_link_startup - Initialize unipro link startup
4596  * @hba: per adapter instance
4597  *
4598  * Returns 0 for success, non-zero in case of failure
4599  */
4600 static int ufshcd_link_startup(struct ufs_hba *hba)
4601 {
4602         int ret;
4603         int retries = DME_LINKSTARTUP_RETRIES;
4604         bool link_startup_again = false;
4605
4606         /*
4607          * If the UFS device isn't active then we will have to issue link startup
4608          * twice to make sure the device state moves to active.
4609          */
4610         if (!ufshcd_is_ufs_dev_active(hba))
4611                 link_startup_again = true;
4612
4613 link_startup:
4614         do {
4615                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4616
4617                 ret = ufshcd_dme_link_startup(hba);
4618
4619                 /* check if device is detected by inter-connect layer */
4620                 if (!ret && !ufshcd_is_device_present(hba)) {
4621                         ufshcd_update_evt_hist(hba,
4622                                                UFS_EVT_LINK_STARTUP_FAIL,
4623                                                0);
4624                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4625                         ret = -ENXIO;
4626                         goto out;
4627                 }
4628
4629                 /*
4630                  * DME link lost indication is only received when link is up,
4631                  * but we can't be sure if the link is up until link startup
4632                  * succeeds. So reset the local Uni-Pro and try again.
4633                  */
4634                 if (ret && ufshcd_hba_enable(hba)) {
4635                         ufshcd_update_evt_hist(hba,
4636                                                UFS_EVT_LINK_STARTUP_FAIL,
4637                                                (u32)ret);
4638                         goto out;
4639                 }
4640         } while (ret && retries--);
4641
4642         if (ret) {
4643                 /* failed to get the link up... retire */
4644                 ufshcd_update_evt_hist(hba,
4645                                        UFS_EVT_LINK_STARTUP_FAIL,
4646                                        (u32)ret);
4647                 goto out;
4648         }
4649
4650         if (link_startup_again) {
4651                 link_startup_again = false;
4652                 retries = DME_LINKSTARTUP_RETRIES;
4653                 goto link_startup;
4654         }
4655
4656         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4657         ufshcd_init_pwr_info(hba);
4658         ufshcd_print_pwr_info(hba);
4659
4660         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4661                 ret = ufshcd_disable_device_tx_lcc(hba);
4662                 if (ret)
4663                         goto out;
4664         }
4665
4666         /* Include any host controller configuration via UIC commands */
4667         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4668         if (ret)
4669                 goto out;
4670
4671         /* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
4672         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4673         ret = ufshcd_make_hba_operational(hba);
4674 out:
4675         if (ret) {
4676                 dev_err(hba->dev, "link startup failed %d\n", ret);
4677                 ufshcd_print_host_state(hba);
4678                 ufshcd_print_pwr_info(hba);
4679                 ufshcd_print_evt_hist(hba);
4680         }
4681         return ret;
4682 }
4683
4684 /**
4685  * ufshcd_verify_dev_init() - Verify device initialization
4686  * @hba: per-adapter instance
4687  *
4688  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4689  * device Transport Protocol (UTP) layer is ready after a reset.
4690  * If the UTP layer at the device side is not initialized, it may
4691  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4692  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4693  */
4694 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4695 {
4696         int err = 0;
4697         int retries;
4698
4699         ufshcd_hold(hba, false);
4700         mutex_lock(&hba->dev_cmd.lock);
4701         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4702                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4703                                                NOP_OUT_TIMEOUT);
4704
4705                 if (!err || err == -ETIMEDOUT)
4706                         break;
4707
4708                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4709         }
4710         mutex_unlock(&hba->dev_cmd.lock);
4711         ufshcd_release(hba);
4712
4713         if (err)
4714                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4715         return err;
4716 }
4717
4718 /**
4719  * ufshcd_set_queue_depth - set lun queue depth
4720  * @sdev: pointer to SCSI device
4721  *
4722  * Read the bLUQueueDepth value and activate SCSI tagged command
4723  * queueing. For WLUNs, the queue depth is set to 1. For best-effort
4724  * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4725  * value that the host can queue.
4726  */
4727 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4728 {
4729         int ret = 0;
4730         u8 lun_qdepth;
4731         struct ufs_hba *hba;
4732
4733         hba = shost_priv(sdev->host);
4734
4735         lun_qdepth = hba->nutrs;
4736         ret = ufshcd_read_unit_desc_param(hba,
4737                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4738                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4739                                           &lun_qdepth,
4740                                           sizeof(lun_qdepth));
4741
4742         /* Some WLUNs don't support the unit descriptor */
4743         if (ret == -EOPNOTSUPP)
4744                 lun_qdepth = 1;
4745         else if (!lun_qdepth)
4746                 /* eventually, we can figure out the real queue depth */
4747                 lun_qdepth = hba->nutrs;
4748         else
4749                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4750
4751         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4752                         __func__, lun_qdepth);
4753         scsi_change_queue_depth(sdev, lun_qdepth);
4754 }
4755
4756 /*
4757  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4758  * @hba: per-adapter instance
4759  * @lun: UFS device lun id
4760  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4761  *
4762  * Returns 0 in case of success, with the b_lu_write_protect status returned in
4763  * the @b_lu_write_protect parameter.
4764  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4765  * Returns -EINVAL in case of invalid parameters passed to this function.
4766  */
4767 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4768                             u8 lun,
4769                             u8 *b_lu_write_protect)
4770 {
4771         int ret;
4772
4773         if (!b_lu_write_protect)
4774                 ret = -EINVAL;
4775         /*
4776          * According to UFS device spec, RPMB LU can't be write
4777          * protected so skip reading bLUWriteProtect parameter for
4778          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4779          */
4780         else if (lun >= hba->dev_info.max_lu_supported)
4781                 ret = -ENOTSUPP;
4782         else
4783                 ret = ufshcd_read_unit_desc_param(hba,
4784                                           lun,
4785                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4786                                           b_lu_write_protect,
4787                                           sizeof(*b_lu_write_protect));
4788         return ret;
4789 }
4790
4791 /**
4792  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4793  * status
4794  * @hba: per-adapter instance
4795  * @sdev: pointer to SCSI device
4796  *
4797  */
4798 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4799                                                     struct scsi_device *sdev)
4800 {
4801         if (hba->dev_info.f_power_on_wp_en &&
4802             !hba->dev_info.is_lu_power_on_wp) {
4803                 u8 b_lu_write_protect;
4804
4805                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4806                                       &b_lu_write_protect) &&
4807                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4808                         hba->dev_info.is_lu_power_on_wp = true;
4809         }
4810 }
4811
4812 /**
4813  * ufshcd_slave_alloc - handle initial SCSI device configurations
4814  * @sdev: pointer to SCSI device
4815  *
4816  * Returns success
4817  */
4818 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4819 {
4820         struct ufs_hba *hba;
4821
4822         hba = shost_priv(sdev->host);
4823
4824         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4825         sdev->use_10_for_ms = 1;
4826
4827         /* DBD field should be set to 1 in mode sense(10) */
4828         sdev->set_dbd_for_ms = 1;
4829
4830         /* allow SCSI layer to restart the device in case of errors */
4831         sdev->allow_restart = 1;
4832
4833         /* REPORT SUPPORTED OPERATION CODES is not supported */
4834         sdev->no_report_opcodes = 1;
4835
4836         /* WRITE_SAME command is not supported */
4837         sdev->no_write_same = 1;
4838
4839         ufshcd_set_queue_depth(sdev);
4840
4841         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4842
4843         return 0;
4844 }
4845
4846 /**
4847  * ufshcd_change_queue_depth - change queue depth
4848  * @sdev: pointer to SCSI device
4849  * @depth: required depth to set
4850  *
4851  * Change queue depth and make sure the max. limits are not crossed.
4852  */
4853 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4854 {
4855         struct ufs_hba *hba = shost_priv(sdev->host);
4856
4857         if (depth > hba->nutrs)
4858                 depth = hba->nutrs;
4859         return scsi_change_queue_depth(sdev, depth);
4860 }
4861
4862 /**
4863  * ufshcd_slave_configure - adjust SCSI device configurations
4864  * @sdev: pointer to SCSI device
4865  */
4866 static int ufshcd_slave_configure(struct scsi_device *sdev)
4867 {
4868         struct ufs_hba *hba = shost_priv(sdev->host);
4869         struct request_queue *q = sdev->request_queue;
4870
4871         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4872         if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
4873                 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
4874
4875         if (ufshcd_is_rpm_autosuspend_allowed(hba))
4876                 sdev->rpm_autosuspend = 1;
4877
4878         ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4879
4880         return 0;
4881 }
4882
4883 /**
4884  * ufshcd_slave_destroy - remove SCSI device configurations
4885  * @sdev: pointer to SCSI device
4886  */
4887 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4888 {
4889         struct ufs_hba *hba;
4890
4891         hba = shost_priv(sdev->host);
4892         /* Drop the reference as it won't be needed anymore */
4893         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4894                 unsigned long flags;
4895
4896                 spin_lock_irqsave(hba->host->host_lock, flags);
4897                 hba->sdev_ufs_device = NULL;
4898                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4899         }
4900 }
4901
4902 /**
4903  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4904  * @lrbp: pointer to local reference block of completed command
4905  * @scsi_status: SCSI command status
4906  *
4907  * Returns value based on SCSI command status
4908  */
4909 static inline int
4910 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4911 {
4912         int result = 0;
4913
4914         switch (scsi_status) {
4915         case SAM_STAT_CHECK_CONDITION:
4916                 ufshcd_copy_sense_data(lrbp);
4917                 fallthrough;
4918         case SAM_STAT_GOOD:
4919                 result |= DID_OK << 16 | scsi_status;
4920                 break;
4921         case SAM_STAT_TASK_SET_FULL:
4922         case SAM_STAT_BUSY:
4923         case SAM_STAT_TASK_ABORTED:
4924                 ufshcd_copy_sense_data(lrbp);
4925                 result |= scsi_status;
4926                 break;
4927         default:
4928                 result |= DID_ERROR << 16;
4929                 break;
4930         } /* end of switch */
4931
4932         return result;
4933 }
4934
4935 /**
4936  * ufshcd_transfer_rsp_status - Get overall status of the response
4937  * @hba: per adapter instance
4938  * @lrbp: pointer to local reference block of completed command
4939  *
4940  * Returns result of the command to notify SCSI midlayer
4941  */
4942 static inline int
4943 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4944 {
4945         int result = 0;
4946         int scsi_status;
4947         int ocs;
4948
4949         /* overall command status of utrd */
4950         ocs = ufshcd_get_tr_ocs(lrbp);
4951
4952         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4953                 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
4954                                         MASK_RSP_UPIU_RESULT)
4955                         ocs = OCS_SUCCESS;
4956         }
4957
4958         switch (ocs) {
4959         case OCS_SUCCESS:
4960                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4961                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4962                 switch (result) {
4963                 case UPIU_TRANSACTION_RESPONSE:
4964                         /*
4965                          * get the response UPIU result to extract
4966                          * the SCSI command status
4967                          */
4968                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4969
4970                         /*
4971                          * get the result based on SCSI status response
4972                          * to notify the SCSI midlayer of the command status
4973                          */
4974                         scsi_status = result & MASK_SCSI_STATUS;
4975                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4976
4977                         /*
4978                          * Currently we only support BKOPS exception
4979                          * events, hence we can ignore BKOPS exception events
4980                          * during power management callbacks. A BKOPS exception
4981                          * event is not expected to be raised in the runtime suspend
4982                          * callback as it allows urgent BKOPS.
4983                          * During system suspend we are anyway forcefully
4984                          * disabling BKOPS, and if urgent BKOPS is needed
4985                          * it will be enabled on system resume. A long-term
4986                          * solution could be to abort the system suspend if the
4987                          * UFS device needs urgent BKOPS.
4988                          */
4989                         if (!hba->pm_op_in_progress &&
4990                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4991                             schedule_work(&hba->eeh_work)) {
4992                                 /*
4993                                  * Prevent suspend once eeh_work is scheduled
4994                                  * to avoid deadlock between ufshcd_suspend
4995                                  * and exception event handler.
4996                                  */
4997                                 pm_runtime_get_noresume(hba->dev);
4998                         }
4999                         break;
5000                 case UPIU_TRANSACTION_REJECT_UPIU:
5001                         /* TODO: handle Reject UPIU Response */
5002                         result = DID_ERROR << 16;
5003                         dev_err(hba->dev,
5004                                 "Reject UPIU not fully implemented\n");
5005                         break;
5006                 default:
5007                         dev_err(hba->dev,
5008                                 "Unexpected request response code = %x\n",
5009                                 result);
5010                         result = DID_ERROR << 16;
5011                         break;
5012                 }
5013                 break;
5014         case OCS_ABORTED:
5015                 result |= DID_ABORT << 16;
5016                 break;
5017         case OCS_INVALID_COMMAND_STATUS:
5018                 result |= DID_REQUEUE << 16;
5019                 break;
5020         case OCS_INVALID_CMD_TABLE_ATTR:
5021         case OCS_INVALID_PRDT_ATTR:
5022         case OCS_MISMATCH_DATA_BUF_SIZE:
5023         case OCS_MISMATCH_RESP_UPIU_SIZE:
5024         case OCS_PEER_COMM_FAILURE:
5025         case OCS_FATAL_ERROR:
5026         case OCS_DEVICE_FATAL_ERROR:
5027         case OCS_INVALID_CRYPTO_CONFIG:
5028         case OCS_GENERAL_CRYPTO_ERROR:
5029         default:
5030                 result |= DID_ERROR << 16;
5031                 dev_err(hba->dev,
5032                                 "OCS error from controller = %x for tag %d\n",
5033                                 ocs, lrbp->task_tag);
5034                 ufshcd_print_evt_hist(hba);
5035                 ufshcd_print_host_state(hba);
5036                 break;
5037         } /* end of switch */
5038
5039         if ((host_byte(result) != DID_OK) &&
5040             (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5041                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
5042         return result;
5043 }
5044
5045 /**
5046  * ufshcd_uic_cmd_compl - handle completion of uic command
5047  * @hba: per adapter instance
5048  * @intr_status: interrupt status generated by the controller
5049  *
5050  * Returns
5051  *  IRQ_HANDLED - If interrupt is valid
5052  *  IRQ_NONE    - If invalid interrupt
5053  */
5054 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5055 {
5056         irqreturn_t retval = IRQ_NONE;
5057
5058         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5059                 hba->active_uic_cmd->argument2 |=
5060                         ufshcd_get_uic_cmd_result(hba);
5061                 hba->active_uic_cmd->argument3 =
5062                         ufshcd_get_dme_attr_val(hba);
5063                 if (!hba->uic_async_done)
5064                         hba->active_uic_cmd->cmd_active = 0;
5065                 complete(&hba->active_uic_cmd->done);
5066                 retval = IRQ_HANDLED;
5067         }
5068
5069         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5070                 hba->active_uic_cmd->cmd_active = 0;
5071                 complete(hba->uic_async_done);
5072                 retval = IRQ_HANDLED;
5073         }
5074
5075         if (retval == IRQ_HANDLED)
5076                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5077                                              UFS_CMD_COMP);
5078         return retval;
5079 }
5080
5081 /**
5082  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5083  * @hba: per adapter instance
5084  * @completed_reqs: requests to complete
5085  */
5086 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5087                                         unsigned long completed_reqs)
5088 {
5089         struct ufshcd_lrb *lrbp;
5090         struct scsi_cmnd *cmd;
5091         int result;
5092         int index;
5093         bool update_scaling = false;
5094
5095         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5096                 lrbp = &hba->lrb[index];
5097                 lrbp->in_use = false;
5098                 lrbp->compl_time_stamp = ktime_get();
5099                 cmd = lrbp->cmd;
5100                 if (cmd) {
5101                         ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
5102                         result = ufshcd_transfer_rsp_status(hba, lrbp);
5103                         scsi_dma_unmap(cmd);
5104                         cmd->result = result;
5105                         /* Mark completed command as NULL in LRB */
5106                         lrbp->cmd = NULL;
5107                         /* Do not touch lrbp after scsi done */
5108                         cmd->scsi_done(cmd);
5109                         __ufshcd_release(hba);
5110                         update_scaling = true;
5111                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5112                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5113                         if (hba->dev_cmd.complete) {
5114                                 ufshcd_add_command_trace(hba, index,
5115                                                          UFS_DEV_COMP);
5116                                 complete(hba->dev_cmd.complete);
5117                                 update_scaling = true;
5118                         }
5119                 }
5120                 if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
5121                         hba->clk_scaling.active_reqs--;
5122         }
5123
5124         /* clear corresponding bits of completed commands */
5125         hba->outstanding_reqs ^= completed_reqs;
5126
5127         ufshcd_clk_scaling_update_busy(hba);
5128 }
5129
5130 /**
5131  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5132  * @hba: per adapter instance
5133  *
5134  * Returns
5135  *  IRQ_HANDLED - If interrupt is valid
5136  *  IRQ_NONE    - If invalid interrupt
5137  */
5138 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5139 {
5140         unsigned long completed_reqs;
5141         u32 tr_doorbell;
5142
5143         /* Resetting interrupt aggregation counters first and reading the
5144          * DOOR_BELL afterward allows us to handle all the completed requests.
5145          * To prevent starvation of other interrupts, the DB is read once
5146          * after reset. The down side of this solution is the possibility of
5147          * a false interrupt if the device completes another request after resetting
5148          * aggregation and before reading the DB.
5149          */
5150         if (ufshcd_is_intr_aggr_allowed(hba) &&
5151             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5152                 ufshcd_reset_intr_aggr(hba);
5153
5154         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5155         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5156
5157         if (completed_reqs) {
5158                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5159                 return IRQ_HANDLED;
5160         } else {
5161                 return IRQ_NONE;
5162         }
5163 }
5164
5165 /**
5166  * ufshcd_disable_ee - disable exception event
5167  * @hba: per-adapter instance
5168  * @mask: exception event to disable
5169  *
5170  * Disables exception event in the device so that the EVENT_ALERT
5171  * bit is not set.
5172  *
5173  * Returns zero on success, non-zero error value on failure.
5174  */
5175 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5176 {
5177         int err = 0;
5178         u32 val;
5179
5180         if (!(hba->ee_ctrl_mask & mask))
5181                 goto out;
5182
5183         val = hba->ee_ctrl_mask & ~mask;
5184         val &= MASK_EE_STATUS;
5185         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5186                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5187         if (!err)
5188                 hba->ee_ctrl_mask &= ~mask;
5189 out:
5190         return err;
5191 }
5192
5193 /**
5194  * ufshcd_enable_ee - enable exception event
5195  * @hba: per-adapter instance
5196  * @mask: exception event to enable
5197  *
5198  * Enable corresponding exception event in the device to allow
5199  * device to alert host in critical scenarios.
5200  *
5201  * Returns zero on success, non-zero error value on failure.
5202  */
5203 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5204 {
5205         int err = 0;
5206         u32 val;
5207
5208         if (hba->ee_ctrl_mask & mask)
5209                 goto out;
5210
5211         val = hba->ee_ctrl_mask | mask;
5212         val &= MASK_EE_STATUS;
5213         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5214                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5215         if (!err)
5216                 hba->ee_ctrl_mask |= mask;
5217 out:
5218         return err;
5219 }
5220
5221 /**
5222  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5223  * @hba: per-adapter instance
5224  *
5225  * Allow device to manage background operations on its own. Enabling
5226  * this might lead to inconsistent latencies during normal data transfers
5227  * as the device is allowed to manage its own way of handling background
5228  * operations.
5229  *
5230  * Returns zero on success, non-zero on failure.
5231  */
5232 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5233 {
5234         int err = 0;
5235
5236         if (hba->auto_bkops_enabled)
5237                 goto out;
5238
5239         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5240                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5241         if (err) {
5242                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5243                                 __func__, err);
5244                 goto out;
5245         }
5246
5247         hba->auto_bkops_enabled = true;
5248         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5249
5250         /* No need of URGENT_BKOPS exception from the device */
5251         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5252         if (err)
5253                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5254                                 __func__, err);
5255 out:
5256         return err;
5257 }
5258
5259 /**
5260  * ufshcd_disable_auto_bkops - block device in doing background operations
5261  * @hba: per-adapter instance
5262  *
5263  * Disabling background operations improves command response latency but
5264  * has the drawback of the device moving into a critical state where it is
5265  * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5266  * host is idle so that BKOPS are managed effectively without any negative
5267  * impacts.
5268  *
5269  * Returns zero on success, non-zero on failure.
5270  */
5271 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5272 {
5273         int err = 0;
5274
5275         if (!hba->auto_bkops_enabled)
5276                 goto out;
5277
5278         /*
5279          * If host assisted BKOPs is to be enabled, make sure
5280          * urgent bkops exception is allowed.
5281          */
5282         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5283         if (err) {
5284                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5285                                 __func__, err);
5286                 goto out;
5287         }
5288
5289         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5290                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5291         if (err) {
5292                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5293                                 __func__, err);
5294                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5295                 goto out;
5296         }
5297
5298         hba->auto_bkops_enabled = false;
5299         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5300         hba->is_urgent_bkops_lvl_checked = false;
5301 out:
5302         return err;
5303 }
5304
5305 /**
5306  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5307  * @hba: per adapter instance
5308  *
5309  * After a device reset the device may toggle the BKOPS_EN flag
5310  * to its default value. The s/w tracking variables should be updated
5311  * as well. This function changes the auto-bkops state based on
5312  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5313  */
5314 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5315 {
5316         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5317                 hba->auto_bkops_enabled = false;
5318                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5319                 ufshcd_enable_auto_bkops(hba);
5320         } else {
5321                 hba->auto_bkops_enabled = true;
5322                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5323                 ufshcd_disable_auto_bkops(hba);
5324         }
5325         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5326         hba->is_urgent_bkops_lvl_checked = false;
5327 }
5328
5329 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5330 {
5331         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5332                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5333 }
5334
5335 /**
5336  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5337  * @hba: per-adapter instance
5338  * @status: bkops_status value
5339  *
5340  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5341  * flag in the device to permit background operations if the device
5342  * bkops_status is greater than or equal to the "status" argument passed to
5343  * this function; disable it otherwise.
5344  *
5345  * Returns 0 for success, non-zero in case of failure.
5346  *
5347  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5348  * to know whether auto bkops is enabled or disabled after this function
5349  * returns control to it.
5350  */
5351 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5352                              enum bkops_status status)
5353 {
5354         int err;
5355         u32 curr_status = 0;
5356
5357         err = ufshcd_get_bkops_status(hba, &curr_status);
5358         if (err) {
5359                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5360                                 __func__, err);
5361                 goto out;
5362         } else if (curr_status > BKOPS_STATUS_MAX) {
5363                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5364                                 __func__, curr_status);
5365                 err = -EINVAL;
5366                 goto out;
5367         }
5368
5369         if (curr_status >= status)
5370                 err = ufshcd_enable_auto_bkops(hba);
5371         else
5372                 err = ufshcd_disable_auto_bkops(hba);
5373 out:
5374         return err;
5375 }
5376
5377 /**
5378  * ufshcd_urgent_bkops - handle urgent bkops exception event
5379  * @hba: per-adapter instance
5380  *
5381  * Enable fBackgroundOpsEn flag in the device to permit background
5382  * operations.
5383  *
5384  * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is not
5385  * enabled, and a negative error value for any other failure.
5386  */
5387 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5388 {
5389         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5390 }
5391
5392 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5393 {
5394         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5395                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5396 }
5397
5398 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5399 {
5400         int err;
5401         u32 curr_status = 0;
5402
5403         if (hba->is_urgent_bkops_lvl_checked)
5404                 goto enable_auto_bkops;
5405
5406         err = ufshcd_get_bkops_status(hba, &curr_status);
5407         if (err) {
5408                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5409                                 __func__, err);
5410                 goto out;
5411         }
5412
5413         /*
5414          * We are seeing that some devices are raising the urgent bkops
5415          * exception events even when the BKOPS status doesn't indicate performance
5416          * impact or a critical state. Handle these devices by determining their urgent
5417          * bkops status at runtime.
5418          */
5419         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5420                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5421                                 __func__, curr_status);
5422                 /* update the current status as the urgent bkops level */
5423                 hba->urgent_bkops_lvl = curr_status;
5424                 hba->is_urgent_bkops_lvl_checked = true;
5425         }
5426
5427 enable_auto_bkops:
5428         err = ufshcd_enable_auto_bkops(hba);
5429 out:
5430         if (err < 0)
5431                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5432                                 __func__, err);
5433 }
5434
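/**
 * ufshcd_wb_ctrl - enable or disable WriteBooster
 * @hba: per adapter instance
 * @enable: true to set QUERY_FLAG_IDN_WB_EN, false to clear it
 *
 * Does nothing when WriteBooster is not allowed or is already in the
 * requested state.
 *
 * Returns 0 on success, non-zero value on failure
 */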
5435 int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5436 {
5437         int ret;
5438         u8 index;
5439         enum query_opcode opcode;
5440
5441         if (!ufshcd_is_wb_allowed(hba))
5442                 return 0;
5443
5444         if (!(enable ^ hba->dev_info.wb_enabled))
5445                 return 0;
5446         if (enable)
5447                 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5448         else
5449                 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5450
5451         index = ufshcd_wb_get_query_index(hba);
5452         ret = ufshcd_query_flag_retry(hba, opcode,
5453                                       QUERY_FLAG_IDN_WB_EN, index, NULL);
5454         if (ret) {
5455                 dev_err(hba->dev, "%s write booster %s failed %d\n",
5456                         __func__, enable ? "enable" : "disable", ret);
5457                 return ret;
5458         }
5459
5460         hba->dev_info.wb_enabled = enable;
5461         dev_dbg(hba->dev, "%s write booster %s %d\n",
5462                         __func__, enable ? "enable" : "disable", ret);
5463
5464         return ret;
5465 }
5466
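/*
 * ufshcd_wb_toggle_flush_during_h8 - set or clear the flag that lets the
 * device flush its WriteBooster buffer while the link is in Hibern8
 * (QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8).
 */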
5467 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5468 {
5469         int val;
5470         u8 index;
5471
5472         if (set)
5473                 val =  UPIU_QUERY_OPCODE_SET_FLAG;
5474         else
5475                 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5476
5477         index = ufshcd_wb_get_query_index(hba);
5478         return ufshcd_query_flag_retry(hba, val,
5479                                 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5480                                 index, NULL);
5481 }
5482
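/*
 * ufshcd_wb_toggle_flush - set or clear QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN to
 * start or stop an explicit WriteBooster buffer flush, provided WriteBooster
 * is allowed and the flush state actually changes.
 */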
5483 static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5484 {
5485         int ret;
5486         u8 index;
5487         enum query_opcode opcode;
5488
5489         if (!ufshcd_is_wb_allowed(hba) ||
5490             hba->dev_info.wb_buf_flush_enabled == enable)
5491                 return 0;
5492
5493         if (enable)
5494                 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5495         else
5496                 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5497
5498         index = ufshcd_wb_get_query_index(hba);
5499         ret = ufshcd_query_flag_retry(hba, opcode,
5500                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
5501                                       NULL);
5502         if (ret) {
5503                 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
5504                         enable ? "enable" : "disable", ret);
5505                 goto out;
5506         }
5507
5508         hba->dev_info.wb_buf_flush_enabled = enable;
5509
5510         dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
5511 out:
5512         return ret;
5513
5514 }
5515
5516 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5517                                                 u32 avail_buf)
5518 {
5519         u32 cur_buf;
5520         int ret;
5521         u8 index;
5522
5523         index = ufshcd_wb_get_query_index(hba);
5524         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5525                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5526                                               index, 0, &cur_buf);
5527         if (ret) {
5528                 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5529                         __func__, ret);
5530                 return false;
5531         }
5532
5533         if (!cur_buf) {
5534                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5535                          cur_buf);
5536                 return false;
5537         }
5538         /* Let it continue to flush when available buffer exceeds threshold */
5539         if (avail_buf < hba->vps->wb_flush_threshold)
5540                 return true;
5541
5542         return false;
5543 }
5544
5545 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5546 {
5547         int ret;
5548         u32 avail_buf;
5549         u8 index;
5550
5551         if (!ufshcd_is_wb_allowed(hba))
5552                 return false;
5553         /*
5554          * The UFS device needs VCC to be ON to flush.
5555          * With user-space reduction enabled, it's enough to enable flushing
5556          * by checking only the available buffer. The threshold
5557          * defined here is > 90% full.
5558          * With user-space preservation enabled, the current buffer
5559          * should be checked too because the WB buffer size can shrink
5560          * as the disk fills up. This info is provided by the current
5561          * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5562          * keeping VCC on when the current buffer is empty.
5563          */
5564         index = ufshcd_wb_get_query_index(hba);
5565         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5566                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5567                                       index, 0, &avail_buf);
5568         if (ret) {
5569                 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5570                          __func__, ret);
5571                 return false;
5572         }
5573
5574         if (!hba->dev_info.b_presrv_uspc_en) {
5575                 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5576                         return true;
5577                 return false;
5578         }
5579
5580         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5581 }
5582
5583 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5584 {
5585         struct ufs_hba *hba = container_of(to_delayed_work(work),
5586                                            struct ufs_hba,
5587                                            rpm_dev_flush_recheck_work);
5588         /*
5589          * To prevent unnecessary VCC power drain after device finishes
5590          * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5591          * after a certain delay to recheck the threshold by next runtime
5592          * suspend.
5593          */
5594         pm_runtime_get_sync(hba->dev);
5595         pm_runtime_put_sync(hba->dev);
5596 }
5597
5598 /**
5599  * ufshcd_exception_event_handler - handle exceptions raised by device
5600  * @work: pointer to work data
5601  *
5602  * Read bExceptionEventStatus attribute from the device and handle the
5603  * exception event accordingly.
5604  */
5605 static void ufshcd_exception_event_handler(struct work_struct *work)
5606 {
5607         struct ufs_hba *hba;
5608         int err;
5609         u32 status = 0;
5610         hba = container_of(work, struct ufs_hba, eeh_work);
5611
5612         pm_runtime_get_sync(hba->dev);
5613         ufshcd_scsi_block_requests(hba);
5614         err = ufshcd_get_ee_status(hba, &status);
5615         if (err) {
5616                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5617                                 __func__, err);
5618                 goto out;
5619         }
5620
5621         status &= hba->ee_ctrl_mask;
5622
5623         if (status & MASK_EE_URGENT_BKOPS)
5624                 ufshcd_bkops_exception_event_handler(hba);
5625
5626 out:
5627         ufshcd_scsi_unblock_requests(hba);
5628         /*
5629          * pm_runtime_get_noresume is called while scheduling
5630          * eeh_work to avoid suspend racing with exception work.
5631          * Hence decrement usage counter using pm_runtime_put_noidle
5632          * to allow suspend on completion of exception event handler.
5633          */
5634         pm_runtime_put_noidle(hba->dev);
5635         pm_runtime_put(hba->dev);
5636         return;
5637 }
5638
5639 /* Complete requests that have door-bell cleared */
5640 static void ufshcd_complete_requests(struct ufs_hba *hba)
5641 {
5642         ufshcd_transfer_req_compl(hba);
5643         ufshcd_tmc_handler(hba);
5644 }
5645
5646 /**
5647  * ufshcd_quirk_dl_nac_errors - check whether error handling is required
5648  *                              to recover from DL NAC errors.
5649  * @hba: per-adapter instance
5650  *
5651  * Returns true if error handling is required, false otherwise
5652  */
5653 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5654 {
5655         unsigned long flags;
5656         bool err_handling = true;
5657
5658         spin_lock_irqsave(hba->host->host_lock, flags);
5659         /*
5660          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5661          * device fatal error and/or DL NAC & REPLAY timeout errors.
5662          */
5663         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5664                 goto out;
5665
5666         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5667             ((hba->saved_err & UIC_ERROR) &&
5668              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5669                 goto out;
5670
5671         if ((hba->saved_err & UIC_ERROR) &&
5672             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5673                 int err;
5674                 /*
5675                  * wait for 50ms to see if we can get any other errors or not.
5676                  */
5677                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5678                 msleep(50);
5679                 spin_lock_irqsave(hba->host->host_lock, flags);
5680
5681                 /*
5682                  * Now check whether we have got any other severe errors besides the
5683                  * DL NAC error.
5684                  */
5685                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5686                     ((hba->saved_err & UIC_ERROR) &&
5687                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5688                         goto out;
5689
5690                 /*
5691                  * As DL NAC is the only error received so far, send out NOP
5692                  * command to confirm if link is still active or not.
5693                  *   - If we don't get any response then do error recovery.
5694                  *   - If we get response then clear the DL NAC error bit.
5695                  */
5696
5697                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5698                 err = ufshcd_verify_dev_init(hba);
5699                 spin_lock_irqsave(hba->host->host_lock, flags);
5700
5701                 if (err)
5702                         goto out;
5703
5704                 /* Link seems to be alive hence ignore the DL NAC errors */
5705                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5706                         hba->saved_err &= ~UIC_ERROR;
5707                 /* clear NAC error */
5708                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5709                 if (!hba->saved_uic_err)
5710                         err_handling = false;
5711         }
5712 out:
5713         spin_unlock_irqrestore(hba->host->host_lock, flags);
5714         return err_handling;
5715 }
5716
5717 /* host lock must be held before calling this func */
5718 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5719 {
5720         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5721                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5722 }
5723
5724 /* host lock must be held before calling this func */
5725 static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5726 {
5727         /* handle fatal errors only when link is not in error state */
5728         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5729                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5730                     ufshcd_is_saved_err_fatal(hba))
5731                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5732                 else
5733                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5734                 queue_work(hba->eh_wq, &hba->eh_work);
5735         }
5736 }
5737
5738 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
5739 {
5740         down_write(&hba->clk_scaling_lock);
5741         hba->clk_scaling.is_allowed = allow;
5742         up_write(&hba->clk_scaling_lock);
5743 }
5744
5745 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
5746 {
5747         if (suspend) {
5748                 if (hba->clk_scaling.is_enabled)
5749                         ufshcd_suspend_clkscaling(hba);
5750                 ufshcd_clk_scaling_allow(hba, false);
5751         } else {
5752                 ufshcd_clk_scaling_allow(hba, true);
5753                 if (hba->clk_scaling.is_enabled)
5754                         ufshcd_resume_clkscaling(hba);
5755         }
5756 }
5757
5758 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5759 {
5760         pm_runtime_get_sync(hba->dev);
5761         if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
5762                 enum ufs_pm_op pm_op;
5763
5764                 /*
5765                  * Don't assume anything about the outcome of
5766                  * pm_runtime_get_sync(): if resume failed, irq and clocks
5767                  * can be OFF, and the power supplies can be OFF or in LPM.
5768                  */
5769                 ufshcd_setup_hba_vreg(hba, true);
5770                 ufshcd_enable_irq(hba);
5771                 ufshcd_setup_vreg(hba, true);
5772                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5773                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5774                 ufshcd_hold(hba, false);
5775                 if (!ufshcd_is_clkgating_allowed(hba))
5776                         ufshcd_setup_clocks(hba, true);
5777                 ufshcd_release(hba);
5778                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5779                 ufshcd_vops_resume(hba, pm_op);
5780         } else {
5781                 ufshcd_hold(hba, false);
5782                 if (ufshcd_is_clkscaling_supported(hba) &&
5783                     hba->clk_scaling.is_enabled)
5784                         ufshcd_suspend_clkscaling(hba);
5785                 ufshcd_clk_scaling_allow(hba, false);
5786         }
5787 }
5788
5789 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5790 {
5791         ufshcd_release(hba);
5792         if (ufshcd_is_clkscaling_supported(hba))
5793                 ufshcd_clk_scaling_suspend(hba, false);
5794         pm_runtime_put(hba->dev);
5795 }
5796
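/*
 * Error handling can be skipped when the HBA is not powered, is shutting
 * down, is already in the ERROR state, or when there is nothing left to
 * handle (no saved errors, no forced reset and the link is not broken).
 */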
5797 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5798 {
5799         return (!hba->is_powered || hba->shutting_down ||
5800                 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5801                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5802                    ufshcd_is_link_broken(hba))));
5803 }
5804
5805 #ifdef CONFIG_PM
5806 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5807 {
5808         struct Scsi_Host *shost = hba->host;
5809         struct scsi_device *sdev;
5810         struct request_queue *q;
5811         int ret;
5812
5813         hba->is_sys_suspended = false;
5814         /*
5815          * Set the RPM status of the hba device to RPM_ACTIVE;
5816          * this also clears its runtime error.
5817          */
5818         ret = pm_runtime_set_active(hba->dev);
5819         /*
5820          * If the hba device had a runtime error, we also need to resume the
5821          * scsi devices under the hba in case any of them has failed to be
5822          * resumed due to hba runtime resume failure. This is to unblock
5823          * blk_queue_enter in case there are bios waiting inside it.
5824          */
5825         if (!ret) {
5826                 shost_for_each_device(sdev, shost) {
5827                         q = sdev->request_queue;
5828                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
5829                                        q->rpm_status == RPM_SUSPENDING))
5830                                 pm_request_resume(q->dev);
5831                 }
5832         }
5833 }
5834 #else
5835 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
5836 {
5837 }
5838 #endif
5839
5840 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
5841 {
5842         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
5843         u32 mode;
5844
5845         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
5846
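        /*
         * PA_PWRMODE holds the RX power mode at PWRMODE_RX_OFFSET and the TX
         * power mode in the low bits; a restore is needed if either field no
         * longer matches the cached power info.
         */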
5847         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
5848                 return true;
5849
5850         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
5851                 return true;
5852
5853         return false;
5854 }
5855
5856 /**
5857  * ufshcd_err_handler - handle UFS errors that require s/w attention
5858  * @work: pointer to work structure
5859  */
5860 static void ufshcd_err_handler(struct work_struct *work)
5861 {
5862         struct ufs_hba *hba;
5863         unsigned long flags;
5864         bool err_xfer = false;
5865         bool err_tm = false;
5866         int err = 0, pmc_err;
5867         int tag;
5868         bool needs_reset = false, needs_restore = false;
5869
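        /*
         * Overall flow: bail out early if there is nothing to handle, block
         * SCSI requests, optionally apply the DL NAC quirk, try to clear
         * pending transfer/TM requests, restore the power mode if a LINERESET
         * put the link in PWM mode, and fall back to a full reset and restore
         * for fatal errors.
         */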
5870         hba = container_of(work, struct ufs_hba, eh_work);
5871
5872         down(&hba->host_sem);
5873         spin_lock_irqsave(hba->host->host_lock, flags);
5874         if (ufshcd_err_handling_should_stop(hba)) {
5875                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5876                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5877                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5878                 up(&hba->host_sem);
5879                 return;
5880         }
5881         ufshcd_set_eh_in_progress(hba);
5882         spin_unlock_irqrestore(hba->host->host_lock, flags);
5883         ufshcd_err_handling_prepare(hba);
5884         spin_lock_irqsave(hba->host->host_lock, flags);
5885         ufshcd_scsi_block_requests(hba);
5886         hba->ufshcd_state = UFSHCD_STATE_RESET;
5887
5888         /* Complete requests that have door-bell cleared by h/w */
5889         ufshcd_complete_requests(hba);
5890
5891         /*
5892          * A full reset and restore might have happened after preparation
5893          * finished, so double check whether we should stop.
5894          */
5895         if (ufshcd_err_handling_should_stop(hba))
5896                 goto skip_err_handling;
5897
5898         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5899                 bool ret;
5900
5901                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5902                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5903                 ret = ufshcd_quirk_dl_nac_errors(hba);
5904                 spin_lock_irqsave(hba->host->host_lock, flags);
5905                 if (!ret && ufshcd_err_handling_should_stop(hba))
5906                         goto skip_err_handling;
5907         }
5908
5909         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
5910             (hba->saved_uic_err &&
5911              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
5912                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
5913
5914                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5915                 ufshcd_print_host_state(hba);
5916                 ufshcd_print_pwr_info(hba);
5917                 ufshcd_print_evt_hist(hba);
5918                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5919                 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
5920                 spin_lock_irqsave(hba->host->host_lock, flags);
5921         }
5922
5923         /*
5924          * if host reset is required then skip clearing the pending
5925          * transfers forcefully because they will get cleared during
5926          * host reset and restore
5927          */
5928         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5929             ufshcd_is_saved_err_fatal(hba) ||
5930             ((hba->saved_err & UIC_ERROR) &&
5931              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5932                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
5933                 needs_reset = true;
5934                 goto do_reset;
5935         }
5936
5937         /*
5938          * If a LINERESET was caught, the UFS device might have been put
5939          * into PWM mode; check whether a power mode restore is needed.
5940          */
5941         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
5942                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
5943                 if (!hba->saved_uic_err)
5944                         hba->saved_err &= ~UIC_ERROR;
5945                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5946                 if (ufshcd_is_pwr_mode_restore_needed(hba))
5947                         needs_restore = true;
5948                 spin_lock_irqsave(hba->host->host_lock, flags);
5949                 if (!hba->saved_err && !needs_restore)
5950                         goto skip_err_handling;
5951         }
5952
5953         hba->silence_err_logs = true;
5954         /* release lock as clear command might sleep */
5955         spin_unlock_irqrestore(hba->host->host_lock, flags);
5956         /* Clear pending transfer requests */
5957         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5958                 if (ufshcd_try_to_abort_task(hba, tag)) {
5959                         err_xfer = true;
5960                         goto lock_skip_pending_xfer_clear;
5961                 }
5962         }
5963
5964         /* Clear pending task management requests */
5965         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5966                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5967                         err_tm = true;
5968                         goto lock_skip_pending_xfer_clear;
5969                 }
5970         }
5971
5972 lock_skip_pending_xfer_clear:
5973         spin_lock_irqsave(hba->host->host_lock, flags);
5974
5975         /* Complete the requests that are cleared by s/w */
5976         ufshcd_complete_requests(hba);
5977         hba->silence_err_logs = false;
5978
5979         if (err_xfer || err_tm) {
5980                 needs_reset = true;
5981                 goto do_reset;
5982         }
5983
5984         /*
5985          * After all reqs and tasks are cleared from the doorbell,
5986          * it is now safe to restore the power mode.
5987          */
5988         if (needs_restore) {
5989                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5990                 /*
5991                  * Hold the scaling lock just in case dev cmds
5992                  * are sent via bsg and/or sysfs.
5993                  */
5994                 down_write(&hba->clk_scaling_lock);
5995                 hba->force_pmc = true;
5996                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
5997                 if (pmc_err) {
5998                         needs_reset = true;
5999                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6000                                         __func__, pmc_err);
6001                 }
6002                 hba->force_pmc = false;
6003                 ufshcd_print_pwr_info(hba);
6004                 up_write(&hba->clk_scaling_lock);
6005                 spin_lock_irqsave(hba->host->host_lock, flags);
6006         }
6007
6008 do_reset:
6009         /* Fatal errors need reset */
6010         if (needs_reset) {
6011                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
6012
6013                 /*
6014                  * ufshcd_reset_and_restore() does the link reinitialization
6015                  * which will need at least one empty doorbell slot to send the
6016                  * device management commands (NOP and query commands).
6017                  * If no slot is empty at this moment then free up the last
6018                  * slot forcefully.
6019                  */
6020                 if (hba->outstanding_reqs == max_doorbells)
6021                         __ufshcd_transfer_req_compl(hba,
6022                                                     (1UL << (hba->nutrs - 1)));
6023
6024                 hba->force_reset = false;
6025                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6026                 err = ufshcd_reset_and_restore(hba);
6027                 if (err)
6028                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6029                                         __func__, err);
6030                 else
6031                         ufshcd_recover_pm_error(hba);
6032                 spin_lock_irqsave(hba->host->host_lock, flags);
6033         }
6034
6035 skip_err_handling:
6036         if (!needs_reset) {
6037                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6038                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6039                 if (hba->saved_err || hba->saved_uic_err)
6040                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6041                             __func__, hba->saved_err, hba->saved_uic_err);
6042         }
6043         ufshcd_clear_eh_in_progress(hba);
6044         spin_unlock_irqrestore(hba->host->host_lock, flags);
6045         ufshcd_scsi_unblock_requests(hba);
6046         ufshcd_err_handling_unprepare(hba);
6047         up(&hba->host_sem);
6048
6049         if (!err && needs_reset)
6050                 ufshcd_clear_ua_wluns(hba);
6051 }
6052
6053 /**
6054  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6055  * @hba: per-adapter instance
6056  *
6057  * Returns
6058  *  IRQ_HANDLED - If interrupt is valid
6059  *  IRQ_NONE    - If invalid interrupt
6060  */
6061 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6062 {
6063         u32 reg;
6064         irqreturn_t retval = IRQ_NONE;
6065
6066         /* PHY layer error */
6067         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6068         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6069             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6070                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6071                 /*
6072                  * To know whether this error is fatal or not, the DB timeout
6073                  * would have to be checked, but that error is handled separately.
6074                  */
6075                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6076                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6077                                         __func__);
6078
6079                 /* Got a LINERESET indication. */
6080                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6081                         struct uic_command *cmd = NULL;
6082
6083                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6084                         if (hba->uic_async_done && hba->active_uic_cmd)
6085                                 cmd = hba->active_uic_cmd;
6086                         /*
6087                          * Ignore the LINERESET during power mode change
6088                          * operation via DME_SET command.
6089                          */
6090                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
6091                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6092                 }
6093                 retval |= IRQ_HANDLED;
6094         }
6095
6096         /* PA_INIT_ERROR is fatal and needs UIC reset */
6097         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6098         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6099             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6100                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6101
6102                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6103                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6104                 else if (hba->dev_quirks &
6105                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6106                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6107                                 hba->uic_error |=
6108                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6109                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6110                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6111                 }
6112                 retval |= IRQ_HANDLED;
6113         }
6114
6115         /* UIC NL/TL/DME errors need software retry */
6116         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6117         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6118             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6119                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6120                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6121                 retval |= IRQ_HANDLED;
6122         }
6123
6124         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6125         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6126             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6127                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6128                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6129                 retval |= IRQ_HANDLED;
6130         }
6131
6132         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6133         if ((reg & UIC_DME_ERROR) &&
6134             (reg & UIC_DME_ERROR_CODE_MASK)) {
6135                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6136                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6137                 retval |= IRQ_HANDLED;
6138         }
6139
6140         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6141                         __func__, hba->uic_error);
6142         return retval;
6143 }
6144
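/*
 * Hibern8 enter/exit failure interrupts are treated as Auto-Hibern8 errors
 * only when auto-hibern8 is supported and enabled and no manual DME hibernate
 * enter/exit UIC command is in flight; manual hibern8 failures are reported
 * through the UIC command completion path instead.
 */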
6145 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
6146                                          u32 intr_mask)
6147 {
6148         if (!ufshcd_is_auto_hibern8_supported(hba) ||
6149             !ufshcd_is_auto_hibern8_enabled(hba))
6150                 return false;
6151
6152         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
6153                 return false;
6154
6155         if (hba->active_uic_cmd &&
6156             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
6157             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
6158                 return false;
6159
6160         return true;
6161 }
6162
6163 /**
6164  * ufshcd_check_errors - Check for errors that need s/w attention
6165  * @hba: per-adapter instance
6166  *
6167  * Returns
6168  *  IRQ_HANDLED - If interrupt is valid
6169  *  IRQ_NONE    - If invalid interrupt
6170  */
6171 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
6172 {
6173         bool queue_eh_work = false;
6174         irqreturn_t retval = IRQ_NONE;
6175
6176         if (hba->errors & INT_FATAL_ERRORS) {
6177                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6178                                        hba->errors);
6179                 queue_eh_work = true;
6180         }
6181
6182         if (hba->errors & UIC_ERROR) {
6183                 hba->uic_error = 0;
6184                 retval = ufshcd_update_uic_error(hba);
6185                 if (hba->uic_error)
6186                         queue_eh_work = true;
6187         }
6188
6189         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6190                 dev_err(hba->dev,
6191                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6192                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6193                         "Enter" : "Exit",
6194                         hba->errors, ufshcd_get_upmcrs(hba));
6195                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6196                                        hba->errors);
6197                 ufshcd_set_link_broken(hba);
6198                 queue_eh_work = true;
6199         }
6200
6201         if (queue_eh_work) {
6202                 /*
6203                  * Update the transfer error masks to sticky bits; do this
6204                  * irrespective of the current ufshcd_state.
6205                  */
6206                 hba->saved_err |= hba->errors;
6207                 hba->saved_uic_err |= hba->uic_error;
6208
6209                 /* dump controller state before resetting */
6210                 if ((hba->saved_err &
6211                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6212                     (hba->saved_uic_err &&
6213                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6214                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6215                                         __func__, hba->saved_err,
6216                                         hba->saved_uic_err);
6217                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6218                                          "host_regs: ");
6219                         ufshcd_print_pwr_info(hba);
6220                 }
6221                 ufshcd_schedule_eh_work(hba);
6222                 retval |= IRQ_HANDLED;
6223         }
6224         /*
6225          * if (!queue_eh_work) -
6226          * Other errors are either non-fatal, where the host recovers by
6227          * itself without s/w intervention, or errors that will be
6228          * handled by the SCSI core layer.
6229          */
6230         return retval;
6231 }
6232
6233 struct ctm_info {
6234         struct ufs_hba  *hba;
6235         unsigned long   pending;
6236         unsigned int    ncpl;
6237 };
6238
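/*
 * Called for each request in the TMF tag set: TM requests whose doorbell bit
 * has already been cleared by the controller are completed and counted in
 * ci->ncpl, while bits still set in ci->pending are still outstanding and are
 * skipped.
 */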
6239 static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
6240 {
6241         struct ctm_info *const ci = priv;
6242         struct completion *c;
6243
6244         WARN_ON_ONCE(reserved);
6245         if (test_bit(req->tag, &ci->pending))
6246                 return true;
6247         ci->ncpl++;
6248         c = req->end_io_data;
6249         if (c)
6250                 complete(c);
6251         return true;
6252 }
6253
6254 /**
6255  * ufshcd_tmc_handler - handle task management function completion
6256  * @hba: per adapter instance
6257  *
6258  * Returns
6259  *  IRQ_HANDLED - If interrupt is valid
6260  *  IRQ_NONE    - If invalid interrupt
6261  */
6262 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6263 {
6264         struct request_queue *q = hba->tmf_queue;
6265         struct ctm_info ci = {
6266                 .hba     = hba,
6267                 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
6268         };
6269
6270         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
6271         return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
6272 }
6273
6274 /**
6275  * ufshcd_sl_intr - Interrupt service routine
6276  * @hba: per adapter instance
6277  * @intr_status: contains interrupts generated by the controller
6278  *
6279  * Returns
6280  *  IRQ_HANDLED - If interrupt is valid
6281  *  IRQ_NONE    - If invalid interrupt
6282  */
6283 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6284 {
6285         irqreturn_t retval = IRQ_NONE;
6286
6287         hba->errors = UFSHCD_ERROR_MASK & intr_status;
6288
6289         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
6290                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
6291
6292         if (hba->errors)
6293                 retval |= ufshcd_check_errors(hba);
6294
6295         if (intr_status & UFSHCD_UIC_MASK)
6296                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6297
6298         if (intr_status & UTP_TASK_REQ_COMPL)
6299                 retval |= ufshcd_tmc_handler(hba);
6300
6301         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6302                 retval |= ufshcd_transfer_req_compl(hba);
6303
6304         return retval;
6305 }
6306
6307 /**
6308  * ufshcd_intr - Main interrupt service routine
6309  * @irq: irq number
6310  * @__hba: pointer to adapter instance
6311  *
6312  * Returns
6313  *  IRQ_HANDLED - If interrupt is valid
6314  *  IRQ_NONE    - If invalid interrupt
6315  */
6316 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6317 {
6318         u32 intr_status, enabled_intr_status = 0;
6319         irqreturn_t retval = IRQ_NONE;
6320         struct ufs_hba *hba = __hba;
6321         int retries = hba->nutrs;
6322
6323         spin_lock(hba->host->host_lock);
6324         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6325         hba->ufs_stats.last_intr_status = intr_status;
6326         hba->ufs_stats.last_intr_ts = ktime_get();
6327
6328         /*
6329          * There can be at most hba->nutrs reqs in flight. In the worst case,
6330          * the reqs may finish one by one after the interrupt status has been
6331          * read, so make sure we handle them by checking the interrupt status
6332          * again in a loop until all of the reqs are processed before returning.
6333          */
6334         while (intr_status && retries--) {
6335                 enabled_intr_status =
6336                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6337                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6338                 if (enabled_intr_status)
6339                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6340
6341                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6342         }
6343
6344         if (enabled_intr_status && retval == IRQ_NONE &&
6345                                 !ufshcd_eh_in_progress(hba)) {
6346                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6347                                         __func__,
6348                                         intr_status,
6349                                         hba->ufs_stats.last_intr_status,
6350                                         enabled_intr_status);
6351                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6352         }
6353
6354         spin_unlock(hba->host->host_lock);
6355         return retval;
6356 }
6357
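/*
 * Clear a task management request that is still outstanding in the
 * controller: clear its UTMRL slot and then poll the task doorbell until the
 * hardware drops the corresponding bit.
 */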
6358 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6359 {
6360         int err = 0;
6361         u32 mask = 1 << tag;
6362         unsigned long flags;
6363
6364         if (!test_bit(tag, &hba->outstanding_tasks))
6365                 goto out;
6366
6367         spin_lock_irqsave(hba->host->host_lock, flags);
6368         ufshcd_utmrl_clear(hba, tag);
6369         spin_unlock_irqrestore(hba->host->host_lock, flags);
6370
6371         /* poll for max. 1 sec for the doorbell register to be cleared by h/w */
6372         err = ufshcd_wait_for_register(hba,
6373                         REG_UTP_TASK_REQ_DOOR_BELL,
6374                         mask, 0, 1000, 1000);
6375 out:
6376         return err;
6377 }
6378
6379 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6380                 struct utp_task_req_desc *treq, u8 tm_function)
6381 {
6382         struct request_queue *q = hba->tmf_queue;
6383         struct Scsi_Host *host = hba->host;
6384         DECLARE_COMPLETION_ONSTACK(wait);
6385         struct request *req;
6386         unsigned long flags;
6387         int free_slot, task_tag, err;
6388
6389         /*
6390          * Get a free slot; blk_get_request() sleeps if no slot is available.
6391          * Even though the allocation can sleep while waiting for a tag,
6392          * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
6393          */
6394         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6395         if (IS_ERR(req))
6396                 return PTR_ERR(req);
6397
6398         req->end_io_data = &wait;
6399         free_slot = req->tag;
6400         WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
6401         ufshcd_hold(hba, false);
6402
6403         spin_lock_irqsave(host->host_lock, flags);
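        /*
         * The TM request task tag is placed after the hba->nutrs transfer
         * request tags, keeping it distinct from the tags used by transfer
         * requests.
         */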
6404         task_tag = hba->nutrs + free_slot;
6405
6406         treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6407
6408         memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
6409         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
6410
6411         /* send command to the controller */
6412         __set_bit(free_slot, &hba->outstanding_tasks);
6413
6414         /* Make sure descriptors are ready before ringing the task doorbell */
6415         wmb();
6416
6417         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
6418         /* Make sure that doorbell is committed immediately */
6419         wmb();
6420
6421         spin_unlock_irqrestore(host->host_lock, flags);
6422
6423         ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
6424
6425         /* wait until the task management command is completed */
6426         err = wait_for_completion_io_timeout(&wait,
6427                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6428         if (!err) {
6429                 /*
6430                  * Make sure that ufshcd_compl_tm() does not trigger a
6431                  * use-after-free.
6432                  */
6433                 req->end_io_data = NULL;
6434                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
6435                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6436                                 __func__, tm_function);
6437                 if (ufshcd_clear_tm_cmd(hba, free_slot))
6438                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6439                                         __func__, free_slot);
6440                 err = -ETIMEDOUT;
6441         } else {
6442                 err = 0;
6443                 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
6444
6445                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
6446         }
6447
6448         spin_lock_irqsave(hba->host->host_lock, flags);
6449         __clear_bit(free_slot, &hba->outstanding_tasks);
6450         spin_unlock_irqrestore(hba->host->host_lock, flags);
6451
6452         blk_put_request(req);
6453
6454         ufshcd_release(hba);
6455         return err;
6456 }
6457
6458 /**
6459  * ufshcd_issue_tm_cmd - issues task management commands to controller
6460  * @hba: per adapter instance
6461  * @lun_id: LUN ID to which TM command is sent
6462  * @task_id: task ID to which the TM command is applicable
6463  * @tm_function: task management function opcode
6464  * @tm_response: task management service response return value
6465  *
6466  * Returns non-zero value on error, zero on success.
6467  */
6468 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6469                 u8 tm_function, u8 *tm_response)
6470 {
6471         struct utp_task_req_desc treq = { { 0 }, };
6472         int ocs_value, err;
6473
6474         /* Configure task request descriptor */
6475         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
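        /*
         * Initialize the OCS field to an invalid status so that the OCS check
         * after completion can tell whether the controller filled in a real
         * status.
         */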
6476         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6477
6478         /* Configure task request UPIU */
6479         treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6480                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6481         treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6482
6483         /*
6484          * The host shall provide the same value for LUN field in the basic
6485          * header and for Input Parameter.
6486          */
6487         treq.input_param1 = cpu_to_be32(lun_id);
6488         treq.input_param2 = cpu_to_be32(task_id);
6489
6490         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6491         if (err == -ETIMEDOUT)
6492                 return err;
6493
6494         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6495         if (ocs_value != OCS_SUCCESS)
6496                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6497                                 __func__, ocs_value);
6498         else if (tm_response)
6499                 *tm_response = be32_to_cpu(treq.output_param1) &
6500                                 MASK_TM_SERVICE_RESP;
6501         return err;
6502 }
6503
6504 /**
6505  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6506  * @hba:        per-adapter instance
6507  * @req_upiu:   upiu request
6508  * @rsp_upiu:   upiu reply
6509  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6510  * @buff_len:   descriptor size, 0 if NA
6511  * @cmd_type:   specifies the type (NOP, Query...)
6512  * @desc_op:    descriptor operation
6513  *
6514  * These types of requests use the UTP Transfer Request Descriptor - utrd.
6515  * Therefore, they "ride" the device management infrastructure: they use its
6516  * tag and task work queues.
6517  *
6518  * Since there is only one available tag for device management commands,
6519  * the caller is expected to hold the hba->dev_cmd.lock mutex.
6520  */
6521 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6522                                         struct utp_upiu_req *req_upiu,
6523                                         struct utp_upiu_req *rsp_upiu,
6524                                         u8 *desc_buff, int *buff_len,
6525                                         enum dev_cmd_type cmd_type,
6526                                         enum query_opcode desc_op)
6527 {
6528         struct request_queue *q = hba->cmd_queue;
6529         struct request *req;
6530         struct ufshcd_lrb *lrbp;
6531         int err = 0;
6532         int tag;
6533         struct completion wait;
6534         unsigned long flags;
6535         u8 upiu_flags;
6536
6537         down_read(&hba->clk_scaling_lock);
6538
6539         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6540         if (IS_ERR(req)) {
6541                 err = PTR_ERR(req);
6542                 goto out_unlock;
6543         }
6544         tag = req->tag;
6545         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6546
6547         init_completion(&wait);
6548         lrbp = &hba->lrb[tag];
6549         if (unlikely(lrbp->in_use)) {
6550                 err = -EBUSY;
6551                 goto out;
6552         }
6553
6554         WARN_ON(lrbp->cmd);
6555         lrbp->cmd = NULL;
6556         lrbp->sense_bufflen = 0;
6557         lrbp->sense_buffer = NULL;
6558         lrbp->task_tag = tag;
6559         lrbp->lun = 0;
6560         lrbp->intr_cmd = true;
6561         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6562         hba->dev_cmd.type = cmd_type;
6563
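        /*
         * Legacy UFSHCI 1.0/1.1 controllers use the DEV_MANAGE command type
         * for device management requests; later versions use the generic
         * UFS_STORAGE command type.
         */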
6564         switch (hba->ufs_version) {
6565         case UFSHCI_VERSION_10:
6566         case UFSHCI_VERSION_11:
6567                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6568                 break;
6569         default:
6570                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6571                 break;
6572         }
6573
6574         /* update the task tag in the request upiu */
6575         req_upiu->header.dword_0 |= cpu_to_be32(tag);
6576
6577         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6578
6579         /* just copy the upiu request as it is */
6580         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6581         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6582                 /* The Data Segment Area is optional depending upon the query
6583                  * function value. For WRITE DESCRIPTOR, the data segment
6584                  * follows right after the tsf.
6585                  */
6586                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6587                 *buff_len = 0;
6588         }
6589
6590         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6591
6592         hba->dev_cmd.complete = &wait;
6593
6594         /* Make sure descriptors are ready before ringing the doorbell */
6595         wmb();
6596         spin_lock_irqsave(hba->host->host_lock, flags);
6597         ufshcd_send_command(hba, tag);
6598         spin_unlock_irqrestore(hba->host->host_lock, flags);
6599
6600         /*
6601          * Ignore the return value here - ufshcd_check_query_response() is
6602          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6603          * Read the response directly, ignoring all errors.
6604          */
6605         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6606
6607         /* just copy the upiu response as it is */
6608         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6609         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6610                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6611                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6612                                MASK_QUERY_DATA_SEG_LEN;
6613
6614                 if (*buff_len >= resp_len) {
6615                         memcpy(desc_buff, descp, resp_len);
6616                         *buff_len = resp_len;
6617                 } else {
6618                         dev_warn(hba->dev,
6619                                  "%s: rsp size %d is bigger than buffer size %d",
6620                                  __func__, resp_len, *buff_len);
6621                         *buff_len = 0;
6622                         err = -EINVAL;
6623                 }
6624         }
6625
6626 out:
6627         blk_put_request(req);
6628 out_unlock:
6629         up_read(&hba->clk_scaling_lock);
6630         return err;
6631 }
6632
6633 /**
6634  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6635  * @hba:        per-adapter instance
6636  * @req_upiu:   upiu request
6637  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
6638  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
6639  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6640  * @buff_len:   descriptor size, 0 if NA
6641  * @desc_op:    descriptor operation
6642  *
6643  * Supports UTP Transfer requests (nop and query), and UTP Task
6644  * Management requests.
6645  * It is up to the caller to fill the upiu content properly, as it will
6646  * be copied without any further input validation.
6647  */
6648 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6649                              struct utp_upiu_req *req_upiu,
6650                              struct utp_upiu_req *rsp_upiu,
6651                              int msgcode,
6652                              u8 *desc_buff, int *buff_len,
6653                              enum query_opcode desc_op)
6654 {
6655         int err;
6656         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6657         struct utp_task_req_desc treq = { { 0 }, };
6658         int ocs_value;
6659         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6660
6661         switch (msgcode) {
6662         case UPIU_TRANSACTION_NOP_OUT:
6663                 cmd_type = DEV_CMD_TYPE_NOP;
6664                 fallthrough;
6665         case UPIU_TRANSACTION_QUERY_REQ:
6666                 ufshcd_hold(hba, false);
6667                 mutex_lock(&hba->dev_cmd.lock);
6668                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6669                                                    desc_buff, buff_len,
6670                                                    cmd_type, desc_op);
6671                 mutex_unlock(&hba->dev_cmd.lock);
6672                 ufshcd_release(hba);
6673
6674                 break;
6675         case UPIU_TRANSACTION_TASK_REQ:
6676                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6677                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6678
6679                 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6680
6681                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6682                 if (err == -ETIMEDOUT)
6683                         break;
6684
6685                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6686                 if (ocs_value != OCS_SUCCESS) {
6687                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6688                                 ocs_value);
6689                         break;
6690                 }
6691
6692                 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6693
6694                 break;
6695         default:
6696                 err = -EINVAL;
6697
6698                 break;
6699         }
6700
6701         return err;
6702 }
6703
6704 /**
6705  * ufshcd_eh_device_reset_handler - device reset handler registered to
6706  *                                    scsi layer.
6707  * @cmd: SCSI command pointer
6708  *
6709  * Returns SUCCESS/FAILED
6710  */
6711 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6712 {
6713         struct Scsi_Host *host;
6714         struct ufs_hba *hba;
6715         u32 pos;
6716         int err;
6717         u8 resp = 0xF, lun;
6718         unsigned long flags;
6719
6720         host = cmd->device->host;
6721         hba = shost_priv(host);
6722
6723         lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
6724         err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
6725         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6726                 if (!err)
6727                         err = resp;
6728                 goto out;
6729         }
6730
6731         /* clear the commands that were pending for the corresponding LUN */
6732         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6733                 if (hba->lrb[pos].lun == lun) {
6734                         err = ufshcd_clear_cmd(hba, pos);
6735                         if (err)
6736                                 break;
6737                 }
6738         }
6739         spin_lock_irqsave(host->host_lock, flags);
6740         ufshcd_transfer_req_compl(hba);
6741         spin_unlock_irqrestore(host->host_lock, flags);
6742
6743 out:
6744         hba->req_abort_count = 0;
6745         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
6746         if (!err) {
6747                 err = SUCCESS;
6748         } else {
6749                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6750                 err = FAILED;
6751         }
6752         return err;
6753 }
6754
6755 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6756 {
6757         struct ufshcd_lrb *lrbp;
6758         int tag;
6759
6760         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6761                 lrbp = &hba->lrb[tag];
6762                 lrbp->req_abort_skip = true;
6763         }
6764 }
6765
6766 /**
6767  * ufshcd_try_to_abort_task - abort a specific task
6768  * @hba: Pointer to adapter instance
6769  * @tag: Task tag/index to be aborted
6770  *
6771  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6772  * management command, and in the host controller by clearing the door-bell
6773  * register. There can be a race between the controller sending the command to
6774  * the device and the abort being issued. To avoid that, first issue
6775  * UFS_QUERY_TASK to check if the command was really issued and then try to abort it.
6776  *
6777  * Returns zero on success, non-zero on failure
6778  */
6779 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
6780 {
6781         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6782         int err = 0;
6783         int poll_cnt;
6784         u8 resp = 0xF;
6785         u32 reg;
6786
6787         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6788                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6789                                 UFS_QUERY_TASK, &resp);
6790                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6791                         /* cmd pending in the device */
6792                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6793                                 __func__, tag);
6794                         break;
6795                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6796                         /*
6797                          * cmd not pending in the device, check if it is
6798                          * in transition.
6799                          */
6800                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6801                                 __func__, tag);
6802                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6803                         if (reg & (1 << tag)) {
6804                                 /* sleep for max. 200us to stabilize */
6805                                 usleep_range(100, 200);
6806                                 continue;
6807                         }
6808                         /* command completed already */
6809                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6810                                 __func__, tag);
6811                         goto out;
6812                 } else {
6813                         dev_err(hba->dev,
6814                                 "%s: no response from device. tag = %d, err %d\n",
6815                                 __func__, tag, err);
6816                         if (!err)
6817                                 err = resp; /* service response error */
6818                         goto out;
6819                 }
6820         }
6821
6822         if (!poll_cnt) {
6823                 err = -EBUSY;
6824                 goto out;
6825         }
6826
6827         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6828                         UFS_ABORT_TASK, &resp);
6829         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6830                 if (!err) {
6831                         err = resp; /* service response error */
6832                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6833                                 __func__, tag, err);
6834                 }
6835                 goto out;
6836         }
6837
6838         err = ufshcd_clear_cmd(hba, tag);
6839         if (err)
6840                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6841                         __func__, tag, err);
6842
6843 out:
6844         return err;
6845 }
6846
6847 /**
6848  * ufshcd_abort - scsi host template eh_abort_handler callback
6849  * @cmd: SCSI command pointer
6850  *
6851  * Returns SUCCESS/FAILED
6852  */
6853 static int ufshcd_abort(struct scsi_cmnd *cmd)
6854 {
6855         struct Scsi_Host *host;
6856         struct ufs_hba *hba;
6857         unsigned long flags;
6858         unsigned int tag;
6859         int err = 0;
6860         struct ufshcd_lrb *lrbp;
6861         u32 reg;
6862
6863         host = cmd->device->host;
6864         hba = shost_priv(host);
6865         tag = cmd->request->tag;
6866         lrbp = &hba->lrb[tag];
6867         if (!ufshcd_valid_tag(hba, tag)) {
6868                 dev_err(hba->dev,
6869                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6870                         __func__, tag, cmd, cmd->request);
6871                 BUG();
6872         }
6873
6874         ufshcd_hold(hba, false);
6875         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6876         /* If command is already aborted/completed, return SUCCESS */
6877         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6878                 dev_err(hba->dev,
6879                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6880                         __func__, tag, hba->outstanding_reqs, reg);
6881                 goto out;
6882         }
6883
6884         /* Print Transfer Request of aborted task */
6885         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6886
6887         /*
6888          * Print detailed info about aborted request.
6889          * As more than one request might get aborted at the same time,
6890          * print full information only for the first aborted request in order
6891          * to reduce repeated printouts. For other aborted requests only print
6892          * basic details.
6893          */
6894         scsi_print_command(cmd);
6895         if (!hba->req_abort_count) {
6896                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
6897                 ufshcd_print_evt_hist(hba);
6898                 ufshcd_print_host_state(hba);
6899                 ufshcd_print_pwr_info(hba);
6900                 ufshcd_print_trs(hba, 1 << tag, true);
6901         } else {
6902                 ufshcd_print_trs(hba, 1 << tag, false);
6903         }
6904         hba->req_abort_count++;
6905
6906         if (!(reg & (1 << tag))) {
6907                 dev_err(hba->dev,
6908                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6909                 __func__, tag);
6910                 goto cleanup;
6911         }
6912
6913         /*
6914          * Task abort to the device W-LUN is illegal. When this command
6915          * fails due to the spec violation, the next step of scsi error
6916          * handling will be to send an LU reset which, again, is a spec
6917          * violation. To avoid these unnecessary/illegal steps, first we
6918          * clean up the lrb taken by this cmd and mark the lrb as in_use,
6919          * then queue the eh_work and bail.
6920          */
6921         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
6922                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
6923                 spin_lock_irqsave(host->host_lock, flags);
6924                 if (lrbp->cmd) {
6925                         __ufshcd_transfer_req_compl(hba, (1UL << tag));
6926                         __set_bit(tag, &hba->outstanding_reqs);
6927                         lrbp->in_use = true;
6928                         hba->force_reset = true;
6929                         ufshcd_schedule_eh_work(hba);
6930                 }
6931
6932                 spin_unlock_irqrestore(host->host_lock, flags);
6933                 goto out;
6934         }
6935
6936         /* Skip task abort in case previous aborts failed and report failure */
6937         if (lrbp->req_abort_skip)
6938                 err = -EIO;
6939         else
6940                 err = ufshcd_try_to_abort_task(hba, tag);
6941
6942         if (!err) {
6943 cleanup:
6944                 spin_lock_irqsave(host->host_lock, flags);
6945                 __ufshcd_transfer_req_compl(hba, (1UL << tag));
6946                 spin_unlock_irqrestore(host->host_lock, flags);
6947 out:
6948                 err = SUCCESS;
6949         } else {
6950                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6951                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6952                 err = FAILED;
6953         }
6954
6955         /*
6956          * This ufshcd_release() corresponds to the original scsi cmd that got
6957          * aborted here (as we won't get any IRQ for it).
6958          */
6959         ufshcd_release(hba);
6960         return err;
6961 }
6962
6963 /**
6964  * ufshcd_host_reset_and_restore - reset and restore host controller
6965  * @hba: per-adapter instance
6966  *
6967  * Note that host controller reset may issue DME_RESET to
6968  * Note that a host controller reset may issue DME_RESET to the
6969  * local and remote (device) Uni-Pro stacks and the attributes
6970  *
6971  * Returns zero on success, non-zero on failure
6972  */
6973 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6974 {
6975         int err;
6976         unsigned long flags;
6977
6978         /*
6979          * Stop the host controller and complete the requests
6980          * cleared by h/w
6981          */
6982         ufshcd_hba_stop(hba);
6983
6984         spin_lock_irqsave(hba->host->host_lock, flags);
6985         hba->silence_err_logs = true;
6986         ufshcd_complete_requests(hba);
6987         hba->silence_err_logs = false;
6988         spin_unlock_irqrestore(hba->host->host_lock, flags);
6989
6990         /* scale up clocks to max frequency before full reinitialization */
6991         ufshcd_set_clk_freq(hba, true);
6992
6993         err = ufshcd_hba_enable(hba);
6994
6995         /* Establish the link again and restore the device */
6996         if (!err)
6997                 err = ufshcd_probe_hba(hba, false);
6998
6999         if (err)
7000                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7001         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7002         return err;
7003 }
7004
7005 /**
7006  * ufshcd_reset_and_restore - reset and re-initialize host/device
7007  * @hba: per-adapter instance
7008  *
7009  * Reset and recover device, host and re-establish link. This
7010  * is helpful to recover the communication in fatal error conditions.
7011  *
7012  * Returns zero on success, non-zero on failure
7013  */
7014 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7015 {
7016         u32 saved_err;
7017         u32 saved_uic_err;
7018         int err = 0;
7019         unsigned long flags;
7020         int retries = MAX_HOST_RESET_RETRIES;
7021
7022         /*
7023          * This is a fresh start; cache and clear the saved errors first,
7024          * in case new errors are generated during reset and restore.
7025          */
7026         spin_lock_irqsave(hba->host->host_lock, flags);
7027         saved_err = hba->saved_err;
7028         saved_uic_err = hba->saved_uic_err;
7029         hba->saved_err = 0;
7030         hba->saved_uic_err = 0;
7031         spin_unlock_irqrestore(hba->host->host_lock, flags);
7032
7033         do {
7034                 /* Reset the attached device */
7035                 ufshcd_device_reset(hba);
7036
7037                 err = ufshcd_host_reset_and_restore(hba);
7038         } while (err && --retries);
7039
7040         spin_lock_irqsave(hba->host->host_lock, flags);
7041         /*
7042          * Inform the scsi mid-layer that we did a reset and allow it to
7043          * handle Unit Attention properly.
7044          */
7045         scsi_report_bus_reset(hba->host, 0);
7046         if (err) {
7047                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7048                 hba->saved_err |= saved_err;
7049                 hba->saved_uic_err |= saved_uic_err;
7050         }
7051         spin_unlock_irqrestore(hba->host->host_lock, flags);
7052
7053         return err;
7054 }
7055
7056 /**
7057  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7058  * @cmd: SCSI command pointer
7059  *
7060  * Returns SUCCESS/FAILED
7061  */
7062 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7063 {
7064         int err = SUCCESS;
7065         unsigned long flags;
7066         struct ufs_hba *hba;
7067
7068         hba = shost_priv(cmd->device->host);
7069
7070         spin_lock_irqsave(hba->host->host_lock, flags);
7071         hba->force_reset = true;
7072         ufshcd_schedule_eh_work(hba);
7073         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7074         spin_unlock_irqrestore(hba->host->host_lock, flags);
7075
7076         flush_work(&hba->eh_work);
7077
7078         spin_lock_irqsave(hba->host->host_lock, flags);
7079         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7080                 err = FAILED;
7081         spin_unlock_irqrestore(hba->host->host_lock, flags);
7082
7083         return err;
7084 }
7085
7086 /**
7087  * ufshcd_get_max_icc_level - calculate the ICC level
7088  * @sup_curr_uA: max. current supported by the regulator
7089  * @start_scan: row in the desc table to start the scan from
7090  * @buff: power descriptor buffer
7091  *
7092  * Returns calculated max ICC level for the specific regulator
7093  */
7094 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
7095 {
7096         int i;
7097         int curr_uA;
7098         u16 data;
7099         u16 unit;
7100
7101         for (i = start_scan; i >= 0; i--) {
7102                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
7103                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7104                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7105                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7106                 switch (unit) {
7107                 case UFSHCD_NANO_AMP:
7108                         curr_uA = curr_uA / 1000;
7109                         break;
7110                 case UFSHCD_MILI_AMP:
7111                         curr_uA = curr_uA * 1000;
7112                         break;
7113                 case UFSHCD_AMP:
7114                         curr_uA = curr_uA * 1000 * 1000;
7115                         break;
7116                 case UFSHCD_MICRO_AMP:
7117                 default:
7118                         break;
7119                 }
7120                 if (sup_curr_uA >= curr_uA)
7121                         break;
7122         }
7123         if (i < 0) {
7124                 i = 0;
7125                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7126         }
7127
7128         return (u32)i;
7129 }
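
/*
 * Worked example (illustrative only, not part of the driver logic): each
 * 16-bit entry in the power descriptor's active ICC table encodes a unit
 * field and a value field. Assuming one entry decodes to unit ==
 * UFSHCD_MILI_AMP and value == 100, the loop above normalizes it to
 * 100 * 1000 = 100000 uA. The table is scanned from the highest index
 * downwards, and the first index whose normalized current fits within the
 * regulator's limit (e.g. a 150000 uA supply >= 100000 uA) is returned as
 * the ICC level.
 */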
7130
7131 /**
7132  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7133  * @hba: per-adapter instance
7134  * @desc_buf: power descriptor buffer to extract ICC levels from.
7135  * @len: length of desc_buf
7136  *
7137  * Returns the calculated ICC level, or 0 if the regulators are not
7138  * initialized.
7139  */
7140 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7141                                                         u8 *desc_buf, int len)
7142 {
7143         u32 icc_level = 0;
7144
7145         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7146                                                 !hba->vreg_info.vccq2) {
7147                 dev_err(hba->dev,
7148                         "%s: Regulator capability was not set, actvIccLevel=%d",
7149                                                         __func__, icc_level);
7150                 goto out;
7151         }
7152
7153         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
7154                 icc_level = ufshcd_get_max_icc_level(
7155                                 hba->vreg_info.vcc->max_uA,
7156                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7157                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7158
7159         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
7160                 icc_level = ufshcd_get_max_icc_level(
7161                                 hba->vreg_info.vccq->max_uA,
7162                                 icc_level,
7163                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7164
7165         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
7166                 icc_level = ufshcd_get_max_icc_level(
7167                                 hba->vreg_info.vccq2->max_uA,
7168                                 icc_level,
7169                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7170 out:
7171         return icc_level;
7172 }
7173
7174 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7175 {
7176         int ret;
7177         int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
7178         u8 *desc_buf;
7179         u32 icc_level;
7180
7181         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7182         if (!desc_buf)
7183                 return;
7184
7185         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7186                                      desc_buf, buff_len);
7187         if (ret) {
7188                 dev_err(hba->dev,
7189                         "%s: Failed reading power descriptor. len = %d ret = %d",
7190                         __func__, buff_len, ret);
7191                 goto out;
7192         }
7193
7194         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7195                                                          buff_len);
7196         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7197
7198         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7199                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7200
7201         if (ret)
7202                 dev_err(hba->dev,
7203                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7204                         __func__, icc_level, ret);
7205
7206 out:
7207         kfree(desc_buf);
7208 }
7209
7210 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7211 {
7212         scsi_autopm_get_device(sdev);
7213         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7214         if (sdev->rpm_autosuspend)
7215                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7216                                                  RPM_AUTOSUSPEND_DELAY_MS);
7217         scsi_autopm_put_device(sdev);
7218 }
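
/*
 * Note (illustrative, behaviour of the generic PM core rather than this
 * driver): with RPM_AUTOSUSPEND_DELAY_MS == 2000, a W-LU request queue set
 * up here is runtime-suspended after roughly two seconds of idleness once
 * block-layer runtime PM is enabled for it. User space can usually inspect
 * or override this through the SCSI device's power/autosuspend_delay_ms
 * sysfs attribute (attribute name assumed from the runtime PM core, not
 * defined in this file).
 */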
7219
7220 /**
7221  * ufshcd_scsi_add_wlus - Adds required W-LUs
7222  * @hba: per-adapter instance
7223  *
7224  * UFS device specification requires the UFS devices to support 4 well known
7225  * logical units:
7226  *      "REPORT_LUNS" (address: 01h)
7227  *      "UFS Device" (address: 50h)
7228  *      "RPMB" (address: 44h)
7229  *      "BOOT" (address: 30h)
7230  * The UFS device's power management is controlled by the "POWER CONDITION"
7231  * field of the SSU (START STOP UNIT) command, but this field takes effect
7232  * only when it is sent to the "UFS device" well known logical unit. Hence we
7233  * require a scsi_device instance for this logical unit so that the UFS host
7234  * driver can send the SSU command for power management.
7235  *
7236  * We also require a scsi_device instance for the "RPMB" (Replay Protected
7237  * Memory Block) LU so that a user space process can control this LU. User
7238  * space may also want access to the BOOT LU.
7239  *
7240  * This function adds scsi_device instances for all of the well known LUs
7241  * (except the "REPORT LUNS" LU).
7242  *
7243  * Returns zero on success (all required W-LUs are added successfully),
7244  * non-zero error value on failure (if failed to add any of the required W-LU).
7245  */
7246 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
7247 {
7248         int ret = 0;
7249         struct scsi_device *sdev_boot;
7250
7251         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
7252                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
7253         if (IS_ERR(hba->sdev_ufs_device)) {
7254                 ret = PTR_ERR(hba->sdev_ufs_device);
7255                 hba->sdev_ufs_device = NULL;
7256                 goto out;
7257         }
7258         ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
7259         scsi_device_put(hba->sdev_ufs_device);
7260
7261         hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7262                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7263         if (IS_ERR(hba->sdev_rpmb)) {
7264                 ret = PTR_ERR(hba->sdev_rpmb);
7265                 goto remove_sdev_ufs_device;
7266         }
7267         ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
7268         scsi_device_put(hba->sdev_rpmb);
7269
7270         sdev_boot = __scsi_add_device(hba->host, 0, 0,
7271                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7272         if (IS_ERR(sdev_boot)) {
7273                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
7274         } else {
7275                 ufshcd_blk_pm_runtime_init(sdev_boot);
7276                 scsi_device_put(sdev_boot);
7277         }
7278         goto out;
7279
7280 remove_sdev_ufs_device:
7281         scsi_remove_device(hba->sdev_ufs_device);
7282 out:
7283         return ret;
7284 }
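
/*
 * Sketch of how the cached pointers are used later (it mirrors code further
 * down in this file and is shown here only for orientation): sending a power
 * management SSU to the device boils down to
 *
 *	struct scsi_device *sdp = hba->sdev_ufs_device; // "UFS Device" W-LU (50h)
 *	if (sdp)
 *		ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
 *				   START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
 *
 * which is what ufshcd_set_dev_pwr_mode() does, while the RPMB W-LU (44h) is
 * exposed the same way so that user space (e.g. through ufs_bsg) can reach it.
 */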
7285
7286 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7287 {
7288         struct ufs_dev_info *dev_info = &hba->dev_info;
7289         u8 lun;
7290         u32 d_lu_wb_buf_alloc;
7291         u32 ext_ufs_feature;
7292
7293         if (!ufshcd_is_wb_allowed(hba))
7294                 return;
7295         /*
7296          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7297          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7298          * enabled
7299          */
7300         if (!(dev_info->wspecversion >= 0x310 ||
7301               dev_info->wspecversion == 0x220 ||
7302              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7303                 goto wb_disabled;
7304
7305         if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7306             DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7307                 goto wb_disabled;
7308
7309         ext_ufs_feature = get_unaligned_be32(desc_buf +
7310                                         DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7311
7312         if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
7313                 goto wb_disabled;
7314
7315         /*
7316          * WB may be supported but not configured during provisioning. The spec
7317          * says that, in dedicated WB buffer mode, at most one LUN will have a
7318          * WB buffer configured.
7319          */
7320         dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7321
7322         dev_info->b_presrv_uspc_en =
7323                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7324
7325         if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
7326                 if (!get_unaligned_be32(desc_buf +
7327                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
7328                         goto wb_disabled;
7329         } else {
7330                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7331                         d_lu_wb_buf_alloc = 0;
7332                         ufshcd_read_unit_desc_param(hba,
7333                                         lun,
7334                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7335                                         (u8 *)&d_lu_wb_buf_alloc,
7336                                         sizeof(d_lu_wb_buf_alloc));
7337                         if (d_lu_wb_buf_alloc) {
7338                                 dev_info->wb_dedicated_lu = lun;
7339                                 break;
7340                         }
7341                 }
7342
7343                 if (!d_lu_wb_buf_alloc)
7344                         goto wb_disabled;
7345         }
7346         return;
7347
7348 wb_disabled:
7349         hba->caps &= ~UFSHCD_CAP_WB_EN;
7350 }
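
/*
 * Worked example (illustrative only): on a device provisioned with a
 * dedicated WriteBooster buffer, the per-LUN scan above may find a non-zero
 * UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS value for, say, LUN 2, so
 * wb_dedicated_lu becomes 2 and UFSHCD_CAP_WB_EN stays set. If no LUN (or,
 * in shared mode, the shared allocation attribute) reports any allocation
 * units, the capability bit is cleared and WriteBooster is treated as
 * unsupported.
 */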
7351
7352 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7353 {
7354         struct ufs_dev_fix *f;
7355         struct ufs_dev_info *dev_info = &hba->dev_info;
7356
7357         if (!fixups)
7358                 return;
7359
7360         for (f = fixups; f->quirk; f++) {
7361                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7362                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
7363                      ((dev_info->model &&
7364                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7365                       !strcmp(f->model, UFS_ANY_MODEL)))
7366                         hba->dev_quirks |= f->quirk;
7367         }
7368 }
7369 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7370
7371 static void ufs_fixup_device_setup(struct ufs_hba *hba)
7372 {
7373         /* fix by general quirk table */
7374         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7375
7376         /* allow vendors to fix quirks */
7377         ufshcd_vops_fixup_dev_quirks(hba);
7378 }
7379
7380 static int ufs_get_device_desc(struct ufs_hba *hba)
7381 {
7382         int err;
7383         u8 model_index;
7384         u8 *desc_buf;
7385         struct ufs_dev_info *dev_info = &hba->dev_info;
7386
7387         desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7388         if (!desc_buf) {
7389                 err = -ENOMEM;
7390                 goto out;
7391         }
7392
7393         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7394                                      hba->desc_size[QUERY_DESC_IDN_DEVICE]);
7395         if (err) {
7396                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7397                         __func__, err);
7398                 goto out;
7399         }
7400
7401         /*
7402          * getting vendor (manufacturerID) and Bank Index in big endian
7403          * format
7404          */
7405         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7406                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7407
7408         /* getting Specification Version in big endian format */
7409         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7410                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7411
7412         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7413
7414         err = ufshcd_read_string_desc(hba, model_index,
7415                                       &dev_info->model, SD_ASCII_STD);
7416         if (err < 0) {
7417                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7418                         __func__, err);
7419                 goto out;
7420         }
7421
7422         ufs_fixup_device_setup(hba);
7423
7424         ufshcd_wb_probe(hba, desc_buf);
7425
7426         /*
7427          * ufshcd_read_string_desc returns the size of the string on success,
7428          * so reset the error value here.
7429          */
7430         err = 0;
7431
7432 out:
7433         kfree(desc_buf);
7434         return err;
7435 }
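
/*
 * Worked example: the descriptor stores wSpecVersion big-endian, so a device
 * returning bytes 0x03 0x10 at DEVICE_DESC_PARAM_SPEC_VER yields
 * dev_info->wspecversion == 0x0310 (UFS 3.1), which is what ufshcd_wb_probe()
 * above compares against. wManufacturerID is assembled the same way from the
 * two bytes at DEVICE_DESC_PARAM_MANF_ID.
 */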
7436
7437 static void ufs_put_device_desc(struct ufs_hba *hba)
7438 {
7439         struct ufs_dev_info *dev_info = &hba->dev_info;
7440
7441         kfree(dev_info->model);
7442         dev_info->model = NULL;
7443 }
7444
7445 /**
7446  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7447  * @hba: per-adapter instance
7448  *
7449  * The PA_TActivate parameter can be tuned manually if the UniPro version is
7450  * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
7451  * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. Tuning it to this optimal
7452  * value can help reduce the hibern8 exit latency.
7453  *
7454  * Returns zero on success, non-zero error value on failure.
7455  */
7456 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7457 {
7458         int ret = 0;
7459         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7460
7461         ret = ufshcd_dme_peer_get(hba,
7462                                   UIC_ARG_MIB_SEL(
7463                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7464                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7465                                   &peer_rx_min_activatetime);
7466         if (ret)
7467                 goto out;
7468
7469         /* make sure proper unit conversion is applied */
7470         tuned_pa_tactivate =
7471                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7472                  / PA_TACTIVATE_TIME_UNIT_US);
7473         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7474                              tuned_pa_tactivate);
7475
7476 out:
7477         return ret;
7478 }
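
/*
 * Numeric sketch (unit macro values assumed for illustration only, see
 * unipro.h for the real ones): if RX_MIN_ACTIVATETIME_UNIT_US were 100 and
 * PA_TACTIVATE_TIME_UNIT_US were 10, a peer RX_MIN_ACTIVATETIME_CAPABILITY
 * of 1 would be programmed as PA_TACTIVATE = (1 * 100) / 10 = 10, i.e. the
 * same 100 us expressed in PA_TACTIVATE units.
 */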
7479
7480 /**
7481  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7482  * @hba: per-adapter instance
7483  *
7484  * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
7485  * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7486  * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7487  * Tuning it to this optimal value can help reduce the hibern8 exit latency.
7488  *
7489  * Returns zero on success, non-zero error value on failure.
7490  */
7491 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7492 {
7493         int ret = 0;
7494         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7495         u32 max_hibern8_time, tuned_pa_hibern8time;
7496
7497         ret = ufshcd_dme_get(hba,
7498                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7499                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7500                                   &local_tx_hibern8_time_cap);
7501         if (ret)
7502                 goto out;
7503
7504         ret = ufshcd_dme_peer_get(hba,
7505                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7506                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7507                                   &peer_rx_hibern8_time_cap);
7508         if (ret)
7509                 goto out;
7510
7511         max_hibern8_time = max(local_tx_hibern8_time_cap,
7512                                peer_rx_hibern8_time_cap);
7513         /* make sure proper unit conversion is applied */
7514         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7515                                 / PA_HIBERN8_TIME_UNIT_US);
7516         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7517                              tuned_pa_hibern8time);
7518 out:
7519         return ret;
7520 }
7521
7522 /**
7523  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7524  * less than device PA_TACTIVATE time.
7525  * @hba: per-adapter instance
7526  *
7527  * Some UFS devices require the host PA_TACTIVATE to be lower than the device
7528  * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
7529  * enabled for such devices.
7530  *
7531  * Returns zero on success, non-zero error value on failure.
7532  */
7533 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7534 {
7535         int ret = 0;
7536         u32 granularity, peer_granularity;
7537         u32 pa_tactivate, peer_pa_tactivate;
7538         u32 pa_tactivate_us, peer_pa_tactivate_us;
7539         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7540
7541         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7542                                   &granularity);
7543         if (ret)
7544                 goto out;
7545
7546         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7547                                   &peer_granularity);
7548         if (ret)
7549                 goto out;
7550
7551         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7552             (granularity > PA_GRANULARITY_MAX_VAL)) {
7553                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
7554                         __func__, granularity);
7555                 return -EINVAL;
7556         }
7557
7558         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7559             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7560                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
7561                         __func__, peer_granularity);
7562                 return -EINVAL;
7563         }
7564
7565         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7566         if (ret)
7567                 goto out;
7568
7569         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7570                                   &peer_pa_tactivate);
7571         if (ret)
7572                 goto out;
7573
7574         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7575         peer_pa_tactivate_us = peer_pa_tactivate *
7576                              gran_to_us_table[peer_granularity - 1];
7577
7578         if (pa_tactivate_us > peer_pa_tactivate_us) {
7579                 u32 new_peer_pa_tactivate;
7580
7581                 new_peer_pa_tactivate = pa_tactivate_us /
7582                                       gran_to_us_table[peer_granularity - 1];
7583                 new_peer_pa_tactivate++;
7584                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7585                                           new_peer_pa_tactivate);
7586         }
7587
7588 out:
7589         return ret;
7590 }
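
/*
 * Worked example based on gran_to_us_table[] above: with host PA_GRANULARITY
 * = 4 (16 us/unit) and host PA_TACTIVATE = 5, the host time is 80 us; with
 * peer PA_GRANULARITY = 2 (4 us/unit) and peer PA_TACTIVATE = 10, the peer
 * time is 40 us. Since 80 us > 40 us, the peer is reprogrammed to
 * 80 / 4 + 1 = 21 units (84 us), making the device side strictly larger than
 * the host side as the quirk requires.
 */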
7591
7592 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7593 {
7594         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7595                 ufshcd_tune_pa_tactivate(hba);
7596                 ufshcd_tune_pa_hibern8time(hba);
7597         }
7598
7599         ufshcd_vops_apply_dev_quirks(hba);
7600
7601         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7602                 /* set 1ms timeout for PA_TACTIVATE */
7603                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7604
7605         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7606                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7607 }
7608
7609 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7610 {
7611         hba->ufs_stats.hibern8_exit_cnt = 0;
7612         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7613         hba->req_abort_count = 0;
7614 }
7615
7616 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7617 {
7618         int err;
7619         size_t buff_len;
7620         u8 *desc_buf;
7621
7622         buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7623         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7624         if (!desc_buf) {
7625                 err = -ENOMEM;
7626                 goto out;
7627         }
7628
7629         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7630                                      desc_buf, buff_len);
7631         if (err) {
7632                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7633                                 __func__, err);
7634                 goto out;
7635         }
7636
7637         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7638                 hba->dev_info.max_lu_supported = 32;
7639         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7640                 hba->dev_info.max_lu_supported = 8;
7641
7642 out:
7643         kfree(desc_buf);
7644         return err;
7645 }
7646
7647 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7648         {19200000, REF_CLK_FREQ_19_2_MHZ},
7649         {26000000, REF_CLK_FREQ_26_MHZ},
7650         {38400000, REF_CLK_FREQ_38_4_MHZ},
7651         {52000000, REF_CLK_FREQ_52_MHZ},
7652         {0, REF_CLK_FREQ_INVAL},
7653 };
7654
7655 static enum ufs_ref_clk_freq
7656 ufs_get_bref_clk_from_hz(unsigned long freq)
7657 {
7658         int i;
7659
7660         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7661                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7662                         return ufs_ref_clk_freqs[i].val;
7663
7664         return REF_CLK_FREQ_INVAL;
7665 }
7666
7667 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7668 {
7669         unsigned long freq;
7670
7671         freq = clk_get_rate(refclk);
7672
7673         hba->dev_ref_clk_freq =
7674                 ufs_get_bref_clk_from_hz(freq);
7675
7676         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7677                 dev_err(hba->dev,
7678                 "invalid ref_clk setting = %ld\n", freq);
7679 }
7680
7681 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7682 {
7683         int err;
7684         u32 ref_clk;
7685         u32 freq = hba->dev_ref_clk_freq;
7686
7687         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7688                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7689
7690         if (err) {
7691                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7692                         err);
7693                 goto out;
7694         }
7695
7696         if (ref_clk == freq)
7697                 goto out; /* nothing to update */
7698
7699         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7700                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7701
7702         if (err) {
7703                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7704                         ufs_ref_clk_freqs[freq].freq_hz);
7705                 goto out;
7706         }
7707
7708         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7709                         ufs_ref_clk_freqs[freq].freq_hz);
7710
7711 out:
7712         return err;
7713 }
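
/*
 * Example flow (values taken from ufs_ref_clk_freqs[] above): on a board
 * whose "ref_clk" runs at 26000000 Hz, ufshcd_parse_dev_ref_clk_freq() sets
 * hba->dev_ref_clk_freq = REF_CLK_FREQ_26_MHZ. If the device's bRefClkFreq
 * attribute then reads back a different index, the code above rewrites the
 * attribute so that the device's notion of the reference clock matches what
 * the board actually supplies.
 */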
7714
7715 static int ufshcd_device_params_init(struct ufs_hba *hba)
7716 {
7717         bool flag;
7718         int ret, i;
7719
7720          /* Init device descriptor sizes */
7721         for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7722                 hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7723
7724         /* Init UFS geometry descriptor related parameters */
7725         ret = ufshcd_device_geo_params_init(hba);
7726         if (ret)
7727                 goto out;
7728
7729         /* Check and apply UFS device quirks */
7730         ret = ufs_get_device_desc(hba);
7731         if (ret) {
7732                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7733                         __func__, ret);
7734                 goto out;
7735         }
7736
7737         ufshcd_get_ref_clk_gating_wait(hba);
7738
7739         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7740                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7741                 hba->dev_info.f_power_on_wp_en = flag;
7742
7743         /* Probe maximum power mode co-supported by both UFS host and device */
7744         if (ufshcd_get_max_pwr_mode(hba))
7745                 dev_err(hba->dev,
7746                         "%s: Failed getting max supported power mode\n",
7747                         __func__);
7748 out:
7749         return ret;
7750 }
7751
7752 /**
7753  * ufshcd_add_lus - probe and add UFS logical units
7754  * @hba: per-adapter instance
7755  */
7756 static int ufshcd_add_lus(struct ufs_hba *hba)
7757 {
7758         int ret;
7759
7760         /* Add required well known logical units to scsi mid layer */
7761         ret = ufshcd_scsi_add_wlus(hba);
7762         if (ret)
7763                 goto out;
7764
7765         ufshcd_clear_ua_wluns(hba);
7766
7767         /* Initialize devfreq after UFS device is detected */
7768         if (ufshcd_is_clkscaling_supported(hba)) {
7769                 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7770                         &hba->pwr_info,
7771                         sizeof(struct ufs_pa_layer_attr));
7772                 hba->clk_scaling.saved_pwr_info.is_valid = true;
7773                 hba->clk_scaling.is_allowed = true;
7774
7775                 ret = ufshcd_devfreq_init(hba);
7776                 if (ret)
7777                         goto out;
7778
7779                 hba->clk_scaling.is_enabled = true;
7780                 ufshcd_init_clk_scaling_sysfs(hba);
7781         }
7782
7783         ufs_bsg_probe(hba);
7784         scsi_scan_host(hba->host);
7785         pm_runtime_put_sync(hba->dev);
7786
7787 out:
7788         return ret;
7789 }
7790
7791 static int
7792 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
7793
7794 static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
7795 {
7796         struct scsi_device *sdp;
7797         unsigned long flags;
7798         int ret = 0;
7799
7800         spin_lock_irqsave(hba->host->host_lock, flags);
7801         if (wlun == UFS_UPIU_UFS_DEVICE_WLUN)
7802                 sdp = hba->sdev_ufs_device;
7803         else if (wlun == UFS_UPIU_RPMB_WLUN)
7804                 sdp = hba->sdev_rpmb;
7805         else
7806                 BUG();
7807         if (sdp) {
7808                 ret = scsi_device_get(sdp);
7809                 if (!ret && !scsi_device_online(sdp)) {
7810                         ret = -ENODEV;
7811                         scsi_device_put(sdp);
7812                 }
7813         } else {
7814                 ret = -ENODEV;
7815         }
7816         spin_unlock_irqrestore(hba->host->host_lock, flags);
7817         if (ret)
7818                 goto out_err;
7819
7820         ret = ufshcd_send_request_sense(hba, sdp);
7821         scsi_device_put(sdp);
7822 out_err:
7823         if (ret)
7824                 dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
7825                                 __func__, wlun, ret);
7826         return ret;
7827 }
7828
7829 static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
7830 {
7831         int ret = 0;
7832
7833         if (!hba->wlun_dev_clr_ua)
7834                 goto out;
7835
7836         ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
7837         if (!ret)
7838                 ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
7839         if (!ret)
7840                 hba->wlun_dev_clr_ua = false;
7841 out:
7842         if (ret)
7843                 dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
7844                                 __func__, ret);
7845         return ret;
7846 }
7847
7848 /**
7849  * ufshcd_probe_hba - probe hba to detect device and initialize
7850  * @hba: per-adapter instance
7851  * @async: asynchronous execution or not
7852  *
7853  * Execute link-startup and verify device initialization
7854  */
7855 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7856 {
7857         int ret;
7858         unsigned long flags;
7859         ktime_t start = ktime_get();
7860
7861         ret = ufshcd_link_startup(hba);
7862         if (ret)
7863                 goto out;
7864
7865         /* Debug counters initialization */
7866         ufshcd_clear_dbg_ufs_stats(hba);
7867
7868         /* UniPro link is active now */
7869         ufshcd_set_link_active(hba);
7870
7871         /* Verify device initialization by sending NOP OUT UPIU */
7872         ret = ufshcd_verify_dev_init(hba);
7873         if (ret)
7874                 goto out;
7875
7876         /* Initiate UFS initialization and wait until it completes */
7877         ret = ufshcd_complete_dev_init(hba);
7878         if (ret)
7879                 goto out;
7880
7881         /*
7882          * Initialize the UFS device parameters used by the driver; these
7883          * parameters are associated with UFS descriptors.
7884          */
7885         if (async) {
7886                 ret = ufshcd_device_params_init(hba);
7887                 if (ret)
7888                         goto out;
7889         }
7890
7891         ufshcd_tune_unipro_params(hba);
7892
7893         /* UFS device is also active now */
7894         ufshcd_set_ufs_dev_active(hba);
7895         ufshcd_force_reset_auto_bkops(hba);
7896         hba->wlun_dev_clr_ua = true;
7897
7898         /* Gear up to HS gear if supported */
7899         if (hba->max_pwr_info.is_valid) {
7900                 /*
7901                  * Set the right value to bRefClkFreq before attempting to
7902                  * switch to HS gears.
7903                  */
7904                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7905                         ufshcd_set_dev_ref_clk(hba);
7906                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7907                 if (ret) {
7908                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7909                                         __func__, ret);
7910                         goto out;
7911                 }
7912                 ufshcd_print_pwr_info(hba);
7913         }
7914
7915         /*
7916          * bActiveICCLevel is volatile for the UFS device (as per the latest
7917          * v2.1 spec) and for removable UFS cards as well, hence always set the
7918          * parameter. Note: the error handler may issue a device reset, which
7919          * also resets bActiveICCLevel, so it is always safe to set this here.
7920          */
7921         ufshcd_set_active_icc_lvl(hba);
7922
7923         ufshcd_wb_config(hba);
7924         /* Enable Auto-Hibernate if configured */
7925         ufshcd_auto_hibern8_enable(hba);
7926
7927 out:
7928         spin_lock_irqsave(hba->host->host_lock, flags);
7929         if (ret)
7930                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7931         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7932                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7933         spin_unlock_irqrestore(hba->host->host_lock, flags);
7934
7935         trace_ufshcd_init(dev_name(hba->dev), ret,
7936                 ktime_to_us(ktime_sub(ktime_get(), start)),
7937                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7938         return ret;
7939 }
7940
7941 /**
7942  * ufshcd_async_scan - asynchronous execution for probing hba
7943  * @data: data pointer to pass to this function
7944  * @cookie: cookie data
7945  */
7946 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7947 {
7948         struct ufs_hba *hba = (struct ufs_hba *)data;
7949         int ret;
7950
7951         down(&hba->host_sem);
7952         /* Initialize hba, detect and initialize UFS device */
7953         ret = ufshcd_probe_hba(hba, true);
7954         up(&hba->host_sem);
7955         if (ret)
7956                 goto out;
7957
7958         /* Probe and add UFS logical units  */
7959         ret = ufshcd_add_lus(hba);
7960 out:
7961         /*
7962          * If we failed to initialize the device or the device is not
7963          * present, turn off the power/clocks etc.
7964          */
7965         if (ret) {
7966                 pm_runtime_put_sync(hba->dev);
7967                 ufshcd_hba_exit(hba);
7968         }
7969 }
7970
7971 static const struct attribute_group *ufshcd_driver_groups[] = {
7972         &ufs_sysfs_unit_descriptor_group,
7973         &ufs_sysfs_lun_attributes_group,
7974         NULL,
7975 };
7976
7977 static struct ufs_hba_variant_params ufs_hba_vps = {
7978         .hba_enable_delay_us            = 1000,
7979         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
7980         .devfreq_profile.polling_ms     = 100,
7981         .devfreq_profile.target         = ufshcd_devfreq_target,
7982         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
7983         .ondemand_data.upthreshold      = 70,
7984         .ondemand_data.downdifferential = 5,
7985 };
7986
7987 static struct scsi_host_template ufshcd_driver_template = {
7988         .module                 = THIS_MODULE,
7989         .name                   = UFSHCD,
7990         .proc_name              = UFSHCD,
7991         .queuecommand           = ufshcd_queuecommand,
7992         .slave_alloc            = ufshcd_slave_alloc,
7993         .slave_configure        = ufshcd_slave_configure,
7994         .slave_destroy          = ufshcd_slave_destroy,
7995         .change_queue_depth     = ufshcd_change_queue_depth,
7996         .eh_abort_handler       = ufshcd_abort,
7997         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7998         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7999         .this_id                = -1,
8000         .sg_tablesize           = SG_ALL,
8001         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
8002         .can_queue              = UFSHCD_CAN_QUEUE,
8003         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
8004         .max_host_blocked       = 1,
8005         .track_queue_depth      = 1,
8006         .sdev_groups            = ufshcd_driver_groups,
8007         .dma_boundary           = PAGE_SIZE - 1,
8008         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
8009 };
8010
8011 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
8012                                    int ua)
8013 {
8014         int ret;
8015
8016         if (!vreg)
8017                 return 0;
8018
8019         /*
8020          * The "set_load" operation is only required on regulators that have a
8021          * specifically configured current limitation. Otherwise, a zero max_uA
8022          * may cause unexpected behavior when the regulator is enabled or set
8023          * to high power mode.
8024          */
8025         if (!vreg->max_uA)
8026                 return 0;
8027
8028         ret = regulator_set_load(vreg->reg, ua);
8029         if (ret < 0) {
8030                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
8031                                 __func__, vreg->name, ua, ret);
8032         }
8033
8034         return ret;
8035 }
8036
8037 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
8038                                          struct ufs_vreg *vreg)
8039 {
8040         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
8041 }
8042
8043 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
8044                                          struct ufs_vreg *vreg)
8045 {
8046         if (!vreg)
8047                 return 0;
8048
8049         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8050 }
8051
8052 static int ufshcd_config_vreg(struct device *dev,
8053                 struct ufs_vreg *vreg, bool on)
8054 {
8055         int ret = 0;
8056         struct regulator *reg;
8057         const char *name;
8058         int min_uV, uA_load;
8059
8060         BUG_ON(!vreg);
8061
8062         reg = vreg->reg;
8063         name = vreg->name;
8064
8065         if (regulator_count_voltages(reg) > 0) {
8066                 uA_load = on ? vreg->max_uA : 0;
8067                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
8068                 if (ret)
8069                         goto out;
8070
8071                 if (vreg->min_uV && vreg->max_uV) {
8072                         min_uV = on ? vreg->min_uV : 0;
8073                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8074                         if (ret)
8075                                 dev_err(dev,
8076                                         "%s: %s set voltage failed, err=%d\n",
8077                                         __func__, name, ret);
8078                 }
8079         }
8080 out:
8081         return ret;
8082 }
8083
8084 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
8085 {
8086         int ret = 0;
8087
8088         if (!vreg || vreg->enabled)
8089                 goto out;
8090
8091         ret = ufshcd_config_vreg(dev, vreg, true);
8092         if (!ret)
8093                 ret = regulator_enable(vreg->reg);
8094
8095         if (!ret)
8096                 vreg->enabled = true;
8097         else
8098                 dev_err(dev, "%s: %s enable failed, err=%d\n",
8099                                 __func__, vreg->name, ret);
8100 out:
8101         return ret;
8102 }
8103
8104 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
8105 {
8106         int ret = 0;
8107
8108         if (!vreg || !vreg->enabled || vreg->always_on)
8109                 goto out;
8110
8111         ret = regulator_disable(vreg->reg);
8112
8113         if (!ret) {
8114                 /* ignore errors on applying disable config */
8115                 ufshcd_config_vreg(dev, vreg, false);
8116                 vreg->enabled = false;
8117         } else {
8118                 dev_err(dev, "%s: %s disable failed, err=%d\n",
8119                                 __func__, vreg->name, ret);
8120         }
8121 out:
8122         return ret;
8123 }
8124
8125 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
8126 {
8127         int ret = 0;
8128         struct device *dev = hba->dev;
8129         struct ufs_vreg_info *info = &hba->vreg_info;
8130
8131         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
8132         if (ret)
8133                 goto out;
8134
8135         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
8136         if (ret)
8137                 goto out;
8138
8139         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
8140
8141 out:
8142         if (ret) {
8143                 ufshcd_toggle_vreg(dev, info->vccq2, false);
8144                 ufshcd_toggle_vreg(dev, info->vccq, false);
8145                 ufshcd_toggle_vreg(dev, info->vcc, false);
8146         }
8147         return ret;
8148 }
8149
8150 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
8151 {
8152         struct ufs_vreg_info *info = &hba->vreg_info;
8153
8154         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
8155 }
8156
8157 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
8158 {
8159         int ret = 0;
8160
8161         if (!vreg)
8162                 goto out;
8163
8164         vreg->reg = devm_regulator_get(dev, vreg->name);
8165         if (IS_ERR(vreg->reg)) {
8166                 ret = PTR_ERR(vreg->reg);
8167                 dev_err(dev, "%s: %s get failed, err=%d\n",
8168                                 __func__, vreg->name, ret);
8169         }
8170 out:
8171         return ret;
8172 }
8173
8174 static int ufshcd_init_vreg(struct ufs_hba *hba)
8175 {
8176         int ret = 0;
8177         struct device *dev = hba->dev;
8178         struct ufs_vreg_info *info = &hba->vreg_info;
8179
8180         ret = ufshcd_get_vreg(dev, info->vcc);
8181         if (ret)
8182                 goto out;
8183
8184         ret = ufshcd_get_vreg(dev, info->vccq);
8185         if (!ret)
8186                 ret = ufshcd_get_vreg(dev, info->vccq2);
8187 out:
8188         return ret;
8189 }
8190
8191 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
8192 {
8193         struct ufs_vreg_info *info = &hba->vreg_info;
8194
8195         if (info)
8196                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
8197
8198         return 0;
8199 }
8200
8201 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
8202 {
8203         int ret = 0;
8204         struct ufs_clk_info *clki;
8205         struct list_head *head = &hba->clk_list_head;
8206         unsigned long flags;
8207         ktime_t start = ktime_get();
8208         bool clk_state_changed = false;
8209
8210         if (list_empty(head))
8211                 goto out;
8212
8213         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8214         if (ret)
8215                 return ret;
8216
8217         list_for_each_entry(clki, head, list) {
8218                 if (!IS_ERR_OR_NULL(clki->clk)) {
8219                         /*
8220                          * Don't disable clocks which are needed
8221                          * to keep the link active.
8222                          */
8223                         if (ufshcd_is_link_active(hba) &&
8224                             clki->keep_link_active)
8225                                 continue;
8226
8227                         clk_state_changed = on ^ clki->enabled;
8228                         if (on && !clki->enabled) {
8229                                 ret = clk_prepare_enable(clki->clk);
8230                                 if (ret) {
8231                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
8232                                                 __func__, clki->name, ret);
8233                                         goto out;
8234                                 }
8235                         } else if (!on && clki->enabled) {
8236                                 clk_disable_unprepare(clki->clk);
8237                         }
8238                         clki->enabled = on;
8239                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
8240                                         clki->name, on ? "en" : "dis");
8241                 }
8242         }
8243
8244         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8245         if (ret)
8246                 return ret;
8247
8248 out:
8249         if (ret) {
8250                 list_for_each_entry(clki, head, list) {
8251                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
8252                                 clk_disable_unprepare(clki->clk);
8253                 }
8254         } else if (!ret && on) {
8255                 spin_lock_irqsave(hba->host->host_lock, flags);
8256                 hba->clk_gating.state = CLKS_ON;
8257                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8258                                         hba->clk_gating.state);
8259                 spin_unlock_irqrestore(hba->host->host_lock, flags);
8260         }
8261
8262         if (clk_state_changed)
8263                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
8264                         (on ? "on" : "off"),
8265                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
8266         return ret;
8267 }
8268
8269 static int ufshcd_init_clocks(struct ufs_hba *hba)
8270 {
8271         int ret = 0;
8272         struct ufs_clk_info *clki;
8273         struct device *dev = hba->dev;
8274         struct list_head *head = &hba->clk_list_head;
8275
8276         if (list_empty(head))
8277                 goto out;
8278
8279         list_for_each_entry(clki, head, list) {
8280                 if (!clki->name)
8281                         continue;
8282
8283                 clki->clk = devm_clk_get(dev, clki->name);
8284                 if (IS_ERR(clki->clk)) {
8285                         ret = PTR_ERR(clki->clk);
8286                         dev_err(dev, "%s: %s clk get failed, %d\n",
8287                                         __func__, clki->name, ret);
8288                         goto out;
8289                 }
8290
8291                 /*
8292                  * Parse device ref clk freq as per device tree "ref_clk".
8293                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8294                  * in ufshcd_alloc_host().
8295                  */
8296                 if (!strcmp(clki->name, "ref_clk"))
8297                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
8298
8299                 if (clki->max_freq) {
8300                         ret = clk_set_rate(clki->clk, clki->max_freq);
8301                         if (ret) {
8302                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
8303                                         __func__, clki->name,
8304                                         clki->max_freq, ret);
8305                                 goto out;
8306                         }
8307                         clki->curr_freq = clki->max_freq;
8308                 }
8309                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
8310                                 clki->name, clk_get_rate(clki->clk));
8311         }
8312 out:
8313         return ret;
8314 }
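
/*
 * Illustrative device-tree fragment showing where the clk_list_head entries
 * and the "ref_clk" rate typically come from (node name, address and rates
 * are made up; property names follow the common ufshcd platform binding and
 * are not defined in this file):
 *
 *	ufshc@4804000 {
 *		...
 *		clocks = <&core_clk>, <&device_ref_clk>;
 *		clock-names = "core_clk", "ref_clk";
 *		freq-table-hz = <50000000 200000000>, <26000000 26000000>;
 *	};
 *
 * With such a node, the "ref_clk" entry in the loop above triggers
 * ufshcd_parse_dev_ref_clk_freq() and the 26 MHz rate maps to
 * REF_CLK_FREQ_26_MHZ.
 */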
8315
8316 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
8317 {
8318         int err = 0;
8319
8320         if (!hba->vops)
8321                 goto out;
8322
8323         err = ufshcd_vops_init(hba);
8324         if (err)
8325                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
8326                         __func__, ufshcd_get_var_name(hba), err);
8327 out:
8328         return err;
8329 }
8330
8331 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
8332 {
8333         if (!hba->vops)
8334                 return;
8335
8336         ufshcd_vops_exit(hba);
8337 }
8338
8339 static int ufshcd_hba_init(struct ufs_hba *hba)
8340 {
8341         int err;
8342
8343         /*
8344          * Handle host controller power separately from the UFS device power
8345          * rails, as this makes it easier to control the UFS host controller
8346          * power collapse, which is different from UFS device power collapse.
8347          * Also, enable the host controller power before we go ahead with the
8348          * rest of the initialization here.
8349          */
8350         err = ufshcd_init_hba_vreg(hba);
8351         if (err)
8352                 goto out;
8353
8354         err = ufshcd_setup_hba_vreg(hba, true);
8355         if (err)
8356                 goto out;
8357
8358         err = ufshcd_init_clocks(hba);
8359         if (err)
8360                 goto out_disable_hba_vreg;
8361
8362         err = ufshcd_setup_clocks(hba, true);
8363         if (err)
8364                 goto out_disable_hba_vreg;
8365
8366         err = ufshcd_init_vreg(hba);
8367         if (err)
8368                 goto out_disable_clks;
8369
8370         err = ufshcd_setup_vreg(hba, true);
8371         if (err)
8372                 goto out_disable_clks;
8373
8374         err = ufshcd_variant_hba_init(hba);
8375         if (err)
8376                 goto out_disable_vreg;
8377
8378         ufs_debugfs_hba_init(hba);
8379
8380         hba->is_powered = true;
8381         goto out;
8382
8383 out_disable_vreg:
8384         ufshcd_setup_vreg(hba, false);
8385 out_disable_clks:
8386         ufshcd_setup_clocks(hba, false);
8387 out_disable_hba_vreg:
8388         ufshcd_setup_hba_vreg(hba, false);
8389 out:
8390         return err;
8391 }
8392
8393 static void ufshcd_hba_exit(struct ufs_hba *hba)
8394 {
8395         if (hba->is_powered) {
8396                 ufshcd_exit_clk_scaling(hba);
8397                 ufshcd_exit_clk_gating(hba);
8398                 if (hba->eh_wq)
8399                         destroy_workqueue(hba->eh_wq);
8400                 ufs_debugfs_hba_exit(hba);
8401                 ufshcd_variant_hba_exit(hba);
8402                 ufshcd_setup_vreg(hba, false);
8403                 ufshcd_setup_clocks(hba, false);
8404                 ufshcd_setup_hba_vreg(hba, false);
8405                 hba->is_powered = false;
8406                 ufs_put_device_desc(hba);
8407         }
8408 }
8409
8410 static int
8411 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8412 {
8413         unsigned char cmd[6] = {REQUEST_SENSE,
8414                                 0,
8415                                 0,
8416                                 0,
8417                                 UFS_SENSE_SIZE,
8418                                 0};
8419         char *buffer;
8420         int ret;
8421
8422         buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
8423         if (!buffer) {
8424                 ret = -ENOMEM;
8425                 goto out;
8426         }
8427
8428         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
8429                         UFS_SENSE_SIZE, NULL, NULL,
8430                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
8431         if (ret)
8432                 pr_err("%s: failed with err %d\n", __func__, ret);
8433
8434         kfree(buffer);
8435 out:
8436         return ret;
8437 }
8438
8439 /**
8440  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8441  *                           power mode
8442  * @hba: per adapter instance
8443  * @pwr_mode: device power mode to set
8444  *
8445  * Returns 0 if requested power mode is set successfully
8446  * Returns non-zero if failed to set the requested power mode
8447  */
8448 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8449                                      enum ufs_dev_pwr_mode pwr_mode)
8450 {
8451         unsigned char cmd[6] = { START_STOP };
8452         struct scsi_sense_hdr sshdr;
8453         struct scsi_device *sdp;
8454         unsigned long flags;
8455         int ret;
8456
8457         spin_lock_irqsave(hba->host->host_lock, flags);
8458         sdp = hba->sdev_ufs_device;
8459         if (sdp) {
8460                 ret = scsi_device_get(sdp);
8461                 if (!ret && !scsi_device_online(sdp)) {
8462                         ret = -ENODEV;
8463                         scsi_device_put(sdp);
8464                 }
8465         } else {
8466                 ret = -ENODEV;
8467         }
8468         spin_unlock_irqrestore(hba->host->host_lock, flags);
8469
8470         if (ret)
8471                 return ret;
8472
8473         /*
8474          * If SCSI commands fail, the SCSI mid-layer schedules SCSI error
8475          * handling, which would wait for the host to be resumed. Since we know
8476          * we are functional while we are here, skip host resume in the error
8477          * handling context.
8478          */
8479         hba->host->eh_noresume = 1;
8480         ufshcd_clear_ua_wluns(hba);
8481
8482         cmd[4] = pwr_mode << 4;
8483
8484         /*
8485          * This function is generally called from the power management
8486          * callbacks, hence set the RQF_PM flag so that it doesn't resume the
8487          * already suspended children.
8488          */
8489         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8490                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8491         if (ret) {
8492                 sdev_printk(KERN_WARNING, sdp,
8493                             "START_STOP failed for power mode: %d, result %x\n",
8494                             pwr_mode, ret);
8495                 if (driver_byte(ret) == DRIVER_SENSE)
8496                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
8497         }
8498
8499         if (!ret)
8500                 hba->curr_dev_pwr_mode = pwr_mode;
8501
8502         scsi_device_put(sdp);
8503         hba->host->eh_noresume = 0;
8504         return ret;
8505 }
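
/*
 * Worked example (power mode values assumed from ufs.h): requesting
 * UFS_SLEEP_PWR_MODE (2) makes cmd[4] = 2 << 4 = 0x20, i.e. POWER CONDITION
 * = 2h in byte 4, bits 7..4, of the START STOP UNIT CDB, which the "UFS
 * Device" W-LU interprets as a transition to the UFS-Sleep power mode.
 */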
8506
8507 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8508                                         enum uic_link_state req_link_state,
8509                                         int check_for_bkops)
8510 {
8511         int ret = 0;
8512
8513         if (req_link_state == hba->uic_link_state)
8514                 return 0;
8515
8516         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8517                 ret = ufshcd_uic_hibern8_enter(hba);
8518                 if (!ret) {
8519                         ufshcd_set_link_hibern8(hba);
8520                 } else {
8521                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8522                                         __func__, ret);
8523                         goto out;
8524                 }
8525         }
8526         /*
8527          * If auto-bkops is enabled, the link can't be turned off because
8528          * turning off the link would also turn off the device, except in the
8529          * case of DeepSleep, where the device is expected to remain powered.
8530          */
8531         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8532                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
8533                 /*
8534                  * Let's make sure that the link is in low power mode; we currently
8535                  * do this by putting the link in Hibern8. Another way to put the
8536                  * link in low power mode is to send a DME end point reset to the
8537                  * device and then send the DME reset command to the local UniPro,
8538                  * but putting the link in Hibern8 is much faster.
8539                  *
8540                  * Note also that putting the link in Hibern8 is a requirement
8541                  * for entering DeepSleep.
8542                  */
8543                 ret = ufshcd_uic_hibern8_enter(hba);
8544                 if (ret) {
8545                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8546                                         __func__, ret);
8547                         goto out;
8548                 }
8549                 /*
8550                  * Change controller state to "reset state" which
8551                  * should also put the link in off/reset state
8552                  */
8553                 ufshcd_hba_stop(hba);
8554                 /*
8555                  * TODO: Check if we need any delay to make sure that
8556                  * controller is reset
8557                  */
8558                 ufshcd_set_link_off(hba);
8559         }
8560
8561 out:
8562         return ret;
8563 }
8564
8565 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8566 {
8567         bool vcc_off = false;
8568
8569         /*
8570          * It seems some UFS devices may keep drawing more than the sleep current
8571          * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
8572          * To avoid this situation, add a 2ms delay before putting these UFS
8573          * rails into LPM mode.
8574          */
8575         if (!ufshcd_is_link_active(hba) &&
8576             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8577                 usleep_range(2000, 2100);
8578
8579         /*
8580          * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
8581          * some power.
8582          *
8583          * If the UFS device and link are in the OFF state, all power supplies
8584          * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is not
8585          * required. If the UFS link is inactive (Hibern8 or OFF state) and the
8586          * device is in sleep state, put the VCCQ & VCCQ2 rails into LPM mode.
8587          *
8588          * Ignore the error returned by ufshcd_toggle_vreg() as the device is
8589          * anyway in a low power state, which saves some power.
8590          *
8591          * If Write Booster is enabled and the device needs to flush the WB
8592          * buffer OR if bkops status is urgent for WB, keep Vcc on.
8593          */
8594         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8595             !hba->dev_info.is_lu_power_on_wp) {
8596                 ufshcd_setup_vreg(hba, false);
8597                 vcc_off = true;
8598         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8599                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8600                 vcc_off = true;
8601                 if (!ufshcd_is_link_active(hba)) {
8602                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8603                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8604                 }
8605         }
8606
8607         /*
8608          * Some UFS devices require a delay after the VCC power rail is turned off.
8609          */
8610         if (vcc_off && hba->vreg_info.vcc &&
8611                 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8612                 usleep_range(5000, 5100);
8613 }
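/*
 * Informational note on the rail handling in ufshcd_vreg_set_lpm() above
 * (a summary of the cases handled by the code, not additional behaviour):
 *
 *   device poweroff + link off + no power-on WP  -> VCC, VCCQ and VCCQ2 off
 *   device not active (e.g. UFS_Sleep)           -> VCC off
 *     ... and link inactive as well              -> VCCQ and VCCQ2 in LPM
 *
 * The optional 2 ms and 5 ms delays are applied only for devices with the
 * corresponding DELAY_BEFORE_LPM / DELAY_AFTER_LPM quirks.
 */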
8614
8615 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8616 {
8617         int ret = 0;
8618
8619         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8620             !hba->dev_info.is_lu_power_on_wp) {
8621                 ret = ufshcd_setup_vreg(hba, true);
8622         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8623                 if (!ufshcd_is_link_active(hba)) {
8624                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8625                         if (ret)
8626                                 goto vcc_disable;
8627                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8628                         if (ret)
8629                                 goto vccq_lpm;
8630                 }
8631                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8632         }
8633         goto out;
8634
8635 vccq_lpm:
8636         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8637 vcc_disable:
8638         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8639 out:
8640         return ret;
8641 }
8642
8643 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8644 {
8645         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8646                 ufshcd_setup_hba_vreg(hba, false);
8647 }
8648
8649 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8650 {
8651         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
8652                 ufshcd_setup_hba_vreg(hba, true);
8653 }
8654
8655 /**
8656  * ufshcd_suspend - helper function for suspend operations
8657  * @hba: per adapter instance
8658  * @pm_op: desired low power operation type
8659  *
8660  * This function will try to put the UFS device and link into low power
8661  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8662  * (System PM level).
8663  *
8664  * If this function is called during shutdown, it will make sure that
8665  * both the UFS device and the UFS link are powered off.
8666  *
8667  * NOTE: UFS device & link must be active before we enter this function.
8668  *
8669  * Returns 0 for success and non-zero for failure
8670  */
8671 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8672 {
8673         int ret = 0;
8674         int check_for_bkops;
8675         enum ufs_pm_level pm_lvl;
8676         enum ufs_dev_pwr_mode req_dev_pwr_mode;
8677         enum uic_link_state req_link_state;
8678
8679         hba->pm_op_in_progress = 1;
8680         if (!ufshcd_is_shutdown_pm(pm_op)) {
8681                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8682                          hba->rpm_lvl : hba->spm_lvl;
8683                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8684                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8685         } else {
8686                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8687                 req_link_state = UIC_LINK_OFF_STATE;
8688         }
8689
8690         /*
8691          * If we can't transition into any of the low power modes,
8692          * just gate the clocks.
8693          */
8694         ufshcd_hold(hba, false);
8695         hba->clk_gating.is_suspended = true;
8696
8697         if (ufshcd_is_clkscaling_supported(hba))
8698                 ufshcd_clk_scaling_suspend(hba, true);
8699
8700         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8701                         req_link_state == UIC_LINK_ACTIVE_STATE) {
8702                 goto disable_clks;
8703         }
8704
8705         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8706             (req_link_state == hba->uic_link_state))
8707                 goto enable_gating;
8708
8709         /* UFS device & link must be active before we enter this function */
8710         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8711                 ret = -EINVAL;
8712                 goto enable_gating;
8713         }
8714
8715         if (ufshcd_is_runtime_pm(pm_op)) {
8716                 if (ufshcd_can_autobkops_during_suspend(hba)) {
8717                         /*
8718                          * The device is idle with no requests in the queue,
8719                          * so allow background operations if the bkops status
8720                          * shows that performance might be impacted.
8721                          */
8722                         ret = ufshcd_urgent_bkops(hba);
8723                         if (ret)
8724                                 goto enable_gating;
8725                 } else {
8726                         /* make sure that auto bkops is disabled */
8727                         ufshcd_disable_auto_bkops(hba);
8728                 }
8729                 /*
8730                  * If the device needs to do BKOPS or a WB buffer flush during
8731                  * Hibern8, keep the device power mode as "active power mode"
8732                  * and keep the VCC supply on.
8733                  */
8734                 hba->dev_info.b_rpm_dev_flush_capable =
8735                         hba->auto_bkops_enabled ||
8736                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8737                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8738                         ufshcd_is_auto_hibern8_enabled(hba))) &&
8739                         ufshcd_wb_need_flush(hba));
8740         }
8741
8742         flush_work(&hba->eeh_work);
8743
8744         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8745                 if (!ufshcd_is_runtime_pm(pm_op))
8746                         /* ensure that bkops is disabled */
8747                         ufshcd_disable_auto_bkops(hba);
8748
8749                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8750                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8751                         if (ret)
8752                                 goto enable_gating;
8753                 }
8754         }
8755
8756         /*
8757          * In the case of DeepSleep, the device is expected to remain powered
8758          * with the link off, so do not check for bkops.
8759          */
8760         check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
8761         ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
8762         if (ret)
8763                 goto set_dev_active;
8764
8765 disable_clks:
8766         /*
8767          * Call the vendor specific suspend callback. As these callbacks may
8768          * access vendor specific host controller register space, call them
8769          * while the host clocks are still on.
8770          */
8771         ret = ufshcd_vops_suspend(hba, pm_op);
8772         if (ret)
8773                 goto set_link_active;
8774         /*
8775          * Disable the host IRQ, as there won't be any host controller
8776          * transactions expected until resume.
8777          */
8778         ufshcd_disable_irq(hba);
8779
8780         ufshcd_setup_clocks(hba, false);
8781
8782         if (ufshcd_is_clkgating_allowed(hba)) {
8783                 hba->clk_gating.state = CLKS_OFF;
8784                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8785                                         hba->clk_gating.state);
8786         }
8787
8788         ufshcd_vreg_set_lpm(hba);
8789
8790         /* Put the host controller in low power mode if possible */
8791         ufshcd_hba_vreg_set_lpm(hba);
8792         goto out;
8793
8794 set_link_active:
8795         ufshcd_vreg_set_hpm(hba);
8796         /*
8797          * Device hardware reset is required to exit DeepSleep. Also, for
8798          * DeepSleep, the link is off so host reset and restore will be done
8799          * further below.
8800          */
8801         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8802                 ufshcd_device_reset(hba);
8803                 WARN_ON(!ufshcd_is_link_off(hba));
8804         }
8805         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8806                 ufshcd_set_link_active(hba);
8807         else if (ufshcd_is_link_off(hba))
8808                 ufshcd_host_reset_and_restore(hba);
8809 set_dev_active:
8810         /* Can also get here needing to exit DeepSleep */
8811         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
8812                 ufshcd_device_reset(hba);
8813                 ufshcd_host_reset_and_restore(hba);
8814         }
8815         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8816                 ufshcd_disable_auto_bkops(hba);
8817 enable_gating:
8818         if (ufshcd_is_clkscaling_supported(hba))
8819                 ufshcd_clk_scaling_suspend(hba, false);
8820
8821         hba->clk_gating.is_suspended = false;
8822         hba->dev_info.b_rpm_dev_flush_capable = false;
8823         ufshcd_clear_ua_wluns(hba);
8824         ufshcd_release(hba);
8825 out:
8826         if (hba->dev_info.b_rpm_dev_flush_capable) {
8827                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8828                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8829         }
8830
8831         hba->pm_op_in_progress = 0;
8832
8833         if (ret)
8834                 ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
8835         return ret;
8836 }
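/*
 * Illustrative example of how the pm_lvl handling at the top of
 * ufshcd_suspend() plays out (informational only):
 *
 *   - For runtime/system PM, the target states come from rpm_lvl/spm_lvl.
 *     With the defaults set in ufshcd_init() below, that is
 *     UFS_SLEEP_PWR_MODE for the device and UIC_LINK_HIBERN8_STATE for the
 *     link.
 *   - For shutdown, the targets are hard-coded to UFS_POWERDOWN_PWR_MODE and
 *     UIC_LINK_OFF_STATE, so both the device and the link end up powered off.
 */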
8837
8838 /**
8839  * ufshcd_resume - helper function for resume operations
8840  * @hba: per adapter instance
8841  * @pm_op: runtime PM or system PM
8842  *
8843  * This function basically brings the UFS device, UniPro link and controller
8844  * to active state.
8845  *
8846  * Returns 0 for success and non-zero for failure
8847  */
8848 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8849 {
8850         int ret;
8851         enum uic_link_state old_link_state;
8852
8853         hba->pm_op_in_progress = 1;
8854         old_link_state = hba->uic_link_state;
8855
8856         ufshcd_hba_vreg_set_hpm(hba);
8857         ret = ufshcd_vreg_set_hpm(hba);
8858         if (ret)
8859                 goto out;
8860
8861         /* Make sure clocks are enabled before accessing controller */
8862         ret = ufshcd_setup_clocks(hba, true);
8863         if (ret)
8864                 goto disable_vreg;
8865
8866         /* enable the host irq as host controller would be active soon */
8867         ufshcd_enable_irq(hba);
8868
8869         /*
8870          * Call the vendor specific resume callback. As these callbacks may
8871          * access vendor specific host controller register space, call them
8872          * when the host clocks are ON.
8873          */
8874         ret = ufshcd_vops_resume(hba, pm_op);
8875         if (ret)
8876                 goto disable_irq_and_vops_clks;
8877
8878         /* For DeepSleep, the only supported option is to have the link off */
8879         WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
8880
8881         if (ufshcd_is_link_hibern8(hba)) {
8882                 ret = ufshcd_uic_hibern8_exit(hba);
8883                 if (!ret) {
8884                         ufshcd_set_link_active(hba);
8885                 } else {
8886                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8887                                         __func__, ret);
8888                         goto vendor_suspend;
8889                 }
8890         } else if (ufshcd_is_link_off(hba)) {
8891                 /*
8892                  * A full initialization of the host and the device is
8893                  * required since the link was put into the off state
8894                  * during suspend. Note that in the case of DeepSleep,
8895                  * the device will exit DeepSleep due to the device reset.
8896                  */
8897                 ret = ufshcd_reset_and_restore(hba);
8898                 /*
8899                  * ufshcd_reset_and_restore() should have already
8900                  * set the link state as active
8901                  */
8902                 if (ret || !ufshcd_is_link_active(hba))
8903                         goto vendor_suspend;
8904         }
8905
8906         if (!ufshcd_is_ufs_dev_active(hba)) {
8907                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8908                 if (ret)
8909                         goto set_old_link_state;
8910         }
8911
8912         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8913                 ufshcd_enable_auto_bkops(hba);
8914         else
8915                 /*
8916                  * If BKOPS is urgently needed at this moment, keep
8917                  * auto-bkops enabled, otherwise disable it.
8918                  */
8919                 ufshcd_urgent_bkops(hba);
8920
8921         hba->clk_gating.is_suspended = false;
8922
8923         if (ufshcd_is_clkscaling_supported(hba))
8924                 ufshcd_clk_scaling_suspend(hba, false);
8925
8926         /* Enable Auto-Hibernate if configured */
8927         ufshcd_auto_hibern8_enable(hba);
8928
8929         if (hba->dev_info.b_rpm_dev_flush_capable) {
8930                 hba->dev_info.b_rpm_dev_flush_capable = false;
8931                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8932         }
8933
8934         ufshcd_clear_ua_wluns(hba);
8935
8936         /* Schedule clock gating in case there is no access to the UFS device yet */
8937         ufshcd_release(hba);
8938
8939         goto out;
8940
8941 set_old_link_state:
8942         ufshcd_link_state_transition(hba, old_link_state, 0);
8943 vendor_suspend:
8944         ufshcd_vops_suspend(hba, pm_op);
8945 disable_irq_and_vops_clks:
8946         ufshcd_disable_irq(hba);
8947         ufshcd_setup_clocks(hba, false);
8948         if (ufshcd_is_clkgating_allowed(hba)) {
8949                 hba->clk_gating.state = CLKS_OFF;
8950                 trace_ufshcd_clk_gating(dev_name(hba->dev),
8951                                         hba->clk_gating.state);
8952         }
8953 disable_vreg:
8954         ufshcd_vreg_set_lpm(hba);
8955 out:
8956         hba->pm_op_in_progress = 0;
8957         if (ret)
8958                 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
8959         return ret;
8960 }
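/*
 * Note on ordering (informational): ufshcd_resume() undoes ufshcd_suspend()
 * in reverse: regulators back to HPM, clocks on, IRQ enabled, vendor resume,
 * then the link and the device are brought back to the active state. The
 * error labels unwind those same steps in the opposite direction.
 */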
8961
8962 /**
8963  * ufshcd_system_suspend - system suspend routine
8964  * @hba: per adapter instance
8965  *
8966  * Check the description of ufshcd_suspend() function for more details.
8967  *
8968  * Returns 0 for success and non-zero for failure
8969  */
8970 int ufshcd_system_suspend(struct ufs_hba *hba)
8971 {
8972         int ret = 0;
8973         ktime_t start = ktime_get();
8974
8975         if (!hba) {
8976                 early_suspend = true;
8977                 return 0;
8978         }
8979
8980         down(&hba->host_sem);
8981
8982         if (!hba->is_powered)
8983                 return 0;
8984
8985         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8986              hba->curr_dev_pwr_mode) &&
8987             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8988              hba->uic_link_state) &&
8989              !hba->dev_info.b_rpm_dev_flush_capable)
8990                 goto out;
8991
8992         if (pm_runtime_suspended(hba->dev)) {
8993                 /*
8994                  * The UFS device and/or UFS link low power states during
8995                  * runtime suspend seem to be different from what is expected
8996                  * during system suspend. Hence runtime resume the device &
8997                  * link and let the system suspend low power states take effect.
8998                  * TODO: If resume takes too long, we might optimize it in the
8999                  * future by not resuming everything if possible.
9000                  */
9001                 ret = ufshcd_runtime_resume(hba);
9002                 if (ret)
9003                         goto out;
9004         }
9005
9006         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
9007 out:
9008         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
9009                 ktime_to_us(ktime_sub(ktime_get(), start)),
9010                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9011         if (!ret)
9012                 hba->is_sys_suspended = true;
9013         else
9014                 up(&hba->host_sem);
9015         return ret;
9016 }
9017 EXPORT_SYMBOL(ufshcd_system_suspend);
9018
9019 /**
9020  * ufshcd_system_resume - system resume routine
9021  * @hba: per adapter instance
9022  *
9023  * Returns 0 for success and non-zero for failure
9024  */
9026 int ufshcd_system_resume(struct ufs_hba *hba)
9027 {
9028         int ret = 0;
9029         ktime_t start = ktime_get();
9030
9031         if (!hba)
9032                 return -EINVAL;
9033
9034         if (unlikely(early_suspend)) {
9035                 early_suspend = false;
9036                 down(&hba->host_sem);
9037         }
9038
9039         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
9040                 /*
9041                  * Let the runtime resume take care of resuming
9042                  * if runtime suspended.
9043                  */
9044                 goto out;
9045         else
9046                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
9047 out:
9048         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
9049                 ktime_to_us(ktime_sub(ktime_get(), start)),
9050                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9051         if (!ret)
9052                 hba->is_sys_suspended = false;
9053         up(&hba->host_sem);
9054         return ret;
9055 }
9056 EXPORT_SYMBOL(ufshcd_system_resume);
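/*
 * Illustrative sketch (not part of this file): a bus glue driver typically
 * exposes ufshcd_system_suspend()/ufshcd_system_resume() through its
 * dev_pm_ops, assuming it stored the hba as driver data. The wrapper names
 * my_ufs_suspend/my_ufs_resume/my_ufs_pm_ops below are hypothetical:
 *
 *   static int my_ufs_suspend(struct device *dev)
 *   {
 *           return ufshcd_system_suspend(dev_get_drvdata(dev));
 *   }
 *
 *   static int my_ufs_resume(struct device *dev)
 *   {
 *           return ufshcd_system_resume(dev_get_drvdata(dev));
 *   }
 *
 *   static const struct dev_pm_ops my_ufs_pm_ops = {
 *           SET_SYSTEM_SLEEP_PM_OPS(my_ufs_suspend, my_ufs_resume)
 *   };
 */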
9057
9058 /**
9059  * ufshcd_runtime_suspend - runtime suspend routine
9060  * @hba: per adapter instance
9061  *
9062  * Check the description of ufshcd_suspend() function for more details.
9063  *
9064  * Returns 0 for success and non-zero for failure
9065  */
9066 int ufshcd_runtime_suspend(struct ufs_hba *hba)
9067 {
9068         int ret = 0;
9069         ktime_t start = ktime_get();
9070
9071         if (!hba)
9072                 return -EINVAL;
9073
9074         if (!hba->is_powered)
9075                 goto out;
9076         else
9077                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
9078 out:
9079         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
9080                 ktime_to_us(ktime_sub(ktime_get(), start)),
9081                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9082         return ret;
9083 }
9084 EXPORT_SYMBOL(ufshcd_runtime_suspend);
9085
9086 /**
9087  * ufshcd_runtime_resume - runtime resume routine
9088  * @hba: per adapter instance
9089  *
9090  * This function basically brings the UFS device, UniPro link and controller
9091  * to active state. The following operations are done in this function:
9092  *
9093  * 1. Turn on all the controller related clocks
9094  * 2. Bring the UniPro link out of Hibernate state
9095  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
9096  *    to active state.
9097  * 4. If auto-bkops is enabled on the device, disable it.
9098  *
9099  * The following would be the possible power state after this function
9100  * returns successfully:
9101  *      S1: UFS device in Active state with VCC rail ON
9102  *          UniPro link in Active state
9103  *          All the UFS/UniPro controller clocks are ON
9104  *
9105  * Returns 0 for success and non-zero for failure
9106  */
9107 int ufshcd_runtime_resume(struct ufs_hba *hba)
9108 {
9109         int ret = 0;
9110         ktime_t start = ktime_get();
9111
9112         if (!hba)
9113                 return -EINVAL;
9114
9115         if (!hba->is_powered)
9116                 goto out;
9117         else
9118                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
9119 out:
9120         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
9121                 ktime_to_us(ktime_sub(ktime_get(), start)),
9122                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9123         return ret;
9124 }
9125 EXPORT_SYMBOL(ufshcd_runtime_resume);
9126
9127 int ufshcd_runtime_idle(struct ufs_hba *hba)
9128 {
9129         return 0;
9130 }
9131 EXPORT_SYMBOL(ufshcd_runtime_idle);
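/*
 * Illustrative sketch (not part of this file): the runtime PM entry points
 * above are typically wired into the same dev_pm_ops via SET_RUNTIME_PM_OPS,
 * again assuming the glue driver stored the hba as driver data and using
 * hypothetical wrapper names:
 *
 *   static int my_ufs_runtime_suspend(struct device *dev)
 *   {
 *           return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *   }
 *
 *   static int my_ufs_runtime_resume(struct device *dev)
 *   {
 *           return ufshcd_runtime_resume(dev_get_drvdata(dev));
 *   }
 *
 *   static int my_ufs_runtime_idle(struct device *dev)
 *   {
 *           return ufshcd_runtime_idle(dev_get_drvdata(dev));
 *   }
 *
 *   SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend, my_ufs_runtime_resume,
 *                      my_ufs_runtime_idle)
 */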
9132
9133 /**
9134  * ufshcd_shutdown - shutdown routine
9135  * @hba: per adapter instance
9136  *
9137  * This function would power off both UFS device and UFS link.
9138  *
9139  * Returns 0 always to allow force shutdown even in case of errors.
9140  */
9141 int ufshcd_shutdown(struct ufs_hba *hba)
9142 {
9143         int ret = 0;
9144
9145         down(&hba->host_sem);
9146         hba->shutting_down = true;
9147         up(&hba->host_sem);
9148
9149         if (!hba->is_powered)
9150                 goto out;
9151
9152         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
9153                 goto out;
9154
9155         pm_runtime_get_sync(hba->dev);
9156
9157         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
9158 out:
9159         if (ret)
9160                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9161         hba->is_powered = false;
9162         /* allow force shutdown even in case of errors */
9163         return 0;
9164 }
9165 EXPORT_SYMBOL(ufshcd_shutdown);
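/*
 * Illustrative sketch (not part of this file): a platform glue driver's
 * .shutdown handler would typically just forward to ufshcd_shutdown(),
 * assuming driver data holds the hba (hypothetical name my_ufs_shutdown).
 * ufshcd_shutdown() always returns 0, so the return value can be ignored:
 *
 *   static void my_ufs_shutdown(struct platform_device *pdev)
 *   {
 *           ufshcd_shutdown(platform_get_drvdata(pdev));
 *   }
 */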
9166
9167 /**
9168  * ufshcd_remove - de-allocate the SCSI host and host memory space
9169  *              data structures
9170  * @hba: per adapter instance
9171  */
9172 void ufshcd_remove(struct ufs_hba *hba)
9173 {
9174         ufs_bsg_remove(hba);
9175         ufs_sysfs_remove_nodes(hba->dev);
9176         blk_cleanup_queue(hba->tmf_queue);
9177         blk_mq_free_tag_set(&hba->tmf_tag_set);
9178         blk_cleanup_queue(hba->cmd_queue);
9179         scsi_remove_host(hba->host);
9180         /* disable interrupts */
9181         ufshcd_disable_intr(hba, hba->intr_mask);
9182         ufshcd_hba_stop(hba);
9183         ufshcd_hba_exit(hba);
9184 }
9185 EXPORT_SYMBOL_GPL(ufshcd_remove);
9186
9187 /**
9188  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
9189  * @hba: pointer to Host Bus Adapter (HBA)
9190  */
9191 void ufshcd_dealloc_host(struct ufs_hba *hba)
9192 {
9193         scsi_host_put(hba->host);
9194 }
9195 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
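/*
 * Illustrative teardown order (not part of this file): a glue driver's
 * remove path would typically call ufshcd_remove() first and release the
 * SCSI host afterwards with ufshcd_dealloc_host(), e.g. (hypothetical
 * wrapper name my_ufs_remove, error handling abbreviated):
 *
 *   static int my_ufs_remove(struct platform_device *pdev)
 *   {
 *           struct ufs_hba *hba = platform_get_drvdata(pdev);
 *
 *           pm_runtime_get_sync(&pdev->dev);
 *           ufshcd_remove(hba);
 *           ufshcd_dealloc_host(hba);
 *           return 0;
 *   }
 */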
9196
9197 /**
9198  * ufshcd_set_dma_mask - Set dma mask based on the controller
9199  *                       addressing capability
9200  * @hba: per adapter instance
9201  *
9202  * Returns 0 for success, non-zero for failure
9203  */
9204 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
9205 {
9206         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
9207                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
9208                         return 0;
9209         }
9210         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
9211 }
9212
9213 /**
9214  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
9215  * @dev: pointer to device handle
9216  * @hba_handle: driver private handle
9217  * Returns 0 on success, non-zero value on failure
9218  */
9219 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
9220 {
9221         struct Scsi_Host *host;
9222         struct ufs_hba *hba;
9223         int err = 0;
9224
9225         if (!dev) {
9226                 dev_err(dev,
9227                 "Invalid memory reference for dev is NULL\n");
9228                 err = -ENODEV;
9229                 goto out_error;
9230         }
9231
9232         host = scsi_host_alloc(&ufshcd_driver_template,
9233                                 sizeof(struct ufs_hba));
9234         if (!host) {
9235                 dev_err(dev, "scsi_host_alloc failed\n");
9236                 err = -ENOMEM;
9237                 goto out_error;
9238         }
9239         hba = shost_priv(host);
9240         hba->host = host;
9241         hba->dev = dev;
9242         *hba_handle = hba;
9243         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
9244
9245         INIT_LIST_HEAD(&hba->clk_list_head);
9246
9247 out_error:
9248         return err;
9249 }
9250 EXPORT_SYMBOL(ufshcd_alloc_host);
9251
9252 /* This function exists because blk_mq_alloc_tag_set() requires this. */
9253 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9254                                      const struct blk_mq_queue_data *qd)
9255 {
9256         WARN_ON_ONCE(true);
9257         return BLK_STS_NOTSUPP;
9258 }
9259
9260 static const struct blk_mq_ops ufshcd_tmf_ops = {
9261         .queue_rq = ufshcd_queue_tmf,
9262 };
9263
9264 /**
9265  * ufshcd_init - Driver initialization routine
9266  * @hba: per-adapter instance
9267  * @mmio_base: base register address
9268  * @irq: Interrupt line of device
9269  * Returns 0 on success, non-zero value on failure
9270  */
9271 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
9272 {
9273         int err;
9274         struct Scsi_Host *host = hba->host;
9275         struct device *dev = hba->dev;
9276         char eh_wq_name[sizeof("ufs_eh_wq_00")];
9277
9278         if (!mmio_base) {
9279                 dev_err(hba->dev,
9280                 "Invalid memory reference for mmio_base is NULL\n");
9281                 err = -ENODEV;
9282                 goto out_error;
9283         }
9284
9285         hba->mmio_base = mmio_base;
9286         hba->irq = irq;
9287         hba->vps = &ufs_hba_vps;
9288
9289         err = ufshcd_hba_init(hba);
9290         if (err)
9291                 goto out_error;
9292
9293         /* Read capabilities registers */
9294         err = ufshcd_hba_capabilities(hba);
9295         if (err)
9296                 goto out_disable;
9297
9298         /* Get UFS version supported by the controller */
9299         hba->ufs_version = ufshcd_get_ufs_version(hba);
9300
9301         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
9302             (hba->ufs_version != UFSHCI_VERSION_11) &&
9303             (hba->ufs_version != UFSHCI_VERSION_20) &&
9304             (hba->ufs_version != UFSHCI_VERSION_21))
9305                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
9306                         hba->ufs_version);
9307
9308         /* Get Interrupt bit mask per version */
9309         hba->intr_mask = ufshcd_get_intr_mask(hba);
9310
9311         err = ufshcd_set_dma_mask(hba);
9312         if (err) {
9313                 dev_err(hba->dev, "set dma mask failed\n");
9314                 goto out_disable;
9315         }
9316
9317         /* Allocate memory for host memory space */
9318         err = ufshcd_memory_alloc(hba);
9319         if (err) {
9320                 dev_err(hba->dev, "Memory allocation failed\n");
9321                 goto out_disable;
9322         }
9323
9324         /* Configure LRB */
9325         ufshcd_host_memory_configure(hba);
9326
9327         host->can_queue = hba->nutrs;
9328         host->cmd_per_lun = hba->nutrs;
9329         host->max_id = UFSHCD_MAX_ID;
9330         host->max_lun = UFS_MAX_LUNS;
9331         host->max_channel = UFSHCD_MAX_CHANNEL;
9332         host->unique_id = host->host_no;
9333         host->max_cmd_len = UFS_CDB_SIZE;
9334
9335         hba->max_pwr_info.is_valid = false;
9336
9337         /* Initialize work queues */
9338         snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9339                  hba->host->host_no);
9340         hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9341         if (!hba->eh_wq) {
9342                 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9343                                 __func__);
9344                 err = -ENOMEM;
9345                 goto out_disable;
9346         }
9347         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
9348         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9349
9350         sema_init(&hba->host_sem, 1);
9351
9352         /* Initialize UIC command mutex */
9353         mutex_init(&hba->uic_cmd_mutex);
9354
9355         /* Initialize mutex for device management commands */
9356         mutex_init(&hba->dev_cmd.lock);
9357
9358         init_rwsem(&hba->clk_scaling_lock);
9359
9360         ufshcd_init_clk_gating(hba);
9361
9362         ufshcd_init_clk_scaling(hba);
9363
9364         /*
9365          * In order to avoid any spurious interrupt immediately after
9366          * registering UFS controller interrupt handler, clear any pending UFS
9367          * interrupt status and disable all the UFS interrupts.
9368          */
9369         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
9370                       REG_INTERRUPT_STATUS);
9371         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
9372         /*
9373          * Make sure that UFS interrupts are disabled and any pending interrupt
9374          * status is cleared before registering UFS interrupt handler.
9375          */
9376         mb();
9377
9378         /* IRQ registration */
9379         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
9380         if (err) {
9381                 dev_err(hba->dev, "request irq failed\n");
9382                 goto out_disable;
9383         } else {
9384                 hba->is_irq_enabled = true;
9385         }
9386
9387         err = scsi_add_host(host, hba->dev);
9388         if (err) {
9389                 dev_err(hba->dev, "scsi_add_host failed\n");
9390                 goto out_disable;
9391         }
9392
9393         hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9394         if (IS_ERR(hba->cmd_queue)) {
9395                 err = PTR_ERR(hba->cmd_queue);
9396                 goto out_remove_scsi_host;
9397         }
9398
9399         hba->tmf_tag_set = (struct blk_mq_tag_set) {
9400                 .nr_hw_queues   = 1,
9401                 .queue_depth    = hba->nutmrs,
9402                 .ops            = &ufshcd_tmf_ops,
9403                 .flags          = BLK_MQ_F_NO_SCHED,
9404         };
9405         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9406         if (err < 0)
9407                 goto free_cmd_queue;
9408         hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9409         if (IS_ERR(hba->tmf_queue)) {
9410                 err = PTR_ERR(hba->tmf_queue);
9411                 goto free_tmf_tag_set;
9412         }
9413
9414         /* Reset the attached device */
9415         ufshcd_device_reset(hba);
9416
9417         ufshcd_init_crypto(hba);
9418
9419         /* Host controller enable */
9420         err = ufshcd_hba_enable(hba);
9421         if (err) {
9422                 dev_err(hba->dev, "Host controller enable failed\n");
9423                 ufshcd_print_evt_hist(hba);
9424                 ufshcd_print_host_state(hba);
9425                 goto free_tmf_queue;
9426         }
9427
9428         /*
9429          * Set the default power management level for runtime and system PM.
9430          * Default power saving mode is to keep UFS link in Hibern8 state
9431          * and UFS device in sleep state.
9432          */
9433         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9434                                                 UFS_SLEEP_PWR_MODE,
9435                                                 UIC_LINK_HIBERN8_STATE);
9436         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
9437                                                 UFS_SLEEP_PWR_MODE,
9438                                                 UIC_LINK_HIBERN8_STATE);
9439
9440         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9441                           ufshcd_rpm_dev_flush_recheck_work);
9442
9443         /* Set the default auto-hibernate idle timer value to 150 ms */
9444         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9445                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9446                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9447         }
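        /*
         * Informational note on the encoding above: the 150 ms default is
         * expressed as a timer value of 150 combined with a scale field of 3,
         * which corresponds to a 1 ms granularity in the UFSHCI auto-hibernate
         * idle timer encoding, giving the 150 ms mentioned in the comment.
         */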
9448
9449         /* Hold auto suspend until async scan completes */
9450         pm_runtime_get_sync(dev);
9451         atomic_set(&hba->scsi_block_reqs_cnt, 0);
9452         /*
9453          * We are assuming that the device wasn't put into a sleep/power-down
9454          * state during the boot stage, before the kernel started. This
9455          * assumption helps avoid doing link startup twice during
9456          * ufshcd_probe_hba().
9457          */
9458         ufshcd_set_ufs_dev_active(hba);
9459
9460         async_schedule(ufshcd_async_scan, hba);
9461         ufs_sysfs_add_nodes(hba->dev);
9462
9463         return 0;
9464
9465 free_tmf_queue:
9466         blk_cleanup_queue(hba->tmf_queue);
9467 free_tmf_tag_set:
9468         blk_mq_free_tag_set(&hba->tmf_tag_set);
9469 free_cmd_queue:
9470         blk_cleanup_queue(hba->cmd_queue);
9471 out_remove_scsi_host:
9472         scsi_remove_host(hba->host);
9473 out_disable:
9474         hba->is_irq_enabled = false;
9475         ufshcd_hba_exit(hba);
9476 out_error:
9477         return err;
9478 }
9479 EXPORT_SYMBOL_GPL(ufshcd_init);
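/*
 * Illustrative probe sequence (not part of this file): a platform glue
 * driver would typically allocate the hba with ufshcd_alloc_host(), map the
 * controller registers, and then hand both to ufshcd_init(). The names
 * my_ufs_probe/mmio/irq below are hypothetical, and error handling is
 * abbreviated:
 *
 *   static int my_ufs_probe(struct platform_device *pdev)
 *   {
 *           struct device *dev = &pdev->dev;
 *           struct ufs_hba *hba;
 *           void __iomem *mmio;
 *           int irq, err;
 *
 *           err = ufshcd_alloc_host(dev, &hba);
 *           if (err)
 *                   return err;
 *
 *           mmio = devm_platform_ioremap_resource(pdev, 0);
 *           if (IS_ERR(mmio)) {
 *                   ufshcd_dealloc_host(hba);
 *                   return PTR_ERR(mmio);
 *           }
 *
 *           irq = platform_get_irq(pdev, 0);
 *           if (irq < 0) {
 *                   ufshcd_dealloc_host(hba);
 *                   return irq;
 *           }
 *
 *           platform_set_drvdata(pdev, hba);
 *           err = ufshcd_init(hba, mmio, irq);
 *           if (err)
 *                   ufshcd_dealloc_host(hba);
 *           return err;
 *   }
 */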
9480
9481 static int __init ufshcd_core_init(void)
9482 {
9483         ufs_debugfs_init();
9484         return 0;
9485 }
9486
9487 static void __exit ufshcd_core_exit(void)
9488 {
9489         ufs_debugfs_exit();
9490 }
9491
9492 module_init(ufshcd_core_init);
9493 module_exit(ufshcd_core_exit);
9494
9495 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
9496 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
9497 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
9498 MODULE_LICENSE("GPL");
9499 MODULE_VERSION(UFSHCD_DRIVER_VERSION);