drivers/ufs/core/ufshcd.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller driver Core
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/pm_opp.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/sched/clock.h>
26 #include <linux/iopoll.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_dbg.h>
29 #include <scsi/scsi_driver.h>
30 #include <scsi/scsi_eh.h>
31 #include "ufshcd-priv.h"
32 #include <ufs/ufs_quirks.h>
33 #include <ufs/unipro.h>
34 #include "ufs-sysfs.h"
35 #include "ufs-debugfs.h"
36 #include "ufs-fault-injection.h"
37 #include "ufs_bsg.h"
38 #include "ufshcd-crypto.h"
39 #include <asm/unaligned.h>
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/ufs.h>
43
44 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
45                                  UTP_TASK_REQ_COMPL |\
46                                  UFSHCD_ERROR_MASK)
47
48 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
49                                  UFSHCD_ERROR_MASK |\
50                                  MCQ_CQ_EVENT_STATUS)
51
52
53 /* UIC command timeout, unit: ms */
54 #define UIC_CMD_TIMEOUT 500
55
56 /* NOP OUT retries waiting for NOP IN response */
57 #define NOP_OUT_RETRIES    10
58 /* Timeout after 50 msecs if NOP OUT hangs without response */
59 #define NOP_OUT_TIMEOUT    50 /* msecs */
60
61 /* Query request retries */
62 #define QUERY_REQ_RETRIES 3
63 /* Query request timeout */
64 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
65
66 /* Advanced RPMB request timeout */
67 #define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */
68
69 /* Task management command timeout */
70 #define TM_CMD_TIMEOUT  100 /* msecs */
71
72 /* maximum number of retries for a general UIC command  */
73 #define UFS_UIC_COMMAND_RETRIES 3
74
75 /* maximum number of link-startup retries */
76 #define DME_LINKSTARTUP_RETRIES 3
77
78 /* maximum number of reset retries before giving up */
79 #define MAX_HOST_RESET_RETRIES 5
80
81 /* Maximum number of error handler retries before giving up */
82 #define MAX_ERR_HANDLER_RETRIES 5
83
84 /* Expose the flag value from utp_upiu_query.value */
85 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
86
87 /* Interrupt aggregation default timeout, unit: 40us */
88 #define INT_AGGR_DEF_TO 0x02
89
90 /* default delay of autosuspend: 2000 ms */
91 #define RPM_AUTOSUSPEND_DELAY_MS 2000
92
93 /* Default delay of RPM device flush delayed work */
94 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
95
96 /* Default value of wait time before gating device ref clock */
97 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
98
99 /* Polling time to wait for fDeviceInit */
100 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
101
102 /* Default RTC update every 10 seconds */
103 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
104
105 /* UFSHCI 4.0 compliant host controllers support this mode. */
106 static bool use_mcq_mode = true;
107
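/*
 * is_mcq_supported - true if the controller advertises MCQ (Multi-Circular
 * Queue) support and the use_mcq_mode module parameter has not disabled it.
 */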
108 static bool is_mcq_supported(struct ufs_hba *hba)
109 {
110         return hba->mcq_sup && use_mcq_mode;
111 }
112
113 module_param(use_mcq_mode, bool, 0644);
114 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
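/*
 * Usage note (assuming the core driver is built as the ufshcd-core module):
 * MCQ can be disabled with use_mcq_mode=0 on the kernel/module command line,
 * or toggled at runtime via /sys/module/ufshcd_core/parameters/use_mcq_mode,
 * since the parameter is declared writable (0644).
 */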
115
116 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
117         ({                                                              \
118                 int _ret;                                               \
119                 if (_on)                                                \
120                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
121                 else                                                    \
122                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
123                 _ret;                                                   \
124         })
125
126 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
127         size_t __len = (len);                                            \
128         print_hex_dump(KERN_ERR, prefix_str,                             \
129                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
130                        16, 4, buf, __len, false);                        \
131 } while (0)
132
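/**
 * ufshcd_dump_regs - hex-dump a range of host controller registers
 * @hba: per-adapter instance
 * @offset: byte offset of the first register to dump; must be 4-byte aligned
 * @len: number of bytes to dump; must be a multiple of 4
 * @prefix: prefix string for each line of the dump
 *
 * The UIC error code registers are skipped when dumping from offset 0.
 *
 * Return: 0 on success; -EINVAL for an unaligned range; -ENOMEM if the
 * temporary buffer cannot be allocated.
 */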
133 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
134                      const char *prefix)
135 {
136         u32 *regs;
137         size_t pos;
138
139         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
140                 return -EINVAL;
141
142         regs = kzalloc(len, GFP_ATOMIC);
143         if (!regs)
144                 return -ENOMEM;
145
146         for (pos = 0; pos < len; pos += 4) {
147                 if (offset == 0 &&
148                     pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
149                     pos <= REG_UIC_ERROR_CODE_DME)
150                         continue;
151                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
152         }
153
154         ufshcd_hex_dump(prefix, regs, len);
155         kfree(regs);
156
157         return 0;
158 }
159 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
160
161 enum {
162         UFSHCD_MAX_CHANNEL      = 0,
163         UFSHCD_MAX_ID           = 1,
164         UFSHCD_CMD_PER_LUN      = 32 - UFSHCD_NUM_RESERVED,
165         UFSHCD_CAN_QUEUE        = 32 - UFSHCD_NUM_RESERVED,
166 };
167
168 static const char *const ufshcd_state_name[] = {
169         [UFSHCD_STATE_RESET]                    = "reset",
170         [UFSHCD_STATE_OPERATIONAL]              = "operational",
171         [UFSHCD_STATE_ERROR]                    = "error",
172         [UFSHCD_STATE_EH_SCHEDULED_FATAL]       = "eh_fatal",
173         [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]   = "eh_non_fatal",
174 };
175
176 /* UFSHCD error handling flags */
177 enum {
178         UFSHCD_EH_IN_PROGRESS = (1 << 0),
179 };
180
181 /* UFSHCD UIC layer error flags */
182 enum {
183         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
184         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
185         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
186         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
187         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
188         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
189         UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
190 };
191
192 #define ufshcd_set_eh_in_progress(h) \
193         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
194 #define ufshcd_eh_in_progress(h) \
195         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
196 #define ufshcd_clear_eh_in_progress(h) \
197         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
198
199 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
200         [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
201         [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
202         [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
203         [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
204         [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
205         [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
206         /*
207          * For DeepSleep, the link is first put in hibern8 and then off.
208          * Leaving the link in hibern8 is not supported.
209          */
210         [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
211 };
212
213 static inline enum ufs_dev_pwr_mode
214 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
215 {
216         return ufs_pm_lvl_states[lvl].dev_state;
217 }
218
219 static inline enum uic_link_state
220 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
221 {
222         return ufs_pm_lvl_states[lvl].link_state;
223 }
224
225 static inline enum ufs_pm_level
226 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
227                                         enum uic_link_state link_state)
228 {
229         enum ufs_pm_level lvl;
230
231         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
232                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
233                         (ufs_pm_lvl_states[lvl].link_state == link_state))
234                         return lvl;
235         }
236
237         /* if no match is found, return level 0 */
238         return UFS_PM_LVL_0;
239 }
240
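/*
 * ufshcd_is_ufs_dev_busy - true while there is still outstanding work:
 * active clock gating references, transfer requests, task management
 * requests, or a UIC command in flight.
 */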
241 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
242 {
243         return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
244                 hba->active_uic_cmd || hba->uic_async_done);
245 }
246
247 static const struct ufs_dev_quirk ufs_fixups[] = {
248         /* UFS cards deviations table */
249         { .wmanufacturerid = UFS_VENDOR_MICRON,
250           .model = UFS_ANY_MODEL,
251           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
252         { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
253           .model = UFS_ANY_MODEL,
254           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
255                    UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
256                    UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
257         { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
258           .model = UFS_ANY_MODEL,
259           .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
260         { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
261           .model = "hB8aL1" /*H28U62301AMR*/,
262           .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
263         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
264           .model = UFS_ANY_MODEL,
265           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
266         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
267           .model = "THGLF2G9C8KBADG",
268           .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
269         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
270           .model = "THGLF2G9D8KBADG",
271           .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
272         {}
273 };
274
275 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
276 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
277 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
278 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
279 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
280 static void ufshcd_hba_exit(struct ufs_hba *hba);
281 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
282 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
283 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
284 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
285 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
286 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
287 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
288                              bool scale_up);
289 static irqreturn_t ufshcd_intr(int irq, void *__hba);
290 static int ufshcd_change_power_mode(struct ufs_hba *hba,
291                              struct ufs_pa_layer_attr *pwr_mode);
292 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
293 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
294 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
295                                          struct ufs_vreg *vreg);
296 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
297                                                  bool enable);
298 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
299 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
300
301 void ufshcd_enable_irq(struct ufs_hba *hba)
302 {
303         if (!hba->is_irq_enabled) {
304                 enable_irq(hba->irq);
305                 hba->is_irq_enabled = true;
306         }
307 }
308 EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
309
310 void ufshcd_disable_irq(struct ufs_hba *hba)
311 {
312         if (hba->is_irq_enabled) {
313                 disable_irq(hba->irq);
314                 hba->is_irq_enabled = false;
315         }
316 }
317 EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
318
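/*
 * ufshcd_configure_wb - enable WriteBooster (if allowed), keep the buffer
 * flush enabled during hibern8, and enable explicit buffer flushing when
 * the platform allows it.
 */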
319 static void ufshcd_configure_wb(struct ufs_hba *hba)
320 {
321         if (!ufshcd_is_wb_allowed(hba))
322                 return;
323
324         ufshcd_wb_toggle(hba, true);
325
326         ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
327
328         if (ufshcd_is_wb_buf_flush_allowed(hba))
329                 ufshcd_wb_toggle_buf_flush(hba, true);
330 }
331
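/*
 * Block/unblock SCSI request processing with a reference count so that
 * nested callers are handled: requests are blocked on the first block call
 * and unblocked again only when the last caller drops its reference.
 */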
332 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
333 {
334         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
335                 scsi_unblock_requests(hba->host);
336 }
337
338 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
339 {
340         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
341                 scsi_block_requests(hba->host);
342 }
343
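/*
 * ufshcd_add_cmd_upiu_trace - emit a ufshcd_upiu trace event for the request
 * UPIU (on send) or the response UPIU (on completion) of the command at the
 * given tag. No-op when the tracepoint is disabled.
 */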
344 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
345                                       enum ufs_trace_str_t str_t)
346 {
347         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
348         struct utp_upiu_header *header;
349
350         if (!trace_ufshcd_upiu_enabled())
351                 return;
352
353         if (str_t == UFS_CMD_SEND)
354                 header = &rq->header;
355         else
356                 header = &hba->lrb[tag].ucd_rsp_ptr->header;
357
358         trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
359                           UFS_TSF_CDB);
360 }
361
362 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
363                                         enum ufs_trace_str_t str_t,
364                                         struct utp_upiu_req *rq_rsp)
365 {
366         if (!trace_ufshcd_upiu_enabled())
367                 return;
368
369         trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
370                           &rq_rsp->qr, UFS_TSF_OSF);
371 }
372
373 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
374                                      enum ufs_trace_str_t str_t)
375 {
376         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
377
378         if (!trace_ufshcd_upiu_enabled())
379                 return;
380
381         if (str_t == UFS_TM_SEND)
382                 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
383                                   &descp->upiu_req.req_header,
384                                   &descp->upiu_req.input_param1,
385                                   UFS_TSF_TM_INPUT);
386         else
387                 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
388                                   &descp->upiu_rsp.rsp_header,
389                                   &descp->upiu_rsp.output_param1,
390                                   UFS_TSF_TM_OUTPUT);
391 }
392
393 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
394                                          const struct uic_command *ucmd,
395                                          enum ufs_trace_str_t str_t)
396 {
397         u32 cmd;
398
399         if (!trace_ufshcd_uic_command_enabled())
400                 return;
401
402         if (str_t == UFS_CMD_SEND)
403                 cmd = ucmd->command;
404         else
405                 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
406
407         trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
408                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
409                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
410                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
411 }
412
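/*
 * ufshcd_add_command_trace - trace a SCSI command at issue or completion
 * time: LBA and transfer length for READ(10)/WRITE(10)/UNMAP (plus the group
 * number for WRITE(10)), the doorbell (legacy mode) or hardware queue id
 * (MCQ mode), and the current interrupt status.
 */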
413 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
414                                      enum ufs_trace_str_t str_t)
415 {
416         u64 lba = 0;
417         u8 opcode = 0, group_id = 0;
418         u32 doorbell = 0;
419         u32 intr;
420         int hwq_id = -1;
421         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
422         struct scsi_cmnd *cmd = lrbp->cmd;
423         struct request *rq = scsi_cmd_to_rq(cmd);
424         int transfer_len = -1;
425
426         if (!cmd)
427                 return;
428
429         /* trace UPIU also */
430         ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
431         if (!trace_ufshcd_command_enabled())
432                 return;
433
434         opcode = cmd->cmnd[0];
435
436         if (opcode == READ_10 || opcode == WRITE_10) {
437                 /*
438                  * Currently we only fully trace read(10) and write(10) commands
439                  */
440                 transfer_len =
441                        be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
442                 lba = scsi_get_lba(cmd);
443                 if (opcode == WRITE_10)
444                         group_id = lrbp->cmd->cmnd[6];
445         } else if (opcode == UNMAP) {
446                 /*
447                  * The number of bytes to be unmapped, beginning at the LBA.
448                  */
449                 transfer_len = blk_rq_bytes(rq);
450                 lba = scsi_get_lba(cmd);
451         }
452
453         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
454
455         if (is_mcq_enabled(hba)) {
456                 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
457
458                 hwq_id = hwq->id;
459         } else {
460                 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
461         }
462         trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
463                              transfer_len, intr, lba, opcode, group_id);
464 }
465
466 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
467 {
468         struct ufs_clk_info *clki;
469         struct list_head *head = &hba->clk_list_head;
470
471         if (list_empty(head))
472                 return;
473
474         list_for_each_entry(clki, head, list) {
475                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
476                                 clki->max_freq)
477                         dev_err(hba->dev, "clk: %s, rate: %u\n",
478                                         clki->name, clki->curr_freq);
479         }
480 }
481
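/*
 * ufshcd_print_evt - dump the recorded history of one UFS event type (value
 * and timestamp of each entry, plus the total count) to the kernel log, or
 * report that there is no record of that event.
 */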
482 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
483                              const char *err_name)
484 {
485         int i;
486         bool found = false;
487         const struct ufs_event_hist *e;
488
489         if (id >= UFS_EVT_CNT)
490                 return;
491
492         e = &hba->ufs_stats.event[id];
493
494         for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
495                 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
496
497                 if (e->tstamp[p] == 0)
498                         continue;
499                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
500                         e->val[p], div_u64(e->tstamp[p], 1000));
501                 found = true;
502         }
503
504         if (!found)
505                 dev_err(hba->dev, "No record of %s\n", err_name);
506         else
507                 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
508 }
509
510 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
511 {
512         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
513
514         ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
515         ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
516         ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
517         ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
518         ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
519         ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
520                          "auto_hibern8_err");
521         ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
522         ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
523                          "link_startup_fail");
524         ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
525         ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
526                          "suspend_fail");
527         ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
528         ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
529                          "wlun suspend_fail");
530         ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
531         ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
532         ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
533
534         ufshcd_vops_dbg_register_dump(hba);
535 }
536
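/*
 * ufshcd_print_tr - dump one UTP transfer request: issue/completion
 * timestamps, the transfer request descriptor, the request and response
 * UPIUs and, if @pr_prdt is set, the PRDT entries.
 */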
537 static
538 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
539 {
540         const struct ufshcd_lrb *lrbp;
541         int prdt_length;
542
543         lrbp = &hba->lrb[tag];
544
545         dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
546                         tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
547         dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
548                         tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
549         dev_err(hba->dev,
550                 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
551                 tag, (u64)lrbp->utrd_dma_addr);
552
553         ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
554                         sizeof(struct utp_transfer_req_desc));
555         dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
556                 (u64)lrbp->ucd_req_dma_addr);
557         ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
558                         sizeof(struct utp_upiu_req));
559         dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
560                 (u64)lrbp->ucd_rsp_dma_addr);
561         ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
562                         sizeof(struct utp_upiu_rsp));
563
564         prdt_length = le16_to_cpu(
565                 lrbp->utr_descriptor_ptr->prd_table_length);
566         if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
567                 prdt_length /= ufshcd_sg_entry_size(hba);
568
569         dev_err(hba->dev,
570                 "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
571                 tag, prdt_length,
572                 (u64)lrbp->ucd_prdt_dma_addr);
573
574         if (pr_prdt)
575                 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
576                         ufshcd_sg_entry_size(hba) * prdt_length);
577 }
578
579 static bool ufshcd_print_tr_iter(struct request *req, void *priv)
580 {
581         struct scsi_device *sdev = req->q->queuedata;
582         struct Scsi_Host *shost = sdev->host;
583         struct ufs_hba *hba = shost_priv(shost);
584
585         ufshcd_print_tr(hba, req->tag, *(bool *)priv);
586
587         return true;
588 }
589
590 /**
591  * ufshcd_print_trs_all - print trs for all started requests.
592  * @hba: per-adapter instance.
593  * @pr_prdt: need to print prdt or not.
594  */
595 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
596 {
597         blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
598 }
599
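/*
 * ufshcd_print_tmrs - dump the task management request descriptors whose
 * bits are set in @bitmap.
 */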
600 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
601 {
602         int tag;
603
604         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
605                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
606
607                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
608                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
609         }
610 }
611
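/*
 * ufshcd_print_host_state - dump a snapshot of the host state for debugging:
 * outstanding requests/tasks, saved errors, device and link power state,
 * PM and error handling flags, capabilities, quirks and device identity.
 */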
612 static void ufshcd_print_host_state(struct ufs_hba *hba)
613 {
614         const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
615
616         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
617         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
618                 hba->outstanding_reqs, hba->outstanding_tasks);
619         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
620                 hba->saved_err, hba->saved_uic_err);
621         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
622                 hba->curr_dev_pwr_mode, hba->uic_link_state);
623         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
624                 hba->pm_op_in_progress, hba->is_sys_suspended);
625         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
626                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
627         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
628         dev_err(hba->dev,
629                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
630                 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
631                 hba->ufs_stats.hibern8_exit_cnt);
632         dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
633                 div_u64(hba->ufs_stats.last_intr_ts, 1000),
634                 hba->ufs_stats.last_intr_status);
635         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
636                 hba->eh_flags, hba->req_abort_count);
637         dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
638                 hba->ufs_version, hba->capabilities, hba->caps);
639         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
640                 hba->dev_quirks);
641         if (sdev_ufs)
642                 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
643                         sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
644
645         ufshcd_print_clk_freqs(hba);
646 }
647
648 /**
649  * ufshcd_print_pwr_info - print the power parameters saved in
650  * hba->pwr_info
651  * @hba: per-adapter instance
652  */
653 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
654 {
655         static const char * const names[] = {
656                 "INVALID MODE",
657                 "FAST MODE",
658                 "SLOW_MODE",
659                 "INVALID MODE",
660                 "FASTAUTO_MODE",
661                 "SLOWAUTO_MODE",
662                 "INVALID MODE",
663         };
664
665         /*
666          * Use dev_dbg() to avoid printing messages during runtime PM: otherwise
667          * user space writing them back to storage would trigger a runtime resume,
668          * generating more messages, and so on.
669          */
670         dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
671                  __func__,
672                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
673                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
674                  names[hba->pwr_info.pwr_rx],
675                  names[hba->pwr_info.pwr_tx],
676                  hba->pwr_info.hs_rate);
677 }
678
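/*
 * ufshcd_device_reset - reset the UFS device through the vendor hook. On
 * success the device is marked active and cached WriteBooster/RTC state is
 * reset; the outcome is recorded in the event history unless the vendor op
 * is not implemented (-EOPNOTSUPP).
 */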
679 static void ufshcd_device_reset(struct ufs_hba *hba)
680 {
681         int err;
682
683         err = ufshcd_vops_device_reset(hba);
684
685         if (!err) {
686                 ufshcd_set_ufs_dev_active(hba);
687                 if (ufshcd_is_wb_allowed(hba)) {
688                         hba->dev_info.wb_enabled = false;
689                         hba->dev_info.wb_buf_flush_enabled = false;
690                 }
691                 if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
692                         hba->dev_info.rtc_time_baseline = 0;
693         }
694         if (err != -EOPNOTSUPP)
695                 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
696 }
697
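/**
 * ufshcd_delay_us - delay for the requested number of microseconds, using
 * udelay() for delays shorter than 10 us and usleep_range() otherwise
 * @us: minimum delay in microseconds; 0 means no delay
 * @tolerance: extra slack in microseconds passed to usleep_range()
 */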
698 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
699 {
700         if (!us)
701                 return;
702
703         if (us < 10)
704                 udelay(us);
705         else
706                 usleep_range(us, us + tolerance);
707 }
708 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
709
710 /**
711  * ufshcd_wait_for_register - wait for register value to change
712  * @hba: per-adapter interface
713  * @reg: mmio register offset
714  * @mask: mask to apply to the read register value
715  * @val: value to wait for
716  * @interval_us: polling interval in microseconds
717  * @timeout_ms: timeout in milliseconds
718  *
719  * Return: -ETIMEDOUT on error, zero on success.
720  */
721 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
722                                 u32 val, unsigned long interval_us,
723                                 unsigned long timeout_ms)
724 {
725         int err = 0;
726         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
727
728         /* ignore bits that we don't intend to wait on */
729         val = val & mask;
730
731         while ((ufshcd_readl(hba, reg) & mask) != val) {
732                 usleep_range(interval_us, interval_us + 50);
733                 if (time_after(jiffies, timeout)) {
734                         if ((ufshcd_readl(hba, reg) & mask) != val)
735                                 err = -ETIMEDOUT;
736                         break;
737                 }
738         }
739
740         return err;
741 }
742
743 /**
744  * ufshcd_get_intr_mask - Get the interrupt bit mask
745  * @hba: Pointer to adapter instance
746  *
747  * Return: interrupt bit mask per version
748  */
749 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
750 {
751         if (hba->ufs_version == ufshci_version(1, 0))
752                 return INTERRUPT_MASK_ALL_VER_10;
753         if (hba->ufs_version <= ufshci_version(2, 0))
754                 return INTERRUPT_MASK_ALL_VER_11;
755
756         return INTERRUPT_MASK_ALL_VER_21;
757 }
758
759 /**
760  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
761  * @hba: Pointer to adapter instance
762  *
763  * Return: UFSHCI version supported by the controller
764  */
765 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
766 {
767         u32 ufshci_ver;
768
769         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
770                 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
771         else
772                 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
773
774         /*
775          * UFSHCI v1.x uses a different version scheme. To allow comparisons
776          * with the ufshci_version() function, convert it to the same scheme
777          * used by UFSHCI 2.0+.
778          */
779         if (ufshci_ver & 0x00010000)
780                 return ufshci_version(1, ufshci_ver & 0x00000100);
781
782         return ufshci_ver;
783 }
784
785 /**
786  * ufshcd_is_device_present - Check if any device is connected to
787  *                            the host controller
788  * @hba: pointer to adapter instance
789  *
790  * Return: true if device present, false if no device detected
791  */
792 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
793 {
794         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
795 }
796
797 /**
798  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
799  * @lrbp: pointer to local command reference block
800  * @cqe: pointer to the completion queue entry
801  *
802  * This function is used to get the OCS field from UTRD
803  *
804  * Return: the OCS field in the UTRD.
805  */
806 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
807                                       struct cq_entry *cqe)
808 {
809         if (cqe)
810                 return le32_to_cpu(cqe->status) & MASK_OCS;
811
812         return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
813 }
814
815 /**
816  * ufshcd_utrl_clear() - Clear requests from the controller request list.
817  * @hba: per adapter instance
818  * @mask: mask with one bit set for each request to be cleared
819  */
820 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
821 {
822         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
823                 mask = ~mask;
824         /*
825          * From the UFSHCI specification: "UTP Transfer Request List CLear
826          * Register (UTRLCLR): This field is bit significant. Each bit
827          * corresponds to a slot in the UTP Transfer Request List, where bit 0
828          * corresponds to request slot 0. A bit in this field is set to ‘0’
829          * by host software to indicate to the host controller that a transfer
830          * request slot is cleared. The host controller
831          * shall free up any resources associated to the request slot
832          * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
833          * host software indicates no change to request slots by setting the
834          * associated bits in this field to ‘1’. Bits in this field shall only
835          * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
836          */
837         ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
838 }
839
840 /**
841  * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
842  * @hba: per adapter instance
843  * @pos: position of the bit to be cleared
844  */
845 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
846 {
847         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
848                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
849         else
850                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
851 }
852
853 /**
854  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
855  * @reg: Register value of host controller status
856  *
857  * Return: 0 on success; a positive value if failed.
858  */
859 static inline int ufshcd_get_lists_status(u32 reg)
860 {
861         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
862 }
863
864 /**
865  * ufshcd_get_uic_cmd_result - Get the UIC command result
866  * @hba: Pointer to adapter instance
867  *
868  * This function gets the result of UIC command completion
869  *
870  * Return: 0 on success; non-zero value on error.
871  */
872 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
873 {
874         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
875                MASK_UIC_COMMAND_RESULT;
876 }
877
878 /**
879  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
880  * @hba: Pointer to adapter instance
881  *
882  * This function reads UIC command argument 3.
883  *
884  * Return: the value of UIC command argument 3, i.e. the requested attribute value.
885  */
886 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
887 {
888         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
889 }
890
891 /**
892  * ufshcd_get_req_rsp - returns the TR response transaction type
893  * @ucd_rsp_ptr: pointer to response UPIU
894  *
895  * Return: UPIU type.
896  */
897 static inline enum upiu_response_transaction
898 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
899 {
900         return ucd_rsp_ptr->header.transaction_code;
901 }
902
903 /**
904  * ufshcd_is_exception_event - Check if the device raised an exception event
905  * @ucd_rsp_ptr: pointer to response UPIU
906  *
907  * The function checks if the device raised an exception event indicated in
908  * the Device Information field of response UPIU.
909  *
910  * Return: true if exception is raised, false otherwise.
911  */
912 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
913 {
914         return ucd_rsp_ptr->header.device_information & 1;
915 }
916
917 /**
918  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
919  * @hba: per adapter instance
920  */
921 static inline void
922 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
923 {
924         ufshcd_writel(hba, INT_AGGR_ENABLE |
925                       INT_AGGR_COUNTER_AND_TIMER_RESET,
926                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
927 }
928
929 /**
930  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
931  * @hba: per adapter instance
932  * @cnt: Interrupt aggregation counter threshold
933  * @tmout: Interrupt aggregation timeout value
934  */
935 static inline void
936 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
937 {
938         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
939                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
940                       INT_AGGR_TIMEOUT_VAL(tmout),
941                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
942 }
943
944 /**
945  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
946  * @hba: per adapter instance
947  */
948 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
949 {
950         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
951 }
952
953 /**
954  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
955  *                      When the run-stop registers are set to 1, it indicates
956  *                      to the host controller that it can process requests
957  * @hba: per adapter instance
958  */
959 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
960 {
961         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
962                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
963         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
964                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
965 }
966
967 /**
968  * ufshcd_hba_start - Start controller initialization sequence
969  * @hba: per adapter instance
970  */
971 static inline void ufshcd_hba_start(struct ufs_hba *hba)
972 {
973         u32 val = CONTROLLER_ENABLE;
974
975         if (ufshcd_crypto_enable(hba))
976                 val |= CRYPTO_GENERAL_ENABLE;
977
978         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
979 }
980
981 /**
982  * ufshcd_is_hba_active - Get controller state
983  * @hba: per adapter instance
984  *
985  * Return: true if and only if the controller is active.
986  */
987 bool ufshcd_is_hba_active(struct ufs_hba *hba)
988 {
989         return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
990 }
991 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
992
993 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
994 {
995         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
996         if (hba->ufs_version <= ufshci_version(1, 1))
997                 return UFS_UNIPRO_VER_1_41;
998         else
999                 return UFS_UNIPRO_VER_1_6;
1000 }
1001 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1002
1003 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1004 {
1005         /*
1006          * If both host and device support UniPro ver1.6 or later, PA layer
1007          * parameters tuning happens during link startup itself.
1008          *
1009          * PA layer parameters can be tuned manually if either the host or the
1010          * device doesn't support UniPro ver 1.6 or later. To keep the manual
1011          * tuning logic simple, only do manual tuning when the local UniPro
1012          * version is below 1.6.
1013          */
1014         return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
1015 }
1016
1017 /**
1018  * ufshcd_pm_qos_init - initialize PM QoS request
1019  * @hba: per adapter instance
1020  */
1021 void ufshcd_pm_qos_init(struct ufs_hba *hba)
1022 {
1023
1024         if (hba->pm_qos_enabled)
1025                 return;
1026
1027         cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);
1028
1029         if (cpu_latency_qos_request_active(&hba->pm_qos_req))
1030                 hba->pm_qos_enabled = true;
1031 }
1032
1033 /**
1034  * ufshcd_pm_qos_exit - remove request from PM QoS
1035  * @hba: per adapter instance
1036  */
1037 void ufshcd_pm_qos_exit(struct ufs_hba *hba)
1038 {
1039         if (!hba->pm_qos_enabled)
1040                 return;
1041
1042         cpu_latency_qos_remove_request(&hba->pm_qos_req);
1043         hba->pm_qos_enabled = false;
1044 }
1045
1046 /**
1047  * ufshcd_pm_qos_update - update PM QoS request
1048  * @hba: per adapter instance
1049  * @on: If True, vote for perf PM QoS mode otherwise power save mode
1050  */
1051 static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
1052 {
1053         if (!hba->pm_qos_enabled)
1054                 return;
1055
1056         cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
1057 }
1058
1059 /**
1060  * ufshcd_set_clk_freq - set UFS controller clock frequencies
1061  * @hba: per adapter instance
1062  * @scale_up: If True, set the max possible frequency, otherwise set the low frequency
1063  *
1064  * Return: 0 if successful; < 0 upon failure.
1065  */
1066 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1067 {
1068         int ret = 0;
1069         struct ufs_clk_info *clki;
1070         struct list_head *head = &hba->clk_list_head;
1071
1072         if (list_empty(head))
1073                 goto out;
1074
1075         list_for_each_entry(clki, head, list) {
1076                 if (!IS_ERR_OR_NULL(clki->clk)) {
1077                         if (scale_up && clki->max_freq) {
1078                                 if (clki->curr_freq == clki->max_freq)
1079                                         continue;
1080
1081                                 ret = clk_set_rate(clki->clk, clki->max_freq);
1082                                 if (ret) {
1083                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1084                                                 __func__, clki->name,
1085                                                 clki->max_freq, ret);
1086                                         break;
1087                                 }
1088                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1089                                                 "scaled up", clki->name,
1090                                                 clki->curr_freq,
1091                                                 clki->max_freq);
1092
1093                                 clki->curr_freq = clki->max_freq;
1094
1095                         } else if (!scale_up && clki->min_freq) {
1096                                 if (clki->curr_freq == clki->min_freq)
1097                                         continue;
1098
1099                                 ret = clk_set_rate(clki->clk, clki->min_freq);
1100                                 if (ret) {
1101                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1102                                                 __func__, clki->name,
1103                                                 clki->min_freq, ret);
1104                                         break;
1105                                 }
1106                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1107                                                 "scaled down", clki->name,
1108                                                 clki->curr_freq,
1109                                                 clki->min_freq);
1110                                 clki->curr_freq = clki->min_freq;
1111                         }
1112                 }
1113                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1114                                 clki->name, clk_get_rate(clki->clk));
1115         }
1116
1117 out:
1118         return ret;
1119 }
1120
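/**
 * ufshcd_opp_config_clks - config_clks helper for the OPP framework that
 * sets each clock in hba->clk_list_head to the frequency indexed for it in
 * the selected OPP, skipping clocks whose OPP frequency is zero
 * @dev: UFS host controller device
 * @opp_table: OPP table (unused)
 * @opp: OPP being applied
 * @data: callback data (unused)
 * @scaling_down: true when moving to a lower OPP
 *
 * Return: 0 on success or the error returned by clk_set_rate().
 */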
1121 int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
1122                            struct dev_pm_opp *opp, void *data,
1123                            bool scaling_down)
1124 {
1125         struct ufs_hba *hba = dev_get_drvdata(dev);
1126         struct list_head *head = &hba->clk_list_head;
1127         struct ufs_clk_info *clki;
1128         unsigned long freq;
1129         u8 idx = 0;
1130         int ret;
1131
1132         list_for_each_entry(clki, head, list) {
1133                 if (!IS_ERR_OR_NULL(clki->clk)) {
1134                         freq = dev_pm_opp_get_freq_indexed(opp, idx++);
1135
1136                         /* Do not set the rate for clocks whose frequency is 0 */
1137                         if (!freq)
1138                                 continue;
1139
1140                         ret = clk_set_rate(clki->clk, freq);
1141                         if (ret) {
1142                                 dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
1143                                         __func__, clki->name, freq, ret);
1144                                 return ret;
1145                         }
1146
1147                         trace_ufshcd_clk_scaling(dev_name(dev),
1148                                 (scaling_down ? "scaled down" : "scaled up"),
1149                                 clki->name, hba->clk_scaling.target_freq, freq);
1150                 }
1151         }
1152
1153         return 0;
1154 }
1155 EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
1156
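/*
 * ufshcd_opp_set_rate - look up the highest OPP whose frequency does not
 * exceed @freq and apply it via dev_pm_opp_set_opp().
 */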
1157 static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
1158 {
1159         struct dev_pm_opp *opp;
1160         int ret;
1161
1162         opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
1163                                                  &freq, 0);
1164         if (IS_ERR(opp))
1165                 return PTR_ERR(opp);
1166
1167         ret = dev_pm_opp_set_opp(hba->dev, opp);
1168         dev_pm_opp_put(opp);
1169
1170         return ret;
1171 }
1172
1173 /**
1174  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1175  * @hba: per adapter instance
1176  * @freq: frequency to scale
1177  * @scale_up: True if scaling up and false if scaling down
1178  *
1179  * Return: 0 if successful; < 0 upon failure.
1180  */
1181 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
1182                              bool scale_up)
1183 {
1184         int ret = 0;
1185         ktime_t start = ktime_get();
1186
1187         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1188         if (ret)
1189                 goto out;
1190
1191         if (hba->use_pm_opp)
1192                 ret = ufshcd_opp_set_rate(hba, freq);
1193         else
1194                 ret = ufshcd_set_clk_freq(hba, scale_up);
1195         if (ret)
1196                 goto out;
1197
1198         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1199         if (ret) {
1200                 if (hba->use_pm_opp)
1201                         ufshcd_opp_set_rate(hba,
1202                                             hba->devfreq->previous_freq);
1203                 else
1204                         ufshcd_set_clk_freq(hba, !scale_up);
1205                 goto out;
1206         }
1207
1208         ufshcd_pm_qos_update(hba, scale_up);
1209
1210 out:
1211         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1212                         (scale_up ? "up" : "down"),
1213                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1214         return ret;
1215 }
1216
1217 /**
1218  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1219  * @hba: per adapter instance
1220  * @freq: frequency to scale
1221  * @scale_up: True if scaling up and false if scaling down
1222  *
1223  * Return: true if scaling is required, false otherwise.
1224  */
1225 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1226                                                unsigned long freq, bool scale_up)
1227 {
1228         struct ufs_clk_info *clki;
1229         struct list_head *head = &hba->clk_list_head;
1230
1231         if (list_empty(head))
1232                 return false;
1233
1234         if (hba->use_pm_opp)
1235                 return freq != hba->clk_scaling.target_freq;
1236
1237         list_for_each_entry(clki, head, list) {
1238                 if (!IS_ERR_OR_NULL(clki->clk)) {
1239                         if (scale_up && clki->max_freq) {
1240                                 if (clki->curr_freq == clki->max_freq)
1241                                         continue;
1242                                 return true;
1243                         } else if (!scale_up && clki->min_freq) {
1244                                 if (clki->curr_freq == clki->min_freq)
1245                                         continue;
1246                                 return true;
1247                         }
1248                 }
1249         }
1250
1251         return false;
1252 }
1253
1254 /*
1255  * Determine the number of pending commands by counting the bits in the SCSI
1256  * device budget maps. This approach has been selected because a bit is set in
1257  * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1258  * flag. The host_self_blocked flag can be modified by calling
1259  * scsi_block_requests() or scsi_unblock_requests().
1260  */
1261 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1262 {
1263         const struct scsi_device *sdev;
1264         u32 pending = 0;
1265
1266         lockdep_assert_held(hba->host->host_lock);
1267         __shost_for_each_device(sdev, hba->host)
1268                 pending += sbitmap_weight(&sdev->budget_map);
1269
1270         return pending;
1271 }
1272
1273 /*
1274  * Wait until all pending SCSI commands and TMFs have finished or the timeout
1275  * has expired.
1276  *
1277  * Return: 0 upon success; -EBUSY upon timeout.
1278  */
1279 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1280                                         u64 wait_timeout_us)
1281 {
1282         unsigned long flags;
1283         int ret = 0;
1284         u32 tm_doorbell;
1285         u32 tr_pending;
1286         bool timeout = false, do_last_check = false;
1287         ktime_t start;
1288
1289         ufshcd_hold(hba);
1290         spin_lock_irqsave(hba->host->host_lock, flags);
1291         /*
1292          * Wait for all the outstanding tasks/transfer requests.
1293          * Verify by checking the doorbell registers are clear.
1294          */
1295         start = ktime_get();
1296         do {
1297                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1298                         ret = -EBUSY;
1299                         goto out;
1300                 }
1301
1302                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1303                 tr_pending = ufshcd_pending_cmds(hba);
1304                 if (!tm_doorbell && !tr_pending) {
1305                         timeout = false;
1306                         break;
1307                 } else if (do_last_check) {
1308                         break;
1309                 }
1310
1311                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1312                 io_schedule_timeout(msecs_to_jiffies(20));
1313                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1314                     wait_timeout_us) {
1315                         timeout = true;
1316                         /*
1317                          * We might have been scheduled out for a long time,
1318                          * so check whether the doorbells have been cleared
1319                          * by now.
1320                          */
1321                         do_last_check = true;
1322                 }
1323                 spin_lock_irqsave(hba->host->host_lock, flags);
1324         } while (tm_doorbell || tr_pending);
1325
1326         if (timeout) {
1327                 dev_err(hba->dev,
1328                         "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1329                         __func__, tm_doorbell, tr_pending);
1330                 ret = -EBUSY;
1331         }
1332 out:
1333         spin_unlock_irqrestore(hba->host->host_lock, flags);
1334         ufshcd_release(hba);
1335         return ret;
1336 }
1337
1338 /**
1339  * ufshcd_scale_gear - scale up/down UFS gear
1340  * @hba: per adapter instance
1341  * @scale_up: True for scaling up gear and false for scaling down
1342  *
1343  * Return: 0 for success; -EBUSY if scaling can't happen at this time;
1344  * non-zero for any other errors.
1345  */
1346 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1347 {
1348         int ret = 0;
1349         struct ufs_pa_layer_attr new_pwr_info;
1350
1351         if (scale_up) {
1352                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
1353                        sizeof(struct ufs_pa_layer_attr));
1354         } else {
1355                 memcpy(&new_pwr_info, &hba->pwr_info,
1356                        sizeof(struct ufs_pa_layer_attr));
1357
1358                 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1359                     hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1360                         /* save the current power mode */
1361                         memcpy(&hba->clk_scaling.saved_pwr_info,
1362                                 &hba->pwr_info,
1363                                 sizeof(struct ufs_pa_layer_attr));
1364
1365                         /* scale down gear */
1366                         new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1367                         new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1368                 }
1369         }
1370
1371         /* check whether the power mode needs to be changed */
1372         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1373         if (ret)
1374                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1375                         __func__, ret,
1376                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1377                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1378
1379         return ret;
1380 }
1381
1382 /*
1383  * Wait until all pending SCSI commands and TMFs have finished or the timeout
1384  * has expired.
1385  *
1386  * Return: 0 upon success; -EBUSY upon timeout.
1387  */
1388 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
1389 {
1390         int ret = 0;
1391         /*
1392          * make sure that there are no outstanding requests when
1393          * clock scaling is in progress
1394          */
1395         ufshcd_scsi_block_requests(hba);
1396         mutex_lock(&hba->wb_mutex);
1397         down_write(&hba->clk_scaling_lock);
1398
1399         if (!hba->clk_scaling.is_allowed ||
1400             ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
1401                 ret = -EBUSY;
1402                 up_write(&hba->clk_scaling_lock);
1403                 mutex_unlock(&hba->wb_mutex);
1404                 ufshcd_scsi_unblock_requests(hba);
1405                 goto out;
1406         }
1407
1408         /* let's not get into low power until clock scaling is completed */
1409         ufshcd_hold(hba);
1410
1411 out:
1412         return ret;
1413 }
1414
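/*
 * ufshcd_clock_scaling_unprepare - undo ufshcd_clock_scaling_prepare():
 * release the clock scaling lock, toggle WriteBooster to match the new
 * scaling state (when that policy is enabled and scaling succeeded),
 * unblock SCSI requests and drop the clock hold.
 */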
1415 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
1416 {
1417         up_write(&hba->clk_scaling_lock);
1418
1419         /* Enable Write Booster if we have scaled up, else disable it */
1420         if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
1421                 ufshcd_wb_toggle(hba, scale_up);
1422
1423         mutex_unlock(&hba->wb_mutex);
1424
1425         ufshcd_scsi_unblock_requests(hba);
1426         ufshcd_release(hba);
1427 }
1428
1429 /**
1430  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1431  * @hba: per adapter instance
1432  * @freq: frequency to scale
1433  * @scale_up: True for scaling up and false for scaling down
1434  *
1435  * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
1436  * for any other errors.
1437  */
1438 static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
1439                                 bool scale_up)
1440 {
1441         int ret = 0;
1442
1443         ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
1444         if (ret)
1445                 return ret;
1446
1447         /* scale down the gear before scaling down clocks */
1448         if (!scale_up) {
1449                 ret = ufshcd_scale_gear(hba, false);
1450                 if (ret)
1451                         goto out_unprepare;
1452         }
1453
1454         ret = ufshcd_scale_clks(hba, freq, scale_up);
1455         if (ret) {
1456                 if (!scale_up)
1457                         ufshcd_scale_gear(hba, true);
1458                 goto out_unprepare;
1459         }
1460
1461         /* scale up the gear after scaling up clocks */
1462         if (scale_up) {
1463                 ret = ufshcd_scale_gear(hba, true);
1464                 if (ret) {
1465                         ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
1466                                           false);
1467                         goto out_unprepare;
1468                 }
1469         }
1470
1471 out_unprepare:
1472         ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
1473         return ret;
1474 }
1475
1476 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1477 {
1478         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1479                                            clk_scaling.suspend_work);
1480         unsigned long irq_flags;
1481
1482         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1483         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1484                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1485                 return;
1486         }
1487         hba->clk_scaling.is_suspended = true;
1488         hba->clk_scaling.window_start_t = 0;
1489         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1490
1491         devfreq_suspend_device(hba->devfreq);
1492 }
1493
1494 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1495 {
1496         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1497                                            clk_scaling.resume_work);
1498         unsigned long irq_flags;
1499
1500         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1501         if (!hba->clk_scaling.is_suspended) {
1502                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1503                 return;
1504         }
1505         hba->clk_scaling.is_suspended = false;
1506         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1507
1508         devfreq_resume_device(hba->devfreq);
1509 }
1510
1511 static int ufshcd_devfreq_target(struct device *dev,
1512                                 unsigned long *freq, u32 flags)
1513 {
1514         int ret = 0;
1515         struct ufs_hba *hba = dev_get_drvdata(dev);
1516         ktime_t start;
1517         bool scale_up = false, sched_clk_scaling_suspend_work = false;
1518         struct list_head *clk_list = &hba->clk_list_head;
1519         struct ufs_clk_info *clki;
1520         unsigned long irq_flags;
1521
1522         if (!ufshcd_is_clkscaling_supported(hba))
1523                 return -EINVAL;
1524
1525         if (hba->use_pm_opp) {
1526                 struct dev_pm_opp *opp;
1527
1528                 /* Get the recommended frequency from the OPP framework */
1529                 opp = devfreq_recommended_opp(dev, freq, flags);
1530                 if (IS_ERR(opp))
1531                         return PTR_ERR(opp);
1532
1533                 dev_pm_opp_put(opp);
1534         } else {
1535                 /* Override with the closest supported frequency */
1536                 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
1537                                         list);
1538                 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1539         }
1540
1541         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1542         if (ufshcd_eh_in_progress(hba)) {
1543                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1544                 return 0;
1545         }
1546
1547         /* Skip scaling the clocks while clock scaling is suspended */
1548         if (hba->clk_scaling.is_suspended) {
1549                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1550                 dev_warn(hba->dev, "clock scaling is suspended, skip");
1551                 return 0;
1552         }
1553
1554         if (!hba->clk_scaling.active_reqs)
1555                 sched_clk_scaling_suspend_work = true;
1556
1557         if (list_empty(clk_list)) {
1558                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1559                 goto out;
1560         }
1561
1562         /* Decide based on the target or rounded-off frequency and update */
1563         if (hba->use_pm_opp)
1564                 scale_up = *freq > hba->clk_scaling.target_freq;
1565         else
1566                 scale_up = *freq == clki->max_freq;
1567
1568         if (!hba->use_pm_opp && !scale_up)
1569                 *freq = clki->min_freq;
1570
1571         /* Update the frequency */
1572         if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
1573                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1574                 ret = 0;
1575                 goto out; /* no state change required */
1576         }
1577         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1578
1579         start = ktime_get();
1580         ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
1581         if (!ret)
1582                 hba->clk_scaling.target_freq = *freq;
1583
1584         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1585                 (scale_up ? "up" : "down"),
1586                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1587
1588 out:
1589         if (sched_clk_scaling_suspend_work && !scale_up)
1590                 queue_work(hba->clk_scaling.workq,
1591                            &hba->clk_scaling.suspend_work);
1592
1593         return ret;
1594 }
1595
1596 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1597                 struct devfreq_dev_status *stat)
1598 {
1599         struct ufs_hba *hba = dev_get_drvdata(dev);
1600         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1601         unsigned long flags;
1602         ktime_t curr_t;
1603
1604         if (!ufshcd_is_clkscaling_supported(hba))
1605                 return -EINVAL;
1606
1607         memset(stat, 0, sizeof(*stat));
1608
1609         spin_lock_irqsave(hba->host->host_lock, flags);
1610         curr_t = ktime_get();
1611         if (!scaling->window_start_t)
1612                 goto start_window;
1613
1614         /*
1615          * If the current frequency is 0, the ondemand governor assumes
1616          * that no initial frequency has been set and always requests the
1617          * maximum frequency.
1618          */
1619         if (hba->use_pm_opp) {
1620                 stat->current_frequency = hba->clk_scaling.target_freq;
1621         } else {
1622                 struct list_head *clk_list = &hba->clk_list_head;
1623                 struct ufs_clk_info *clki;
1624
1625                 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1626                 stat->current_frequency = clki->curr_freq;
1627         }
1628
1629         if (scaling->is_busy_started)
1630                 scaling->tot_busy_t += ktime_us_delta(curr_t,
1631                                 scaling->busy_start_t);
1632         stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1633         stat->busy_time = scaling->tot_busy_t;
1634 start_window:
1635         scaling->window_start_t = curr_t;
1636         scaling->tot_busy_t = 0;
1637
1638         if (scaling->active_reqs) {
1639                 scaling->busy_start_t = curr_t;
1640                 scaling->is_busy_started = true;
1641         } else {
1642                 scaling->busy_start_t = 0;
1643                 scaling->is_busy_started = false;
1644         }
1645         spin_unlock_irqrestore(hba->host->host_lock, flags);
1646         return 0;
1647 }
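
/*
 * Note on how these numbers are consumed (an informal sketch of the
 * simple_ondemand governor selected in ufshcd_devfreq_init(), not something
 * this driver controls): the load is derived roughly as
 * busy_time * 100 / total_time, e.g. busy_time = 900 us over a total_time of
 * 1000 us reads as a 90% load, and the governor requests the maximum
 * frequency when that load exceeds its upthreshold.
 */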
1648
1649 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1650 {
1651         struct list_head *clk_list = &hba->clk_list_head;
1652         struct ufs_clk_info *clki;
1653         struct devfreq *devfreq;
1654         int ret;
1655
1656         /* Skip devfreq if we don't have any clocks in the list */
1657         if (list_empty(clk_list))
1658                 return 0;
1659
1660         if (!hba->use_pm_opp) {
1661                 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1662                 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1663                 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1664         }
1665
1666         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1667                                          &hba->vps->ondemand_data);
1668         devfreq = devfreq_add_device(hba->dev,
1669                         &hba->vps->devfreq_profile,
1670                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1671                         &hba->vps->ondemand_data);
1672         if (IS_ERR(devfreq)) {
1673                 ret = PTR_ERR(devfreq);
1674                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1675
1676                 if (!hba->use_pm_opp) {
1677                         dev_pm_opp_remove(hba->dev, clki->min_freq);
1678                         dev_pm_opp_remove(hba->dev, clki->max_freq);
1679                 }
1680                 return ret;
1681         }
1682
1683         hba->devfreq = devfreq;
1684
1685         return 0;
1686 }
1687
1688 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1689 {
1690         struct list_head *clk_list = &hba->clk_list_head;
1691
1692         if (!hba->devfreq)
1693                 return;
1694
1695         devfreq_remove_device(hba->devfreq);
1696         hba->devfreq = NULL;
1697
1698         if (!hba->use_pm_opp) {
1699                 struct ufs_clk_info *clki;
1700
1701                 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1702                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1703                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1704         }
1705 }
1706
1707 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1708 {
1709         unsigned long flags;
1710         bool suspend = false;
1711
1712         cancel_work_sync(&hba->clk_scaling.suspend_work);
1713         cancel_work_sync(&hba->clk_scaling.resume_work);
1714
1715         spin_lock_irqsave(hba->host->host_lock, flags);
1716         if (!hba->clk_scaling.is_suspended) {
1717                 suspend = true;
1718                 hba->clk_scaling.is_suspended = true;
1719                 hba->clk_scaling.window_start_t = 0;
1720         }
1721         spin_unlock_irqrestore(hba->host->host_lock, flags);
1722
1723         if (suspend)
1724                 devfreq_suspend_device(hba->devfreq);
1725 }
1726
1727 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1728 {
1729         unsigned long flags;
1730         bool resume = false;
1731
1732         spin_lock_irqsave(hba->host->host_lock, flags);
1733         if (hba->clk_scaling.is_suspended) {
1734                 resume = true;
1735                 hba->clk_scaling.is_suspended = false;
1736         }
1737         spin_unlock_irqrestore(hba->host->host_lock, flags);
1738
1739         if (resume)
1740                 devfreq_resume_device(hba->devfreq);
1741 }
1742
1743 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1744                 struct device_attribute *attr, char *buf)
1745 {
1746         struct ufs_hba *hba = dev_get_drvdata(dev);
1747
1748         return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1749 }
1750
1751 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1752                 struct device_attribute *attr, const char *buf, size_t count)
1753 {
1754         struct ufs_hba *hba = dev_get_drvdata(dev);
1755         u32 value;
1756         int err = 0;
1757
1758         if (kstrtou32(buf, 0, &value))
1759                 return -EINVAL;
1760
1761         down(&hba->host_sem);
1762         if (!ufshcd_is_user_access_allowed(hba)) {
1763                 err = -EBUSY;
1764                 goto out;
1765         }
1766
1767         value = !!value;
1768         if (value == hba->clk_scaling.is_enabled)
1769                 goto out;
1770
1771         ufshcd_rpm_get_sync(hba);
1772         ufshcd_hold(hba);
1773
1774         hba->clk_scaling.is_enabled = value;
1775
1776         if (value) {
1777                 ufshcd_resume_clkscaling(hba);
1778         } else {
1779                 ufshcd_suspend_clkscaling(hba);
1780                 err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
1781                 if (err)
1782                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1783                                         __func__, err);
1784         }
1785
1786         ufshcd_release(hba);
1787         ufshcd_rpm_put_sync(hba);
1788 out:
1789         up(&hba->host_sem);
1790         return err ? err : count;
1791 }
1792
1793 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1794 {
1795         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1796         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1797         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1798         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1799         hba->clk_scaling.enable_attr.attr.mode = 0644;
1800         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1801                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1802 }
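
/*
 * Illustrative usage (paths are platform dependent and shown here only as an
 * example): the attribute created above appears as a "clkscale_enable" file
 * under the UFS host controller's sysfs device directory, so clock scaling
 * can be toggled from user space with something like
 *
 *	echo 0 > /sys/devices/.../<ufs host controller>/clkscale_enable
 */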
1803
1804 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1805 {
1806         if (hba->clk_scaling.enable_attr.attr.name)
1807                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1808 }
1809
1810 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1811 {
1812         char wq_name[sizeof("ufs_clkscaling_00")];
1813
1814         if (!ufshcd_is_clkscaling_supported(hba))
1815                 return;
1816
1817         if (!hba->clk_scaling.min_gear)
1818                 hba->clk_scaling.min_gear = UFS_HS_G1;
1819
1820         INIT_WORK(&hba->clk_scaling.suspend_work,
1821                   ufshcd_clk_scaling_suspend_work);
1822         INIT_WORK(&hba->clk_scaling.resume_work,
1823                   ufshcd_clk_scaling_resume_work);
1824
1825         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1826                  hba->host->host_no);
1827         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1828
1829         hba->clk_scaling.is_initialized = true;
1830 }
1831
1832 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1833 {
1834         if (!hba->clk_scaling.is_initialized)
1835                 return;
1836
1837         ufshcd_remove_clk_scaling_sysfs(hba);
1838         destroy_workqueue(hba->clk_scaling.workq);
1839         ufshcd_devfreq_remove(hba);
1840         hba->clk_scaling.is_initialized = false;
1841 }
1842
1843 static void ufshcd_ungate_work(struct work_struct *work)
1844 {
1845         int ret;
1846         unsigned long flags;
1847         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1848                         clk_gating.ungate_work);
1849
1850         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1851
1852         spin_lock_irqsave(hba->host->host_lock, flags);
1853         if (hba->clk_gating.state == CLKS_ON) {
1854                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1855                 return;
1856         }
1857
1858         spin_unlock_irqrestore(hba->host->host_lock, flags);
1859         ufshcd_hba_vreg_set_hpm(hba);
1860         ufshcd_setup_clocks(hba, true);
1861
1862         ufshcd_enable_irq(hba);
1863
1864         /* Exit from hibern8 */
1865         if (ufshcd_can_hibern8_during_gating(hba)) {
1866                 /* Prevent gating in this path */
1867                 hba->clk_gating.is_suspended = true;
1868                 if (ufshcd_is_link_hibern8(hba)) {
1869                         ret = ufshcd_uic_hibern8_exit(hba);
1870                         if (ret)
1871                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1872                                         __func__, ret);
1873                         else
1874                                 ufshcd_set_link_active(hba);
1875                 }
1876                 hba->clk_gating.is_suspended = false;
1877         }
1878 }
1879
1880 /**
1881  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1882  * Also, exit from hibern8 mode and set the link as active.
1883  * @hba: per adapter instance
1884  */
1885 void ufshcd_hold(struct ufs_hba *hba)
1886 {
1887         bool flush_result;
1888         unsigned long flags;
1889
1890         if (!ufshcd_is_clkgating_allowed(hba) ||
1891             !hba->clk_gating.is_initialized)
1892                 return;
1893         spin_lock_irqsave(hba->host->host_lock, flags);
1894         hba->clk_gating.active_reqs++;
1895
1896 start:
1897         switch (hba->clk_gating.state) {
1898         case CLKS_ON:
1899                 /*
1900                  * Wait for the ungate work to complete if in progress.
1901                  * Although the clocks may be on, the link could still
1902                  * be in hibern8 if hibern8 is allowed during clock
1903                  * gating.
1904                  * Make sure the link has also exited hibern8, in
1905                  * addition to the clocks being on.
1906                  */
1907                 if (ufshcd_can_hibern8_during_gating(hba) &&
1908                     ufshcd_is_link_hibern8(hba)) {
1909                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1910                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1911                         if (hba->clk_gating.is_suspended && !flush_result)
1912                                 return;
1913                         spin_lock_irqsave(hba->host->host_lock, flags);
1914                         goto start;
1915                 }
1916                 break;
1917         case REQ_CLKS_OFF:
1918                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1919                         hba->clk_gating.state = CLKS_ON;
1920                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1921                                                 hba->clk_gating.state);
1922                         break;
1923                 }
1924                 /*
1925                  * If we are here, it means gating work is either done or
1926                  * currently running. Hence, fall through to cancel gating
1927                  * work and to enable clocks.
1928                  */
1929                 fallthrough;
1930         case CLKS_OFF:
1931                 hba->clk_gating.state = REQ_CLKS_ON;
1932                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1933                                         hba->clk_gating.state);
1934                 queue_work(hba->clk_gating.clk_gating_workq,
1935                            &hba->clk_gating.ungate_work);
1936                 /*
1937                  * fall through to check whether we should wait for this
1938                  * work to be done.
1939                  */
1940                 fallthrough;
1941         case REQ_CLKS_ON:
1942                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1943                 flush_work(&hba->clk_gating.ungate_work);
1944                 /* Make sure state is CLKS_ON before returning */
1945                 spin_lock_irqsave(hba->host->host_lock, flags);
1946                 goto start;
1947         default:
1948                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1949                                 __func__, hba->clk_gating.state);
1950                 break;
1951         }
1952         spin_unlock_irqrestore(hba->host->host_lock, flags);
1953 }
1954 EXPORT_SYMBOL_GPL(ufshcd_hold);
1955
1956 static void ufshcd_gate_work(struct work_struct *work)
1957 {
1958         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1959                         clk_gating.gate_work.work);
1960         unsigned long flags;
1961         int ret;
1962
1963         spin_lock_irqsave(hba->host->host_lock, flags);
1964         /*
1965          * If this work is being cancelled, the gating state will already
1966          * have been set to REQ_CLKS_ON. In that case save time by
1967          * skipping the gating work and exit after changing the clock
1968          * state to CLKS_ON.
1969          */
1970         if (hba->clk_gating.is_suspended ||
1971                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1972                 hba->clk_gating.state = CLKS_ON;
1973                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1974                                         hba->clk_gating.state);
1975                 goto rel_lock;
1976         }
1977
1978         if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1979                 goto rel_lock;
1980
1981         spin_unlock_irqrestore(hba->host->host_lock, flags);
1982
1983         /* put the link into hibern8 mode before turning off clocks */
1984         if (ufshcd_can_hibern8_during_gating(hba)) {
1985                 ret = ufshcd_uic_hibern8_enter(hba);
1986                 if (ret) {
1987                         hba->clk_gating.state = CLKS_ON;
1988                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1989                                         __func__, ret);
1990                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1991                                                 hba->clk_gating.state);
1992                         goto out;
1993                 }
1994                 ufshcd_set_link_hibern8(hba);
1995         }
1996
1997         ufshcd_disable_irq(hba);
1998
1999         ufshcd_setup_clocks(hba, false);
2000
2001         /* Put the host controller in low power mode if possible */
2002         ufshcd_hba_vreg_set_lpm(hba);
2003         /*
2004          * If this work is being cancelled, the gating state will already
2005          * have been set to REQ_CLKS_ON. In that case leave the state as
2006          * REQ_CLKS_ON, which still implies that the clocks are off and a
2007          * request to turn them on is pending. This keeps the state
2008          * machine intact and ultimately avoids running the cancel work
2009          * multiple times when new requests arrive before the current
2010          * cancel work is done.
2011          */
2012         spin_lock_irqsave(hba->host->host_lock, flags);
2013         if (hba->clk_gating.state == REQ_CLKS_OFF) {
2014                 hba->clk_gating.state = CLKS_OFF;
2015                 trace_ufshcd_clk_gating(dev_name(hba->dev),
2016                                         hba->clk_gating.state);
2017         }
2018 rel_lock:
2019         spin_unlock_irqrestore(hba->host->host_lock, flags);
2020 out:
2021         return;
2022 }
2023
2024 /* host lock must be held before calling this variant */
2025 static void __ufshcd_release(struct ufs_hba *hba)
2026 {
2027         if (!ufshcd_is_clkgating_allowed(hba))
2028                 return;
2029
2030         hba->clk_gating.active_reqs--;
2031
2032         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
2033             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
2034             hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
2035             hba->active_uic_cmd || hba->uic_async_done ||
2036             hba->clk_gating.state == CLKS_OFF)
2037                 return;
2038
2039         hba->clk_gating.state = REQ_CLKS_OFF;
2040         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
2041         queue_delayed_work(hba->clk_gating.clk_gating_workq,
2042                            &hba->clk_gating.gate_work,
2043                            msecs_to_jiffies(hba->clk_gating.delay_ms));
2044 }
2045
2046 void ufshcd_release(struct ufs_hba *hba)
2047 {
2048         unsigned long flags;
2049
2050         spin_lock_irqsave(hba->host->host_lock, flags);
2051         __ufshcd_release(hba);
2052         spin_unlock_irqrestore(hba->host->host_lock, flags);
2053 }
2054 EXPORT_SYMBOL_GPL(ufshcd_release);
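
/*
 * Illustrative sketch of the hold/release pairing (the register read below
 * is only an example): callers bracket work that needs the clocks ungated
 * and the link out of hibern8 like this:
 *
 *	ufshcd_hold(hba);
 *	val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 *	ufshcd_release(hba);
 */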
2055
2056 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
2057                 struct device_attribute *attr, char *buf)
2058 {
2059         struct ufs_hba *hba = dev_get_drvdata(dev);
2060
2061         return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
2062 }
2063
2064 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
2065 {
2066         struct ufs_hba *hba = dev_get_drvdata(dev);
2067         unsigned long flags;
2068
2069         spin_lock_irqsave(hba->host->host_lock, flags);
2070         hba->clk_gating.delay_ms = value;
2071         spin_unlock_irqrestore(hba->host->host_lock, flags);
2072 }
2073 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
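
/*
 * Illustrative example: a host glue driver could tune the idle period before
 * clock gating via the exported helper, e.g.
 *
 *	ufshcd_clkgate_delay_set(hba->dev, 50);
 *
 * which would gate the clocks after 50 ms of idleness; the value is picked
 * here purely for illustration.
 */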
2074
2075 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
2076                 struct device_attribute *attr, const char *buf, size_t count)
2077 {
2078         unsigned long value;
2079
2080         if (kstrtoul(buf, 0, &value))
2081                 return -EINVAL;
2082
2083         ufshcd_clkgate_delay_set(dev, value);
2084         return count;
2085 }
2086
2087 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
2088                 struct device_attribute *attr, char *buf)
2089 {
2090         struct ufs_hba *hba = dev_get_drvdata(dev);
2091
2092         return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
2093 }
2094
2095 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
2096                 struct device_attribute *attr, const char *buf, size_t count)
2097 {
2098         struct ufs_hba *hba = dev_get_drvdata(dev);
2099         unsigned long flags;
2100         u32 value;
2101
2102         if (kstrtou32(buf, 0, &value))
2103                 return -EINVAL;
2104
2105         value = !!value;
2106
2107         spin_lock_irqsave(hba->host->host_lock, flags);
2108         if (value == hba->clk_gating.is_enabled)
2109                 goto out;
2110
2111         if (value)
2112                 __ufshcd_release(hba);
2113         else
2114                 hba->clk_gating.active_reqs++;
2115
2116         hba->clk_gating.is_enabled = value;
2117 out:
2118         spin_unlock_irqrestore(hba->host->host_lock, flags);
2119         return count;
2120 }
2121
2122 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
2123 {
2124         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
2125         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
2126         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
2127         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
2128         hba->clk_gating.delay_attr.attr.mode = 0644;
2129         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
2130                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
2131
2132         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
2133         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
2134         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
2135         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
2136         hba->clk_gating.enable_attr.attr.mode = 0644;
2137         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
2138                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
2139 }
2140
2141 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
2142 {
2143         if (hba->clk_gating.delay_attr.attr.name)
2144                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2145         if (hba->clk_gating.enable_attr.attr.name)
2146                 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
2147 }
2148
2149 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2150 {
2151         char wq_name[sizeof("ufs_clk_gating_00")];
2152
2153         if (!ufshcd_is_clkgating_allowed(hba))
2154                 return;
2155
2156         hba->clk_gating.state = CLKS_ON;
2157
2158         hba->clk_gating.delay_ms = 150;
2159         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2160         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2161
2162         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2163                  hba->host->host_no);
2164         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
2165                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
2166
2167         ufshcd_init_clk_gating_sysfs(hba);
2168
2169         hba->clk_gating.is_enabled = true;
2170         hba->clk_gating.is_initialized = true;
2171 }
2172
2173 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2174 {
2175         if (!hba->clk_gating.is_initialized)
2176                 return;
2177
2178         ufshcd_remove_clk_gating_sysfs(hba);
2179
2180         /* Ungate the clock if necessary. */
2181         ufshcd_hold(hba);
2182         hba->clk_gating.is_initialized = false;
2183         ufshcd_release(hba);
2184
2185         destroy_workqueue(hba->clk_gating.clk_gating_workq);
2186 }
2187
2188 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2189 {
2190         bool queue_resume_work = false;
2191         ktime_t curr_t = ktime_get();
2192         unsigned long flags;
2193
2194         if (!ufshcd_is_clkscaling_supported(hba))
2195                 return;
2196
2197         spin_lock_irqsave(hba->host->host_lock, flags);
2198         if (!hba->clk_scaling.active_reqs++)
2199                 queue_resume_work = true;
2200
2201         if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2202                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2203                 return;
2204         }
2205
2206         if (queue_resume_work)
2207                 queue_work(hba->clk_scaling.workq,
2208                            &hba->clk_scaling.resume_work);
2209
2210         if (!hba->clk_scaling.window_start_t) {
2211                 hba->clk_scaling.window_start_t = curr_t;
2212                 hba->clk_scaling.tot_busy_t = 0;
2213                 hba->clk_scaling.is_busy_started = false;
2214         }
2215
2216         if (!hba->clk_scaling.is_busy_started) {
2217                 hba->clk_scaling.busy_start_t = curr_t;
2218                 hba->clk_scaling.is_busy_started = true;
2219         }
2220         spin_unlock_irqrestore(hba->host->host_lock, flags);
2221 }
2222
2223 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2224 {
2225         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2226         unsigned long flags;
2227
2228         if (!ufshcd_is_clkscaling_supported(hba))
2229                 return;
2230
2231         spin_lock_irqsave(hba->host->host_lock, flags);
2232         hba->clk_scaling.active_reqs--;
2233         if (!scaling->active_reqs && scaling->is_busy_started) {
2234                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2235                                         scaling->busy_start_t));
2236                 scaling->busy_start_t = 0;
2237                 scaling->is_busy_started = false;
2238         }
2239         spin_unlock_irqrestore(hba->host->host_lock, flags);
2240 }
2241
2242 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2243 {
2244         if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2245                 return READ;
2246         else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2247                 return WRITE;
2248         else
2249                 return -EINVAL;
2250 }
2251
2252 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2253                                                 struct ufshcd_lrb *lrbp)
2254 {
2255         const struct ufs_hba_monitor *m = &hba->monitor;
2256
2257         return (m->enabled && lrbp && lrbp->cmd &&
2258                 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2259                 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2260 }
2261
2262 static void ufshcd_start_monitor(struct ufs_hba *hba,
2263                                  const struct ufshcd_lrb *lrbp)
2264 {
2265         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2266         unsigned long flags;
2267
2268         spin_lock_irqsave(hba->host->host_lock, flags);
2269         if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2270                 hba->monitor.busy_start_ts[dir] = ktime_get();
2271         spin_unlock_irqrestore(hba->host->host_lock, flags);
2272 }
2273
2274 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2275 {
2276         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2277         unsigned long flags;
2278
2279         spin_lock_irqsave(hba->host->host_lock, flags);
2280         if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2281                 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2282                 struct ufs_hba_monitor *m = &hba->monitor;
2283                 ktime_t now, inc, lat;
2284
2285                 now = lrbp->compl_time_stamp;
2286                 inc = ktime_sub(now, m->busy_start_ts[dir]);
2287                 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2288                 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2289
2290                 /* Update latencies */
2291                 m->nr_req[dir]++;
2292                 lat = ktime_sub(now, lrbp->issue_time_stamp);
2293                 m->lat_sum[dir] += lat;
2294                 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2295                         m->lat_max[dir] = lat;
2296                 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2297                         m->lat_min[dir] = lat;
2298
2299                 m->nr_queued[dir]--;
2300                 /* Advance the monitor's busy-start timestamp */
2301                 m->busy_start_ts[dir] = now;
2302         }
2303         spin_unlock_irqrestore(hba->host->host_lock, flags);
2304 }
2305
2306 /**
2307  * ufshcd_send_command - Send SCSI or device management commands
2308  * @hba: per adapter instance
2309  * @task_tag: Task tag of the command
2310  * @hwq: pointer to hardware queue instance
2311  */
2312 static inline
2313 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2314                          struct ufs_hw_queue *hwq)
2315 {
2316         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2317         unsigned long flags;
2318
2319         lrbp->issue_time_stamp = ktime_get();
2320         lrbp->issue_time_stamp_local_clock = local_clock();
2321         lrbp->compl_time_stamp = ktime_set(0, 0);
2322         lrbp->compl_time_stamp_local_clock = 0;
2323         ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2324         if (lrbp->cmd)
2325                 ufshcd_clk_scaling_start_busy(hba);
2326         if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2327                 ufshcd_start_monitor(hba, lrbp);
2328
2329         if (is_mcq_enabled(hba)) {
2330                 int utrd_size = sizeof(struct utp_transfer_req_desc);
2331                 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2332                 struct utp_transfer_req_desc *dest;
2333
2334                 spin_lock(&hwq->sq_lock);
2335                 dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2336                 memcpy(dest, src, utrd_size);
2337                 ufshcd_inc_sq_tail(hwq);
2338                 spin_unlock(&hwq->sq_lock);
2339         } else {
2340                 spin_lock_irqsave(&hba->outstanding_lock, flags);
2341                 if (hba->vops && hba->vops->setup_xfer_req)
2342                         hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2343                                                   !!lrbp->cmd);
2344                 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2345                 ufshcd_writel(hba, 1 << lrbp->task_tag,
2346                               REG_UTP_TRANSFER_REQ_DOOR_BELL);
2347                 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2348         }
2349 }
2350
2351 /**
2352  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2353  * @lrbp: pointer to local reference block
2354  */
2355 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2356 {
2357         u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2358         u16 resp_len;
2359         int len;
2360
2361         resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
2362         if (sense_buffer && resp_len) {
2363                 int len_to_copy;
2364
2365                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2366                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2367
2368                 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2369                        len_to_copy);
2370         }
2371 }
2372
2373 /**
2374  * ufshcd_copy_query_response() - Copy the Query Response and the data
2375  * descriptor
2376  * @hba: per adapter instance
2377  * @lrbp: pointer to local reference block
2378  *
2379  * Return: 0 upon success; < 0 upon failure.
2380  */
2381 static
2382 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2383 {
2384         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2385
2386         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2387
2388         /* Get the descriptor */
2389         if (hba->dev_cmd.query.descriptor &&
2390             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2391                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2392                                 GENERAL_UPIU_REQUEST_SIZE;
2393                 u16 resp_len;
2394                 u16 buf_len;
2395
2396                 /* data segment length */
2397                 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
2398                                        .data_segment_length);
2399                 buf_len = be16_to_cpu(
2400                                 hba->dev_cmd.query.request.upiu_req.length);
2401                 if (likely(buf_len >= resp_len)) {
2402                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2403                 } else {
2404                         dev_warn(hba->dev,
2405                                  "%s: rsp size %d is bigger than buffer size %d",
2406                                  __func__, resp_len, buf_len);
2407                         return -EINVAL;
2408                 }
2409         }
2410
2411         return 0;
2412 }
2413
2414 /**
2415  * ufshcd_hba_capabilities - Read controller capabilities
2416  * @hba: per adapter instance
2417  *
2418  * Return: 0 on success, negative on error.
2419  */
2420 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2421 {
2422         int err;
2423
2424         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2425         if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
2426                 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
2427
2428         /* nutrs and nutmrs are 0-based values */
2429         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2430         hba->nutmrs =
2431         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2432         hba->reserved_slot = hba->nutrs - 1;
2433
2434         /* Read crypto capabilities */
2435         err = ufshcd_hba_init_crypto_capabilities(hba);
2436         if (err) {
2437                 dev_err(hba->dev, "crypto setup failed\n");
2438                 return err;
2439         }
2440
2441         hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2442         if (!hba->mcq_sup)
2443                 return 0;
2444
2445         hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2446         hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2447                                      hba->mcq_capabilities);
2448
2449         return 0;
2450 }
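
/*
 * Worked example (register values picked only for illustration): a
 * controller that reports 31 in the transfer request slots field and 7 in
 * the task management request slots field ends up with nutrs = 32 and
 * nutmrs = 8, since both fields are 0-based as noted above.
 */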
2451
2452 /**
2453  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2454  *                            to accept UIC commands
2455  * @hba: per adapter instance
2456  *
2457  * Return: true on success, else false.
2458  */
2459 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2460 {
2461         u32 val;
2462         int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2463                                     500, UIC_CMD_TIMEOUT * 1000, false, hba,
2464                                     REG_CONTROLLER_STATUS);
2465         return ret == 0;
2466 }
2467
2468 /**
2469  * ufshcd_get_upmcrs - Get the power mode change request status
2470  * @hba: Pointer to adapter instance
2471  *
2472  * This function gets the UPMCRS field of the HCS register.
2473  *
2474  * Return: value of UPMCRS field.
2475  */
2476 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2477 {
2478         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2479 }
2480
2481 /**
2482  * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
2483  * @hba: per adapter instance
2484  * @uic_cmd: UIC command
2485  */
2486 static inline void
2487 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2488 {
2489         lockdep_assert_held(&hba->uic_cmd_mutex);
2490
2491         WARN_ON(hba->active_uic_cmd);
2492
2493         hba->active_uic_cmd = uic_cmd;
2494
2495         /* Write Args */
2496         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2497         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2498         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2499
2500         ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2501
2502         /* Write UIC Cmd */
2503         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2504                       REG_UIC_COMMAND);
2505 }
2506
2507 /**
2508  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2509  * @hba: per adapter instance
2510  * @uic_cmd: UIC command
2511  *
2512  * Return: 0 only if successful.
2513  */
2514 static int
2515 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2516 {
2517         int ret;
2518         unsigned long flags;
2519
2520         lockdep_assert_held(&hba->uic_cmd_mutex);
2521
2522         if (wait_for_completion_timeout(&uic_cmd->done,
2523                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2524                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2525         } else {
2526                 ret = -ETIMEDOUT;
2527                 dev_err(hba->dev,
2528                         "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2529                         uic_cmd->command, uic_cmd->argument3);
2530
2531                 if (!uic_cmd->cmd_active) {
2532                         dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2533                                 __func__);
2534                         ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2535                 }
2536         }
2537
2538         spin_lock_irqsave(hba->host->host_lock, flags);
2539         hba->active_uic_cmd = NULL;
2540         spin_unlock_irqrestore(hba->host->host_lock, flags);
2541
2542         return ret;
2543 }
2544
2545 /**
2546  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2547  * @hba: per adapter instance
2548  * @uic_cmd: UIC command
2549  * @completion: initialize the completion only if this is set to true
2550  *
2551  * Return: 0 only if successful.
2552  */
2553 static int
2554 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2555                       bool completion)
2556 {
2557         lockdep_assert_held(&hba->uic_cmd_mutex);
2558
2559         if (!ufshcd_ready_for_uic_cmd(hba)) {
2560                 dev_err(hba->dev,
2561                         "Controller not ready to accept UIC commands\n");
2562                 return -EIO;
2563         }
2564
2565         if (completion)
2566                 init_completion(&uic_cmd->done);
2567
2568         uic_cmd->cmd_active = 1;
2569         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2570
2571         return 0;
2572 }
2573
2574 /**
2575  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2576  * @hba: per adapter instance
2577  * @uic_cmd: UIC command
2578  *
2579  * Return: 0 only if successful.
2580  */
2581 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2582 {
2583         int ret;
2584
2585         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2586                 return 0;
2587
2588         ufshcd_hold(hba);
2589         mutex_lock(&hba->uic_cmd_mutex);
2590         ufshcd_add_delay_before_dme_cmd(hba);
2591
2592         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2593         if (!ret)
2594                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2595
2596         mutex_unlock(&hba->uic_cmd_mutex);
2597
2598         ufshcd_release(hba);
2599         return ret;
2600 }
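
/*
 * Illustrative sketch of a caller (the attribute is chosen only as an
 * example; UIC_CMD_DME_GET, UIC_ARG_MIB() and PA_AVAILTXDATALANES are
 * assumed to come from ufshci.h/unipro.h):
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(PA_AVAILTXDATALANES),
 *	};
 *	int err = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *
 * On success the read attribute value is typically retrieved from
 * uic_cmd.argument3 afterwards.
 */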
2601
2602 /**
2603  * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2604  * @hba:        per-adapter instance
2605  * @lrbp:       pointer to local reference block
2606  * @sg_entries: The number of sg list entries actually used
2607  * @sg_list:    Pointer to SG list
2608  */
2609 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2610                                struct scatterlist *sg_list)
2611 {
2612         struct ufshcd_sg_entry *prd;
2613         struct scatterlist *sg;
2614         int i;
2615
2616         if (sg_entries) {
2617
2618                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2619                         lrbp->utr_descriptor_ptr->prd_table_length =
2620                                 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2621                 else
2622                         lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2623
2624                 prd = lrbp->ucd_prdt_ptr;
2625
2626                 for_each_sg(sg_list, sg, sg_entries, i) {
2627                         const unsigned int len = sg_dma_len(sg);
2628
2629                         /*
2630                          * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2631                          * based value that indicates the length, in bytes, of
2632                          * the data block. A maximum of length of 256KB may
2633                          * exist for any entry. Bits 1:0 of this field shall be
2634                          * 11b to indicate Dword granularity. A value of '3'
2635                          * indicates 4 bytes, '7' indicates 8 bytes, etc."
2636                          */
2637                         WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2638                         prd->size = cpu_to_le32(len - 1);
2639                         prd->addr = cpu_to_le64(sg->dma_address);
2640                         prd->reserved = 0;
2641                         prd = (void *)prd + ufshcd_sg_entry_size(hba);
2642                 }
2643         } else {
2644                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2645         }
2646 }
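
/*
 * Worked example for the Data Byte Count encoding above: a 4 KiB scatterlist
 * segment (len == 0x1000) is stored as prd->size == 0xFFF, i.e. length - 1,
 * matching the 0-based DBC definition quoted in the comment above.
 */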
2647
2648 /**
2649  * ufshcd_map_sg - Map scatter-gather list to prdt
2650  * @hba: per adapter instance
2651  * @lrbp: pointer to local reference block
2652  *
2653  * Return: 0 in case of success, non-zero value in case of failure.
2654  */
2655 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2656 {
2657         struct scsi_cmnd *cmd = lrbp->cmd;
2658         int sg_segments = scsi_dma_map(cmd);
2659
2660         if (sg_segments < 0)
2661                 return sg_segments;
2662
2663         ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2664
2665         return 0;
2666 }
2667
2668 /**
2669  * ufshcd_enable_intr - enable interrupts
2670  * @hba: per adapter instance
2671  * @intrs: interrupt bits
2672  */
2673 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2674 {
2675         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2676
2677         if (hba->ufs_version == ufshci_version(1, 0)) {
2678                 u32 rw;
2679                 rw = set & INTERRUPT_MASK_RW_VER_10;
2680                 set = rw | ((set ^ intrs) & intrs);
2681         } else {
2682                 set |= intrs;
2683         }
2684
2685         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2686 }
2687
2688 /**
2689  * ufshcd_disable_intr - disable interrupts
2690  * @hba: per adapter instance
2691  * @intrs: interrupt bits
2692  */
2693 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2694 {
2695         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2696
2697         if (hba->ufs_version == ufshci_version(1, 0)) {
2698                 u32 rw;
2699                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2700                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2701                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2702
2703         } else {
2704                 set &= ~intrs;
2705         }
2706
2707         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2708 }
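
/*
 * Illustrative example (the interrupt bit is chosen only for illustration):
 * enabling transfer request completion interrupts would look like
 *
 *	ufshcd_enable_intr(hba, UTP_TRANSFER_REQ_COMPL);
 *
 * and a matching ufshcd_disable_intr() call masks them again; the
 * read-modify-write of REG_INTERRUPT_ENABLE preserves the other enabled
 * interrupt sources.
 */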
2709
2710 /**
2711  * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
2712  * header according to the request
2713  * @lrbp: pointer to local reference block
2714  * @upiu_flags: flags required in the header
2715  * @cmd_dir: requests data direction
2716  * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2717  */
2718 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2719                                         enum dma_data_direction cmd_dir, int ehs_length)
2720 {
2721         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2722         struct request_desc_header *h = &req_desc->header;
2723         enum utp_data_direction data_direction;
2724
2725         *h = (typeof(*h)){ };
2726
2727         if (cmd_dir == DMA_FROM_DEVICE) {
2728                 data_direction = UTP_DEVICE_TO_HOST;
2729                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2730         } else if (cmd_dir == DMA_TO_DEVICE) {
2731                 data_direction = UTP_HOST_TO_DEVICE;
2732                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2733         } else {
2734                 data_direction = UTP_NO_DATA_TRANSFER;
2735                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2736         }
2737
2738         h->command_type = lrbp->command_type;
2739         h->data_direction = data_direction;
2740         h->ehs_length = ehs_length;
2741
2742         if (lrbp->intr_cmd)
2743                 h->interrupt = 1;
2744
2745         /* Prepare crypto related dwords */
2746         ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
2747
2748         /*
2749          * Assign an invalid value to the command status. The
2750          * controller updates the OCS with the actual command status
2751          * on command completion.
2752          */
2753         h->ocs = OCS_INVALID_COMMAND_STATUS;
2754
2755         req_desc->prd_table_length = 0;
2756 }
2757
2758 /**
2759  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2760  * for SCSI commands
2761  * @lrbp: local reference block pointer
2762  * @upiu_flags: flags
2763  */
2764 static
2765 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2766 {
2767         struct scsi_cmnd *cmd = lrbp->cmd;
2768         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2769         unsigned short cdb_len;
2770
2771         ucd_req_ptr->header = (struct utp_upiu_header){
2772                 .transaction_code = UPIU_TRANSACTION_COMMAND,
2773                 .flags = upiu_flags,
2774                 .lun = lrbp->lun,
2775                 .task_tag = lrbp->task_tag,
2776                 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
2777         };
2778
2779         WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
2780
2781         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2782
2783         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2784         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2785         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2786
2787         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2788 }
2789
2790 /**
2791  * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for a query request
2792  * @hba: UFS hba
2793  * @lrbp: local reference block pointer
2794  * @upiu_flags: flags
2795  */
2796 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2797                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2798 {
2799         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2800         struct ufs_query *query = &hba->dev_cmd.query;
2801         u16 len = be16_to_cpu(query->request.upiu_req.length);
2802
2803         /* Query request header */
2804         ucd_req_ptr->header = (struct utp_upiu_header){
2805                 .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
2806                 .flags = upiu_flags,
2807                 .lun = lrbp->lun,
2808                 .task_tag = lrbp->task_tag,
2809                 .query_function = query->request.query_func,
2810                 /* Data segment length is only needed for WRITE_DESC */
2811                 .data_segment_length =
2812                         query->request.upiu_req.opcode ==
2813                                         UPIU_QUERY_OPCODE_WRITE_DESC ?
2814                                 cpu_to_be16(len) :
2815                                 0,
2816         };
2817
2818         /* Copy the Query Request buffer as is */
2819         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2820                         QUERY_OSF_SIZE);
2821
2822         /* Copy the Descriptor */
2823         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2824                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2825
2826         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2827 }
2828
2829 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2830 {
2831         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2832
2833         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2834
2835         ucd_req_ptr->header = (struct utp_upiu_header){
2836                 .transaction_code = UPIU_TRANSACTION_NOP_OUT,
2837                 .task_tag = lrbp->task_tag,
2838         };
2839
2840         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2841 }
2842
2843 /**
2844  * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2845  *                           for Device Management Purposes
2846  * @hba: per adapter instance
2847  * @lrbp: pointer to local reference block
2848  *
2849  * Return: 0 upon success; < 0 upon failure.
2850  */
2851 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2852                                       struct ufshcd_lrb *lrbp)
2853 {
2854         u8 upiu_flags;
2855         int ret = 0;
2856
2857         if (hba->ufs_version <= ufshci_version(1, 1))
2858                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2859         else
2860                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2861
2862         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
2863         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2864                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2865         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2866                 ufshcd_prepare_utp_nop_upiu(lrbp);
2867         else
2868                 ret = -EINVAL;
2869
2870         return ret;
2871 }
2872
2873 /**
2874  * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2875  *                         for SCSI Purposes
2876  * @hba: per adapter instance
2877  * @lrbp: pointer to local reference block
2878  */
2879 static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2880 {
2881         struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
2882         unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
2883         u8 upiu_flags;
2884
2885         if (hba->ufs_version <= ufshci_version(1, 1))
2886                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2887         else
2888                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2889
2890         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2891                                     lrbp->cmd->sc_data_direction, 0);
2892         if (ioprio_class == IOPRIO_CLASS_RT)
2893                 upiu_flags |= UPIU_CMD_FLAGS_CP;
2894         ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2895 }
2896
2897 /**
2898  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2899  * @upiu_wlun_id: UPIU W-LUN id
2900  *
2901  * Return: SCSI W-LUN id.
2902  */
2903 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2904 {
2905         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2906 }
2907
2908 static inline bool is_device_wlun(struct scsi_device *sdev)
2909 {
2910         return sdev->lun ==
2911                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2912 }
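
/*
 * Editor's note (illustrative, not in the upstream source): assuming the
 * current definitions in <ufs/ufs.h> and <scsi/scsi.h> (UFS_UPIU_WLUN_ID ==
 * BIT(7), UFS_UPIU_UFS_DEVICE_WLUN == 0xD0, SCSI_W_LUN_BASE == 0xc100), the
 * device W-LUN maps to SCSI LUN (0xD0 & ~0x80) | 0xc100 == 0xc150, which is
 * the value is_device_wlun() compares sdev->lun against.
 */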
2913
2914 /*
2915  * Associate the UFS controller queue with the default and poll HCTX types.
2916  * Initialize the mq_map[] arrays.
2917  */
2918 static void ufshcd_map_queues(struct Scsi_Host *shost)
2919 {
2920         struct ufs_hba *hba = shost_priv(shost);
2921         int i, queue_offset = 0;
2922
2923         if (!is_mcq_supported(hba)) {
2924                 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2925                 hba->nr_queues[HCTX_TYPE_READ] = 0;
2926                 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2927                 hba->nr_hw_queues = 1;
2928         }
2929
2930         for (i = 0; i < shost->nr_maps; i++) {
2931                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2932
2933                 map->nr_queues = hba->nr_queues[i];
2934                 if (!map->nr_queues)
2935                         continue;
2936                 map->queue_offset = queue_offset;
2937                 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2938                         map->queue_offset = 0;
2939
2940                 blk_mq_map_queues(map);
2941                 queue_offset += map->nr_queues;
2942         }
2943 }
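
/*
 * Editor's note (illustrative, not in the upstream source): in legacy
 * single-doorbell mode the loop above ends up pointing both the DEFAULT and
 * the POLL map at the same single hardware queue, because queue_offset is
 * forced back to 0 for HCTX_TYPE_POLL. In MCQ mode each HCTX type instead
 * gets the contiguous range of hardware queues recorded in hba->nr_queues[].
 */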
2944
2945 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2946 {
2947         struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2948                 i * ufshcd_get_ucd_size(hba);
2949         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2950         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2951                 i * ufshcd_get_ucd_size(hba);
2952         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2953                                        response_upiu);
2954         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2955
2956         lrb->utr_descriptor_ptr = utrdlp + i;
2957         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2958                 i * sizeof(struct utp_transfer_req_desc);
2959         lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2960         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2961         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2962         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2963         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2964         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2965 }
2966
2967 /**
2968  * ufshcd_queuecommand - main entry point for SCSI requests
2969  * @host: SCSI host pointer
2970  * @cmd: command from SCSI Midlayer
2971  *
2972  * Return: 0 for success, non-zero in case of failure.
2973  */
2974 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2975 {
2976         struct ufs_hba *hba = shost_priv(host);
2977         int tag = scsi_cmd_to_rq(cmd)->tag;
2978         struct ufshcd_lrb *lrbp;
2979         int err = 0;
2980         struct ufs_hw_queue *hwq = NULL;
2981
2982         switch (hba->ufshcd_state) {
2983         case UFSHCD_STATE_OPERATIONAL:
2984                 break;
2985         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2986                 /*
2987                  * SCSI error handler can call ->queuecommand() while UFS error
2988                  * handler is in progress. Error interrupts could change the
2989                  * state from UFSHCD_STATE_RESET to
2990                  * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests from
2991                  * being issued in that case.
2992                  */
2993                 if (ufshcd_eh_in_progress(hba)) {
2994                         err = SCSI_MLQUEUE_HOST_BUSY;
2995                         goto out;
2996                 }
2997                 break;
2998         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2999                 /*
3000                  * pm_runtime_get_sync() is used at the error handling preparation
3001                  * stage. If a SCSI cmd, e.g. the SSU cmd, is sent from the hba's
3002                  * PM ops, it can never finish if we let the SCSI layer keep
3003                  * retrying it, which keeps the err handler stuck forever. Neither
3004                  * can we let the SCSI cmd pass through, because UFS is in a bad
3005                  * state and the SCSI cmd may eventually time out, which would
3006                  * block the err handler for too long. So just fail the SCSI cmd
3007                  * sent from PM ops; the err handler can recover the PM error anyway.
3008                  */
3009                 if (hba->pm_op_in_progress) {
3010                         hba->force_reset = true;
3011                         set_host_byte(cmd, DID_BAD_TARGET);
3012                         scsi_done(cmd);
3013                         goto out;
3014                 }
3015                 fallthrough;
3016         case UFSHCD_STATE_RESET:
3017                 err = SCSI_MLQUEUE_HOST_BUSY;
3018                 goto out;
3019         case UFSHCD_STATE_ERROR:
3020                 set_host_byte(cmd, DID_ERROR);
3021                 scsi_done(cmd);
3022                 goto out;
3023         }
3024
3025         hba->req_abort_count = 0;
3026
3027         ufshcd_hold(hba);
3028
3029         lrbp = &hba->lrb[tag];
3030         lrbp->cmd = cmd;
3031         lrbp->task_tag = tag;
3032         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
3033         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
3034
3035         ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
3036
3037         lrbp->req_abort_skip = false;
3038
3039         ufshcd_comp_scsi_upiu(hba, lrbp);
3040
3041         err = ufshcd_map_sg(hba, lrbp);
3042         if (err) {
3043                 ufshcd_release(hba);
3044                 goto out;
3045         }
3046
3047         if (is_mcq_enabled(hba))
3048                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
3049
3050         ufshcd_send_command(hba, tag, hwq);
3051
3052 out:
3053         if (ufs_trigger_eh(hba)) {
3054                 unsigned long flags;
3055
3056                 spin_lock_irqsave(hba->host->host_lock, flags);
3057                 ufshcd_schedule_eh_work(hba);
3058                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3059         }
3060
3061         return err;
3062 }
3063
3064 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3065                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3066 {
3067         lrbp->cmd = NULL;
3068         lrbp->task_tag = tag;
3069         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
3070         lrbp->intr_cmd = true; /* No interrupt aggregation */
3071         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
3072         hba->dev_cmd.type = cmd_type;
3073
3074         return ufshcd_compose_devman_upiu(hba, lrbp);
3075 }
3076
3077 /*
3078  * Check with the block layer if the command is inflight
3079  * @cmd: command to check.
3080  *
3081  * Return: true if command is inflight; false if not.
3082  */
3083 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
3084 {
3085         struct request *rq;
3086
3087         if (!cmd)
3088                 return false;
3089
3090         rq = scsi_cmd_to_rq(cmd);
3091         if (!blk_mq_request_started(rq))
3092                 return false;
3093
3094         return true;
3095 }
3096
3097 /*
3098  * Clear the pending command in the controller and wait until
3099  * the controller confirms that the command has been cleared.
3100  * @hba: per adapter instance
3101  * @task_tag: The tag number of the command to be cleared.
3102  */
3103 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
3104 {
3105         u32 mask;
3106         unsigned long flags;
3107         int err;
3108
3109         if (is_mcq_enabled(hba)) {
3110                 /*
3111                  * MCQ mode. Clean up the MCQ resources similar to
3112                  * what the ufshcd_utrl_clear() does for SDB mode.
3113                  */
3114                 err = ufshcd_mcq_sq_cleanup(hba, task_tag);
3115                 if (err) {
3116                         dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
3117                                 __func__, task_tag, err);
3118                         return err;
3119                 }
3120                 return 0;
3121         }
3122
3123         mask = 1U << task_tag;
3124
3125         /* clear outstanding transaction before retry */
3126         spin_lock_irqsave(hba->host->host_lock, flags);
3127         ufshcd_utrl_clear(hba, mask);
3128         spin_unlock_irqrestore(hba->host->host_lock, flags);
3129
3130         /*
3131          * wait for h/w to clear corresponding bit in door-bell.
3132          * max. wait is 1 sec.
3133          */
3134         return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
3135                                         mask, ~mask, 1000, 1000);
3136 }
3137
3138 /**
3139  * ufshcd_dev_cmd_completion() - handles device management command responses
3140  * @hba: per adapter instance
3141  * @lrbp: pointer to local reference block
3142  *
3143  * Return: 0 upon success; < 0 upon failure.
3144  */
3145 static int
3146 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3147 {
3148         enum upiu_response_transaction resp;
3149         int err = 0;
3150
3151         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3152         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3153
3154         switch (resp) {
3155         case UPIU_TRANSACTION_NOP_IN:
3156                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3157                         err = -EINVAL;
3158                         dev_err(hba->dev, "%s: unexpected response %x\n",
3159                                         __func__, resp);
3160                 }
3161                 break;
3162         case UPIU_TRANSACTION_QUERY_RSP: {
3163                 u8 response = lrbp->ucd_rsp_ptr->header.response;
3164
3165                 if (response == 0)
3166                         err = ufshcd_copy_query_response(hba, lrbp);
3167                 break;
3168         }
3169         case UPIU_TRANSACTION_REJECT_UPIU:
3170                 /* TODO: handle Reject UPIU Response */
3171                 err = -EPERM;
3172                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3173                                 __func__);
3174                 break;
3175         case UPIU_TRANSACTION_RESPONSE:
3176                 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3177                         err = -EINVAL;
3178                         dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3179                 }
3180                 break;
3181         default:
3182                 err = -EINVAL;
3183                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3184                                 __func__, resp);
3185                 break;
3186         }
3187
3188         return err;
3189 }
3190
3191 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3192                 struct ufshcd_lrb *lrbp, int max_timeout)
3193 {
3194         unsigned long time_left = msecs_to_jiffies(max_timeout);
3195         unsigned long flags;
3196         bool pending;
3197         int err;
3198
3199 retry:
3200         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3201                                                 time_left);
3202
3203         if (likely(time_left)) {
3204                 /*
3205                  * The completion handler called complete() and the caller of
3206                  * this function still owns the @lrbp tag so the code below does
3207                  * not trigger any race conditions.
3208                  */
3209                 hba->dev_cmd.complete = NULL;
3210                 err = ufshcd_get_tr_ocs(lrbp, NULL);
3211                 if (!err)
3212                         err = ufshcd_dev_cmd_completion(hba, lrbp);
3213         } else {
3214                 err = -ETIMEDOUT;
3215                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3216                         __func__, lrbp->task_tag);
3217
3218                 /* MCQ mode */
3219                 if (is_mcq_enabled(hba)) {
3220                         err = ufshcd_clear_cmd(hba, lrbp->task_tag);
3221                         hba->dev_cmd.complete = NULL;
3222                         return err;
3223                 }
3224
3225                 /* SDB mode */
3226                 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
3227                         /* successfully cleared the command, retry if needed */
3228                         err = -EAGAIN;
3229                         /*
3230                          * Since clearing the command succeeded we also need to
3231                          * clear the task tag bit from the outstanding_reqs
3232                          * variable.
3233                          */
3234                         spin_lock_irqsave(&hba->outstanding_lock, flags);
3235                         pending = test_bit(lrbp->task_tag,
3236                                            &hba->outstanding_reqs);
3237                         if (pending) {
3238                                 hba->dev_cmd.complete = NULL;
3239                                 __clear_bit(lrbp->task_tag,
3240                                             &hba->outstanding_reqs);
3241                         }
3242                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3243
3244                         if (!pending) {
3245                                 /*
3246                                  * The completion handler ran while we tried to
3247                                  * clear the command.
3248                                  */
3249                                 time_left = 1;
3250                                 goto retry;
3251                         }
3252                 } else {
3253                         dev_err(hba->dev, "%s: failed to clear tag %d\n",
3254                                 __func__, lrbp->task_tag);
3255
3256                         spin_lock_irqsave(&hba->outstanding_lock, flags);
3257                         pending = test_bit(lrbp->task_tag,
3258                                            &hba->outstanding_reqs);
3259                         if (pending)
3260                                 hba->dev_cmd.complete = NULL;
3261                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3262
3263                         if (!pending) {
3264                                 /*
3265                                  * The completion handler ran while we tried to
3266                                  * clear the command.
3267                                  */
3268                                 time_left = 1;
3269                                 goto retry;
3270                         }
3271                 }
3272         }
3273
3274         return err;
3275 }
3276
3277 /**
3278  * ufshcd_exec_dev_cmd - API for sending device management requests
3279  * @hba: UFS hba
3280  * @cmd_type: specifies the type (NOP, Query...)
3281  * @timeout: timeout in milliseconds
3282  *
3283  * Return: 0 upon success; < 0 upon failure.
3284  *
3285  * NOTE: Since there is only one available tag for device management commands,
3286  * the caller is expected to hold the hba->dev_cmd.lock mutex.
3287  */
3288 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3289                 enum dev_cmd_type cmd_type, int timeout)
3290 {
3291         DECLARE_COMPLETION_ONSTACK(wait);
3292         const u32 tag = hba->reserved_slot;
3293         struct ufshcd_lrb *lrbp;
3294         int err;
3295
3296         /* Protects use of hba->reserved_slot. */
3297         lockdep_assert_held(&hba->dev_cmd.lock);
3298
3299         down_read(&hba->clk_scaling_lock);
3300
3301         lrbp = &hba->lrb[tag];
3302         lrbp->cmd = NULL;
3303         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3304         if (unlikely(err))
3305                 goto out;
3306
3307         hba->dev_cmd.complete = &wait;
3308
3309         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3310
3311         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3312         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3313         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3314                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3315
3316 out:
3317         up_read(&hba->clk_scaling_lock);
3318         return err;
3319 }
3320
3321 /**
3322  * ufshcd_init_query() - init the query response and request parameters
3323  * @hba: per-adapter instance
3324  * @request: address of the request pointer to be initialized
3325  * @response: address of the response pointer to be initialized
3326  * @opcode: operation to perform
3327  * @idn: flag idn to access
3328  * @index: LU number to access
3329  * @selector: query/flag/descriptor further identification
3330  */
3331 static inline void ufshcd_init_query(struct ufs_hba *hba,
3332                 struct ufs_query_req **request, struct ufs_query_res **response,
3333                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3334 {
3335         *request = &hba->dev_cmd.query.request;
3336         *response = &hba->dev_cmd.query.response;
3337         memset(*request, 0, sizeof(struct ufs_query_req));
3338         memset(*response, 0, sizeof(struct ufs_query_res));
3339         (*request)->upiu_req.opcode = opcode;
3340         (*request)->upiu_req.idn = idn;
3341         (*request)->upiu_req.index = index;
3342         (*request)->upiu_req.selector = selector;
3343 }
3344
3345 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3346         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3347 {
3348         int ret;
3349         int retries;
3350
3351         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3352                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3353                 if (ret)
3354                         dev_dbg(hba->dev,
3355                                 "%s: failed with error %d, retries %d\n",
3356                                 __func__, ret, retries);
3357                 else
3358                         break;
3359         }
3360
3361         if (ret)
3362                 dev_err(hba->dev,
3363                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3364                         __func__, opcode, idn, ret, retries);
3365         return ret;
3366 }
3367
3368 /**
3369  * ufshcd_query_flag() - API function for sending flag query requests
3370  * @hba: per-adapter instance
3371  * @opcode: flag query to perform
3372  * @idn: flag idn to access
3373  * @index: flag index to access
3374  * @flag_res: the flag value after the query request completes
3375  *
3376  * Return: 0 for success, non-zero in case of failure.
3377  */
3378 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3379                         enum flag_idn idn, u8 index, bool *flag_res)
3380 {
3381         struct ufs_query_req *request = NULL;
3382         struct ufs_query_res *response = NULL;
3383         int err, selector = 0;
3384         int timeout = QUERY_REQ_TIMEOUT;
3385
3386         BUG_ON(!hba);
3387
3388         ufshcd_hold(hba);
3389         mutex_lock(&hba->dev_cmd.lock);
3390         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3391                         selector);
3392
3393         switch (opcode) {
3394         case UPIU_QUERY_OPCODE_SET_FLAG:
3395         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3396         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3397                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3398                 break;
3399         case UPIU_QUERY_OPCODE_READ_FLAG:
3400                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3401                 if (!flag_res) {
3402                         /* No dummy reads */
3403                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3404                                         __func__);
3405                         err = -EINVAL;
3406                         goto out_unlock;
3407                 }
3408                 break;
3409         default:
3410                 dev_err(hba->dev,
3411                         "%s: Expected query flag opcode but got = %d\n",
3412                         __func__, opcode);
3413                 err = -EINVAL;
3414                 goto out_unlock;
3415         }
3416
3417         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3418
3419         if (err) {
3420                 dev_err(hba->dev,
3421                         "%s: Sending flag query for idn %d failed, err = %d\n",
3422                         __func__, idn, err);
3423                 goto out_unlock;
3424         }
3425
3426         if (flag_res)
3427                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3428                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3429
3430 out_unlock:
3431         mutex_unlock(&hba->dev_cmd.lock);
3432         ufshcd_release(hba);
3433         return err;
3434 }
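
/*
 * Editor's sketch (illustrative only, not part of the upstream driver): a
 * hypothetical caller of ufshcd_query_flag_retry() that sets fDeviceInit and
 * reads it back once, assuming QUERY_FLAG_IDN_FDEVICEINIT keeps its current
 * definition in <ufs/ufs.h>. Real callers poll the flag until the device
 * clears it (see FDEVICEINIT_COMPL_TIMEOUT).
 */
static int __maybe_unused ufshcd_example_set_fdeviceinit(struct ufs_hba *hba)
{
	bool flag_res = true;
	int err;

	/* Flag writes carry no payload, so flag_res may be NULL here */
	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err)
		return err;

	/* A single read back; a real caller would poll with a timeout */
	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
	if (!err && flag_res)
		err = -EBUSY;	/* device has not finished initialization yet */

	return err;
}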
3435
3436 /**
3437  * ufshcd_query_attr - API function for sending attribute requests
3438  * @hba: per-adapter instance
3439  * @opcode: attribute opcode
3440  * @idn: attribute idn to access
3441  * @index: index field
3442  * @selector: selector field
3443  * @attr_val: the attribute value after the query request completes
3444  *
3445  * Return: 0 for success, non-zero in case of failure.
3446  */
3447 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3448                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3449 {
3450         struct ufs_query_req *request = NULL;
3451         struct ufs_query_res *response = NULL;
3452         int err;
3453
3454         BUG_ON(!hba);
3455
3456         if (!attr_val) {
3457                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3458                                 __func__, opcode);
3459                 return -EINVAL;
3460         }
3461
3462         ufshcd_hold(hba);
3463
3464         mutex_lock(&hba->dev_cmd.lock);
3465         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3466                         selector);
3467
3468         switch (opcode) {
3469         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3470                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3471                 request->upiu_req.value = cpu_to_be32(*attr_val);
3472                 break;
3473         case UPIU_QUERY_OPCODE_READ_ATTR:
3474                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3475                 break;
3476         default:
3477                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3478                                 __func__, opcode);
3479                 err = -EINVAL;
3480                 goto out_unlock;
3481         }
3482
3483         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3484
3485         if (err) {
3486                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3487                                 __func__, opcode, idn, index, err);
3488                 goto out_unlock;
3489         }
3490
3491         *attr_val = be32_to_cpu(response->upiu_res.value);
3492
3493 out_unlock:
3494         mutex_unlock(&hba->dev_cmd.lock);
3495         ufshcd_release(hba);
3496         return err;
3497 }
3498
3499 /**
3500  * ufshcd_query_attr_retry() - API function for sending query
3501  * attribute requests with retries
3502  * @hba: per-adapter instance
3503  * @opcode: attribute opcode
3504  * @idn: attribute idn to access
3505  * @index: index field
3506  * @selector: selector field
3507  * @attr_val: the attribute value after the query request
3508  * completes
3509  *
3510  * Return: 0 for success, non-zero in case of failure.
3511  */
3512 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3513         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3514         u32 *attr_val)
3515 {
3516         int ret = 0;
3517         u32 retries;
3518
3519         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3520                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3521                                                 selector, attr_val);
3522                 if (ret)
3523                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3524                                 __func__, ret, retries);
3525                 else
3526                         break;
3527         }
3528
3529         if (ret)
3530                 dev_err(hba->dev,
3531                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3532                         __func__, idn, ret, QUERY_REQ_RETRIES);
3533         return ret;
3534 }
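
/*
 * Editor's sketch (illustrative only, not part of the upstream driver): a
 * hypothetical caller reading a device-level attribute through
 * ufshcd_query_attr_retry(). QUERY_ATTR_IDN_ACTIVE_ICC_LVL is assumed to keep
 * its current definition in <ufs/ufs.h>; index and selector are 0 for
 * device-level attributes.
 */
static int __maybe_unused ufshcd_example_read_icc_level(struct ufs_hba *hba,
							u32 *icc_level)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				       icc_level);
}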
3535
3536 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3537                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3538                         u8 selector, u8 *desc_buf, int *buf_len)
3539 {
3540         struct ufs_query_req *request = NULL;
3541         struct ufs_query_res *response = NULL;
3542         int err;
3543
3544         BUG_ON(!hba);
3545
3546         if (!desc_buf) {
3547                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3548                                 __func__, opcode);
3549                 return -EINVAL;
3550         }
3551
3552         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3553                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3554                                 __func__, *buf_len);
3555                 return -EINVAL;
3556         }
3557
3558         ufshcd_hold(hba);
3559
3560         mutex_lock(&hba->dev_cmd.lock);
3561         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3562                         selector);
3563         hba->dev_cmd.query.descriptor = desc_buf;
3564         request->upiu_req.length = cpu_to_be16(*buf_len);
3565
3566         switch (opcode) {
3567         case UPIU_QUERY_OPCODE_WRITE_DESC:
3568                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3569                 break;
3570         case UPIU_QUERY_OPCODE_READ_DESC:
3571                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3572                 break;
3573         default:
3574                 dev_err(hba->dev,
3575                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3576                                 __func__, opcode);
3577                 err = -EINVAL;
3578                 goto out_unlock;
3579         }
3580
3581         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3582
3583         if (err) {
3584                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3585                                 __func__, opcode, idn, index, err);
3586                 goto out_unlock;
3587         }
3588
3589         *buf_len = be16_to_cpu(response->upiu_res.length);
3590
3591 out_unlock:
3592         hba->dev_cmd.query.descriptor = NULL;
3593         mutex_unlock(&hba->dev_cmd.lock);
3594         ufshcd_release(hba);
3595         return err;
3596 }
3597
3598 /**
3599  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3600  * @hba: per-adapter instance
3601  * @opcode: descriptor opcode
3602  * @idn: descriptor idn to access
3603  * @index: index field
3604  * @selector: selector field
3605  * @desc_buf: the buffer that contains the descriptor
3606  * @buf_len: length parameter passed to the device
3607  *
3608  * The buf_len parameter will contain, on return, the length parameter
3609  * received on the response.
3610  *
3611  * Return: 0 for success, non-zero in case of failure.
3612  */
3613 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3614                                   enum query_opcode opcode,
3615                                   enum desc_idn idn, u8 index,
3616                                   u8 selector,
3617                                   u8 *desc_buf, int *buf_len)
3618 {
3619         int err;
3620         int retries;
3621
3622         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3623                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3624                                                 selector, desc_buf, buf_len);
3625                 if (!err || err == -EINVAL)
3626                         break;
3627         }
3628
3629         return err;
3630 }
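
/*
 * Editor's sketch (illustrative only, not part of the upstream driver): a
 * hypothetical caller reading the full device descriptor with
 * ufshcd_query_descriptor_retry(). Note that buf_len is in/out: on return it
 * holds the length reported by the device.
 */
static int __maybe_unused ufshcd_example_read_device_desc(struct ufs_hba *hba,
							  u8 *desc_buf)
{
	int buf_len = QUERY_DESC_MAX_SIZE;

	/* desc_buf must be at least QUERY_DESC_MAX_SIZE bytes long */
	return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					     QUERY_DESC_IDN_DEVICE, 0, 0,
					     desc_buf, &buf_len);
}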
3631
3632 /**
3633  * ufshcd_read_desc_param - read the specified descriptor parameter
3634  * @hba: Pointer to adapter instance
3635  * @desc_id: descriptor idn value
3636  * @desc_index: descriptor index
3637  * @param_offset: offset of the parameter to read
3638  * @param_read_buf: pointer to buffer where parameter would be read
3639  * @param_size: sizeof(param_read_buf)
3640  *
3641  * Return: 0 in case of success, non-zero otherwise.
3642  */
3643 int ufshcd_read_desc_param(struct ufs_hba *hba,
3644                            enum desc_idn desc_id,
3645                            int desc_index,
3646                            u8 param_offset,
3647                            u8 *param_read_buf,
3648                            u8 param_size)
3649 {
3650         int ret;
3651         u8 *desc_buf;
3652         int buff_len = QUERY_DESC_MAX_SIZE;
3653         bool is_kmalloc = true;
3654
3655         /* Safety check */
3656         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3657                 return -EINVAL;
3658
3659         /* Check whether we need temp memory */
3660         if (param_offset != 0 || param_size < buff_len) {
3661                 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3662                 if (!desc_buf)
3663                         return -ENOMEM;
3664         } else {
3665                 desc_buf = param_read_buf;
3666                 is_kmalloc = false;
3667         }
3668
3669         /* Request for full descriptor */
3670         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3671                                             desc_id, desc_index, 0,
3672                                             desc_buf, &buff_len);
3673         if (ret) {
3674                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3675                         __func__, desc_id, desc_index, param_offset, ret);
3676                 goto out;
3677         }
3678
3679         /* Update descriptor length */
3680         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3681
3682         if (param_offset >= buff_len) {
3683                 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3684                         __func__, param_offset, desc_id, buff_len);
3685                 ret = -EINVAL;
3686                 goto out;
3687         }
3688
3689         /* Sanity check */
3690         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3691                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3692                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3693                 ret = -EINVAL;
3694                 goto out;
3695         }
3696
3697         if (is_kmalloc) {
3698                 /* Make sure we don't copy more data than available */
3699                 if (param_offset >= buff_len)
3700                         ret = -EINVAL;
3701                 else
3702                         memcpy(param_read_buf, &desc_buf[param_offset],
3703                                min_t(u32, param_size, buff_len - param_offset));
3704         }
3705 out:
3706         if (is_kmalloc)
3707                 kfree(desc_buf);
3708         return ret;
3709 }
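
/*
 * Editor's sketch (illustrative only, not part of the upstream driver): a
 * hypothetical caller reading a single byte of the device descriptor via
 * ufshcd_read_desc_param(). DEVICE_DESC_PARAM_NUM_LU (bNumberLU) is assumed
 * to keep its current definition in <ufs/ufs.h>.
 */
static int __maybe_unused ufshcd_example_read_num_lu(struct ufs_hba *hba,
						     u8 *num_lu)
{
	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				      DEVICE_DESC_PARAM_NUM_LU,
				      num_lu, sizeof(*num_lu));
}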
3710
3711 /**
3712  * struct uc_string_id - unicode string
3713  *
3714  * @len: size of this descriptor, including the header
3715  * @type: descriptor type
3716  * @uc: unicode string characters
3717  */
3718 struct uc_string_id {
3719         u8 len;
3720         u8 type;
3721         wchar_t uc[];
3722 } __packed;
3723
3724 /* replace non-printable or non-ASCII characters with spaces */
3725 static inline char ufshcd_remove_non_printable(u8 ch)
3726 {
3727         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3728 }
3729
3730 /**
3731  * ufshcd_read_string_desc - read string descriptor
3732  * @hba: pointer to adapter instance
3733  * @desc_index: descriptor index
3734  * @buf: pointer to buffer where the descriptor will be read;
3735  *       the caller should free the memory.
3736  * @ascii: if true, convert from unicode to a null-terminated
3737  *         ascii string.
3738  *
3739  * Return:
3740  * *      string size on success.
3741  * *      -ENOMEM: on allocation failure
3742  * *      -EINVAL: on a wrong parameter
3743  */
3744 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3745                             u8 **buf, bool ascii)
3746 {
3747         struct uc_string_id *uc_str;
3748         u8 *str;
3749         int ret;
3750
3751         if (!buf)
3752                 return -EINVAL;
3753
3754         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3755         if (!uc_str)
3756                 return -ENOMEM;
3757
3758         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3759                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3760         if (ret < 0) {
3761                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3762                         QUERY_REQ_RETRIES, ret);
3763                 str = NULL;
3764                 goto out;
3765         }
3766
3767         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3768                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3769                 str = NULL;
3770                 ret = 0;
3771                 goto out;
3772         }
3773
3774         if (ascii) {
3775                 ssize_t ascii_len;
3776                 int i;
3777                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3778                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3779                 str = kzalloc(ascii_len, GFP_KERNEL);
3780                 if (!str) {
3781                         ret = -ENOMEM;
3782                         goto out;
3783                 }
3784
3785                 /*
3786                  * the descriptor contains the string in UTF-16 format;
3787                  * we need to convert it to UTF-8 so it can be displayed
3788                  */
3789                 ret = utf16s_to_utf8s(uc_str->uc,
3790                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3791                                       UTF16_BIG_ENDIAN, str, ascii_len - 1);
3792
3793                 /* replace non-printable or non-ASCII characters with spaces */
3794                 for (i = 0; i < ret; i++)
3795                         str[i] = ufshcd_remove_non_printable(str[i]);
3796
3797                 str[ret++] = '\0';
3798
3799         } else {
3800                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3801                 if (!str) {
3802                         ret = -ENOMEM;
3803                         goto out;
3804                 }
3805                 ret = uc_str->len;
3806         }
3807 out:
3808         *buf = str;
3809         kfree(uc_str);
3810         return ret;
3811 }
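
/*
 * Editor's sketch (illustrative only, not part of the upstream driver): a
 * hypothetical caller reading a string descriptor as a NUL-terminated ASCII
 * string. @model_index is assumed to have been read from the device
 * descriptor first (e.g. the DEVICE_DESC_PARAM_PRDCT_NAME byte), and the
 * returned buffer must be freed by the caller.
 */
static int __maybe_unused ufshcd_example_read_product_name(struct ufs_hba *hba,
							   u8 model_index)
{
	u8 *model = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, model_index, &model, true /* ascii */);
	if (ret < 0)
		return ret;

	dev_info(hba->dev, "product name: %s\n", model);
	kfree(model);
	return 0;
}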
3812
3813 /**
3814  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3815  * @hba: Pointer to adapter instance
3816  * @lun: lun id
3817  * @param_offset: offset of the parameter to read
3818  * @param_read_buf: pointer to buffer where parameter would be read
3819  * @param_size: sizeof(param_read_buf)
3820  *
3821  * Return: 0 in case of success, non-zero otherwise.
3822  */
3823 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3824                                               int lun,
3825                                               enum unit_desc_param param_offset,
3826                                               u8 *param_read_buf,
3827                                               u32 param_size)
3828 {
3829         /*
3830          * Unit descriptors are only available for general purpose LUs (LUN id
3831          * from 0 to 7) and RPMB Well known LU.
3832          */
3833         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3834                 return -EOPNOTSUPP;
3835
3836         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3837                                       param_offset, param_read_buf, param_size);
3838 }
3839
3840 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3841 {
3842         int err = 0;
3843         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3844
3845         if (hba->dev_info.wspecversion >= 0x300) {
3846                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3847                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3848                                 &gating_wait);
3849                 if (err)
3850                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3851                                          err, gating_wait);
3852
3853                 if (gating_wait == 0) {
3854                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3855                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3856                                          gating_wait);
3857                 }
3858
3859                 hba->dev_info.clk_gating_wait_us = gating_wait;
3860         }
3861
3862         return err;
3863 }
3864
3865 /**
3866  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3867  * @hba: per adapter instance
3868  *
3869  * 1. Allocate DMA memory for Command Descriptor array
3870  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3871  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3872  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3873  *      (UTMRDL)
3874  * 4. Allocate memory for local reference block (lrb).
3875  *
3876  * Return: 0 for success, non-zero in case of failure.
3877  */
3878 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3879 {
3880         size_t utmrdl_size, utrdl_size, ucdl_size;
3881
3882         /* Allocate memory for UTP command descriptors */
3883         ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
3884         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3885                                                   ucdl_size,
3886                                                   &hba->ucdl_dma_addr,
3887                                                   GFP_KERNEL);
3888
3889         /*
3890          * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3891          */
3892         if (!hba->ucdl_base_addr ||
3893             WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3894                 dev_err(hba->dev,
3895                         "Command Descriptor Memory allocation failed\n");
3896                 goto out;
3897         }
3898
3899         /*
3900          * Allocate memory for UTP Transfer descriptors
3901          * UFSHCI requires 1KB alignment of UTRD
3902          */
3903         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3904         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3905                                                    utrdl_size,
3906                                                    &hba->utrdl_dma_addr,
3907                                                    GFP_KERNEL);
3908         if (!hba->utrdl_base_addr ||
3909             WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
3910                 dev_err(hba->dev,
3911                         "Transfer Descriptor Memory allocation failed\n");
3912                 goto out;
3913         }
3914
3915         /*
3916          * Skip utmrdl allocation; it may have been
3917          * allocated during first pass and not released during
3918          * MCQ memory allocation.
3919          * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3920          */
3921         if (hba->utmrdl_base_addr)
3922                 goto skip_utmrdl;
3923         /*
3924          * Allocate memory for UTP Task Management descriptors
3925          * UFSHCI requires 1KB alignment of UTMRD
3926          */
3927         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3928         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3929                                                     utmrdl_size,
3930                                                     &hba->utmrdl_dma_addr,
3931                                                     GFP_KERNEL);
3932         if (!hba->utmrdl_base_addr ||
3933             WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
3934                 dev_err(hba->dev,
3935                 "Task Management Descriptor Memory allocation failed\n");
3936                 goto out;
3937         }
3938
3939 skip_utmrdl:
3940         /* Allocate memory for local reference block */
3941         hba->lrb = devm_kcalloc(hba->dev,
3942                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3943                                 GFP_KERNEL);
3944         if (!hba->lrb) {
3945                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3946                 goto out;
3947         }
3948         return 0;
3949 out:
3950         return -ENOMEM;
3951 }
3952
3953 /**
3954  * ufshcd_host_memory_configure - configure local reference block with
3955  *                              memory offsets
3956  * @hba: per adapter instance
3957  *
3958  * Configure Host memory space
3959  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3960  * address.
3961  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3962  * and PRDT offset.
3963  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3964  * into local reference block.
3965  */
3966 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3967 {
3968         struct utp_transfer_req_desc *utrdlp;
3969         dma_addr_t cmd_desc_dma_addr;
3970         dma_addr_t cmd_desc_element_addr;
3971         u16 response_offset;
3972         u16 prdt_offset;
3973         int cmd_desc_size;
3974         int i;
3975
3976         utrdlp = hba->utrdl_base_addr;
3977
3978         response_offset =
3979                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3980         prdt_offset =
3981                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3982
3983         cmd_desc_size = ufshcd_get_ucd_size(hba);
3984         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3985
3986         for (i = 0; i < hba->nutrs; i++) {
3987                 /* Configure UTRD with command descriptor base address */
3988                 cmd_desc_element_addr =
3989                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3990                 utrdlp[i].command_desc_base_addr =
3991                                 cpu_to_le64(cmd_desc_element_addr);
3992
3993                 /* Response upiu and prdt offset should be in double words */
3994                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3995                         utrdlp[i].response_upiu_offset =
3996                                 cpu_to_le16(response_offset);
3997                         utrdlp[i].prd_table_offset =
3998                                 cpu_to_le16(prdt_offset);
3999                         utrdlp[i].response_upiu_length =
4000                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
4001                 } else {
4002                         utrdlp[i].response_upiu_offset =
4003                                 cpu_to_le16(response_offset >> 2);
4004                         utrdlp[i].prd_table_offset =
4005                                 cpu_to_le16(prdt_offset >> 2);
4006                         utrdlp[i].response_upiu_length =
4007                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
4008                 }
4009
4010                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
4011         }
4012 }
4013
4014 /**
4015  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
4016  * @hba: per adapter instance
4017  *
4018  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
4019  * in order to initialize the Unipro link startup procedure.
4020  * Once the Unipro links are up, the device connected to the controller
4021  * is detected.
4022  *
4023  * Return: 0 on success, non-zero value on failure.
4024  */
4025 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4026 {
4027         struct uic_command uic_cmd = {0};
4028         int ret;
4029
4030         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
4031
4032         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4033         if (ret)
4034                 dev_dbg(hba->dev,
4035                         "dme-link-startup: error code %d\n", ret);
4036         return ret;
4037 }
4038 /**
4039  * ufshcd_dme_reset - UIC command for DME_RESET
4040  * @hba: per adapter instance
4041  *
4042  * DME_RESET command is issued in order to reset UniPro stack.
4043  * This function now deals with cold reset.
4044  *
4045  * Return: 0 on success, non-zero value on failure.
4046  */
4047 static int ufshcd_dme_reset(struct ufs_hba *hba)
4048 {
4049         struct uic_command uic_cmd = {0};
4050         int ret;
4051
4052         uic_cmd.command = UIC_CMD_DME_RESET;
4053
4054         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4055         if (ret)
4056                 dev_err(hba->dev,
4057                         "dme-reset: error code %d\n", ret);
4058
4059         return ret;
4060 }
4061
4062 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
4063                                int agreed_gear,
4064                                int adapt_val)
4065 {
4066         int ret;
4067
4068         if (agreed_gear < UFS_HS_G4)
4069                 adapt_val = PA_NO_ADAPT;
4070
4071         ret = ufshcd_dme_set(hba,
4072                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
4073                              adapt_val);
4074         return ret;
4075 }
4076 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
4077
4078 /**
4079  * ufshcd_dme_enable - UIC command for DME_ENABLE
4080  * @hba: per adapter instance
4081  *
4082  * DME_ENABLE command is issued in order to enable UniPro stack.
4083  *
4084  * Return: 0 on success, non-zero value on failure.
4085  */
4086 static int ufshcd_dme_enable(struct ufs_hba *hba)
4087 {
4088         struct uic_command uic_cmd = {0};
4089         int ret;
4090
4091         uic_cmd.command = UIC_CMD_DME_ENABLE;
4092
4093         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4094         if (ret)
4095                 dev_err(hba->dev,
4096                         "dme-enable: error code %d\n", ret);
4097
4098         return ret;
4099 }
4100
4101 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4102 {
4103         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
4104         unsigned long min_sleep_time_us;
4105
4106         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4107                 return;
4108
4109         /*
4110          * last_dme_cmd_tstamp will be 0 only for 1st call to
4111          * this function
4112          */
4113         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4114                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4115         } else {
4116                 unsigned long delta =
4117                         (unsigned long) ktime_to_us(
4118                                 ktime_sub(ktime_get(),
4119                                 hba->last_dme_cmd_tstamp));
4120
4121                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4122                         min_sleep_time_us =
4123                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4124                 else
4125                         return; /* no more delay required */
4126         }
4127
4128         /* allow sleep for extra 50us if needed */
4129         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4130 }
4131
4132 /**
4133  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4134  * @hba: per adapter instance
4135  * @attr_sel: uic command argument1
4136  * @attr_set: attribute set type as uic command argument2
4137  * @mib_val: setting value as uic command argument3
4138  * @peer: indicate whether peer or local
4139  *
4140  * Return: 0 on success, non-zero value on failure.
4141  */
4142 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4143                         u8 attr_set, u32 mib_val, u8 peer)
4144 {
4145         struct uic_command uic_cmd = {0};
4146         static const char *const action[] = {
4147                 "dme-set",
4148                 "dme-peer-set"
4149         };
4150         const char *set = action[!!peer];
4151         int ret;
4152         int retries = UFS_UIC_COMMAND_RETRIES;
4153
4154         uic_cmd.command = peer ?
4155                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4156         uic_cmd.argument1 = attr_sel;
4157         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4158         uic_cmd.argument3 = mib_val;
4159
4160         do {
4161                 /* for peer attributes we retry upon failure */
4162                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4163                 if (ret)
4164                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4165                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4166         } while (ret && peer && --retries);
4167
4168         if (ret)
4169                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4170                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4171                         UFS_UIC_COMMAND_RETRIES - retries);
4172
4173         return ret;
4174 }
4175 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
4176
4177 /**
4178  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4179  * @hba: per adapter instance
4180  * @attr_sel: uic command argument1
4181  * @mib_val: the value of the attribute as returned by the UIC command
4182  * @peer: indicate whether peer or local
4183  *
4184  * Return: 0 on success, non-zero value on failure.
4185  */
4186 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4187                         u32 *mib_val, u8 peer)
4188 {
4189         struct uic_command uic_cmd = {0};
4190         static const char *const action[] = {
4191                 "dme-get",
4192                 "dme-peer-get"
4193         };
4194         const char *get = action[!!peer];
4195         int ret;
4196         int retries = UFS_UIC_COMMAND_RETRIES;
4197         struct ufs_pa_layer_attr orig_pwr_info;
4198         struct ufs_pa_layer_attr temp_pwr_info;
4199         bool pwr_mode_change = false;
4200
4201         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4202                 orig_pwr_info = hba->pwr_info;
4203                 temp_pwr_info = orig_pwr_info;
4204
4205                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4206                     orig_pwr_info.pwr_rx == FAST_MODE) {
4207                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4208                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4209                         pwr_mode_change = true;
4210                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4211                     orig_pwr_info.pwr_rx == SLOW_MODE) {
4212                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4213                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4214                         pwr_mode_change = true;
4215                 }
4216                 if (pwr_mode_change) {
4217                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4218                         if (ret)
4219                                 goto out;
4220                 }
4221         }
4222
4223         uic_cmd.command = peer ?
4224                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4225         uic_cmd.argument1 = attr_sel;
4226
4227         do {
4228                 /* for peer attributes we retry upon failure */
4229                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4230                 if (ret)
4231                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4232                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
4233         } while (ret && peer && --retries);
4234
4235         if (ret)
4236                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4237                         get, UIC_GET_ATTR_ID(attr_sel),
4238                         UFS_UIC_COMMAND_RETRIES - retries);
4239
4240         if (mib_val && !ret)
4241                 *mib_val = uic_cmd.argument3;
4242
4243         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4244             && pwr_mode_change)
4245                 ufshcd_change_power_mode(hba, &orig_pwr_info);
4246 out:
4247         return ret;
4248 }
4249 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
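
/*
 * Editor's sketch (illustrative only, not part of the upstream driver): the
 * ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers declared in <ufs/ufshcd.h>
 * build on ufshcd_dme_get_attr() above. Reading the peer's PA_TACTIVATE, for
 * instance, could look like this (assuming the wrappers keep their current
 * signatures):
 */
static int __maybe_unused ufshcd_example_read_peer_tactivate(struct ufs_hba *hba,
							     u32 *peer_tact)
{
	return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), peer_tact);
}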
4250
4251 /**
4252  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4253  * state) and waits for it to take effect.
4254  *
4255  * @hba: per adapter instance
4256  * @cmd: UIC command to execute
4257  *
4258  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
4259  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
4260  * device UniPro links, hence their final completion is indicated by
4261  * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
4262  * in addition to the normal UIC command completion status (UCCS). This
4263  * function only returns after the relevant status bits indicate completion.
4264  *
4265  * Return: 0 on success, non-zero value on failure.
4266  */
4267 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4268 {
4269         DECLARE_COMPLETION_ONSTACK(uic_async_done);
4270         unsigned long flags;
4271         u8 status;
4272         int ret;
4273         bool reenable_intr = false;
4274
4275         mutex_lock(&hba->uic_cmd_mutex);
4276         ufshcd_add_delay_before_dme_cmd(hba);
4277
4278         spin_lock_irqsave(hba->host->host_lock, flags);
4279         if (ufshcd_is_link_broken(hba)) {
4280                 ret = -ENOLINK;
4281                 goto out_unlock;
4282         }
4283         hba->uic_async_done = &uic_async_done;
4284         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4285                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4286                 /*
4287                  * Make sure UIC command completion interrupt is disabled before
4288                  * issuing UIC command.
4289                  */
4290                 wmb();
4291                 reenable_intr = true;
4292         }
4293         spin_unlock_irqrestore(hba->host->host_lock, flags);
4294         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4295         if (ret) {
4296                 dev_err(hba->dev,
4297                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4298                         cmd->command, cmd->argument3, ret);
4299                 goto out;
4300         }
4301
4302         if (!wait_for_completion_timeout(hba->uic_async_done,
4303                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4304                 dev_err(hba->dev,
4305                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4306                         cmd->command, cmd->argument3);
4307
4308                 if (!cmd->cmd_active) {
4309                         dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4310                                 __func__);
4311                         goto check_upmcrs;
4312                 }
4313
4314                 ret = -ETIMEDOUT;
4315                 goto out;
4316         }
4317
4318 check_upmcrs:
4319         status = ufshcd_get_upmcrs(hba);
4320         if (status != PWR_LOCAL) {
4321                 dev_err(hba->dev,
4322                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4323                         cmd->command, status);
4324                 ret = (status != PWR_OK) ? status : -1;
4325         }
4326 out:
4327         if (ret) {
4328                 ufshcd_print_host_state(hba);
4329                 ufshcd_print_pwr_info(hba);
4330                 ufshcd_print_evt_hist(hba);
4331         }
4332
4333         spin_lock_irqsave(hba->host->host_lock, flags);
4334         hba->active_uic_cmd = NULL;
4335         hba->uic_async_done = NULL;
4336         if (reenable_intr)
4337                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4338         if (ret) {
4339                 ufshcd_set_link_broken(hba);
4340                 ufshcd_schedule_eh_work(hba);
4341         }
4342 out_unlock:
4343         spin_unlock_irqrestore(hba->host->host_lock, flags);
4344         mutex_unlock(&hba->uic_cmd_mutex);
4345
4346         return ret;
4347 }
4348
4349 /**
4350  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4351  *                              using DME_SET primitives.
4352  * @hba: per adapter instance
4353  * @mode: power mode value
4354  *
4355  * Return: 0 on success, non-zero value on failure.
4356  */
4357 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4358 {
4359         struct uic_command uic_cmd = {0};
4360         int ret;
4361
4362         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4363                 ret = ufshcd_dme_set(hba,
4364                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4365                 if (ret) {
4366                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4367                                                 __func__, ret);
4368                         goto out;
4369                 }
4370         }
4371
4372         uic_cmd.command = UIC_CMD_DME_SET;
4373         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4374         uic_cmd.argument3 = mode;
4375         ufshcd_hold(hba);
4376         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4377         ufshcd_release(hba);
4378
4379 out:
4380         return ret;
4381 }
4382 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4383
4384 int ufshcd_link_recovery(struct ufs_hba *hba)
4385 {
4386         int ret;
4387         unsigned long flags;
4388
4389         spin_lock_irqsave(hba->host->host_lock, flags);
4390         hba->ufshcd_state = UFSHCD_STATE_RESET;
4391         ufshcd_set_eh_in_progress(hba);
4392         spin_unlock_irqrestore(hba->host->host_lock, flags);
4393
4394         /* Reset the attached device */
4395         ufshcd_device_reset(hba);
4396
4397         ret = ufshcd_host_reset_and_restore(hba);
4398
4399         spin_lock_irqsave(hba->host->host_lock, flags);
4400         if (ret)
4401                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4402         ufshcd_clear_eh_in_progress(hba);
4403         spin_unlock_irqrestore(hba->host->host_lock, flags);
4404
4405         if (ret)
4406                 dev_err(hba->dev, "%s: link recovery failed, err %d",
4407                         __func__, ret);
4408
4409         return ret;
4410 }
4411 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4412
4413 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4414 {
4415         int ret;
4416         struct uic_command uic_cmd = {0};
4417         ktime_t start = ktime_get();
4418
4419         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4420
4421         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4422         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4423         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4424                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4425
4426         if (ret)
4427                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4428                         __func__, ret);
4429         else
4430                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4431                                                                 POST_CHANGE);
4432
4433         return ret;
4434 }
4435 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4436
4437 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4438 {
4439         struct uic_command uic_cmd = {0};
4440         int ret;
4441         ktime_t start = ktime_get();
4442
4443         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4444
4445         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4446         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4447         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4448                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4449
4450         if (ret) {
4451                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4452                         __func__, ret);
4453         } else {
4454                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4455                                                                 POST_CHANGE);
4456                 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4457                 hba->ufs_stats.hibern8_exit_cnt++;
4458         }
4459
4460         return ret;
4461 }
4462 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
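/*
 * Both hibern8 helpers above are used by the clock gating and suspend/resume
 * paths in this file; each brackets the DME command with the vendor
 * hibern8_notify hook so host controller drivers can apply workarounds around
 * the transition.
 */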
4463
4464 static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
4465 {
4466         if (!ufshcd_is_auto_hibern8_supported(hba))
4467                 return;
4468
4469         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4470 }
4471
4472 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4473 {
4474         const u32 cur_ahit = READ_ONCE(hba->ahit);
4475
4476         if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
4477                 return;
4478
4479         WRITE_ONCE(hba->ahit, ahit);
4480         if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4481                 ufshcd_rpm_get_sync(hba);
4482                 ufshcd_hold(hba);
4483                 ufshcd_configure_auto_hibern8(hba);
4484                 ufshcd_release(hba);
4485                 ufshcd_rpm_put_sync(hba);
4486         }
4487 }
4488 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
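/*
 * A minimal usage sketch, assuming the AHIT field layout and macros from
 * ufshci.h: a host driver requesting roughly a 150 ms idle timer (150 timer
 * units with scale code 3, i.e. 1 ms granularity) would pass
 *
 *	ufshcd_auto_hibern8_update(hba,
 *		FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *		FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
 */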
4489
4490 /**
4491  * ufshcd_init_pwr_info - setting the POR (power on reset)
4492  * values in hba power info
4493  * @hba: per-adapter instance
4494  */
4495 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4496 {
4497         hba->pwr_info.gear_rx = UFS_PWM_G1;
4498         hba->pwr_info.gear_tx = UFS_PWM_G1;
4499         hba->pwr_info.lane_rx = UFS_LANE_1;
4500         hba->pwr_info.lane_tx = UFS_LANE_1;
4501         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4502         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4503         hba->pwr_info.hs_rate = 0;
4504 }
4505
4506 /**
4507  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4508  * @hba: per-adapter instance
4509  *
4510  * Return: 0 upon success; < 0 upon failure.
4511  */
4512 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4513 {
4514         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4515
4516         if (hba->max_pwr_info.is_valid)
4517                 return 0;
4518
4519         if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4520                 pwr_info->pwr_tx = FASTAUTO_MODE;
4521                 pwr_info->pwr_rx = FASTAUTO_MODE;
4522         } else {
4523                 pwr_info->pwr_tx = FAST_MODE;
4524                 pwr_info->pwr_rx = FAST_MODE;
4525         }
4526         pwr_info->hs_rate = PA_HS_MODE_B;
4527
4528         /* Get the connected lane count */
4529         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4530                         &pwr_info->lane_rx);
4531         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4532                         &pwr_info->lane_tx);
4533
4534         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4535                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4536                                 __func__,
4537                                 pwr_info->lane_rx,
4538                                 pwr_info->lane_tx);
4539                 return -EINVAL;
4540         }
4541
4542         /*
4543          * First, get the maximum gear for HS speed.
4544          * If the value is zero, the link has no HS gear capability.
4545          * Then, fall back to the maximum gear for PWM speed.
4546          */
4547         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4548         if (!pwr_info->gear_rx) {
4549                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4550                                 &pwr_info->gear_rx);
4551                 if (!pwr_info->gear_rx) {
4552                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4553                                 __func__, pwr_info->gear_rx);
4554                         return -EINVAL;
4555                 }
4556                 pwr_info->pwr_rx = SLOW_MODE;
4557         }
4558
4559         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4560                         &pwr_info->gear_tx);
4561         if (!pwr_info->gear_tx) {
4562                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4563                                 &pwr_info->gear_tx);
4564                 if (!pwr_info->gear_tx) {
4565                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4566                                 __func__, pwr_info->gear_tx);
4567                         return -EINVAL;
4568                 }
4569                 pwr_info->pwr_tx = SLOW_MODE;
4570         }
4571
4572         hba->max_pwr_info.is_valid = true;
4573         return 0;
4574 }
4575
4576 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4577                              struct ufs_pa_layer_attr *pwr_mode)
4578 {
4579         int ret;
4580
4581         /* if already configured to the requested pwr_mode */
4582         if (!hba->force_pmc &&
4583             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4584             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4585             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4586             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4587             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4588             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4589             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4590                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4591                 return 0;
4592         }
4593
4594         /*
4595          * Configure attributes for power mode change with below.
4596          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4597          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4598          * - PA_HSSERIES
4599          */
4600         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4601         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4602                         pwr_mode->lane_rx);
4603         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4604                         pwr_mode->pwr_rx == FAST_MODE)
4605                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4606         else
4607                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4608
4609         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4610         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4611                         pwr_mode->lane_tx);
4612         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4613                         pwr_mode->pwr_tx == FAST_MODE)
4614                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4615         else
4616                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4617
4618         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4619             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4620             pwr_mode->pwr_rx == FAST_MODE ||
4621             pwr_mode->pwr_tx == FAST_MODE)
4622                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4623                                                 pwr_mode->hs_rate);
4624
4625         if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4626                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4627                                 DL_FC0ProtectionTimeOutVal_Default);
4628                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4629                                 DL_TC0ReplayTimeOutVal_Default);
4630                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4631                                 DL_AFC0ReqTimeOutVal_Default);
4632                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4633                                 DL_FC1ProtectionTimeOutVal_Default);
4634                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4635                                 DL_TC1ReplayTimeOutVal_Default);
4636                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4637                                 DL_AFC1ReqTimeOutVal_Default);
4638
4639                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4640                                 DL_FC0ProtectionTimeOutVal_Default);
4641                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4642                                 DL_TC0ReplayTimeOutVal_Default);
4643                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4644                                 DL_AFC0ReqTimeOutVal_Default);
4645         }
4646
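        /*
         * PA_PWRMODE packs the RX power mode into the upper nibble and the TX
         * power mode into the lower nibble of the attribute value.
         */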
4647         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4648                         | pwr_mode->pwr_tx);
4649
4650         if (ret) {
4651                 dev_err(hba->dev,
4652                         "%s: power mode change failed %d\n", __func__, ret);
4653         } else {
4654                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4655                                                                 pwr_mode);
4656
4657                 memcpy(&hba->pwr_info, pwr_mode,
4658                         sizeof(struct ufs_pa_layer_attr));
4659         }
4660
4661         return ret;
4662 }
4663
4664 /**
4665  * ufshcd_config_pwr_mode - configure a new power mode
4666  * @hba: per-adapter instance
4667  * @desired_pwr_mode: desired power configuration
4668  *
4669  * Return: 0 upon success; < 0 upon failure.
4670  */
4671 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4672                 struct ufs_pa_layer_attr *desired_pwr_mode)
4673 {
4674         struct ufs_pa_layer_attr final_params = { 0 };
4675         int ret;
4676
4677         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4678                                         desired_pwr_mode, &final_params);
4679
4680         if (ret)
4681                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4682
4683         ret = ufshcd_change_power_mode(hba, &final_params);
4684
4685         return ret;
4686 }
4687 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
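/*
 * Sketch of the usual call sequence during link bring-up (this mirrors what
 * ufshcd_probe_hba() does later in this file):
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */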
4688
4689 /**
4690  * ufshcd_complete_dev_init() - checks device readiness
4691  * @hba: per-adapter instance
4692  *
4693  * Set fDeviceInit flag and poll until device toggles it.
4694  *
4695  * Return: 0 upon success; < 0 upon failure.
4696  */
4697 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4698 {
4699         int err;
4700         bool flag_res = true;
4701         ktime_t timeout;
4702
4703         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4704                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4705         if (err) {
4706                 dev_err(hba->dev,
4707                         "%s: setting fDeviceInit flag failed with error %d\n",
4708                         __func__, err);
4709                 goto out;
4710         }
4711
4712         /* Poll fDeviceInit flag to be cleared */
4713         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4714         do {
4715                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4716                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4717                 if (!flag_res)
4718                         break;
4719                 usleep_range(500, 1000);
4720         } while (ktime_before(ktime_get(), timeout));
4721
4722         if (err) {
4723                 dev_err(hba->dev,
4724                                 "%s: reading fDeviceInit flag failed with error %d\n",
4725                                 __func__, err);
4726         } else if (flag_res) {
4727                 dev_err(hba->dev,
4728                                 "%s: fDeviceInit was not cleared by the device\n",
4729                                 __func__);
4730                 err = -EBUSY;
4731         }
4732 out:
4733         return err;
4734 }
4735
4736 /**
4737  * ufshcd_make_hba_operational - Make UFS controller operational
4738  * @hba: per adapter instance
4739  *
4740  * To bring UFS host controller to operational state,
4741  * 1. Enable required interrupts
4742  * 2. Configure interrupt aggregation
4743  * 3. Program UTRL and UTMRL base address
4744  * 4. Configure run-stop-registers
4745  *
4746  * Return: 0 on success, non-zero value on failure.
4747  */
4748 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4749 {
4750         int err = 0;
4751         u32 reg;
4752
4753         /* Enable required interrupts */
4754         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4755
4756         /* Configure interrupt aggregation */
4757         if (ufshcd_is_intr_aggr_allowed(hba))
4758                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4759         else
4760                 ufshcd_disable_intr_aggr(hba);
4761
4762         /* Configure UTRL and UTMRL base address registers */
4763         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4764                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4765         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4766                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4767         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4768                         REG_UTP_TASK_REQ_LIST_BASE_L);
4769         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4770                         REG_UTP_TASK_REQ_LIST_BASE_H);
4771
4772         /*
4773          * Make sure base address and interrupt setup are updated before
4774          * enabling the run/stop registers below.
4775          */
4776         wmb();
4777
4778         /*
4779          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4780          */
4781         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4782         if (!(ufshcd_get_lists_status(reg))) {
4783                 ufshcd_enable_run_stop_reg(hba);
4784         } else {
4785                 dev_err(hba->dev,
4786                         "Host controller not ready to process requests\n");
4787                 err = -EIO;
4788         }
4789
4790         return err;
4791 }
4792 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4793
4794 /**
4795  * ufshcd_hba_stop - Send controller to reset state
4796  * @hba: per adapter instance
4797  */
4798 void ufshcd_hba_stop(struct ufs_hba *hba)
4799 {
4800         unsigned long flags;
4801         int err;
4802
4803         /*
4804          * Obtain the host lock to prevent the controller from being disabled
4805          * while the UFS interrupt handler is active on another CPU.
4806          */
4807         spin_lock_irqsave(hba->host->host_lock, flags);
4808         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4809         spin_unlock_irqrestore(hba->host->host_lock, flags);
4810
4811         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4812                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4813                                         10, 1);
4814         if (err)
4815                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4816 }
4817 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4818
4819 /**
4820  * ufshcd_hba_execute_hce - initialize the controller
4821  * @hba: per adapter instance
4822  *
4823  * The controller resets itself and the controller firmware initialization
4824  * sequence kicks off. When the controller is ready it will set
4825  * the Host Controller Enable bit to 1.
4826  *
4827  * Return: 0 on success, non-zero value on failure.
4828  */
4829 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4830 {
4831         int retry_outer = 3;
4832         int retry_inner;
4833
4834 start:
4835         if (ufshcd_is_hba_active(hba))
4836                 /* change controller state to "reset state" */
4837                 ufshcd_hba_stop(hba);
4838
4839         /* UniPro link is disabled at this point */
4840         ufshcd_set_link_off(hba);
4841
4842         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4843
4844         /* start controller initialization sequence */
4845         ufshcd_hba_start(hba);
4846
4847         /*
4848          * To initialize a UFS host controller, the HCE bit must be set to 1.
4849          * During initialization the HCE bit value changes from 1->0->1.
4850          * When the host controller completes the initialization sequence
4851          * it sets the HCE bit back to 1. The same HCE bit is read back
4852          * to check whether the controller has completed initialization.
4853          * Without this delay, the HCE = 1 value written by the previous
4854          * instruction might be read back prematurely.
4855          * This delay can be changed based on the controller.
4856          */
4857         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4858
4859         /* wait for the host controller to complete initialization */
4860         retry_inner = 50;
4861         while (!ufshcd_is_hba_active(hba)) {
4862                 if (retry_inner) {
4863                         retry_inner--;
4864                 } else {
4865                         dev_err(hba->dev,
4866                                 "Controller enable failed\n");
4867                         if (retry_outer) {
4868                                 retry_outer--;
4869                                 goto start;
4870                         }
4871                         return -EIO;
4872                 }
4873                 usleep_range(1000, 1100);
4874         }
4875
4876         /* enable UIC related interrupts */
4877         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4878
4879         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4880
4881         return 0;
4882 }
4883
4884 int ufshcd_hba_enable(struct ufs_hba *hba)
4885 {
4886         int ret;
4887
4888         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4889                 ufshcd_set_link_off(hba);
4890                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4891
4892                 /* enable UIC related interrupts */
4893                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4894                 ret = ufshcd_dme_reset(hba);
4895                 if (ret) {
4896                         dev_err(hba->dev, "DME_RESET failed\n");
4897                         return ret;
4898                 }
4899
4900                 ret = ufshcd_dme_enable(hba);
4901                 if (ret) {
4902                         dev_err(hba->dev, "Enabling DME failed\n");
4903                         return ret;
4904                 }
4905
4906                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4907         } else {
4908                 ret = ufshcd_hba_execute_hce(hba);
4909         }
4910
4911         return ret;
4912 }
4913 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4914
4915 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4916 {
4917         int tx_lanes = 0, i, err = 0;
4918
4919         if (!peer)
4920                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4921                                &tx_lanes);
4922         else
4923                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4924                                     &tx_lanes);
4925         for (i = 0; i < tx_lanes; i++) {
4926                 if (!peer)
4927                         err = ufshcd_dme_set(hba,
4928                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4929                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4930                                         0);
4931                 else
4932                         err = ufshcd_dme_peer_set(hba,
4933                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4934                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4935                                         0);
4936                 if (err) {
4937                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4938                                 __func__, peer, i, err);
4939                         break;
4940                 }
4941         }
4942
4943         return err;
4944 }
4945
4946 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4947 {
4948         return ufshcd_disable_tx_lcc(hba, true);
4949 }
4950
4951 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4952 {
4953         struct ufs_event_hist *e;
4954
4955         if (id >= UFS_EVT_CNT)
4956                 return;
4957
4958         e = &hba->ufs_stats.event[id];
4959         e->val[e->pos] = val;
4960         e->tstamp[e->pos] = local_clock();
4961         e->cnt += 1;
4962         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4963
4964         ufshcd_vops_event_notify(hba, id, &val);
4965 }
4966 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
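/*
 * Callers record one u32 per event, e.g. the error handler does something like
 *
 *	ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, hba->errors);
 *
 * Each event id keeps a small ring of the most recent values and timestamps,
 * and the value is also forwarded to the vendor event_notify hook.
 */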
4967
4968 /**
4969  * ufshcd_link_startup - Initialize UniPro link startup
4970  * @hba: per adapter instance
4971  *
4972  * Return: 0 for success, non-zero in case of failure.
4973  */
4974 static int ufshcd_link_startup(struct ufs_hba *hba)
4975 {
4976         int ret;
4977         int retries = DME_LINKSTARTUP_RETRIES;
4978         bool link_startup_again = false;
4979
4980         /*
4981          * If the UFS device isn't active then link startup has to be issued
4982          * twice to make sure the device state moves to active.
4983          */
4984         if (!ufshcd_is_ufs_dev_active(hba))
4985                 link_startup_again = true;
4986
4987 link_startup:
4988         do {
4989                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4990
4991                 ret = ufshcd_dme_link_startup(hba);
4992
4993                 /* check if device is detected by inter-connect layer */
4994                 if (!ret && !ufshcd_is_device_present(hba)) {
4995                         ufshcd_update_evt_hist(hba,
4996                                                UFS_EVT_LINK_STARTUP_FAIL,
4997                                                0);
4998                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4999                         ret = -ENXIO;
5000                         goto out;
5001                 }
5002
5003                 /*
5004                  * DME link lost indication is only received when link is up,
5005                  * but we can't be sure if the link is up until link startup
5006                  * succeeds. So reset the local UniPro and try again.
5007                  */
5008                 if (ret && retries && ufshcd_hba_enable(hba)) {
5009                         ufshcd_update_evt_hist(hba,
5010                                                UFS_EVT_LINK_STARTUP_FAIL,
5011                                                (u32)ret);
5012                         goto out;
5013                 }
5014         } while (ret && retries--);
5015
5016         if (ret) {
5017                 /* failed to get the link up... give up */
5018                 ufshcd_update_evt_hist(hba,
5019                                        UFS_EVT_LINK_STARTUP_FAIL,
5020                                        (u32)ret);
5021                 goto out;
5022         }
5023
5024         if (link_startup_again) {
5025                 link_startup_again = false;
5026                 retries = DME_LINKSTARTUP_RETRIES;
5027                 goto link_startup;
5028         }
5029
5030         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5031         ufshcd_init_pwr_info(hba);
5032         ufshcd_print_pwr_info(hba);
5033
5034         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5035                 ret = ufshcd_disable_device_tx_lcc(hba);
5036                 if (ret)
5037                         goto out;
5038         }
5039
5040         /* Include any host controller configuration via UIC commands */
5041         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5042         if (ret)
5043                 goto out;
5044
5045         /* Clear UECPA once, since a LINERESET may have happened during LINK_STARTUP */
5046         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5047         ret = ufshcd_make_hba_operational(hba);
5048 out:
5049         if (ret) {
5050                 dev_err(hba->dev, "link startup failed %d\n", ret);
5051                 ufshcd_print_host_state(hba);
5052                 ufshcd_print_pwr_info(hba);
5053                 ufshcd_print_evt_hist(hba);
5054         }
5055         return ret;
5056 }
5057
5058 /**
5059  * ufshcd_verify_dev_init() - Verify device initialization
5060  * @hba: per-adapter instance
5061  *
5062  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5063  * device Transport Protocol (UTP) layer is ready after a reset.
5064  * If the UTP layer at the device side is not initialized, it may
5065  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5066  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5067  *
5068  * Return: 0 upon success; < 0 upon failure.
5069  */
5070 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5071 {
5072         int err = 0;
5073         int retries;
5074
5075         ufshcd_hold(hba);
5076         mutex_lock(&hba->dev_cmd.lock);
5077         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5078                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5079                                           hba->nop_out_timeout);
5080
5081                 if (!err || err == -ETIMEDOUT)
5082                         break;
5083
5084                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5085         }
5086         mutex_unlock(&hba->dev_cmd.lock);
5087         ufshcd_release(hba);
5088
5089         if (err)
5090                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5091         return err;
5092 }
5093
5094 /**
5095  * ufshcd_setup_links - create device links between the device WLUN and other LUNs
5096  * @sdev: pointer to SCSI device
5097  * @hba: pointer to ufs hba
5098  */
5099 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
5100 {
5101         struct device_link *link;
5102
5103         /*
5104          * The device WLUN is the supplier and the rest of the LUNs are consumers.
5105          * This ensures that the device WLUN suspends after all other LUNs.
5106          */
5107         if (hba->ufs_device_wlun) {
5108                 link = device_link_add(&sdev->sdev_gendev,
5109                                        &hba->ufs_device_wlun->sdev_gendev,
5110                                        DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
5111                 if (!link) {
5112                         dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
5113                                 dev_name(&hba->ufs_device_wlun->sdev_gendev));
5114                         return;
5115                 }
5116                 hba->luns_avail--;
5117                 /* Ignore REPORT_LUN wlun probing */
5118                 if (hba->luns_avail == 1) {
5119                         ufshcd_rpm_put(hba);
5120                         return;
5121                 }
5122         } else {
5123                 /*
5124                  * The device WLUN is being probed. The assumption is that WLUNs are
5125                  * scanned before other LUNs.
5126                  */
5127                 hba->luns_avail--;
5128         }
5129 }
5130
5131 /**
5132  * ufshcd_lu_init - Initialize the relevant parameters of the LU
5133  * @hba: per-adapter instance
5134  * @sdev: pointer to SCSI device
5135  */
5136 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5137 {
5138         int len = QUERY_DESC_MAX_SIZE;
5139         u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5140         u8 lun_qdepth = hba->nutrs;
5141         u8 *desc_buf;
5142         int ret;
5143
5144         desc_buf = kzalloc(len, GFP_KERNEL);
5145         if (!desc_buf)
5146                 goto set_qdepth;
5147
5148         ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5149         if (ret < 0) {
5150                 if (ret == -EOPNOTSUPP)
5151                         /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5152                         lun_qdepth = 1;
5153                 kfree(desc_buf);
5154                 goto set_qdepth;
5155         }
5156
5157         if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5158                 /*
5159                  * In a per-LU queueing architecture, bLUQueueDepth will not be 0, so
5160                  * use the smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth.
5161                  */
5162                 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5163         }
5164         /*
5165          * According to the UFS device specification, the write protection mode is
5166          * only supported by normal LUs, not by WLUNs.
5167          */
5168         if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5169             !hba->dev_info.is_lu_power_on_wp &&
5170             desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5171                 hba->dev_info.is_lu_power_on_wp = true;
5172
5173         /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5174         if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5175             desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5176                 hba->dev_info.b_advanced_rpmb_en = true;
5177
5178
5179         kfree(desc_buf);
5180 set_qdepth:
5181         /*
5182          * For WLUNs that don't support the unit descriptor, the queue depth is set to 1.
5183          * For LUs whose bLUQueueDepth == 0, it is set to the maximum the host can queue.
5184          */
5185         dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5186         scsi_change_queue_depth(sdev, lun_qdepth);
5187 }
5188
5189 /**
5190  * ufshcd_slave_alloc - handle initial SCSI device configurations
5191  * @sdev: pointer to SCSI device
5192  *
5193  * Return: 0 (success).
5194  */
5195 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5196 {
5197         struct ufs_hba *hba;
5198
5199         hba = shost_priv(sdev->host);
5200
5201         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5202         sdev->use_10_for_ms = 1;
5203
5204         /* DBD field should be set to 1 in mode sense(10) */
5205         sdev->set_dbd_for_ms = 1;
5206
5207         /* allow SCSI layer to restart the device in case of errors */
5208         sdev->allow_restart = 1;
5209
5210         /* REPORT SUPPORTED OPERATION CODES is not supported */
5211         sdev->no_report_opcodes = 1;
5212
5213         /* WRITE_SAME command is not supported */
5214         sdev->no_write_same = 1;
5215
5216         ufshcd_lu_init(hba, sdev);
5217
5218         ufshcd_setup_links(hba, sdev);
5219
5220         return 0;
5221 }
5222
5223 /**
5224  * ufshcd_change_queue_depth - change queue depth
5225  * @sdev: pointer to SCSI device
5226  * @depth: required depth to set
5227  *
5228  * Change queue depth and make sure the max. limits are not crossed.
5229  *
5230  * Return: new queue depth.
5231  */
5232 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5233 {
5234         return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5235 }
5236
5237 /**
5238  * ufshcd_slave_configure - adjust SCSI device configurations
5239  * @sdev: pointer to SCSI device
5240  *
5241  * Return: 0 (success).
5242  */
5243 static int ufshcd_slave_configure(struct scsi_device *sdev)
5244 {
5245         struct ufs_hba *hba = shost_priv(sdev->host);
5246         struct request_queue *q = sdev->request_queue;
5247
5248         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5249
5250         /*
5251          * Block runtime-pm until all consumers are added.
5252          * Refer to ufshcd_setup_links().
5253          */
5254         if (is_device_wlun(sdev))
5255                 pm_runtime_get_noresume(&sdev->sdev_gendev);
5256         else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5257                 sdev->rpm_autosuspend = 1;
5258         /*
5259          * Do not print messages during runtime PM to avoid never-ending cycles
5260          * of messages written back to storage by user space causing runtime
5261          * resume, causing more messages and so on.
5262          */
5263         sdev->silence_suspend = 1;
5264
5265         if (hba->vops && hba->vops->config_scsi_dev)
5266                 hba->vops->config_scsi_dev(sdev);
5267
5268         ufshcd_crypto_register(hba, q);
5269
5270         return 0;
5271 }
5272
5273 /**
5274  * ufshcd_slave_destroy - remove SCSI device configurations
5275  * @sdev: pointer to SCSI device
5276  */
5277 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5278 {
5279         struct ufs_hba *hba;
5280         unsigned long flags;
5281
5282         hba = shost_priv(sdev->host);
5283
5284         /* Drop the reference as it won't be needed anymore */
5285         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5286                 spin_lock_irqsave(hba->host->host_lock, flags);
5287                 hba->ufs_device_wlun = NULL;
5288                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5289         } else if (hba->ufs_device_wlun) {
5290                 struct device *supplier = NULL;
5291
5292                 /* Ensure UFS Device WLUN exists and does not disappear */
5293                 spin_lock_irqsave(hba->host->host_lock, flags);
5294                 if (hba->ufs_device_wlun) {
5295                         supplier = &hba->ufs_device_wlun->sdev_gendev;
5296                         get_device(supplier);
5297                 }
5298                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5299
5300                 if (supplier) {
5301                         /*
5302                          * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5303                          * device will not have been registered but can still
5304                          * have a device link holding a reference to the device.
5305                          */
5306                         device_link_remove(&sdev->sdev_gendev, supplier);
5307                         put_device(supplier);
5308                 }
5309         }
5310 }
5311
5312 /**
5313  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5314  * @lrbp: pointer to local reference block of completed command
5315  * @scsi_status: SCSI command status
5316  *
5317  * Return: value based on SCSI command status.
5318  */
5319 static inline int
5320 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5321 {
5322         int result = 0;
5323
5324         switch (scsi_status) {
5325         case SAM_STAT_CHECK_CONDITION:
5326                 ufshcd_copy_sense_data(lrbp);
5327                 fallthrough;
5328         case SAM_STAT_GOOD:
5329                 result |= DID_OK << 16 | scsi_status;
5330                 break;
5331         case SAM_STAT_TASK_SET_FULL:
5332         case SAM_STAT_BUSY:
5333         case SAM_STAT_TASK_ABORTED:
5334                 ufshcd_copy_sense_data(lrbp);
5335                 result |= scsi_status;
5336                 break;
5337         default:
5338                 result |= DID_ERROR << 16;
5339                 break;
5340         } /* end of switch */
5341
5342         return result;
5343 }
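/*
 * The result value built above follows the SCSI midlayer convention: the host
 * byte (DID_*) occupies bits 23:16 and the SAM status byte bits 7:0, so a
 * successful command is reported as DID_OK << 16 | SAM_STAT_GOOD.
 */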
5344
5345 /**
5346  * ufshcd_transfer_rsp_status - Get overall status of the response
5347  * @hba: per adapter instance
5348  * @lrbp: pointer to local reference block of completed command
5349  * @cqe: pointer to the completion queue entry
5350  *
5351  * Return: result of the command to notify SCSI midlayer.
5352  */
5353 static inline int
5354 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5355                            struct cq_entry *cqe)
5356 {
5357         int result = 0;
5358         int scsi_status;
5359         enum utp_ocs ocs;
5360         u8 upiu_flags;
5361         u32 resid;
5362
5363         upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
5364         resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
5365         /*
5366          * Test !overflow instead of underflow to support UFS devices that do
5367          * not set either flag.
5368          */
5369         if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
5370                 scsi_set_resid(lrbp->cmd, resid);
5371
5372         /* overall command status of utrd */
5373         ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5374
5375         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5376                 if (lrbp->ucd_rsp_ptr->header.response ||
5377                     lrbp->ucd_rsp_ptr->header.status)
5378                         ocs = OCS_SUCCESS;
5379         }
5380
5381         switch (ocs) {
5382         case OCS_SUCCESS:
5383                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5384                 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
5385                 case UPIU_TRANSACTION_RESPONSE:
5386                         /*
5387                          * get the result based on SCSI status response
5388                          * to notify the SCSI midlayer of the command status
5389                          */
5390                         scsi_status = lrbp->ucd_rsp_ptr->header.status;
5391                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5392
5393                         /*
5394                          * Currently only BKOPs exception events are supported,
5395                          * so exception events can be ignored during the power
5396                          * management callbacks. A BKOPs exception event is not
5397                          * expected to be raised in the runtime suspend callback
5398                          * as that path allows urgent BKOPs.
5399                          * During system suspend, BKOPs is forcefully disabled
5400                          * anyway and, if urgent BKOPs is needed, it will be
5401                          * re-enabled on system resume. A long term
5402                          * solution could be to abort the system suspend if the
5403                          * UFS device needs urgent BKOPs.
5404                          */
5405                         if (!hba->pm_op_in_progress &&
5406                             !ufshcd_eh_in_progress(hba) &&
5407                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5408                                 /* Flushed in suspend */
5409                                 schedule_work(&hba->eeh_work);
5410                         break;
5411                 case UPIU_TRANSACTION_REJECT_UPIU:
5412                         /* TODO: handle Reject UPIU Response */
5413                         result = DID_ERROR << 16;
5414                         dev_err(hba->dev,
5415                                 "Reject UPIU not fully implemented\n");
5416                         break;
5417                 default:
5418                         dev_err(hba->dev,
5419                                 "Unexpected request response code = %x\n",
5420                                 result);
5421                         result = DID_ERROR << 16;
5422                         break;
5423                 }
5424                 break;
5425         case OCS_ABORTED:
5426                 result |= DID_ABORT << 16;
5427                 break;
5428         case OCS_INVALID_COMMAND_STATUS:
5429                 result |= DID_REQUEUE << 16;
5430                 break;
5431         case OCS_INVALID_CMD_TABLE_ATTR:
5432         case OCS_INVALID_PRDT_ATTR:
5433         case OCS_MISMATCH_DATA_BUF_SIZE:
5434         case OCS_MISMATCH_RESP_UPIU_SIZE:
5435         case OCS_PEER_COMM_FAILURE:
5436         case OCS_FATAL_ERROR:
5437         case OCS_DEVICE_FATAL_ERROR:
5438         case OCS_INVALID_CRYPTO_CONFIG:
5439         case OCS_GENERAL_CRYPTO_ERROR:
5440         default:
5441                 result |= DID_ERROR << 16;
5442                 dev_err(hba->dev,
5443                                 "OCS error from controller = %x for tag %d\n",
5444                                 ocs, lrbp->task_tag);
5445                 ufshcd_print_evt_hist(hba);
5446                 ufshcd_print_host_state(hba);
5447                 break;
5448         } /* end of switch */
5449
5450         if ((host_byte(result) != DID_OK) &&
5451             (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5452                 ufshcd_print_tr(hba, lrbp->task_tag, true);
5453         return result;
5454 }
5455
5456 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5457                                          u32 intr_mask)
5458 {
5459         if (!ufshcd_is_auto_hibern8_supported(hba) ||
5460             !ufshcd_is_auto_hibern8_enabled(hba))
5461                 return false;
5462
5463         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5464                 return false;
5465
5466         if (hba->active_uic_cmd &&
5467             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5468             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5469                 return false;
5470
5471         return true;
5472 }
5473
5474 /**
5475  * ufshcd_uic_cmd_compl - handle completion of uic command
5476  * @hba: per adapter instance
5477  * @intr_status: interrupt status generated by the controller
5478  *
5479  * Return:
5480  *  IRQ_HANDLED - If interrupt is valid
5481  *  IRQ_NONE    - If invalid interrupt
5482  */
5483 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5484 {
5485         irqreturn_t retval = IRQ_NONE;
5486
5487         spin_lock(hba->host->host_lock);
5488         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5489                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5490
5491         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5492                 hba->active_uic_cmd->argument2 |=
5493                         ufshcd_get_uic_cmd_result(hba);
5494                 hba->active_uic_cmd->argument3 =
5495                         ufshcd_get_dme_attr_val(hba);
5496                 if (!hba->uic_async_done)
5497                         hba->active_uic_cmd->cmd_active = 0;
5498                 complete(&hba->active_uic_cmd->done);
5499                 retval = IRQ_HANDLED;
5500         }
5501
5502         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5503                 hba->active_uic_cmd->cmd_active = 0;
5504                 complete(hba->uic_async_done);
5505                 retval = IRQ_HANDLED;
5506         }
5507
5508         if (retval == IRQ_HANDLED)
5509                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5510                                              UFS_CMD_COMP);
5511         spin_unlock(hba->host->host_lock);
5512         return retval;
5513 }
5514
5515 /* Release the resources allocated for processing a SCSI command. */
5516 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5517                              struct ufshcd_lrb *lrbp)
5518 {
5519         struct scsi_cmnd *cmd = lrbp->cmd;
5520
5521         scsi_dma_unmap(cmd);
5522         ufshcd_release(hba);
5523         ufshcd_clk_scaling_update_busy(hba);
5524 }
5525
5526 /**
5527  * ufshcd_compl_one_cqe - handle a completion queue entry
5528  * @hba: per adapter instance
5529  * @task_tag: the task tag of the request to be completed
5530  * @cqe: pointer to the completion queue entry
5531  */
5532 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5533                           struct cq_entry *cqe)
5534 {
5535         struct ufshcd_lrb *lrbp;
5536         struct scsi_cmnd *cmd;
5537         enum utp_ocs ocs;
5538
5539         lrbp = &hba->lrb[task_tag];
5540         lrbp->compl_time_stamp = ktime_get();
5541         cmd = lrbp->cmd;
5542         if (cmd) {
5543                 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5544                         ufshcd_update_monitor(hba, lrbp);
5545                 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5546                 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5547                 ufshcd_release_scsi_cmd(hba, lrbp);
5548                 /* Do not touch lrbp after scsi done */
5549                 scsi_done(cmd);
5550         } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5551                    lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5552                 if (hba->dev_cmd.complete) {
5553                         if (cqe) {
5554                                 ocs = le32_to_cpu(cqe->status) & MASK_OCS;
5555                                 lrbp->utr_descriptor_ptr->header.ocs = ocs;
5556                         }
5557                         complete(hba->dev_cmd.complete);
5558                 }
5559         }
5560 }
5561
5562 /**
5563  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5564  * @hba: per adapter instance
5565  * @completed_reqs: bitmask that indicates which requests to complete
5566  */
5567 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5568                                         unsigned long completed_reqs)
5569 {
5570         int tag;
5571
5572         for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5573                 ufshcd_compl_one_cqe(hba, tag, NULL);
5574 }
5575
5576 /* Any value that is not an existing queue number is fine for this constant. */
5577 enum {
5578         UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5579 };
5580
5581 static void ufshcd_clear_polled(struct ufs_hba *hba,
5582                                 unsigned long *completed_reqs)
5583 {
5584         int tag;
5585
5586         for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5587                 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5588
5589                 if (!cmd)
5590                         continue;
5591                 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5592                         __clear_bit(tag, completed_reqs);
5593         }
5594 }
5595
5596 /*
5597  * Return: > 0 if one or more commands have been completed or 0 if no
5598  * requests have been completed.
5599  */
5600 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5601 {
5602         struct ufs_hba *hba = shost_priv(shost);
5603         unsigned long completed_reqs, flags;
5604         u32 tr_doorbell;
5605         struct ufs_hw_queue *hwq;
5606
5607         if (is_mcq_enabled(hba)) {
5608                 hwq = &hba->uhq[queue_num];
5609
5610                 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5611         }
5612
5613         spin_lock_irqsave(&hba->outstanding_lock, flags);
5614         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5615         completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5616         WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5617                   "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5618                   hba->outstanding_reqs);
5619         if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5620                 /* Do not complete polled requests from interrupt context. */
5621                 ufshcd_clear_polled(hba, &completed_reqs);
5622         }
5623         hba->outstanding_reqs &= ~completed_reqs;
5624         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5625
5626         if (completed_reqs)
5627                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5628
5629         return completed_reqs != 0;
5630 }
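/*
 * ufshcd_poll() serves as the ->mq_poll() callback of the SCSI host template,
 * so the block layer calls it directly for REQ_POLLED requests; the interrupt
 * path reuses it with UFSHCD_POLL_FROM_INTERRUPT_CONTEXT as the queue number.
 */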
5631
5632 /**
5633  * ufshcd_mcq_compl_pending_transfer - complete pending transfers in MCQ mode
5634  * @hba: per adapter instance
5635  * @force_compl: set to true when invoked from ufshcd_host_reset_and_restore(),
5636  * which requires special handling because the host controller has been reset
5637  * by ufshcd_hba_stop()
5638  *
5639  * Invoked from the error handler context or from
5640  * ufshcd_host_reset_and_restore() to complete the pending transfers and free
5641  * the resources associated with the SCSI commands.
5642  */
5643 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
5644                                               bool force_compl)
5645 {
5646         struct ufs_hw_queue *hwq;
5647         struct ufshcd_lrb *lrbp;
5648         struct scsi_cmnd *cmd;
5649         unsigned long flags;
5650         int tag;
5651
5652         for (tag = 0; tag < hba->nutrs; tag++) {
5653                 lrbp = &hba->lrb[tag];
5654                 cmd = lrbp->cmd;
5655                 if (!ufshcd_cmd_inflight(cmd) ||
5656                     test_bit(SCMD_STATE_COMPLETE, &cmd->state))
5657                         continue;
5658
5659                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
5660
5661                 if (force_compl) {
5662                         ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
5663                         /*
5664                          * Explicitly complete the commands whose CQEs are
5665                          * not present in the CQ.
5666                          */
5667                         spin_lock_irqsave(&hwq->cq_lock, flags);
5668                         if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
5669                                 set_host_byte(cmd, DID_REQUEUE);
5670                                 ufshcd_release_scsi_cmd(hba, lrbp);
5671                                 scsi_done(cmd);
5672                         }
5673                         spin_unlock_irqrestore(&hwq->cq_lock, flags);
5674                 } else {
5675                         ufshcd_mcq_poll_cqe_lock(hba, hwq);
5676                 }
5677         }
5678 }
5679
5680 /**
5681  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5682  * @hba: per adapter instance
5683  *
5684  * Return:
5685  *  IRQ_HANDLED - If interrupt is valid
5686  *  IRQ_NONE    - If invalid interrupt
5687  */
5688 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5689 {
5690         /* Resetting interrupt aggregation counters first and reading the
5691          * DOOR_BELL afterward allows us to handle all the completed requests.
5692          * To prevent starving other interrupts the DB is read only once
5693          * after the reset. The downside of this approach is the possibility of
5694          * a false interrupt if the device completes another request after
5695          * resetting aggregation and before reading the DB.
5696          */
5697         if (ufshcd_is_intr_aggr_allowed(hba) &&
5698             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5699                 ufshcd_reset_intr_aggr(hba);
5700
5701         if (ufs_fail_completion(hba))
5702                 return IRQ_HANDLED;
5703
5704         /*
5705          * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5706          * do not want polling to trigger spurious interrupt complaints.
5707          */
5708         ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5709
5710         return IRQ_HANDLED;
5711 }
5712
5713 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5714 {
5715         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5716                                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5717                                        &ee_ctrl_mask);
5718 }
5719
5720 int ufshcd_write_ee_control(struct ufs_hba *hba)
5721 {
5722         int err;
5723
5724         mutex_lock(&hba->ee_ctrl_mutex);
5725         err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5726         mutex_unlock(&hba->ee_ctrl_mutex);
5727         if (err)
5728                 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5729                         __func__, err);
5730         return err;
5731 }
5732
5733 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5734                              const u16 *other_mask, u16 set, u16 clr)
5735 {
5736         u16 new_mask, ee_ctrl_mask;
5737         int err = 0;
5738
5739         mutex_lock(&hba->ee_ctrl_mutex);
5740         new_mask = (*mask & ~clr) | set;
5741         ee_ctrl_mask = new_mask | *other_mask;
5742         if (ee_ctrl_mask != hba->ee_ctrl_mask)
5743                 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5744         /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5745         if (!err) {
5746                 hba->ee_ctrl_mask = ee_ctrl_mask;
5747                 *mask = new_mask;
5748         }
5749         mutex_unlock(&hba->ee_ctrl_mutex);
5750         return err;
5751 }
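
/*
 * A minimal worked example of the mask arithmetic in
 * ufshcd_update_ee_control(), using purely illustrative values: with
 * *mask == 0x04, *other_mask == 0x10, set == 0x20 and clr == 0x04,
 *
 *	new_mask     = (0x04 & ~0x04) | 0x20 = 0x20;
 *	ee_ctrl_mask = 0x20 | 0x10           = 0x30;
 *
 * The EE control attribute (QUERY_ATTR_IDN_EE_CONTROL) is written only if
 * 0x30 differs from the cached hba->ee_ctrl_mask; on success both the cache
 * and *mask are updated, and *mask is refreshed even when the combined mask
 * is unchanged.
 */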
5752
5753 /**
5754  * ufshcd_disable_ee - disable exception event
5755  * @hba: per-adapter instance
5756  * @mask: exception event to disable
5757  *
5758  * Disables exception event in the device so that the EVENT_ALERT
5759  * bit is not set.
5760  *
5761  * Return: zero on success, non-zero error value on failure.
5762  */
5763 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5764 {
5765         return ufshcd_update_ee_drv_mask(hba, 0, mask);
5766 }
5767
5768 /**
5769  * ufshcd_enable_ee - enable exception event
5770  * @hba: per-adapter instance
5771  * @mask: exception event to enable
5772  *
5773  * Enable the corresponding exception event in the device to allow the
5774  * device to alert the host in critical scenarios.
5775  *
5776  * Return: zero on success, non-zero error value on failure.
5777  */
5778 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5779 {
5780         return ufshcd_update_ee_drv_mask(hba, mask, 0);
5781 }
5782
5783 /**
5784  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5785  * @hba: per-adapter instance
5786  *
5787  * Allow the device to manage background operations on its own. Enabling
5788  * this might lead to inconsistent latencies during normal data transfers
5789  * because the device may schedule background operations whenever it
5790  * sees fit.
5791  *
5792  * Return: zero on success, non-zero on failure.
5793  */
5794 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5795 {
5796         int err = 0;
5797
5798         if (hba->auto_bkops_enabled)
5799                 goto out;
5800
5801         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5802                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5803         if (err) {
5804                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5805                                 __func__, err);
5806                 goto out;
5807         }
5808
5809         hba->auto_bkops_enabled = true;
5810         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5811
5812         /* No need of URGENT_BKOPS exception from the device */
5813         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5814         if (err)
5815                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5816                                 __func__, err);
5817 out:
5818         return err;
5819 }
5820
5821 /**
5822  * ufshcd_disable_auto_bkops - prevent the device from doing background operations
5823  * @hba: per-adapter instance
5824  *
5825  * Disabling background operations improves command response latency but
5826  * has the drawback that the device may move into a critical state in which
5827  * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
5828  * the host is idle so that BKOPS are managed effectively without any
5829  * negative impact.
5830  *
5831  * Return: zero on success, non-zero on failure.
5832  */
5833 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5834 {
5835         int err = 0;
5836
5837         if (!hba->auto_bkops_enabled)
5838                 goto out;
5839
5840         /*
5841          * If host-assisted BKOPS is to be used, make sure the
5842          * urgent BKOPS exception is allowed.
5843          */
5844         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5845         if (err) {
5846                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5847                                 __func__, err);
5848                 goto out;
5849         }
5850
5851         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5852                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5853         if (err) {
5854                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5855                                 __func__, err);
5856                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5857                 goto out;
5858         }
5859
5860         hba->auto_bkops_enabled = false;
5861         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5862         hba->is_urgent_bkops_lvl_checked = false;
5863 out:
5864         return err;
5865 }
5866
5867 /**
5868  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5869  * @hba: per adapter instance
5870  *
5871  * After a device reset the device may reset the BKOPS_EN flag to its
5872  * default value, so the s/w tracking variables must be updated as well.
5873  * This function changes the auto-bkops state based on
5874  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5875  */
5876 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5877 {
5878         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5879                 hba->auto_bkops_enabled = false;
5880                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5881                 ufshcd_enable_auto_bkops(hba);
5882         } else {
5883                 hba->auto_bkops_enabled = true;
5884                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5885                 ufshcd_disable_auto_bkops(hba);
5886         }
5887         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5888         hba->is_urgent_bkops_lvl_checked = false;
5889 }
5890
5891 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5892 {
5893         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5894                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5895 }
5896
5897 /**
5898  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5899  * @hba: per-adapter instance
5900  * @status: bkops_status value
5901  *
5902  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5903  * flag in the device to permit background operations if the device
5904  * bkops_status is greater than or equal to the "status" argument passed to
5905  * this function; disable it otherwise.
5906  *
5907  * Return: 0 for success, non-zero in case of failure.
5908  *
5909  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5910  * to know whether auto bkops is enabled or disabled after this function
5911  * returns control to it.
5912  */
5913 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5914                              enum bkops_status status)
5915 {
5916         int err;
5917         u32 curr_status = 0;
5918
5919         err = ufshcd_get_bkops_status(hba, &curr_status);
5920         if (err) {
5921                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5922                                 __func__, err);
5923                 goto out;
5924         } else if (curr_status > BKOPS_STATUS_MAX) {
5925                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5926                                 __func__, curr_status);
5927                 err = -EINVAL;
5928                 goto out;
5929         }
5930
5931         if (curr_status >= status)
5932                 err = ufshcd_enable_auto_bkops(hba);
5933         else
5934                 err = ufshcd_disable_auto_bkops(hba);
5935 out:
5936         return err;
5937 }
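
/*
 * A minimal sketch of how the threshold comparison in ufshcd_bkops_ctrl()
 * plays out (illustrative scenario only): if the caller passes
 * status == BKOPS_STATUS_PERF_IMPACT and the device reports a bkops_status
 * at or above that level, auto-BKOPS is enabled; if the device reports a
 * lower level, auto-BKOPS is disabled. Either way the caller can inspect
 * hba->auto_bkops_enabled afterwards, as noted in the kernel-doc above.
 */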
5938
5939 /**
5940  * ufshcd_urgent_bkops - handle urgent bkops exception event
5941  * @hba: per-adapter instance
5942  *
5943  * Enable fBackgroundOpsEn flag in the device to permit background
5944  * operations.
5945  *
5946  * The caller can check hba->auto_bkops_enabled afterwards to find out
5947  * whether auto BKOPS ended up enabled or disabled.
5948  *
5949  * Return: 0 upon success; < 0 upon failure.
5950  */
5951 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5952 {
5953         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5954 }
5955
5956 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5957 {
5958         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5959                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5960 }
5961
5962 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5963 {
5964         int err;
5965         u32 curr_status = 0;
5966
5967         if (hba->is_urgent_bkops_lvl_checked)
5968                 goto enable_auto_bkops;
5969
5970         err = ufshcd_get_bkops_status(hba, &curr_status);
5971         if (err) {
5972                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5973                                 __func__, err);
5974                 goto out;
5975         }
5976
5977         /*
5978          * We are seeing that some devices raise the urgent BKOPS exception
5979          * event even when the BKOPS status doesn't indicate a performance
5980          * impact or a critical state. Handle such devices by determining
5981          * their urgent bkops status at runtime.
5982          */
5983         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5984                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5985                                 __func__, curr_status);
5986                 /* update the current status as the urgent bkops level */
5987                 hba->urgent_bkops_lvl = curr_status;
5988                 hba->is_urgent_bkops_lvl_checked = true;
5989         }
5990
5991 enable_auto_bkops:
5992         err = ufshcd_enable_auto_bkops(hba);
5993 out:
5994         if (err < 0)
5995                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5996                                 __func__, err);
5997 }
5998
5999 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
6000 {
6001         u32 value;
6002
6003         if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6004                                 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
6005                 return;
6006
6007         dev_info(hba->dev, "exception Tcase %d\n", value - 80);
6008
6009         ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
6010
6011         /*
6012          * A placeholder for platform vendors to add whatever additional
6013          * steps are required.
6014          */
6015 }
6016
6017 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
6018 {
6019         u8 index;
6020         enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
6021                                    UPIU_QUERY_OPCODE_CLEAR_FLAG;
6022
6023         index = ufshcd_wb_get_query_index(hba);
6024         return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
6025 }
6026
6027 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
6028 {
6029         int ret;
6030
6031         if (!ufshcd_is_wb_allowed(hba) ||
6032             hba->dev_info.wb_enabled == enable)
6033                 return 0;
6034
6035         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
6036         if (ret) {
6037                 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
6038                         __func__, enable ? "enabling" : "disabling", ret);
6039                 return ret;
6040         }
6041
6042         hba->dev_info.wb_enabled = enable;
6043         dev_dbg(hba->dev, "%s: Write Booster %s\n",
6044                         __func__, enable ? "enabled" : "disabled");
6045
6046         return ret;
6047 }
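
/*
 * Usage sketch (illustrative): a caller that wants to turn WriteBooster on
 * simply does
 *
 *	ret = ufshcd_wb_toggle(hba, true);
 *
 * which issues a SET_FLAG query for QUERY_FLAG_IDN_WB_EN (CLEAR_FLAG when
 * disabling) and caches the result in hba->dev_info.wb_enabled, so a
 * repeated call with the same value returns 0 without touching the device.
 */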
6048
6049 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
6050                                                  bool enable)
6051 {
6052         int ret;
6053
6054         ret = __ufshcd_wb_toggle(hba, enable,
6055                         QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
6056         if (ret) {
6057                 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
6058                         __func__, enable ? "enabling" : "disabling", ret);
6059                 return;
6060         }
6061         dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
6062                         __func__, enable ? "enabled" : "disabled");
6063 }
6064
6065 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
6066 {
6067         int ret;
6068
6069         if (!ufshcd_is_wb_allowed(hba) ||
6070             hba->dev_info.wb_buf_flush_enabled == enable)
6071                 return 0;
6072
6073         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
6074         if (ret) {
6075                 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
6076                         __func__, enable ? "enabling" : "disabling", ret);
6077                 return ret;
6078         }
6079
6080         hba->dev_info.wb_buf_flush_enabled = enable;
6081         dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
6082                         __func__, enable ? "enabled" : "disabled");
6083
6084         return ret;
6085 }
6086
6087 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
6088                                                 u32 avail_buf)
6089 {
6090         u32 cur_buf;
6091         int ret;
6092         u8 index;
6093
6094         index = ufshcd_wb_get_query_index(hba);
6095         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6096                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
6097                                               index, 0, &cur_buf);
6098         if (ret) {
6099                 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
6100                         __func__, ret);
6101                 return false;
6102         }
6103
6104         if (!cur_buf) {
6105                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
6106                          cur_buf);
6107                 return false;
6108         }
6109         /* Keep flushing only while the available buffer is below the threshold */
6110         return avail_buf < hba->vps->wb_flush_threshold;
6111 }
6112
6113 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
6114 {
6115         if (ufshcd_is_wb_buf_flush_allowed(hba))
6116                 ufshcd_wb_toggle_buf_flush(hba, false);
6117
6118         ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
6119         ufshcd_wb_toggle(hba, false);
6120         hba->caps &= ~UFSHCD_CAP_WB_EN;
6121
6122         dev_info(hba->dev, "%s: WB force disabled\n", __func__);
6123 }
6124
6125 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
6126 {
6127         u32 lifetime;
6128         int ret;
6129         u8 index;
6130
6131         index = ufshcd_wb_get_query_index(hba);
6132         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6133                                       QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
6134                                       index, 0, &lifetime);
6135         if (ret) {
6136                 dev_err(hba->dev,
6137                         "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
6138                         __func__, ret);
6139                 return false;
6140         }
6141
6142         if (lifetime == UFS_WB_EXCEED_LIFETIME) {
6143                 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
6144                         __func__, lifetime);
6145                 return false;
6146         }
6147
6148         dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
6149                 __func__, lifetime);
6150
6151         return true;
6152 }
6153
6154 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6155 {
6156         int ret;
6157         u32 avail_buf;
6158         u8 index;
6159
6160         if (!ufshcd_is_wb_allowed(hba))
6161                 return false;
6162
6163         if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6164                 ufshcd_wb_force_disable(hba);
6165                 return false;
6166         }
6167
6168         /*
6169          * The UFS device needs VCC to be ON in order to flush.
6170          * With user-space reduction enabled, it's enough to decide whether
6171          * to flush by checking only the available buffer; the threshold
6172          * used here corresponds to the buffer being more than 90% full.
6173          * With user-space preservation enabled, the current buffer should
6174          * be checked too, because the WB buffer size can shrink as the disk
6175          * fills up. This information is provided by the current buffer size
6176          * (dCurrentWriteBoosterBufferSize). There is no point in keeping
6177          * VCC on when the current buffer is empty.
6178          */
6179         index = ufshcd_wb_get_query_index(hba);
6180         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6181                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6182                                       index, 0, &avail_buf);
6183         if (ret) {
6184                 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6185                          __func__, ret);
6186                 return false;
6187         }
6188
6189         if (!hba->dev_info.b_presrv_uspc_en)
6190                 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6191
6192         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
6193 }
6194
6195 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6196 {
6197         struct ufs_hba *hba = container_of(to_delayed_work(work),
6198                                            struct ufs_hba,
6199                                            rpm_dev_flush_recheck_work);
6200         /*
6201          * To prevent unnecessary VCC power drain after the device finishes
6202          * a WriteBooster buffer flush or Auto BKOPS, force a runtime resume
6203          * after a certain delay so that the threshold is rechecked on the
6204          * next runtime suspend.
6205          */
6206         ufshcd_rpm_get_sync(hba);
6207         ufshcd_rpm_put_sync(hba);
6208 }
6209
6210 /**
6211  * ufshcd_exception_event_handler - handle exceptions raised by device
6212  * @work: pointer to work data
6213  *
6214  * Read bExceptionEventStatus attribute from the device and handle the
6215  * exception event accordingly.
6216  */
6217 static void ufshcd_exception_event_handler(struct work_struct *work)
6218 {
6219         struct ufs_hba *hba;
6220         int err;
6221         u32 status = 0;
6222         hba = container_of(work, struct ufs_hba, eeh_work);
6223
6224         ufshcd_scsi_block_requests(hba);
6225         err = ufshcd_get_ee_status(hba, &status);
6226         if (err) {
6227                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6228                                 __func__, err);
6229                 goto out;
6230         }
6231
6232         trace_ufshcd_exception_event(dev_name(hba->dev), status);
6233
6234         if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6235                 ufshcd_bkops_exception_event_handler(hba);
6236
6237         if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6238                 ufshcd_temp_exception_event_handler(hba, status);
6239
6240         ufs_debugfs_exception_event(hba, status);
6241 out:
6242         ufshcd_scsi_unblock_requests(hba);
6243 }
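
/*
 * A small illustration of the masking in ufshcd_exception_event_handler()
 * (hypothetical scenario): if the device reports an exception status with
 * both the urgent-BKOPS and urgent-temperature bits set, but only
 * MASK_EE_URGENT_BKOPS is present in hba->ee_drv_mask, then only the BKOPS
 * handler runs; ufs_debugfs_exception_event() still receives the raw status.
 */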
6244
6245 /* Complete requests that have door-bell cleared */
6246 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
6247 {
6248         if (is_mcq_enabled(hba))
6249                 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
6250         else
6251                 ufshcd_transfer_req_compl(hba);
6252
6253         ufshcd_tmc_handler(hba);
6254 }
6255
6256 /**
6257  * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
6258  *                              is required to recover from DL NAC errors.
6259  * @hba: per-adapter instance
6260  *
6261  * Return: true if error handling is required, false otherwise.
6262  */
6263 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6264 {
6265         unsigned long flags;
6266         bool err_handling = true;
6267
6268         spin_lock_irqsave(hba->host->host_lock, flags);
6269         /*
6270          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
6271          * device fatal errors and/or DL NAC & REPLAY timeout errors.
6272          */
6273         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6274                 goto out;
6275
6276         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6277             ((hba->saved_err & UIC_ERROR) &&
6278              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6279                 goto out;
6280
6281         if ((hba->saved_err & UIC_ERROR) &&
6282             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6283                 int err;
6284                 /*
6285                  * Wait 50 ms to see whether any other errors are reported.
6286                  */
6287                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6288                 msleep(50);
6289                 spin_lock_irqsave(hba->host->host_lock, flags);
6290
6291                 /*
6292                  * Now check whether we have received any severe errors other
6293                  * than the DL NAC error.
6294                  */
6295                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6296                     ((hba->saved_err & UIC_ERROR) &&
6297                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6298                         goto out;
6299
6300                 /*
6301                  * As DL NAC is the only error received so far, send out a NOP
6302                  * command to confirm whether the link is still active.
6303                  *   - If we don't get any response then do error recovery.
6304                  *   - If we get response then clear the DL NAC error bit.
6305                  */
6306
6307                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6308                 err = ufshcd_verify_dev_init(hba);
6309                 spin_lock_irqsave(hba->host->host_lock, flags);
6310
6311                 if (err)
6312                         goto out;
6313
6314                 /* Link seems to be alive hence ignore the DL NAC errors */
6315                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6316                         hba->saved_err &= ~UIC_ERROR;
6317                 /* clear NAC error */
6318                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6319                 if (!hba->saved_uic_err)
6320                         err_handling = false;
6321         }
6322 out:
6323         spin_unlock_irqrestore(hba->host->host_lock, flags);
6324         return err_handling;
6325 }
6326
6327 /* host lock must be held before calling this func */
6328 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6329 {
6330         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6331                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6332 }
6333
6334 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6335 {
6336         lockdep_assert_held(hba->host->host_lock);
6337
6338         /* handle fatal errors only when link is not in error state */
6339         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6340                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6341                     ufshcd_is_saved_err_fatal(hba))
6342                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6343                 else
6344                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6345                 queue_work(hba->eh_wq, &hba->eh_work);
6346         }
6347 }
6348
6349 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6350 {
6351         spin_lock_irq(hba->host->host_lock);
6352         hba->force_reset = true;
6353         ufshcd_schedule_eh_work(hba);
6354         spin_unlock_irq(hba->host->host_lock);
6355 }
6356
6357 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6358 {
6359         mutex_lock(&hba->wb_mutex);
6360         down_write(&hba->clk_scaling_lock);
6361         hba->clk_scaling.is_allowed = allow;
6362         up_write(&hba->clk_scaling_lock);
6363         mutex_unlock(&hba->wb_mutex);
6364 }
6365
6366 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6367 {
6368         if (suspend) {
6369                 if (hba->clk_scaling.is_enabled)
6370                         ufshcd_suspend_clkscaling(hba);
6371                 ufshcd_clk_scaling_allow(hba, false);
6372         } else {
6373                 ufshcd_clk_scaling_allow(hba, true);
6374                 if (hba->clk_scaling.is_enabled)
6375                         ufshcd_resume_clkscaling(hba);
6376         }
6377 }
6378
6379 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6380 {
6381         ufshcd_rpm_get_sync(hba);
6382         if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6383             hba->is_sys_suspended) {
6384                 enum ufs_pm_op pm_op;
6385
6386                 /*
6387                  * Don't assume anything about the resume path: if
6388                  * resume fails, IRQs and clocks can be OFF, and power
6389                  * supplies can be OFF or in LPM.
6390                  */
6391                 ufshcd_setup_hba_vreg(hba, true);
6392                 ufshcd_enable_irq(hba);
6393                 ufshcd_setup_vreg(hba, true);
6394                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6395                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6396                 ufshcd_hold(hba);
6397                 if (!ufshcd_is_clkgating_allowed(hba))
6398                         ufshcd_setup_clocks(hba, true);
6399                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6400                 ufshcd_vops_resume(hba, pm_op);
6401         } else {
6402                 ufshcd_hold(hba);
6403                 if (ufshcd_is_clkscaling_supported(hba) &&
6404                     hba->clk_scaling.is_enabled)
6405                         ufshcd_suspend_clkscaling(hba);
6406                 ufshcd_clk_scaling_allow(hba, false);
6407         }
6408         ufshcd_scsi_block_requests(hba);
6409         /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
6410         blk_mq_wait_quiesce_done(&hba->host->tag_set);
6411         cancel_work_sync(&hba->eeh_work);
6412 }
6413
6414 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6415 {
6416         ufshcd_scsi_unblock_requests(hba);
6417         ufshcd_release(hba);
6418         if (ufshcd_is_clkscaling_supported(hba))
6419                 ufshcd_clk_scaling_suspend(hba, false);
6420         ufshcd_rpm_put(hba);
6421 }
6422
6423 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6424 {
6425         return (!hba->is_powered || hba->shutting_down ||
6426                 !hba->ufs_device_wlun ||
6427                 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6428                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6429                    ufshcd_is_link_broken(hba))));
6430 }
6431
6432 #ifdef CONFIG_PM
6433 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6434 {
6435         struct Scsi_Host *shost = hba->host;
6436         struct scsi_device *sdev;
6437         struct request_queue *q;
6438         int ret;
6439
6440         hba->is_sys_suspended = false;
6441         /*
6442          * Set the RPM status of the WLUN device to RPM_ACTIVE;
6443          * this also clears its runtime error.
6444          */
6445         ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6446
6447         /* hba device might have a runtime error otherwise */
6448         if (ret)
6449                 ret = pm_runtime_set_active(hba->dev);
6450         /*
6451          * If the WLUN device had a runtime error, we also need to resume its
6452          * consumer SCSI devices in case any of them failed to resume because
6453          * of the supplier's runtime resume failure. This unblocks
6454          * blk_queue_enter() in case there are bios waiting inside it.
6455          */
6456         if (!ret) {
6457                 shost_for_each_device(sdev, shost) {
6458                         q = sdev->request_queue;
6459                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6460                                        q->rpm_status == RPM_SUSPENDING))
6461                                 pm_request_resume(q->dev);
6462                 }
6463         }
6464 }
6465 #else
6466 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6467 {
6468 }
6469 #endif
6470
6471 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6472 {
6473         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6474         u32 mode;
6475
6476         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6477
6478         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6479                 return true;
6480
6481         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6482                 return true;
6483
6484         return false;
6485 }
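
/*
 * An illustrative reading of the check above: PA_PWRMODE packs the RX power
 * mode in the bits selected by PWRMODE_RX_OFFSET/PWRMODE_MASK and the TX
 * power mode in the low bits. If, for example, a LINERESET left the link in
 * (SLOW_MODE << PWRMODE_RX_OFFSET) | SLOW_MODE while hba->pwr_info still
 * expects FAST_MODE in both directions, this helper returns true and the
 * error handler takes the needs_restore path, reprogramming the power mode
 * via ufshcd_config_pwr_mode().
 */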
6486
6487 static bool ufshcd_abort_one(struct request *rq, void *priv)
6488 {
6489         int *ret = priv;
6490         u32 tag = rq->tag;
6491         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
6492         struct scsi_device *sdev = cmd->device;
6493         struct Scsi_Host *shost = sdev->host;
6494         struct ufs_hba *hba = shost_priv(shost);
6495         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6496         struct ufs_hw_queue *hwq;
6497         unsigned long flags;
6498
6499         *ret = ufshcd_try_to_abort_task(hba, tag);
6500         dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6501                 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6502                 *ret ? "failed" : "succeeded");
6503
6504         /* Release cmd in MCQ mode if abort succeeds */
6505         if (is_mcq_enabled(hba) && (*ret == 0)) {
6506                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
6507                 spin_lock_irqsave(&hwq->cq_lock, flags);
6508                 if (ufshcd_cmd_inflight(lrbp->cmd))
6509                         ufshcd_release_scsi_cmd(hba, lrbp);
6510                 spin_unlock_irqrestore(&hwq->cq_lock, flags);
6511         }
6512
6513         return *ret == 0;
6514 }
6515
6516 /**
6517  * ufshcd_abort_all - Abort all pending commands.
6518  * @hba: Host bus adapter pointer.
6519  *
6520  * Return: true if and only if the host controller needs to be reset.
6521  */
6522 static bool ufshcd_abort_all(struct ufs_hba *hba)
6523 {
6524         int tag, ret = 0;
6525
6526         blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
6527         if (ret)
6528                 goto out;
6529
6530         /* Clear pending task management requests */
6531         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6532                 ret = ufshcd_clear_tm_cmd(hba, tag);
6533                 if (ret)
6534                         goto out;
6535         }
6536
6537 out:
6538         /* Complete the requests that are cleared by s/w */
6539         ufshcd_complete_requests(hba, false);
6540
6541         return ret != 0;
6542 }
6543
6544 /**
6545  * ufshcd_err_handler - handle UFS errors that require s/w attention
6546  * @work: pointer to work structure
6547  */
6548 static void ufshcd_err_handler(struct work_struct *work)
6549 {
6550         int retries = MAX_ERR_HANDLER_RETRIES;
6551         struct ufs_hba *hba;
6552         unsigned long flags;
6553         bool needs_restore;
6554         bool needs_reset;
6555         int pmc_err;
6556
6557         hba = container_of(work, struct ufs_hba, eh_work);
6558
6559         dev_info(hba->dev,
6560                  "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6561                  __func__, ufshcd_state_name[hba->ufshcd_state],
6562                  hba->is_powered, hba->shutting_down, hba->saved_err,
6563                  hba->saved_uic_err, hba->force_reset,
6564                  ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6565
6566         down(&hba->host_sem);
6567         spin_lock_irqsave(hba->host->host_lock, flags);
6568         if (ufshcd_err_handling_should_stop(hba)) {
6569                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6570                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6571                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6572                 up(&hba->host_sem);
6573                 return;
6574         }
6575         ufshcd_set_eh_in_progress(hba);
6576         spin_unlock_irqrestore(hba->host->host_lock, flags);
6577         ufshcd_err_handling_prepare(hba);
6578         /* Complete requests that have door-bell cleared by h/w */
6579         ufshcd_complete_requests(hba, false);
6580         spin_lock_irqsave(hba->host->host_lock, flags);
6581 again:
6582         needs_restore = false;
6583         needs_reset = false;
6584
6585         if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6586                 hba->ufshcd_state = UFSHCD_STATE_RESET;
6587         /*
6588          * A full reset and restore might have happened after preparation
6589          * is finished, double check whether we should stop.
6590          */
6591         if (ufshcd_err_handling_should_stop(hba))
6592                 goto skip_err_handling;
6593
6594         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6595                 bool ret;
6596
6597                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6598                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6599                 ret = ufshcd_quirk_dl_nac_errors(hba);
6600                 spin_lock_irqsave(hba->host->host_lock, flags);
6601                 if (!ret && ufshcd_err_handling_should_stop(hba))
6602                         goto skip_err_handling;
6603         }
6604
6605         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6606             (hba->saved_uic_err &&
6607              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6608                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6609
6610                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6611                 ufshcd_print_host_state(hba);
6612                 ufshcd_print_pwr_info(hba);
6613                 ufshcd_print_evt_hist(hba);
6614                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6615                 ufshcd_print_trs_all(hba, pr_prdt);
6616                 spin_lock_irqsave(hba->host->host_lock, flags);
6617         }
6618
6619         /*
6620          * If a host reset is required then skip forcefully clearing the
6621          * pending transfers because they will be cleared during the
6622          * host reset and restore.
6623          */
6624         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6625             ufshcd_is_saved_err_fatal(hba) ||
6626             ((hba->saved_err & UIC_ERROR) &&
6627              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6628                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6629                 needs_reset = true;
6630                 goto do_reset;
6631         }
6632
6633         /*
6634          * If LINERESET was caught, UFS might have been put to PWM mode,
6635          * check if power mode restore is needed.
6636          */
6637         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6638                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6639                 if (!hba->saved_uic_err)
6640                         hba->saved_err &= ~UIC_ERROR;
6641                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6642                 if (ufshcd_is_pwr_mode_restore_needed(hba))
6643                         needs_restore = true;
6644                 spin_lock_irqsave(hba->host->host_lock, flags);
6645                 if (!hba->saved_err && !needs_restore)
6646                         goto skip_err_handling;
6647         }
6648
6649         hba->silence_err_logs = true;
6650         /* release lock as clear command might sleep */
6651         spin_unlock_irqrestore(hba->host->host_lock, flags);
6652
6653         needs_reset = ufshcd_abort_all(hba);
6654
6655         spin_lock_irqsave(hba->host->host_lock, flags);
6656         hba->silence_err_logs = false;
6657         if (needs_reset)
6658                 goto do_reset;
6659
6660         /*
6661          * After all requests and tasks have been cleared from the doorbell,
6662          * it is now safe to restore the power mode.
6663          */
6664         if (needs_restore) {
6665                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6666                 /*
6667                  * Hold the scaling lock just in case dev cmds
6668                  * are sent via bsg and/or sysfs.
6669                  */
6670                 down_write(&hba->clk_scaling_lock);
6671                 hba->force_pmc = true;
6672                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6673                 if (pmc_err) {
6674                         needs_reset = true;
6675                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6676                                         __func__, pmc_err);
6677                 }
6678                 hba->force_pmc = false;
6679                 ufshcd_print_pwr_info(hba);
6680                 up_write(&hba->clk_scaling_lock);
6681                 spin_lock_irqsave(hba->host->host_lock, flags);
6682         }
6683
6684 do_reset:
6685         /* Fatal errors need reset */
6686         if (needs_reset) {
6687                 int err;
6688
6689                 hba->force_reset = false;
6690                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6691                 err = ufshcd_reset_and_restore(hba);
6692                 if (err)
6693                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6694                                         __func__, err);
6695                 else
6696                         ufshcd_recover_pm_error(hba);
6697                 spin_lock_irqsave(hba->host->host_lock, flags);
6698         }
6699
6700 skip_err_handling:
6701         if (!needs_reset) {
6702                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6703                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6704                 if (hba->saved_err || hba->saved_uic_err)
6705                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6706                             __func__, hba->saved_err, hba->saved_uic_err);
6707         }
6708         /* Exit in an operational state or dead */
6709         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6710             hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6711                 if (--retries)
6712                         goto again;
6713                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6714         }
6715         ufshcd_clear_eh_in_progress(hba);
6716         spin_unlock_irqrestore(hba->host->host_lock, flags);
6717         ufshcd_err_handling_unprepare(hba);
6718         up(&hba->host_sem);
6719
6720         dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6721                  ufshcd_state_name[hba->ufshcd_state]);
6722 }
6723
6724 /**
6725  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6726  * @hba: per-adapter instance
6727  *
6728  * Return:
6729  *  IRQ_HANDLED - If interrupt is valid
6730  *  IRQ_NONE    - If invalid interrupt
6731  */
6732 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6733 {
6734         u32 reg;
6735         irqreturn_t retval = IRQ_NONE;
6736
6737         /* PHY layer error */
6738         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6739         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6740             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6741                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6742                 /*
6743                  * To know whether this error is fatal or not, the DB timeout
6744                  * must be checked, but that error is handled separately.
6745                  */
6746                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6747                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6748                                         __func__);
6749
6750                 /* Got a LINERESET indication. */
6751                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6752                         struct uic_command *cmd = NULL;
6753
6754                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6755                         if (hba->uic_async_done && hba->active_uic_cmd)
6756                                 cmd = hba->active_uic_cmd;
6757                         /*
6758                          * Ignore the LINERESET during power mode change
6759                          * operation via DME_SET command.
6760                          */
6761                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
6762                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6763                 }
6764                 retval |= IRQ_HANDLED;
6765         }
6766
6767         /* PA_INIT_ERROR is fatal and needs UIC reset */
6768         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6769         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6770             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6771                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6772
6773                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6774                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6775                 else if (hba->dev_quirks &
6776                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6777                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6778                                 hba->uic_error |=
6779                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6780                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6781                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6782                 }
6783                 retval |= IRQ_HANDLED;
6784         }
6785
6786         /* UIC NL/TL/DME errors need a software retry */
6787         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6788         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6789             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6790                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6791                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6792                 retval |= IRQ_HANDLED;
6793         }
6794
6795         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6796         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6797             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6798                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6799                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6800                 retval |= IRQ_HANDLED;
6801         }
6802
6803         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6804         if ((reg & UIC_DME_ERROR) &&
6805             (reg & UIC_DME_ERROR_CODE_MASK)) {
6806                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6807                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6808                 retval |= IRQ_HANDLED;
6809         }
6810
6811         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6812                         __func__, hba->uic_error);
6813         return retval;
6814 }
6815
6816 /**
6817  * ufshcd_check_errors - Check for errors that need s/w attention
6818  * @hba: per-adapter instance
6819  * @intr_status: interrupt status generated by the controller
6820  *
6821  * Return:
6822  *  IRQ_HANDLED - If interrupt is valid
6823  *  IRQ_NONE    - If invalid interrupt
6824  */
6825 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6826 {
6827         bool queue_eh_work = false;
6828         irqreturn_t retval = IRQ_NONE;
6829
6830         spin_lock(hba->host->host_lock);
6831         hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6832
6833         if (hba->errors & INT_FATAL_ERRORS) {
6834                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6835                                        hba->errors);
6836                 queue_eh_work = true;
6837         }
6838
6839         if (hba->errors & UIC_ERROR) {
6840                 hba->uic_error = 0;
6841                 retval = ufshcd_update_uic_error(hba);
6842                 if (hba->uic_error)
6843                         queue_eh_work = true;
6844         }
6845
6846         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6847                 dev_err(hba->dev,
6848                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6849                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6850                         "Enter" : "Exit",
6851                         hba->errors, ufshcd_get_upmcrs(hba));
6852                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6853                                        hba->errors);
6854                 ufshcd_set_link_broken(hba);
6855                 queue_eh_work = true;
6856         }
6857
6858         if (queue_eh_work) {
6859                 /*
6860                  * Update the transfer error masks to sticky bits; do this
6861                  * irrespective of the current ufshcd_state.
6862                  */
6863                 hba->saved_err |= hba->errors;
6864                 hba->saved_uic_err |= hba->uic_error;
6865
6866                 /* dump controller state before resetting */
6867                 if ((hba->saved_err &
6868                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6869                     (hba->saved_uic_err &&
6870                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6871                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6872                                         __func__, hba->saved_err,
6873                                         hba->saved_uic_err);
6874                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6875                                          "host_regs: ");
6876                         ufshcd_print_pwr_info(hba);
6877                 }
6878                 ufshcd_schedule_eh_work(hba);
6879                 retval |= IRQ_HANDLED;
6880         }
6881         /*
6882          * if (!queue_eh_work) -
6883          * The remaining errors are either non-fatal ones from which the host
6884          * recovers by itself without s/w intervention, or errors that will be
6885          * handled by the SCSI core layer.
6886          */
6887         hba->errors = 0;
6888         hba->uic_error = 0;
6889         spin_unlock(hba->host->host_lock);
6890         return retval;
6891 }
6892
6893 /**
6894  * ufshcd_tmc_handler - handle task management function completion
6895  * @hba: per adapter instance
6896  *
6897  * Return:
6898  *  IRQ_HANDLED - If interrupt is valid
6899  *  IRQ_NONE    - If invalid interrupt
6900  */
6901 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6902 {
6903         unsigned long flags, pending, issued;
6904         irqreturn_t ret = IRQ_NONE;
6905         int tag;
6906
6907         spin_lock_irqsave(hba->host->host_lock, flags);
6908         pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6909         issued = hba->outstanding_tasks & ~pending;
6910         for_each_set_bit(tag, &issued, hba->nutmrs) {
6911                 struct request *req = hba->tmf_rqs[tag];
6912                 struct completion *c = req->end_io_data;
6913
6914                 complete(c);
6915                 ret = IRQ_HANDLED;
6916         }
6917         spin_unlock_irqrestore(hba->host->host_lock, flags);
6918
6919         return ret;
6920 }
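
/*
 * A worked example of the bit arithmetic in ufshcd_tmc_handler(), with
 * illustrative values: if hba->outstanding_tasks == 0b0011 (TM tags 0 and 1
 * issued) and the REG_UTP_TASK_REQ_DOOR_BELL read returns 0b0010 (tag 1
 * still being processed), then issued == 0b0001 and only the completion
 * that __ufshcd_issue_tm_cmd() is waiting on for tag 0 is signalled.
 */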
6921
6922 /**
6923  * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6924  * @hba: per adapter instance
6925  *
6926  * Return: IRQ_HANDLED if interrupt is handled.
6927  */
6928 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6929 {
6930         struct ufs_hw_queue *hwq;
6931         unsigned long outstanding_cqs;
6932         unsigned int nr_queues;
6933         int i, ret;
6934         u32 events;
6935
6936         ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6937         if (ret)
6938                 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6939
6940         /* Exclude the poll queues */
6941         nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6942         for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6943                 hwq = &hba->uhq[i];
6944
6945                 events = ufshcd_mcq_read_cqis(hba, i);
6946                 if (events)
6947                         ufshcd_mcq_write_cqis(hba, events, i);
6948
6949                 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6950                         ufshcd_mcq_poll_cqe_lock(hba, hwq);
6951         }
6952
6953         return IRQ_HANDLED;
6954 }
6955
6956 /**
6957  * ufshcd_sl_intr - Interrupt service routine
6958  * @hba: per adapter instance
6959  * @intr_status: contains interrupts generated by the controller
6960  *
6961  * Return:
6962  *  IRQ_HANDLED - If interrupt is valid
6963  *  IRQ_NONE    - If invalid interrupt
6964  */
6965 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6966 {
6967         irqreturn_t retval = IRQ_NONE;
6968
6969         if (intr_status & UFSHCD_UIC_MASK)
6970                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6971
6972         if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6973                 retval |= ufshcd_check_errors(hba, intr_status);
6974
6975         if (intr_status & UTP_TASK_REQ_COMPL)
6976                 retval |= ufshcd_tmc_handler(hba);
6977
6978         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6979                 retval |= ufshcd_transfer_req_compl(hba);
6980
6981         if (intr_status & MCQ_CQ_EVENT_STATUS)
6982                 retval |= ufshcd_handle_mcq_cq_events(hba);
6983
6984         return retval;
6985 }
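
/*
 * Dispatch example (illustrative): if intr_status arrives with both
 * UTP_TRANSFER_REQ_COMPL and UTP_TASK_REQ_COMPL set, ufshcd_sl_intr() runs
 * both the transfer completion handler and the task management handler and
 * ORs their return values, so ufshcd_intr() below reports IRQ_HANDLED as
 * long as at least one sub-handler recognised its interrupt source.
 */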
6986
6987 /**
6988  * ufshcd_intr - Main interrupt service routine
6989  * @irq: irq number
6990  * @__hba: pointer to adapter instance
6991  *
6992  * Return:
6993  *  IRQ_HANDLED - If interrupt is valid
6994  *  IRQ_NONE    - If invalid interrupt
6995  */
6996 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6997 {
6998         u32 intr_status, enabled_intr_status = 0;
6999         irqreturn_t retval = IRQ_NONE;
7000         struct ufs_hba *hba = __hba;
7001         int retries = hba->nutrs;
7002
7003         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7004         hba->ufs_stats.last_intr_status = intr_status;
7005         hba->ufs_stats.last_intr_ts = local_clock();
7006
7007         /*
7008          * There can be at most hba->nutrs requests in flight. In the worst
7009          * case the requests finish one by one after the interrupt status has
7010          * been read, so re-read the interrupt status in a loop and keep
7011          * handling completions until all of them have been processed.
7012          */
7013         while (intr_status && retries--) {
7014                 enabled_intr_status =
7015                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
7016                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
7017                 if (enabled_intr_status)
7018                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7019
7020                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7021         }
7022
7023         if (enabled_intr_status && retval == IRQ_NONE &&
7024             (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
7025              hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
7026                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
7027                                         __func__,
7028                                         intr_status,
7029                                         hba->ufs_stats.last_intr_status,
7030                                         enabled_intr_status);
7031                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
7032         }
7033
7034         return retval;
7035 }
7036
7037 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
7038 {
7039         int err = 0;
7040         u32 mask = 1 << tag;
7041         unsigned long flags;
7042
7043         if (!test_bit(tag, &hba->outstanding_tasks))
7044                 goto out;
7045
7046         spin_lock_irqsave(hba->host->host_lock, flags);
7047         ufshcd_utmrl_clear(hba, tag);
7048         spin_unlock_irqrestore(hba->host->host_lock, flags);
7049
7050         /* Poll for at most 1 second for the doorbell register to be cleared by h/w */
7051         err = ufshcd_wait_for_register(hba,
7052                         REG_UTP_TASK_REQ_DOOR_BELL,
7053                         mask, 0, 1000, 1000);
7054
7055         dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
7056                 tag, err < 0 ? "failed" : "succeeded");
7057
7058 out:
7059         return err;
7060 }
7061
7062 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
7063                 struct utp_task_req_desc *treq, u8 tm_function)
7064 {
7065         struct request_queue *q = hba->tmf_queue;
7066         struct Scsi_Host *host = hba->host;
7067         DECLARE_COMPLETION_ONSTACK(wait);
7068         struct request *req;
7069         unsigned long flags;
7070         int task_tag, err;
7071
7072         /*
7073          * blk_mq_alloc_request() is used here only to get a free tag.
7074          */
7075         req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
7076         if (IS_ERR(req))
7077                 return PTR_ERR(req);
7078
7079         req->end_io_data = &wait;
7080         ufshcd_hold(hba);
7081
7082         spin_lock_irqsave(host->host_lock, flags);
7083
7084         task_tag = req->tag;
7085         hba->tmf_rqs[req->tag] = req;
7086         treq->upiu_req.req_header.task_tag = task_tag;
7087
7088         memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
7089         ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
7090
7091         /* send command to the controller */
7092         __set_bit(task_tag, &hba->outstanding_tasks);
7093
7094         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
7095         /* Make sure that doorbell is committed immediately */
7096         wmb();
7097
7098         spin_unlock_irqrestore(host->host_lock, flags);
7099
7100         ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
7101
7102         /* wait until the task management command is completed */
7103         err = wait_for_completion_io_timeout(&wait,
7104                         msecs_to_jiffies(TM_CMD_TIMEOUT));
7105         if (!err) {
7106                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
7107                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
7108                                 __func__, tm_function);
7109                 if (ufshcd_clear_tm_cmd(hba, task_tag))
7110                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7111                                         __func__, task_tag);
7112                 err = -ETIMEDOUT;
7113         } else {
7114                 err = 0;
7115                 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
7116
7117                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7118         }
7119
7120         spin_lock_irqsave(hba->host->host_lock, flags);
7121         hba->tmf_rqs[req->tag] = NULL;
7122         __clear_bit(task_tag, &hba->outstanding_tasks);
7123         spin_unlock_irqrestore(hba->host->host_lock, flags);
7124
7125         ufshcd_release(hba);
7126         blk_mq_free_request(req);
7127
7128         return err;
7129 }
7130
7131 /**
7132  * ufshcd_issue_tm_cmd - issues task management commands to controller
7133  * @hba: per adapter instance
7134  * @lun_id: LUN ID to which TM command is sent
7135  * @task_id: task ID to which the TM command is applicable
7136  * @tm_function: task management function opcode
7137  * @tm_response: task management service response return value
7138  *
7139  * Return: non-zero value on error, zero on success.
7140  */
7141 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
7142                 u8 tm_function, u8 *tm_response)
7143 {
7144         struct utp_task_req_desc treq = { };
7145         enum utp_ocs ocs_value;
7146         int err;
7147
7148         /* Configure task request descriptor */
7149         treq.header.interrupt = 1;
7150         treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7151
7152         /* Configure task request UPIU */
7153         treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
7154         treq.upiu_req.req_header.lun = lun_id;
7155         treq.upiu_req.req_header.tm_function = tm_function;
7156
7157         /*
7158          * The host shall provide the same value for the LUN field in the basic
7159          * header and for Input Parameter 1.
7160          */
7161         treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
7162         treq.upiu_req.input_param2 = cpu_to_be32(task_id);
7163
7164         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
7165         if (err == -ETIMEDOUT)
7166                 return err;
7167
7168         ocs_value = treq.header.ocs & MASK_OCS;
7169         if (ocs_value != OCS_SUCCESS)
7170                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
7171                                 __func__, ocs_value);
7172         else if (tm_response)
7173                 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
7174                                 MASK_TM_SERVICE_RESP;
7175         return err;
7176 }
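
/*
 * Illustrative sketch (hypothetical, not part of the driver): a minimal
 * caller of ufshcd_issue_tm_cmd(), modelled on how ufshcd_try_to_abort_task()
 * below queries whether a command is still pending in the device. It only
 * uses constants already referenced in this file.
 */
static int __maybe_unused ufshcd_example_query_task(struct ufs_hba *hba,
						    int lun, int task_tag)
{
	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lun, task_tag, UFS_QUERY_TASK, &resp);
	if (err)
		return err;

	/* SUCCEEDED means the command is still pending in the device. */
	return resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED ? 1 : 0;
}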
7177
7178 /**
7179  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7180  * @hba:        per-adapter instance
7181  * @req_upiu:   upiu request
7182  * @rsp_upiu:   upiu reply
7183  * @desc_buff:  pointer to descriptor buffer, NULL if NA
7184  * @buff_len:   descriptor size, 0 if NA
7185  * @cmd_type:   specifies the type (NOP, Query...)
7186  * @desc_op:    descriptor operation
7187  *
7188  * These types of requests use the UTP Transfer Request Descriptor (utrd).
7189  * Therefore, they "ride" the device management infrastructure: they use its
7190  * tag and task work queues.
7191  *
7192  * Since there is only one available tag for device management commands,
7193  * the caller is expected to hold the hba->dev_cmd.lock mutex.
7194  *
7195  * Return: 0 upon success; < 0 upon failure.
7196  */
7197 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7198                                         struct utp_upiu_req *req_upiu,
7199                                         struct utp_upiu_req *rsp_upiu,
7200                                         u8 *desc_buff, int *buff_len,
7201                                         enum dev_cmd_type cmd_type,
7202                                         enum query_opcode desc_op)
7203 {
7204         DECLARE_COMPLETION_ONSTACK(wait);
7205         const u32 tag = hba->reserved_slot;
7206         struct ufshcd_lrb *lrbp;
7207         int err = 0;
7208         u8 upiu_flags;
7209
7210         /* Protects use of hba->reserved_slot. */
7211         lockdep_assert_held(&hba->dev_cmd.lock);
7212
7213         down_read(&hba->clk_scaling_lock);
7214
7215         lrbp = &hba->lrb[tag];
7216         lrbp->cmd = NULL;
7217         lrbp->task_tag = tag;
7218         lrbp->lun = 0;
7219         lrbp->intr_cmd = true;
7220         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7221         hba->dev_cmd.type = cmd_type;
7222
7223         if (hba->ufs_version <= ufshci_version(1, 1))
7224                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
7225         else
7226                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7227
7228         /* update the task tag in the request upiu */
7229         req_upiu->header.task_tag = tag;
7230
7231         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7232
7233         /* just copy the upiu request as it is */
7234         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7235         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7236                 /* The Data Segment Area is optional depending upon the query
7237                  * function value. For WRITE DESCRIPTOR, the data segment
7238                  * follows right after the Transaction Specific Fields (tsf).
7239                  */
7240                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7241                 *buff_len = 0;
7242         }
7243
7244         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7245
7246         hba->dev_cmd.complete = &wait;
7247
7248         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
7249
7250         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7251         /*
7252          * Ignore the return value here - ufshcd_check_query_response() is
7253          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7254          * Read the response directly, ignoring all errors.
7255          */
7256         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7257
7258         /* just copy the upiu response as it is */
7259         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7260         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7261                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7262                 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
7263                                            .data_segment_length);
7264
7265                 if (*buff_len >= resp_len) {
7266                         memcpy(desc_buff, descp, resp_len);
7267                         *buff_len = resp_len;
7268                 } else {
7269                         dev_warn(hba->dev,
7270                                  "%s: rsp size %d is bigger than buffer size %d",
7271                                  __func__, resp_len, *buff_len);
7272                         *buff_len = 0;
7273                         err = -EINVAL;
7274                 }
7275         }
7276         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7277                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
7278
7279         up_read(&hba->clk_scaling_lock);
7280         return err;
7281 }
7282
7283 /**
7284  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7285  * @hba:        per-adapter instance
7286  * @req_upiu:   upiu request
7287  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
7288  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
7289  * @desc_buff:  pointer to descriptor buffer, NULL if NA
7290  * @buff_len:   descriptor size, 0 if NA
7291  * @desc_op:    descriptor operation
7292  *
7293  * Supports UTP Transfer requests (nop and query), and UTP Task
7294  * Management requests.
7295  * It is up to the caller to fill the upiu content properly, as it will
7296  * be copied without any further input validation.
7297  *
7298  * Return: 0 upon success; < 0 upon failure.
7299  */
7300 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7301                              struct utp_upiu_req *req_upiu,
7302                              struct utp_upiu_req *rsp_upiu,
7303                              enum upiu_request_transaction msgcode,
7304                              u8 *desc_buff, int *buff_len,
7305                              enum query_opcode desc_op)
7306 {
7307         int err;
7308         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7309         struct utp_task_req_desc treq = { };
7310         enum utp_ocs ocs_value;
7311         u8 tm_f = req_upiu->header.tm_function;
7312
7313         switch (msgcode) {
7314         case UPIU_TRANSACTION_NOP_OUT:
7315                 cmd_type = DEV_CMD_TYPE_NOP;
7316                 fallthrough;
7317         case UPIU_TRANSACTION_QUERY_REQ:
7318                 ufshcd_hold(hba);
7319                 mutex_lock(&hba->dev_cmd.lock);
7320                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7321                                                    desc_buff, buff_len,
7322                                                    cmd_type, desc_op);
7323                 mutex_unlock(&hba->dev_cmd.lock);
7324                 ufshcd_release(hba);
7325
7326                 break;
7327         case UPIU_TRANSACTION_TASK_REQ:
7328                 treq.header.interrupt = 1;
7329                 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7330
7331                 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7332
7333                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7334                 if (err == -ETIMEDOUT)
7335                         break;
7336
7337                 ocs_value = treq.header.ocs & MASK_OCS;
7338                 if (ocs_value != OCS_SUCCESS) {
7339                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7340                                 ocs_value);
7341                         break;
7342                 }
7343
7344                 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7345
7346                 break;
7347         default:
7348                 err = -EINVAL;
7349
7350                 break;
7351         }
7352
7353         return err;
7354 }
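
/*
 * Illustrative sketch (hypothetical, not part of the driver): how a caller
 * such as the BSG transport might push a raw NOP OUT UPIU through
 * ufshcd_exec_raw_upiu_cmd(). Field and constant names follow the structures
 * used above; treat this as an assumption-based example rather than the
 * canonical user of this API.
 */
static int __maybe_unused ufshcd_example_send_raw_nop(struct ufs_hba *hba)
{
	struct utp_upiu_req req_upiu = { };
	struct utp_upiu_req rsp_upiu = { };
	int buff_len = 0;

	req_upiu.header.transaction_code = UPIU_TRANSACTION_NOP_OUT;

	/* No descriptor payload is involved for a NOP OUT. */
	return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, &buff_len,
					UPIU_QUERY_OPCODE_NOP);
}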
7355
7356 /**
7357  * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7358  * @hba:        per adapter instance
7359  * @req_upiu:   upiu request
7360  * @rsp_upiu:   upiu reply
7361  * @req_ehs:    EHS field which contains Advanced RPMB Request Message
7362  * @rsp_ehs:    EHS field which returns Advanced RPMB Response Message
7363  * @sg_cnt:     The number of sg lists actually used
7364  * @sg_list:    Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7365  * @dir:        DMA direction
7366  *
7367  * Return: zero on success, non-zero on failure.
7368  */
7369 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7370                          struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7371                          struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7372                          enum dma_data_direction dir)
7373 {
7374         DECLARE_COMPLETION_ONSTACK(wait);
7375         const u32 tag = hba->reserved_slot;
7376         struct ufshcd_lrb *lrbp;
7377         int err = 0;
7378         int result;
7379         u8 upiu_flags;
7380         u8 *ehs_data;
7381         u16 ehs_len;
7382
7383         /* Protects use of hba->reserved_slot. */
7384         ufshcd_hold(hba);
7385         mutex_lock(&hba->dev_cmd.lock);
7386         down_read(&hba->clk_scaling_lock);
7387
7388         lrbp = &hba->lrb[tag];
7389         lrbp->cmd = NULL;
7390         lrbp->task_tag = tag;
7391         lrbp->lun = UFS_UPIU_RPMB_WLUN;
7392
7393         lrbp->intr_cmd = true;
7394         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7395         hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7396
7397         /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7398         lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7399
7400         /*
7401          * According to the UFSHCI 4.0 specification (page 24): if EHSLUTRDS is 0, the host
7402          * controller takes the EHS length from the CMD UPIU and the SW driver uses the EHS Length
7403          * field in the CMD UPIU; if it is 1, the host controller takes the EHS length from the UTRD.
7404          */
7405         if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
7406                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7407         else
7408                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
7409
7410         /* update the task tag */
7411         req_upiu->header.task_tag = tag;
7412
7413         /* copy the UPIU (which contains the CDB) request as it is */
7414         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7415         /* Copy EHS, starting with byte32, immediately after the CDB package */
7416         memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7417
7418         if (dir != DMA_NONE && sg_list)
7419                 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7420
7421         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7422
7423         hba->dev_cmd.complete = &wait;
7424
7425         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7426
7427         err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7428
7429         if (!err) {
7430                 /* Just copy the upiu response as it is */
7431                 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7432                 /* Get the response UPIU result */
7433                 result = (lrbp->ucd_rsp_ptr->header.response << 8) |
7434                         lrbp->ucd_rsp_ptr->header.status;
7435
7436                 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
7437                 /*
7438                  * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7439                  * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7440                  * Message is 02h
7441                  */
7442                 if (ehs_len == 2 && rsp_ehs) {
7443                         /*
7444                          * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7445                          * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7446                          */
7447                         ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7448                         memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7449                 }
7450         }
7451
7452         up_read(&hba->clk_scaling_lock);
7453         mutex_unlock(&hba->dev_cmd.lock);
7454         ufshcd_release(hba);
7455         return err ? : result;
7456 }
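
/*
 * Illustrative sketch (assumption-based, unused): the EHS length reported in
 * the response header is expressed in 32-byte units, so the Advanced RPMB
 * response handled above (bLength == 2) carries 2 * 32 = 64 bytes of EHS
 * data, starting at EHS_OFFSET_IN_RESPONSE within the response UPIU buffer.
 */
static inline size_t __maybe_unused ufshcd_example_ehs_bytes(u8 ehs_length)
{
	return (size_t)ehs_length * 32;
}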
7457
7458 /**
7459  * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7460  * @cmd: SCSI command pointer
7461  *
7462  * Return: SUCCESS or FAILED.
7463  */
7464 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7465 {
7466         unsigned long flags, pending_reqs = 0, not_cleared = 0;
7467         struct Scsi_Host *host;
7468         struct ufs_hba *hba;
7469         struct ufs_hw_queue *hwq;
7470         struct ufshcd_lrb *lrbp;
7471         u32 pos, not_cleared_mask = 0;
7472         int err;
7473         u8 resp = 0xF, lun;
7474
7475         host = cmd->device->host;
7476         hba = shost_priv(host);
7477
7478         lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7479         err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7480         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7481                 if (!err)
7482                         err = resp;
7483                 goto out;
7484         }
7485
7486         if (is_mcq_enabled(hba)) {
7487                 for (pos = 0; pos < hba->nutrs; pos++) {
7488                         lrbp = &hba->lrb[pos];
7489                         if (ufshcd_cmd_inflight(lrbp->cmd) &&
7490                             lrbp->lun == lun) {
7491                                 ufshcd_clear_cmd(hba, pos);
7492                                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7493                                 ufshcd_mcq_poll_cqe_lock(hba, hwq);
7494                         }
7495                 }
7496                 err = 0;
7497                 goto out;
7498         }
7499
7500         /* clear the commands that were pending for the corresponding LUN */
7501         spin_lock_irqsave(&hba->outstanding_lock, flags);
7502         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7503                 if (hba->lrb[pos].lun == lun)
7504                         __set_bit(pos, &pending_reqs);
7505         hba->outstanding_reqs &= ~pending_reqs;
7506         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7507
7508         for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
7509                 if (ufshcd_clear_cmd(hba, pos) < 0) {
7510                         spin_lock_irqsave(&hba->outstanding_lock, flags);
7511                         not_cleared = 1U << pos &
7512                                 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7513                         hba->outstanding_reqs |= not_cleared;
7514                         not_cleared_mask |= not_cleared;
7515                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7516
7517                         dev_err(hba->dev, "%s: failed to clear request %d\n",
7518                                 __func__, pos);
7519                 }
7520         }
7521         __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
7522
7523 out:
7524         hba->req_abort_count = 0;
7525         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7526         if (!err) {
7527                 err = SUCCESS;
7528         } else {
7529                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7530                 err = FAILED;
7531         }
7532         return err;
7533 }
7534
7535 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7536 {
7537         struct ufshcd_lrb *lrbp;
7538         int tag;
7539
7540         for_each_set_bit(tag, &bitmap, hba->nutrs) {
7541                 lrbp = &hba->lrb[tag];
7542                 lrbp->req_abort_skip = true;
7543         }
7544 }
7545
7546 /**
7547  * ufshcd_try_to_abort_task - abort a specific task
7548  * @hba: Pointer to adapter instance
7549  * @tag: Task tag/index to be aborted
7550  *
7551  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
7552  * management command, and in the host controller by clearing the doorbell
7553  * register. There can be a race between the controller sending the command to
7554  * the device and the abort being issued. To avoid that, first issue
7555  * UFS_QUERY_TASK to check whether the command was really issued, and only then try to abort it.
7556  *
7557  * Return: zero on success, non-zero on failure.
7558  */
7559 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7560 {
7561         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7562         int err = 0;
7563         int poll_cnt;
7564         u8 resp = 0xF;
7565         u32 reg;
7566
7567         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7568                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7569                                 UFS_QUERY_TASK, &resp);
7570                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7571                         /* cmd pending in the device */
7572                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7573                                 __func__, tag);
7574                         break;
7575                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7576                         /*
7577                          * cmd not pending in the device, check if it is
7578                          * in transition.
7579                          */
7580                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7581                                 __func__, tag);
7582                         if (is_mcq_enabled(hba)) {
7583                                 /* MCQ mode */
7584                                 if (ufshcd_cmd_inflight(lrbp->cmd)) {
7585                                         /* sleep for max. 200us same delay as in SDB mode */
7586                                         usleep_range(100, 200);
7587                                         continue;
7588                                 }
7589                                 /* command completed already */
7590                                 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
7591                                         __func__, tag);
7592                                 goto out;
7593                         }
7594
7595                         /* Single Doorbell Mode */
7596                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7597                         if (reg & (1 << tag)) {
7598                                 /* sleep for max. 200us to stabilize */
7599                                 usleep_range(100, 200);
7600                                 continue;
7601                         }
7602                         /* command completed already */
7603                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7604                                 __func__, tag);
7605                         goto out;
7606                 } else {
7607                         dev_err(hba->dev,
7608                                 "%s: no response from device. tag = %d, err %d\n",
7609                                 __func__, tag, err);
7610                         if (!err)
7611                                 err = resp; /* service response error */
7612                         goto out;
7613                 }
7614         }
7615
7616         if (!poll_cnt) {
7617                 err = -EBUSY;
7618                 goto out;
7619         }
7620
7621         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7622                         UFS_ABORT_TASK, &resp);
7623         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7624                 if (!err) {
7625                         err = resp; /* service response error */
7626                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7627                                 __func__, tag, err);
7628                 }
7629                 goto out;
7630         }
7631
7632         err = ufshcd_clear_cmd(hba, tag);
7633         if (err)
7634                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7635                         __func__, tag, err);
7636
7637 out:
7638         return err;
7639 }
7640
7641 /**
7642  * ufshcd_abort - scsi host template eh_abort_handler callback
7643  * @cmd: SCSI command pointer
7644  *
7645  * Return: SUCCESS or FAILED.
7646  */
7647 static int ufshcd_abort(struct scsi_cmnd *cmd)
7648 {
7649         struct Scsi_Host *host = cmd->device->host;
7650         struct ufs_hba *hba = shost_priv(host);
7651         int tag = scsi_cmd_to_rq(cmd)->tag;
7652         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7653         unsigned long flags;
7654         int err = FAILED;
7655         bool outstanding;
7656         u32 reg;
7657
7658         ufshcd_hold(hba);
7659
7660         if (!is_mcq_enabled(hba)) {
7661                 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7662                 if (!test_bit(tag, &hba->outstanding_reqs)) {
7663                         /* If command is already aborted/completed, return FAILED. */
7664                         dev_err(hba->dev,
7665                                 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7666                                 __func__, tag, hba->outstanding_reqs, reg);
7667                         goto release;
7668                 }
7669         }
7670
7671         /* Print Transfer Request of aborted task */
7672         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7673
7674         /*
7675          * Print detailed info about aborted request.
7676          * As more than one request might get aborted at the same time,
7677          * print full information only for the first aborted request in order
7678          * to reduce repeated printouts. For other aborted requests only print
7679          * basic details.
7680          */
7681         scsi_print_command(cmd);
7682         if (!hba->req_abort_count) {
7683                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7684                 ufshcd_print_evt_hist(hba);
7685                 ufshcd_print_host_state(hba);
7686                 ufshcd_print_pwr_info(hba);
7687                 ufshcd_print_tr(hba, tag, true);
7688         } else {
7689                 ufshcd_print_tr(hba, tag, false);
7690         }
7691         hba->req_abort_count++;
7692
7693         if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
7694                 /* only execute this code in single doorbell mode */
7695                 dev_err(hba->dev,
7696                 "%s: cmd was completed, but without a notifying intr, tag = %d",
7697                 __func__, tag);
7698                 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7699                 goto release;
7700         }
7701
7702         /*
7703          * Task abort to the device W-LUN is illegal. When this command
7704          * fails due to the spec violation, the next SCSI error-handling step
7705          * will be to send an LU reset which, again, is a spec violation.
7706          * To avoid these unnecessary/illegal steps, first we clean up
7707          * the lrb taken by this cmd and re-set it in outstanding_reqs,
7708          * then queue the eh_work and bail.
7709          */
7710         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7711                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7712
7713                 spin_lock_irqsave(host->host_lock, flags);
7714                 hba->force_reset = true;
7715                 ufshcd_schedule_eh_work(hba);
7716                 spin_unlock_irqrestore(host->host_lock, flags);
7717                 goto release;
7718         }
7719
7720         if (is_mcq_enabled(hba)) {
7721                 /* MCQ mode. Branch off to handle abort for mcq mode */
7722                 err = ufshcd_mcq_abort(cmd);
7723                 goto release;
7724         }
7725
7726         /* Skip task abort in case previous aborts failed and report failure */
7727         if (lrbp->req_abort_skip) {
7728                 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7729                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7730                 goto release;
7731         }
7732
7733         err = ufshcd_try_to_abort_task(hba, tag);
7734         if (err) {
7735                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7736                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7737                 err = FAILED;
7738                 goto release;
7739         }
7740
7741         /*
7742          * Clear the corresponding bit from outstanding_reqs since the command
7743          * has been aborted successfully.
7744          */
7745         spin_lock_irqsave(&hba->outstanding_lock, flags);
7746         outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7747         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7748
7749         if (outstanding)
7750                 ufshcd_release_scsi_cmd(hba, lrbp);
7751
7752         err = SUCCESS;
7753
7754 release:
7755         /* Matches the ufshcd_hold() call at the start of this function. */
7756         ufshcd_release(hba);
7757         return err;
7758 }
7759
7760 /**
7761  * ufshcd_host_reset_and_restore - reset and restore host controller
7762  * @hba: per-adapter instance
7763  *
7764  * Note that host controller reset may issue DME_RESET to the
7765  * local and remote (device) UniPro stacks, and the attributes
7766  * are reset to their default state.
7767  *
7768  * Return: zero on success, non-zero on failure.
7769  */
7770 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7771 {
7772         int err;
7773
7774         /*
7775          * Stop the host controller and complete the requests
7776          * cleared by h/w
7777          */
7778         ufshcd_hba_stop(hba);
7779         hba->silence_err_logs = true;
7780         ufshcd_complete_requests(hba, true);
7781         hba->silence_err_logs = false;
7782
7783         /* scale up clocks to max frequency before full reinitialization */
7784         ufshcd_scale_clks(hba, ULONG_MAX, true);
7785
7786         err = ufshcd_hba_enable(hba);
7787
7788         /* Establish the link again and restore the device */
7789         if (!err)
7790                 err = ufshcd_probe_hba(hba, false);
7791
7792         if (err)
7793                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7794         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7795         return err;
7796 }
7797
7798 /**
7799  * ufshcd_reset_and_restore - reset and re-initialize host/device
7800  * @hba: per-adapter instance
7801  *
7802  * Reset and recover the device and the host, and re-establish the link. This
7803  * is helpful for recovering communication in fatal error conditions.
7804  *
7805  * Return: zero on success, non-zero on failure.
7806  */
7807 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7808 {
7809         u32 saved_err = 0;
7810         u32 saved_uic_err = 0;
7811         int err = 0;
7812         unsigned long flags;
7813         int retries = MAX_HOST_RESET_RETRIES;
7814
7815         spin_lock_irqsave(hba->host->host_lock, flags);
7816         do {
7817                 /*
7818                  * This is a fresh start; cache and clear the saved errors first,
7819                  * in case new errors are generated during reset and restore.
7820                  */
7821                 saved_err |= hba->saved_err;
7822                 saved_uic_err |= hba->saved_uic_err;
7823                 hba->saved_err = 0;
7824                 hba->saved_uic_err = 0;
7825                 hba->force_reset = false;
7826                 hba->ufshcd_state = UFSHCD_STATE_RESET;
7827                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7828
7829                 /* Reset the attached device */
7830                 ufshcd_device_reset(hba);
7831
7832                 err = ufshcd_host_reset_and_restore(hba);
7833
7834                 spin_lock_irqsave(hba->host->host_lock, flags);
7835                 if (err)
7836                         continue;
7837                 /* Do not exit unless operational or dead */
7838                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7839                     hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7840                     hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7841                         err = -EAGAIN;
7842         } while (err && --retries);
7843
7844         /*
7845          * Inform the SCSI mid-layer that we did a reset so that it can handle
7846          * Unit Attention properly.
7847          */
7848         scsi_report_bus_reset(hba->host, 0);
7849         if (err) {
7850                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7851                 hba->saved_err |= saved_err;
7852                 hba->saved_uic_err |= saved_uic_err;
7853         }
7854         spin_unlock_irqrestore(hba->host->host_lock, flags);
7855
7856         return err;
7857 }
7858
7859 /**
7860  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7861  * @cmd: SCSI command pointer
7862  *
7863  * Return: SUCCESS or FAILED.
7864  */
7865 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7866 {
7867         int err = SUCCESS;
7868         unsigned long flags;
7869         struct ufs_hba *hba;
7870
7871         hba = shost_priv(cmd->device->host);
7872
7873         /*
7874          * If runtime PM sent SSU and got a timeout, scsi_error_handler is
7875          * stuck in this function waiting for flush_work(&hba->eh_work), while
7876          * ufshcd_err_handler() (eh_work) is stuck waiting for runtime PM. Use
7877          * ufshcd_link_recovery() instead of eh_work to prevent the deadlock.
7878          */
7879         if (hba->pm_op_in_progress) {
7880                 if (ufshcd_link_recovery(hba))
7881                         err = FAILED;
7882
7883                 return err;
7884         }
7885
7886         spin_lock_irqsave(hba->host->host_lock, flags);
7887         hba->force_reset = true;
7888         ufshcd_schedule_eh_work(hba);
7889         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7890         spin_unlock_irqrestore(hba->host->host_lock, flags);
7891
7892         flush_work(&hba->eh_work);
7893
7894         spin_lock_irqsave(hba->host->host_lock, flags);
7895         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7896                 err = FAILED;
7897         spin_unlock_irqrestore(hba->host->host_lock, flags);
7898
7899         return err;
7900 }
7901
7902 /**
7903  * ufshcd_get_max_icc_level - calculate the ICC level
7904  * @sup_curr_uA: max. current supported by the regulator
7905  * @start_scan: row at the desc table to start scan from
7906  * @buff: power descriptor buffer
7907  *
7908  * Return: calculated max ICC level for specific regulator.
7909  */
7910 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7911                                     const char *buff)
7912 {
7913         int i;
7914         int curr_uA;
7915         u16 data;
7916         u16 unit;
7917
7918         for (i = start_scan; i >= 0; i--) {
7919                 data = get_unaligned_be16(&buff[2 * i]);
7920                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7921                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7922                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7923                 switch (unit) {
7924                 case UFSHCD_NANO_AMP:
7925                         curr_uA = curr_uA / 1000;
7926                         break;
7927                 case UFSHCD_MILI_AMP:
7928                         curr_uA = curr_uA * 1000;
7929                         break;
7930                 case UFSHCD_AMP:
7931                         curr_uA = curr_uA * 1000 * 1000;
7932                         break;
7933                 case UFSHCD_MICRO_AMP:
7934                 default:
7935                         break;
7936                 }
7937                 if (sup_curr_uA >= curr_uA)
7938                         break;
7939         }
7940         if (i < 0) {
7941                 i = 0;
7942                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7943         }
7944
7945         return (u32)i;
7946 }
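
/*
 * Illustrative sketch (assumption-based, unused): decoding one 16-bit ICC
 * level entry into microamps, mirroring the unit handling in
 * ufshcd_get_max_icc_level() above. For example, a raw value whose unit field
 * selects milliamps is scaled by 1000.
 */
static u32 __maybe_unused ufshcd_example_icc_entry_to_uA(const u8 *entry)
{
	u16 data = get_unaligned_be16(entry);
	u16 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET;
	u32 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;

	if (unit == UFSHCD_NANO_AMP)
		curr_uA /= 1000;
	else if (unit == UFSHCD_MILI_AMP)
		curr_uA *= 1000;
	else if (unit == UFSHCD_AMP)
		curr_uA *= 1000 * 1000;

	/* UFSHCD_MICRO_AMP and unknown units are already in microamps. */
	return curr_uA;
}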
7947
7948 /**
7949  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7950  * In case the regulators are not initialized, 0 is returned.
7951  * @hba: per-adapter instance
7952  * @desc_buf: power descriptor buffer to extract ICC levels from.
7953  *
7954  * Return: calculated ICC level.
7955  */
7956 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7957                                                 const u8 *desc_buf)
7958 {
7959         u32 icc_level = 0;
7960
7961         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7962                                                 !hba->vreg_info.vccq2) {
7963                 /*
7964                  * Use dev_dbg() to avoid messages during runtime PM; otherwise
7965                  * messages written back to storage by user space would cause a
7966                  * runtime resume, which would cause more messages, and so on in a
7967                  * never-ending cycle.
7968                  */
7969                 dev_dbg(hba->dev,
7970                         "%s: Regulator capability was not set, actvIccLevel=%d",
7971                                                         __func__, icc_level);
7972                 goto out;
7973         }
7974
7975         if (hba->vreg_info.vcc->max_uA)
7976                 icc_level = ufshcd_get_max_icc_level(
7977                                 hba->vreg_info.vcc->max_uA,
7978                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7979                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7980
7981         if (hba->vreg_info.vccq->max_uA)
7982                 icc_level = ufshcd_get_max_icc_level(
7983                                 hba->vreg_info.vccq->max_uA,
7984                                 icc_level,
7985                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7986
7987         if (hba->vreg_info.vccq2->max_uA)
7988                 icc_level = ufshcd_get_max_icc_level(
7989                                 hba->vreg_info.vccq2->max_uA,
7990                                 icc_level,
7991                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7992 out:
7993         return icc_level;
7994 }
7995
7996 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7997 {
7998         int ret;
7999         u8 *desc_buf;
8000         u32 icc_level;
8001
8002         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8003         if (!desc_buf)
8004                 return;
8005
8006         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
8007                                      desc_buf, QUERY_DESC_MAX_SIZE);
8008         if (ret) {
8009                 dev_err(hba->dev,
8010                         "%s: Failed reading power descriptor ret = %d",
8011                         __func__, ret);
8012                 goto out;
8013         }
8014
8015         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
8016         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
8017
8018         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8019                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
8020
8021         if (ret)
8022                 dev_err(hba->dev,
8023                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
8024                         __func__, icc_level, ret);
8025
8026 out:
8027         kfree(desc_buf);
8028 }
8029
8030 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
8031 {
8032         struct Scsi_Host *shost = sdev->host;
8033
8034         scsi_autopm_get_device(sdev);
8035         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
8036         if (sdev->rpm_autosuspend)
8037                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
8038                                                  shost->rpm_autosuspend_delay);
8039         scsi_autopm_put_device(sdev);
8040 }
8041
8042 /**
8043  * ufshcd_scsi_add_wlus - Adds required W-LUs
8044  * @hba: per-adapter instance
8045  *
8046  * UFS device specification requires the UFS devices to support 4 well known
8047  * logical units:
8048  *      "REPORT_LUNS" (address: 01h)
8049  *      "UFS Device" (address: 50h)
8050  *      "RPMB" (address: 44h)
8051  *      "BOOT" (address: 30h)
8052  * UFS device's power management needs to be controlled by "POWER CONDITION"
8053  * field of SSU (START STOP UNIT) command. But this "power condition" field
8054  * will take effect only when it is sent to the "UFS device" well known logical
8055  * unit, hence we require the scsi_device instance to represent this logical unit in
8056  * order for the UFS host driver to send the SSU command for power management.
8057  *
8058  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
8059  * Block) LU so user space process can control this LU. User space may also
8060  * want to have access to BOOT LU.
8061  *
8062  * This function adds scsi device instances for each of the well known LUs
8063  * (except the "REPORT LUNS" LU).
8064  *
8065  * Return: zero on success (all required W-LUs are added successfully),
8066  * non-zero error value on failure (if failed to add any of the required W-LU).
8067  */
8068 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
8069 {
8070         int ret = 0;
8071         struct scsi_device *sdev_boot, *sdev_rpmb;
8072
8073         hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
8074                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
8075         if (IS_ERR(hba->ufs_device_wlun)) {
8076                 ret = PTR_ERR(hba->ufs_device_wlun);
8077                 hba->ufs_device_wlun = NULL;
8078                 goto out;
8079         }
8080         scsi_device_put(hba->ufs_device_wlun);
8081
8082         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
8083                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
8084         if (IS_ERR(sdev_rpmb)) {
8085                 ret = PTR_ERR(sdev_rpmb);
8086                 goto remove_ufs_device_wlun;
8087         }
8088         ufshcd_blk_pm_runtime_init(sdev_rpmb);
8089         scsi_device_put(sdev_rpmb);
8090
8091         sdev_boot = __scsi_add_device(hba->host, 0, 0,
8092                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
8093         if (IS_ERR(sdev_boot)) {
8094                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
8095         } else {
8096                 ufshcd_blk_pm_runtime_init(sdev_boot);
8097                 scsi_device_put(sdev_boot);
8098         }
8099         goto out;
8100
8101 remove_ufs_device_wlun:
8102         scsi_remove_device(hba->ufs_device_wlun);
8103 out:
8104         return ret;
8105 }
8106
8107 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
8108 {
8109         struct ufs_dev_info *dev_info = &hba->dev_info;
8110         u8 lun;
8111         u32 d_lu_wb_buf_alloc;
8112         u32 ext_ufs_feature;
8113
8114         if (!ufshcd_is_wb_allowed(hba))
8115                 return;
8116
8117         /*
8118          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
8119          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8120          * enabled
8121          */
8122         if (!(dev_info->wspecversion >= 0x310 ||
8123               dev_info->wspecversion == 0x220 ||
8124              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
8125                 goto wb_disabled;
8126
8127         ext_ufs_feature = get_unaligned_be32(desc_buf +
8128                                         DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8129
8130         if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
8131                 goto wb_disabled;
8132
8133         /*
8134          * WB may be supported but not configured while provisioning. The spec
8135          * says that, in dedicated WB buffer mode, at most one LUN would have a
8136          * WB buffer configured.
8137          */
8138         dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
8139
8140         dev_info->b_presrv_uspc_en =
8141                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
8142
8143         if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
8144                 if (!get_unaligned_be32(desc_buf +
8145                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
8146                         goto wb_disabled;
8147         } else {
8148                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
8149                         d_lu_wb_buf_alloc = 0;
8150                         ufshcd_read_unit_desc_param(hba,
8151                                         lun,
8152                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
8153                                         (u8 *)&d_lu_wb_buf_alloc,
8154                                         sizeof(d_lu_wb_buf_alloc));
8155                         if (d_lu_wb_buf_alloc) {
8156                                 dev_info->wb_dedicated_lu = lun;
8157                                 break;
8158                         }
8159                 }
8160
8161                 if (!d_lu_wb_buf_alloc)
8162                         goto wb_disabled;
8163         }
8164
8165         if (!ufshcd_is_wb_buf_lifetime_available(hba))
8166                 goto wb_disabled;
8167
8168         return;
8169
8170 wb_disabled:
8171         hba->caps &= ~UFSHCD_CAP_WB_EN;
8172 }
8173
8174 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
8175 {
8176         struct ufs_dev_info *dev_info = &hba->dev_info;
8177         u32 ext_ufs_feature;
8178         u8 mask = 0;
8179
8180         if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
8181                 return;
8182
8183         ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8184
8185         if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
8186                 mask |= MASK_EE_TOO_LOW_TEMP;
8187
8188         if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
8189                 mask |= MASK_EE_TOO_HIGH_TEMP;
8190
8191         if (mask) {
8192                 ufshcd_enable_ee(hba, mask);
8193                 ufs_hwmon_probe(hba, mask);
8194         }
8195 }
8196
8197 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
8198 {
8199         struct ufs_dev_info *dev_info = &hba->dev_info;
8200         u32 ext_ufs_feature;
8201         u32 ext_iid_en = 0;
8202         int err;
8203
8204         /* Only UFS-4.0 and above may support EXT_IID */
8205         if (dev_info->wspecversion < 0x400)
8206                 goto out;
8207
8208         ext_ufs_feature = get_unaligned_be32(desc_buf +
8209                                      DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8210         if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
8211                 goto out;
8212
8213         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8214                                       QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
8215         if (err)
8216                 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
8217
8218 out:
8219         dev_info->b_ext_iid_en = ext_iid_en;
8220 }
8221
8222 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
8223                              const struct ufs_dev_quirk *fixups)
8224 {
8225         const struct ufs_dev_quirk *f;
8226         struct ufs_dev_info *dev_info = &hba->dev_info;
8227
8228         if (!fixups)
8229                 return;
8230
8231         for (f = fixups; f->quirk; f++) {
8232                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
8233                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
8234                      ((dev_info->model &&
8235                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
8236                       !strcmp(f->model, UFS_ANY_MODEL)))
8237                         hba->dev_quirks |= f->quirk;
8238         }
8239 }
8240 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
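
/*
 * Illustrative sketch (hypothetical, not a shipped quirk table): a vendor host
 * driver could feed its own table to the exported ufshcd_fixup_dev_quirks()
 * from its fixup_dev_quirks vop, in the same way ufs_fixup_device_setup()
 * below applies the general ufs_fixups table. The quirk flag below is reused
 * from elsewhere in this file purely as an example.
 */
static const struct ufs_dev_quirk ufs_example_fixups[] __maybe_unused = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
	{}
};

static void __maybe_unused ufs_example_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_example_fixups);
}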
8241
8242 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8243 {
8244         /* fix by general quirk table */
8245         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8246
8247         /* allow vendors to fix quirks */
8248         ufshcd_vops_fixup_dev_quirks(hba);
8249 }
8250
8251 static void ufshcd_update_rtc(struct ufs_hba *hba)
8252 {
8253         struct timespec64 ts64;
8254         int err;
8255         u32 val;
8256
8257         ktime_get_real_ts64(&ts64);
8258
8259         if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
8260                 dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
8261                 return;
8262         }
8263
8264         /*
8265          * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
8266          * 2146 is required, it is recommended to choose the relative RTC mode.
8267          */
8268         val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
8269
8270         ufshcd_rpm_get_sync(hba);
8271         err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
8272                                 0, 0, &val);
8273         ufshcd_rpm_put_sync(hba);
8274
8275         if (err)
8276                 dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
8277         else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
8278                 hba->dev_info.rtc_time_baseline = ts64.tv_sec;
8279 }
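
/*
 * Illustrative sketch (assumption-based, unused): the attribute value written
 * by ufshcd_update_rtc() above is simply "seconds now" minus the baseline set
 * up in ufs_init_rtc() below, i.e. seconds since 2010-01-01 00:00 UTC in
 * absolute mode, or seconds since the last successful update in relative mode.
 */
static u32 __maybe_unused ufshcd_example_rtc_attr_value(struct ufs_hba *hba)
{
	struct timespec64 ts64;

	ktime_get_real_ts64(&ts64);
	return (u32)(ts64.tv_sec - hba->dev_info.rtc_time_baseline);
}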
8280
8281 static void ufshcd_rtc_work(struct work_struct *work)
8282 {
8283         struct ufs_hba *hba;
8284
8285         hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
8286
8287          /* Update RTC only when there are no requests in progress and UFSHCI is operational */
8288         if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
8289                 ufshcd_update_rtc(hba);
8290
8291         if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
8292                 schedule_delayed_work(&hba->ufs_rtc_update_work,
8293                                       msecs_to_jiffies(hba->dev_info.rtc_update_period));
8294 }
8295
8296 static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
8297 {
8298         u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
8299         struct ufs_dev_info *dev_info = &hba->dev_info;
8300
8301         if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
8302                 dev_info->rtc_type = UFS_RTC_ABSOLUTE;
8303
8304                 /*
8305                  * Linux measures time as the number of seconds elapsed since 00:00:00 UTC
8306                  * on January 1, 1970, whereas the UFS absolute RTC counts seconds elapsed
8307                  * since January 1st 2010 00:00, so the ABS baseline needs to be adjusted here.
8308                  */
8309                 dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
8310                                                         mktime64(1970, 1, 1, 0, 0, 0);
8311         } else {
8312                 dev_info->rtc_type = UFS_RTC_RELATIVE;
8313                 dev_info->rtc_time_baseline = 0;
8314         }
8315
8316         /*
8317          * We ignore the TIME_PERIOD defined in wPeriodicRTCUpdate because the spec does not
8318          * clearly state how to calculate the specific update period for each time unit. Periodic
8319          * RTC update work is disabled; the user can configure it via the sysfs node as needed.
8320          */
8321         dev_info->rtc_update_period = 0;
8322 }
8323
8324 static int ufs_get_device_desc(struct ufs_hba *hba)
8325 {
8326         int err;
8327         u8 model_index;
8328         u8 *desc_buf;
8329         struct ufs_dev_info *dev_info = &hba->dev_info;
8330
8331         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8332         if (!desc_buf) {
8333                 err = -ENOMEM;
8334                 goto out;
8335         }
8336
8337         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8338                                      QUERY_DESC_MAX_SIZE);
8339         if (err) {
8340                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8341                         __func__, err);
8342                 goto out;
8343         }
8344
8345         /*
8346          * getting vendor (manufacturerID) and Bank Index in big endian
8347          * format
8348          */
8349         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8350                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8351
8352         /* getting Specification Version in big endian format */
8353         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8354                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8355         dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8356
8357         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8358
8359         err = ufshcd_read_string_desc(hba, model_index,
8360                                       &dev_info->model, SD_ASCII_STD);
8361         if (err < 0) {
8362                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8363                         __func__, err);
8364                 goto out;
8365         }
8366
8367         hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8368                 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8369
8370         ufs_fixup_device_setup(hba);
8371
8372         ufshcd_wb_probe(hba, desc_buf);
8373
8374         ufshcd_temp_notif_probe(hba, desc_buf);
8375
8376         ufs_init_rtc(hba, desc_buf);
8377
8378         if (hba->ext_iid_sup)
8379                 ufshcd_ext_iid_probe(hba, desc_buf);
8380
8381         /*
8382          * ufshcd_read_string_desc() returns the size of the string on success;
8383          * reset the error value.
8384          */
8385         err = 0;
8386
8387 out:
8388         kfree(desc_buf);
8389         return err;
8390 }
8391
8392 static void ufs_put_device_desc(struct ufs_hba *hba)
8393 {
8394         struct ufs_dev_info *dev_info = &hba->dev_info;
8395
8396         kfree(dev_info->model);
8397         dev_info->model = NULL;
8398 }
8399
8400 /**
8401  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8402  * @hba: per-adapter instance
8403  *
8404  * PA_TActivate parameter can be tuned manually if UniPro version is less than
8405  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
8406  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8407  * the hibern8 exit latency.
8408  *
8409  * Return: zero on success, non-zero error value on failure.
8410  */
8411 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8412 {
8413         int ret = 0;
8414         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8415
8416         ret = ufshcd_dme_peer_get(hba,
8417                                   UIC_ARG_MIB_SEL(
8418                                         RX_MIN_ACTIVATETIME_CAPABILITY,
8419                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8420                                   &peer_rx_min_activatetime);
8421         if (ret)
8422                 goto out;
8423
8424         /* make sure proper unit conversion is applied */
8425         tuned_pa_tactivate =
8426                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8427                  / PA_TACTIVATE_TIME_UNIT_US);
8428         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8429                              tuned_pa_tactivate);
8430
8431 out:
8432         return ret;
8433 }
8434
8435 /**
8436  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8437  * @hba: per-adapter instance
8438  *
8439  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
8440  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8441  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8442  * This optimal value can help reduce the hibern8 exit latency.
8443  *
8444  * Return: zero on success, non-zero error value on failure.
8445  */
8446 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8447 {
8448         int ret = 0;
8449         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8450         u32 max_hibern8_time, tuned_pa_hibern8time;
8451
8452         ret = ufshcd_dme_get(hba,
8453                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8454                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8455                                   &local_tx_hibern8_time_cap);
8456         if (ret)
8457                 goto out;
8458
8459         ret = ufshcd_dme_peer_get(hba,
8460                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8461                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8462                                   &peer_rx_hibern8_time_cap);
8463         if (ret)
8464                 goto out;
8465
8466         max_hibern8_time = max(local_tx_hibern8_time_cap,
8467                                peer_rx_hibern8_time_cap);
8468         /* make sure proper unit conversion is applied */
8469         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8470                                 / PA_HIBERN8_TIME_UNIT_US);
8471         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8472                              tuned_pa_hibern8time);
8473 out:
8474         return ret;
8475 }
8476
8477 /**
8478  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8479  * less than device PA_TACTIVATE time.
8480  * @hba: per-adapter instance
8481  *
8482  * Some UFS devices require host PA_TACTIVATE to be lower than device
8483  * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
8484  * enabled for such devices.
8485  *
8486  * Return: zero on success, non-zero error value on failure.
8487  */
8488 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8489 {
8490         int ret = 0;
8491         u32 granularity, peer_granularity;
8492         u32 pa_tactivate, peer_pa_tactivate;
8493         u32 pa_tactivate_us, peer_pa_tactivate_us;
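        /*
         * PA_GRANULARITY values 1..6 correspond to time units of 1, 4, 8, 16,
         * 32 and 100 microseconds respectively; the granularity value is used
         * as a one-based index into this table.
         */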
8494         static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8495
8496         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8497                                   &granularity);
8498         if (ret)
8499                 goto out;
8500
8501         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8502                                   &peer_granularity);
8503         if (ret)
8504                 goto out;
8505
8506         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8507             (granularity > PA_GRANULARITY_MAX_VAL)) {
8508                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8509                         __func__, granularity);
8510                 return -EINVAL;
8511         }
8512
8513         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8514             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8515                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8516                         __func__, peer_granularity);
8517                 return -EINVAL;
8518         }
8519
8520         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8521         if (ret)
8522                 goto out;
8523
8524         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8525                                   &peer_pa_tactivate);
8526         if (ret)
8527                 goto out;
8528
8529         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8530         peer_pa_tactivate_us = peer_pa_tactivate *
8531                              gran_to_us_table[peer_granularity - 1];
8532
8533         if (pa_tactivate_us >= peer_pa_tactivate_us) {
8534                 u32 new_peer_pa_tactivate;
8535
8536                 new_peer_pa_tactivate = pa_tactivate_us /
8537                                       gran_to_us_table[peer_granularity - 1];
8538                 new_peer_pa_tactivate++;
8539                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8540                                           new_peer_pa_tactivate);
8541         }
8542
8543 out:
8544         return ret;
8545 }
8546
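/**
 * ufshcd_tune_unipro_params - tune UniPro parameters and apply device quirks
 * @hba: per-adapter instance
 *
 * Tunes PA_TActivate and PA_Hibern8Time when the UniPro version requires it,
 * then applies vendor and device specific PA_TACTIVATE quirks.
 */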
8547 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8548 {
8549         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8550                 ufshcd_tune_pa_tactivate(hba);
8551                 ufshcd_tune_pa_hibern8time(hba);
8552         }
8553
8554         ufshcd_vops_apply_dev_quirks(hba);
8555
8556         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8557                 /* set 1ms timeout for PA_TACTIVATE */
8558                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8559
8560         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8561                 ufshcd_quirk_tune_host_pa_tactivate(hba);
8562 }
8563
8564 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8565 {
8566         hba->ufs_stats.hibern8_exit_cnt = 0;
8567         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8568         hba->req_abort_count = 0;
8569 }
8570
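/**
 * ufshcd_device_geo_params_init - read the Geometry descriptor
 * @hba: per-adapter instance
 *
 * Derives the maximum number of supported logical units (8 or 32) from
 * bMaxNumberLU in the Geometry descriptor.
 *
 * Return: zero on success, non-zero error value on failure.
 */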
8571 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8572 {
8573         int err;
8574         u8 *desc_buf;
8575
8576         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8577         if (!desc_buf) {
8578                 err = -ENOMEM;
8579                 goto out;
8580         }
8581
8582         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8583                                      desc_buf, QUERY_DESC_MAX_SIZE);
8584         if (err) {
8585                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8586                                 __func__, err);
8587                 goto out;
8588         }
8589
8590         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8591                 hba->dev_info.max_lu_supported = 32;
8592         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8593                 hba->dev_info.max_lu_supported = 8;
8594
8595 out:
8596         kfree(desc_buf);
8597         return err;
8598 }
8599
8600 struct ufs_ref_clk {
8601         unsigned long freq_hz;
8602         enum ufs_ref_clk_freq val;
8603 };
8604
8605 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8606         {19200000, REF_CLK_FREQ_19_2_MHZ},
8607         {26000000, REF_CLK_FREQ_26_MHZ},
8608         {38400000, REF_CLK_FREQ_38_4_MHZ},
8609         {52000000, REF_CLK_FREQ_52_MHZ},
8610         {0, REF_CLK_FREQ_INVAL},
8611 };
8612
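/*
 * Map a reference clock rate in Hz to the corresponding bRefClkFreq value;
 * rates not listed in ufs_ref_clk_freqs[] map to REF_CLK_FREQ_INVAL.
 */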
8613 static enum ufs_ref_clk_freq
8614 ufs_get_bref_clk_from_hz(unsigned long freq)
8615 {
8616         int i;
8617
8618         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8619                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8620                         return ufs_ref_clk_freqs[i].val;
8621
8622         return REF_CLK_FREQ_INVAL;
8623 }
8624
8625 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8626 {
8627         unsigned long freq;
8628
8629         freq = clk_get_rate(refclk);
8630
8631         hba->dev_ref_clk_freq =
8632                 ufs_get_bref_clk_from_hz(freq);
8633
8634         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8635                 dev_err(hba->dev,
8636                 "invalid ref_clk setting = %ld\n", freq);
8637 }
8638
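/**
 * ufshcd_set_dev_ref_clk - sync the device bRefClkFreq attribute with the host
 * @hba: per-adapter instance
 *
 * Reads the bRefClkFreq attribute and, if it differs from the reference clock
 * frequency detected on the host side, writes the host value to the device.
 *
 * Return: zero on success, non-zero error value on failure.
 */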
8639 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8640 {
8641         int err;
8642         u32 ref_clk;
8643         u32 freq = hba->dev_ref_clk_freq;
8644
8645         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8646                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8647
8648         if (err) {
8649                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8650                         err);
8651                 goto out;
8652         }
8653
8654         if (ref_clk == freq)
8655                 goto out; /* nothing to update */
8656
8657         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8658                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8659
8660         if (err) {
8661                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8662                         ufs_ref_clk_freqs[freq].freq_hz);
8663                 goto out;
8664         }
8665
8666         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8667                         ufs_ref_clk_freqs[freq].freq_hz);
8668
8669 out:
8670         return err;
8671 }
8672
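/**
 * ufshcd_device_params_init - initialize UFS device parameters
 * @hba: per-adapter instance
 *
 * Reads the Geometry and Device descriptors, applies device quirks, caches
 * the power-on write protect status and probes the maximum power mode
 * supported by both the host and the device.
 *
 * Return: zero on success, non-zero error value on failure.
 */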
8673 static int ufshcd_device_params_init(struct ufs_hba *hba)
8674 {
8675         bool flag;
8676         int ret;
8677
8678         /* Init UFS geometry descriptor related parameters */
8679         ret = ufshcd_device_geo_params_init(hba);
8680         if (ret)
8681                 goto out;
8682
8683         /* Check and apply UFS device quirks */
8684         ret = ufs_get_device_desc(hba);
8685         if (ret) {
8686                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8687                         __func__, ret);
8688                 goto out;
8689         }
8690
8691         ufshcd_get_ref_clk_gating_wait(hba);
8692
8693         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8694                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8695                 hba->dev_info.f_power_on_wp_en = flag;
8696
8697         /* Probe maximum power mode co-supported by both UFS host and device */
8698         if (ufshcd_get_max_pwr_mode(hba))
8699                 dev_err(hba->dev,
8700                         "%s: Failed getting max supported power mode\n",
8701                         __func__);
8702 out:
8703         return ret;
8704 }
8705
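/*
 * Write the current wall-clock time (in nanoseconds) to the device timestamp
 * attribute. Only UFS 4.0 and later devices (wSpecVersion >= 0x400) support
 * this attribute.
 */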
8706 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
8707 {
8708         int err;
8709         struct ufs_query_req *request = NULL;
8710         struct ufs_query_res *response = NULL;
8711         struct ufs_dev_info *dev_info = &hba->dev_info;
8712         struct utp_upiu_query_v4_0 *upiu_data;
8713
8714         if (dev_info->wspecversion < 0x400)
8715                 return;
8716
8717         ufshcd_hold(hba);
8718
8719         mutex_lock(&hba->dev_cmd.lock);
8720
8721         ufshcd_init_query(hba, &request, &response,
8722                           UPIU_QUERY_OPCODE_WRITE_ATTR,
8723                           QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
8724
8725         request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
8726
8727         upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
8728
8729         put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
8730
8731         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
8732
8733         if (err)
8734                 dev_err(hba->dev, "%s: failed to set timestamp %d\n",
8735                         __func__, err);
8736
8737         mutex_unlock(&hba->dev_cmd.lock);
8738         ufshcd_release(hba);
8739 }
8740
8741 /**
8742  * ufshcd_add_lus - probe and add UFS logical units
8743  * @hba: per-adapter instance
8744  *
8745  * Return: 0 upon success; < 0 upon failure.
8746  */
8747 static int ufshcd_add_lus(struct ufs_hba *hba)
8748 {
8749         int ret;
8750
8751         /* Add required well known logical units to scsi mid layer */
8752         ret = ufshcd_scsi_add_wlus(hba);
8753         if (ret)
8754                 goto out;
8755
8756         /* Initialize devfreq after UFS device is detected */
8757         if (ufshcd_is_clkscaling_supported(hba)) {
8758                 memcpy(&hba->clk_scaling.saved_pwr_info,
8759                         &hba->pwr_info,
8760                         sizeof(struct ufs_pa_layer_attr));
8761                 hba->clk_scaling.is_allowed = true;
8762
8763                 ret = ufshcd_devfreq_init(hba);
8764                 if (ret)
8765                         goto out;
8766
8767                 hba->clk_scaling.is_enabled = true;
8768                 ufshcd_init_clk_scaling_sysfs(hba);
8769         }
8770
8771         ufs_bsg_probe(hba);
8772         scsi_scan_host(hba->host);
8773
8774 out:
8775         return ret;
8776 }
8777
8778 /* SDB - Single Doorbell */
8779 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8780 {
8781         size_t ucdl_size, utrdl_size;
8782
8783         ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
8784         dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8785                            hba->ucdl_dma_addr);
8786
8787         utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8788         dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8789                            hba->utrdl_dma_addr);
8790
8791         devm_kfree(hba->dev, hba->lrb);
8792 }
8793
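/*
 * Allocate the resources needed for MCQ mode. The queue depth decided for MCQ
 * may differ from the SDB queue depth, in which case the host memory is
 * released and re-allocated for the new number of tags. On failure the
 * original queue depth is restored.
 */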
8794 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8795 {
8796         int ret;
8797         int old_nutrs = hba->nutrs;
8798
8799         ret = ufshcd_mcq_decide_queue_depth(hba);
8800         if (ret < 0)
8801                 return ret;
8802
8803         hba->nutrs = ret;
8804         ret = ufshcd_mcq_init(hba);
8805         if (ret)
8806                 goto err;
8807
8808         /*
8809          * Previously allocated memory for nutrs may not be enough in MCQ mode.
8810          * The number of supported tags in MCQ mode may be larger than in SDB mode.
8811          */
8812         if (hba->nutrs != old_nutrs) {
8813                 ufshcd_release_sdb_queue(hba, old_nutrs);
8814                 ret = ufshcd_memory_alloc(hba);
8815                 if (ret)
8816                         goto err;
8817                 ufshcd_host_memory_configure(hba);
8818         }
8819
8820         ret = ufshcd_mcq_memory_alloc(hba);
8821         if (ret)
8822                 goto err;
8823
8824         return 0;
8825 err:
8826         hba->nutrs = old_nutrs;
8827         return ret;
8828 }
8829
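/*
 * Configure MCQ mode: set up ESI if the vendor supports it, enable the MCQ
 * interrupts, make the hardware queues operational and adjust the SCSI host
 * queue depth so that the reserved (device management) slot stays available.
 */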
8830 static void ufshcd_config_mcq(struct ufs_hba *hba)
8831 {
8832         int ret;
8833         u32 intrs;
8834
8835         ret = ufshcd_mcq_vops_config_esi(hba);
8836         dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8837
8838         intrs = UFSHCD_ENABLE_MCQ_INTRS;
8839         if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
8840                 intrs &= ~MCQ_CQ_EVENT_STATUS;
8841         ufshcd_enable_intr(hba, intrs);
8842         ufshcd_mcq_make_queues_operational(hba);
8843         ufshcd_mcq_config_mac(hba, hba->nutrs);
8844
8845         hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8846         hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
8847
8848         ufshcd_mcq_enable(hba);
8849         hba->mcq_enabled = true;
8850
8851         dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8852                  hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8853                  hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8854                  hba->nutrs);
8855 }
8856
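/**
 * ufshcd_device_init - perform link startup and UFS device initialization
 * @hba: per-adapter instance
 * @init_dev_params: whether to (re)read the UFS device parameters
 *
 * Brings up the UniPro link, verifies the device with NOP OUT, completes
 * fDeviceInit, optionally reads the device parameters and switches to MCQ
 * mode, tunes UniPro parameters and finally programs the highest supported
 * power mode and gear.
 *
 * Return: 0 upon success; < 0 upon failure.
 */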
8857 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8858 {
8859         int ret;
8860         struct Scsi_Host *host = hba->host;
8861
8862         hba->ufshcd_state = UFSHCD_STATE_RESET;
8863
8864         ret = ufshcd_link_startup(hba);
8865         if (ret)
8866                 return ret;
8867
8868         if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8869                 return ret;
8870
8871         /* Debug counters initialization */
8872         ufshcd_clear_dbg_ufs_stats(hba);
8873
8874         /* UniPro link is active now */
8875         ufshcd_set_link_active(hba);
8876
8877         /* Reconfigure MCQ upon reset */
8878         if (is_mcq_enabled(hba) && !init_dev_params)
8879                 ufshcd_config_mcq(hba);
8880
8881         /* Verify device initialization by sending NOP OUT UPIU */
8882         ret = ufshcd_verify_dev_init(hba);
8883         if (ret)
8884                 return ret;
8885
8886         /* Initiate UFS initialization, and wait until it completes */
8887         ret = ufshcd_complete_dev_init(hba);
8888         if (ret)
8889                 return ret;
8890
8891         /*
8892          * Initialize the UFS device parameters used by the driver; these
8893          * parameters are associated with UFS descriptors.
8894          */
8895         if (init_dev_params) {
8896                 ret = ufshcd_device_params_init(hba);
8897                 if (ret)
8898                         return ret;
8899                 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
8900                         ret = ufshcd_alloc_mcq(hba);
8901                         if (!ret) {
8902                                 ufshcd_config_mcq(hba);
8903                         } else {
8904                                 /* Continue with SDB mode */
8905                                 use_mcq_mode = false;
8906                                 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8907                                          ret);
8908                         }
8909                         ret = scsi_add_host(host, hba->dev);
8910                         if (ret) {
8911                                 dev_err(hba->dev, "scsi_add_host failed\n");
8912                                 return ret;
8913                         }
8914                         hba->scsi_host_added = true;
8915                 } else if (is_mcq_supported(hba)) {
8916                         /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
8917                         ufshcd_config_mcq(hba);
8918                 }
8919         }
8920
8921         ufshcd_tune_unipro_params(hba);
8922
8923         /* UFS device is also active now */
8924         ufshcd_set_ufs_dev_active(hba);
8925         ufshcd_force_reset_auto_bkops(hba);
8926
8927         ufshcd_set_timestamp_attr(hba);
8928         schedule_delayed_work(&hba->ufs_rtc_update_work,
8929                               msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
8930
8931         /* Gear up to HS gear if supported */
8932         if (hba->max_pwr_info.is_valid) {
8933                 /*
8934                  * Set the right value to bRefClkFreq before attempting to
8935                  * switch to HS gears.
8936                  */
8937                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8938                         ufshcd_set_dev_ref_clk(hba);
8939                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8940                 if (ret) {
8941                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8942                                         __func__, ret);
8943                         return ret;
8944                 }
8945         }
8946
8947         return 0;
8948 }
8949
8950 /**
8951  * ufshcd_probe_hba - probe hba to detect device and initialize it
8952  * @hba: per-adapter instance
8953  * @init_dev_params: whether or not to call ufshcd_device_params_init().
8954  *
8955  * Execute link-startup and verify device initialization
8956  *
8957  * Return: 0 upon success; < 0 upon failure.
8958  */
8959 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8960 {
8961         ktime_t start = ktime_get();
8962         unsigned long flags;
8963         int ret;
8964
8965         ret = ufshcd_device_init(hba, init_dev_params);
8966         if (ret)
8967                 goto out;
8968
8969         if (!hba->pm_op_in_progress &&
8970             (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
8971                 /* Reset the device and controller before doing reinit */
8972                 ufshcd_device_reset(hba);
8973                 ufshcd_hba_stop(hba);
8974                 ufshcd_vops_reinit_notify(hba);
8975                 ret = ufshcd_hba_enable(hba);
8976                 if (ret) {
8977                         dev_err(hba->dev, "Host controller enable failed\n");
8978                         ufshcd_print_evt_hist(hba);
8979                         ufshcd_print_host_state(hba);
8980                         goto out;
8981                 }
8982
8983                 /* Reinit the device */
8984                 ret = ufshcd_device_init(hba, init_dev_params);
8985                 if (ret)
8986                         goto out;
8987         }
8988
8989         ufshcd_print_pwr_info(hba);
8990
8991         /*
8992          * bActiveICCLevel is volatile for the UFS device (as per the UFS 2.1 spec)
8993          * and for removable UFS cards as well, hence always set this parameter.
8994          * Note: the error handler may issue a device reset, which also resets
8995          * bActiveICCLevel, so it is always safe to set it here.
8996          */
8997         ufshcd_set_active_icc_lvl(hba);
8998
8999         /* Enable UFS Write Booster if supported */
9000         ufshcd_configure_wb(hba);
9001
9002         if (hba->ee_usr_mask)
9003                 ufshcd_write_ee_control(hba);
9004         ufshcd_configure_auto_hibern8(hba);
9005
9006 out:
9007         spin_lock_irqsave(hba->host->host_lock, flags);
9008         if (ret)
9009                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
9010         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
9011                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
9012         spin_unlock_irqrestore(hba->host->host_lock, flags);
9013
9014         trace_ufshcd_init(dev_name(hba->dev), ret,
9015                 ktime_to_us(ktime_sub(ktime_get(), start)),
9016                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9017         return ret;
9018 }
9019
9020 /**
9021  * ufshcd_async_scan - asynchronous execution for probing hba
9022  * @data: data pointer to pass to this function
9023  * @cookie: cookie data
9024  */
9025 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
9026 {
9027         struct ufs_hba *hba = (struct ufs_hba *)data;
9028         int ret;
9029
9030         down(&hba->host_sem);
9031         /* Initialize hba, detect and initialize UFS device */
9032         ret = ufshcd_probe_hba(hba, true);
9033         up(&hba->host_sem);
9034         if (ret)
9035                 goto out;
9036
9037         /* Probe and add UFS logical units  */
9038         ret = ufshcd_add_lus(hba);
9039
9040 out:
9041         pm_runtime_put_sync(hba->dev);
9042
9043         if (ret)
9044                 dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
9045 }
9046
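/*
 * SCSI command timeout handler. Outside of system suspend the SCSI error
 * handler is left to deal with the timeout; during system suspend the only
 * outstanding command is START STOP UNIT, so recover the link directly here
 * to avoid a deadlock with the UFS error handler.
 */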
9047 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
9048 {
9049         struct ufs_hba *hba = shost_priv(scmd->device->host);
9050
9051         if (!hba->system_suspending) {
9052                 /* Activate the error handler in the SCSI core. */
9053                 return SCSI_EH_NOT_HANDLED;
9054         }
9055
9056         /*
9057          * If we get here we know that no TMFs are outstanding and also that
9058          * the only pending command is a START STOP UNIT command. Handle the
9059          * timeout of that command directly to prevent a deadlock between
9060          * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
9061          */
9062         ufshcd_link_recovery(hba);
9063         dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
9064                  __func__, hba->outstanding_tasks);
9065
9066         return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
9067 }
9068
9069 static const struct attribute_group *ufshcd_driver_groups[] = {
9070         &ufs_sysfs_unit_descriptor_group,
9071         &ufs_sysfs_lun_attributes_group,
9072         NULL,
9073 };
9074
9075 static struct ufs_hba_variant_params ufs_hba_vps = {
9076         .hba_enable_delay_us            = 1000,
9077         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
9078         .devfreq_profile.polling_ms     = 100,
9079         .devfreq_profile.target         = ufshcd_devfreq_target,
9080         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
9081         .ondemand_data.upthreshold      = 70,
9082         .ondemand_data.downdifferential = 5,
9083 };
9084
9085 static const struct scsi_host_template ufshcd_driver_template = {
9086         .module                 = THIS_MODULE,
9087         .name                   = UFSHCD,
9088         .proc_name              = UFSHCD,
9089         .map_queues             = ufshcd_map_queues,
9090         .queuecommand           = ufshcd_queuecommand,
9091         .mq_poll                = ufshcd_poll,
9092         .slave_alloc            = ufshcd_slave_alloc,
9093         .slave_configure        = ufshcd_slave_configure,
9094         .slave_destroy          = ufshcd_slave_destroy,
9095         .change_queue_depth     = ufshcd_change_queue_depth,
9096         .eh_abort_handler       = ufshcd_abort,
9097         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
9098         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
9099         .eh_timed_out           = ufshcd_eh_timed_out,
9100         .this_id                = -1,
9101         .sg_tablesize           = SG_ALL,
9102         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
9103         .can_queue              = UFSHCD_CAN_QUEUE,
9104         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
9105         .max_sectors            = SZ_1M / SECTOR_SIZE,
9106         .max_host_blocked       = 1,
9107         .track_queue_depth      = 1,
9108         .skip_settle_delay      = 1,
9109         .sdev_groups            = ufshcd_driver_groups,
9110 };
9111
9112 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
9113                                    int ua)
9114 {
9115         int ret;
9116
9117         if (!vreg)
9118                 return 0;
9119
9120         /*
9121          * The "set_load" operation is only required on regulators that have a
9122          * current limit configured. Otherwise, a max_uA of zero may cause
9123          * unexpected behavior when the regulator is enabled or put into high
9124          * power mode.
9125          */
9126         if (!vreg->max_uA)
9127                 return 0;
9128
9129         ret = regulator_set_load(vreg->reg, ua);
9130         if (ret < 0) {
9131                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
9132                                 __func__, vreg->name, ua, ret);
9133         }
9134
9135         return ret;
9136 }
9137
9138 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
9139                                          struct ufs_vreg *vreg)
9140 {
9141         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
9142 }
9143
9144 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
9145                                          struct ufs_vreg *vreg)
9146 {
9147         if (!vreg)
9148                 return 0;
9149
9150         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
9151 }
9152
9153 static int ufshcd_config_vreg(struct device *dev,
9154                 struct ufs_vreg *vreg, bool on)
9155 {
9156         if (regulator_count_voltages(vreg->reg) <= 0)
9157                 return 0;
9158
9159         return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
9160 }
9161
9162 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
9163 {
9164         int ret = 0;
9165
9166         if (!vreg || vreg->enabled)
9167                 goto out;
9168
9169         ret = ufshcd_config_vreg(dev, vreg, true);
9170         if (!ret)
9171                 ret = regulator_enable(vreg->reg);
9172
9173         if (!ret)
9174                 vreg->enabled = true;
9175         else
9176                 dev_err(dev, "%s: %s enable failed, err=%d\n",
9177                                 __func__, vreg->name, ret);
9178 out:
9179         return ret;
9180 }
9181
9182 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
9183 {
9184         int ret = 0;
9185
9186         if (!vreg || !vreg->enabled || vreg->always_on)
9187                 goto out;
9188
9189         ret = regulator_disable(vreg->reg);
9190
9191         if (!ret) {
9192                 /* ignore errors on applying disable config */
9193                 ufshcd_config_vreg(dev, vreg, false);
9194                 vreg->enabled = false;
9195         } else {
9196                 dev_err(dev, "%s: %s disable failed, err=%d\n",
9197                                 __func__, vreg->name, ret);
9198         }
9199 out:
9200         return ret;
9201 }
9202
9203 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
9204 {
9205         int ret = 0;
9206         struct device *dev = hba->dev;
9207         struct ufs_vreg_info *info = &hba->vreg_info;
9208
9209         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
9210         if (ret)
9211                 goto out;
9212
9213         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
9214         if (ret)
9215                 goto out;
9216
9217         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
9218
9219 out:
9220         if (ret) {
9221                 ufshcd_toggle_vreg(dev, info->vccq2, false);
9222                 ufshcd_toggle_vreg(dev, info->vccq, false);
9223                 ufshcd_toggle_vreg(dev, info->vcc, false);
9224         }
9225         return ret;
9226 }
9227
9228 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
9229 {
9230         struct ufs_vreg_info *info = &hba->vreg_info;
9231
9232         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
9233 }
9234
9235 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
9236 {
9237         int ret = 0;
9238
9239         if (!vreg)
9240                 goto out;
9241
9242         vreg->reg = devm_regulator_get(dev, vreg->name);
9243         if (IS_ERR(vreg->reg)) {
9244                 ret = PTR_ERR(vreg->reg);
9245                 dev_err(dev, "%s: %s get failed, err=%d\n",
9246                                 __func__, vreg->name, ret);
9247         }
9248 out:
9249         return ret;
9250 }
9251 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
9252
9253 static int ufshcd_init_vreg(struct ufs_hba *hba)
9254 {
9255         int ret = 0;
9256         struct device *dev = hba->dev;
9257         struct ufs_vreg_info *info = &hba->vreg_info;
9258
9259         ret = ufshcd_get_vreg(dev, info->vcc);
9260         if (ret)
9261                 goto out;
9262
9263         ret = ufshcd_get_vreg(dev, info->vccq);
9264         if (!ret)
9265                 ret = ufshcd_get_vreg(dev, info->vccq2);
9266 out:
9267         return ret;
9268 }
9269
9270 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
9271 {
9272         struct ufs_vreg_info *info = &hba->vreg_info;
9273
9274         return ufshcd_get_vreg(hba->dev, info->vdd_hba);
9275 }
9276
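/**
 * ufshcd_setup_clocks - enable or disable all controller clocks
 * @hba: per-adapter instance
 * @on: true to enable the clocks, false to disable them
 *
 * Clocks that must stay on to keep the link active are skipped while the
 * link is active. If enabling fails, all clocks that were enabled are
 * turned off again.
 *
 * Return: zero on success, non-zero error value on failure.
 */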
9277 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
9278 {
9279         int ret = 0;
9280         struct ufs_clk_info *clki;
9281         struct list_head *head = &hba->clk_list_head;
9282         unsigned long flags;
9283         ktime_t start = ktime_get();
9284         bool clk_state_changed = false;
9285
9286         if (list_empty(head))
9287                 goto out;
9288
9289         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
9290         if (ret)
9291                 return ret;
9292
9293         list_for_each_entry(clki, head, list) {
9294                 if (!IS_ERR_OR_NULL(clki->clk)) {
9295                         /*
9296                          * Don't disable clocks which are needed
9297                          * to keep the link active.
9298                          */
9299                         if (ufshcd_is_link_active(hba) &&
9300                             clki->keep_link_active)
9301                                 continue;
9302
9303                         clk_state_changed = on ^ clki->enabled;
9304                         if (on && !clki->enabled) {
9305                                 ret = clk_prepare_enable(clki->clk);
9306                                 if (ret) {
9307                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
9308                                                 __func__, clki->name, ret);
9309                                         goto out;
9310                                 }
9311                         } else if (!on && clki->enabled) {
9312                                 clk_disable_unprepare(clki->clk);
9313                         }
9314                         clki->enabled = on;
9315                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
9316                                         clki->name, on ? "en" : "dis");
9317                 }
9318         }
9319
9320         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
9321         if (ret)
9322                 return ret;
9323
9324         if (!ufshcd_is_clkscaling_supported(hba))
9325                 ufshcd_pm_qos_update(hba, on);
9326 out:
9327         if (ret) {
9328                 list_for_each_entry(clki, head, list) {
9329                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
9330                                 clk_disable_unprepare(clki->clk);
9331                 }
9332         } else if (!ret && on) {
9333                 spin_lock_irqsave(hba->host->host_lock, flags);
9334                 hba->clk_gating.state = CLKS_ON;
9335                 trace_ufshcd_clk_gating(dev_name(hba->dev),
9336                                         hba->clk_gating.state);
9337                 spin_unlock_irqrestore(hba->host->host_lock, flags);
9338         }
9339
9340         if (clk_state_changed)
9341                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
9342                         (on ? "on" : "off"),
9343                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9344         return ret;
9345 }
9346
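/*
 * Read the optional "ref-clk-freq" firmware property (device tree or ACPI)
 * that advertises the device reference clock frequency in Hz. As a purely
 * illustrative example (node name and unit address are made up), a device
 * tree fragment might look like:
 *
 *     ufshc@0 {
 *         ref-clk-freq = <26000000>;
 *     };
 *
 * Frequencies that do not match ufs_ref_clk_freqs[] are reported as
 * REF_CLK_FREQ_INVAL.
 */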
9347 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9348 {
9349         u32 freq;
9350         int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9351
9352         if (ret) {
9353                 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9354                 return REF_CLK_FREQ_INVAL;
9355         }
9356
9357         return ufs_get_bref_clk_from_hz(freq);
9358 }
9359
9360 static int ufshcd_init_clocks(struct ufs_hba *hba)
9361 {
9362         int ret = 0;
9363         struct ufs_clk_info *clki;
9364         struct device *dev = hba->dev;
9365         struct list_head *head = &hba->clk_list_head;
9366
9367         if (list_empty(head))
9368                 goto out;
9369
9370         list_for_each_entry(clki, head, list) {
9371                 if (!clki->name)
9372                         continue;
9373
9374                 clki->clk = devm_clk_get(dev, clki->name);
9375                 if (IS_ERR(clki->clk)) {
9376                         ret = PTR_ERR(clki->clk);
9377                         dev_err(dev, "%s: %s clk get failed, %d\n",
9378                                         __func__, clki->name, ret);
9379                         goto out;
9380                 }
9381
9382                 /*
9383                  * Parse device ref clk freq as per device tree "ref_clk".
9384                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9385                  * in ufshcd_alloc_host().
9386                  */
9387                 if (!strcmp(clki->name, "ref_clk"))
9388                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9389
9390                 if (clki->max_freq) {
9391                         ret = clk_set_rate(clki->clk, clki->max_freq);
9392                         if (ret) {
9393                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9394                                         __func__, clki->name,
9395                                         clki->max_freq, ret);
9396                                 goto out;
9397                         }
9398                         clki->curr_freq = clki->max_freq;
9399                 }
9400                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9401                                 clki->name, clk_get_rate(clki->clk));
9402         }
9403
9404         /* Set Max. frequency for all clocks */
9405         if (hba->use_pm_opp) {
9406                 ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
9407                 if (ret) {
9408                         dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
9409                                 ret);
9410                         goto out;
9411                 }
9412         }
9413
9414 out:
9415         return ret;
9416 }
9417
9418 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9419 {
9420         int err = 0;
9421
9422         if (!hba->vops)
9423                 goto out;
9424
9425         err = ufshcd_vops_init(hba);
9426         if (err)
9427                 dev_err_probe(hba->dev, err,
9428                               "%s: variant %s init failed with err %d\n",
9429                               __func__, ufshcd_get_var_name(hba), err);
9430 out:
9431         return err;
9432 }
9433
9434 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9435 {
9436         if (!hba->vops)
9437                 return;
9438
9439         ufshcd_vops_exit(hba);
9440 }
9441
9442 static int ufshcd_hba_init(struct ufs_hba *hba)
9443 {
9444         int err;
9445
9446         /*
9447          * Handle host controller power separately from the UFS device power
9448          * rails, as this makes it easier to control host controller power
9449          * collapse, which is different from UFS device power collapse.
9450          * Also, enable the host controller power before going ahead with the
9451          * rest of the initialization here.
9452          */
9453         err = ufshcd_init_hba_vreg(hba);
9454         if (err)
9455                 goto out;
9456
9457         err = ufshcd_setup_hba_vreg(hba, true);
9458         if (err)
9459                 goto out;
9460
9461         err = ufshcd_init_clocks(hba);
9462         if (err)
9463                 goto out_disable_hba_vreg;
9464
9465         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9466                 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9467
9468         err = ufshcd_setup_clocks(hba, true);
9469         if (err)
9470                 goto out_disable_hba_vreg;
9471
9472         err = ufshcd_init_vreg(hba);
9473         if (err)
9474                 goto out_disable_clks;
9475
9476         err = ufshcd_setup_vreg(hba, true);
9477         if (err)
9478                 goto out_disable_clks;
9479
9480         err = ufshcd_variant_hba_init(hba);
9481         if (err)
9482                 goto out_disable_vreg;
9483
9484         ufs_debugfs_hba_init(hba);
9485         ufs_fault_inject_hba_init(hba);
9486
9487         hba->is_powered = true;
9488         goto out;
9489
9490 out_disable_vreg:
9491         ufshcd_setup_vreg(hba, false);
9492 out_disable_clks:
9493         ufshcd_setup_clocks(hba, false);
9494 out_disable_hba_vreg:
9495         ufshcd_setup_hba_vreg(hba, false);
9496 out:
9497         return err;
9498 }
9499
9500 static void ufshcd_hba_exit(struct ufs_hba *hba)
9501 {
9502         if (hba->is_powered) {
9503                 ufshcd_pm_qos_exit(hba);
9504                 ufshcd_exit_clk_scaling(hba);
9505                 ufshcd_exit_clk_gating(hba);
9506                 if (hba->eh_wq)
9507                         destroy_workqueue(hba->eh_wq);
9508                 ufs_debugfs_hba_exit(hba);
9509                 ufshcd_variant_hba_exit(hba);
9510                 ufshcd_setup_vreg(hba, false);
9511                 ufshcd_setup_clocks(hba, false);
9512                 ufshcd_setup_hba_vreg(hba, false);
9513                 hba->is_powered = false;
9514                 ufs_put_device_desc(hba);
9515         }
9516 }
9517
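/*
 * Issue a START STOP UNIT command with the requested UFS power condition in
 * the upper nibble of CDB byte 4. The command is retried up to two times on
 * any failure and is submitted with BLK_MQ_REQ_PM so that it is accepted
 * while the device is being suspended.
 */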
9518 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9519                                      enum ufs_dev_pwr_mode pwr_mode,
9520                                      struct scsi_sense_hdr *sshdr)
9521 {
9522         const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
9523         struct scsi_failure failure_defs[] = {
9524                 {
9525                         .allowed = 2,
9526                         .result = SCMD_FAILURE_RESULT_ANY,
9527                 },
9528         };
9529         struct scsi_failures failures = {
9530                 .failure_definitions = failure_defs,
9531         };
9532         const struct scsi_exec_args args = {
9533                 .failures = &failures,
9534                 .sshdr = sshdr,
9535                 .req_flags = BLK_MQ_REQ_PM,
9536                 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9537         };
9538
9539         return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9540                         /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
9541                         &args);
9542 }
9543
9544 /**
9545  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9546  *                           power mode
9547  * @hba: per adapter instance
9548  * @pwr_mode: device power mode to set
9549  *
9550  * Return: 0 if requested power mode is set successfully;
9551  *         < 0 if failed to set the requested power mode.
9552  */
9553 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9554                                      enum ufs_dev_pwr_mode pwr_mode)
9555 {
9556         struct scsi_sense_hdr sshdr;
9557         struct scsi_device *sdp;
9558         unsigned long flags;
9559         int ret;
9560
9561         spin_lock_irqsave(hba->host->host_lock, flags);
9562         sdp = hba->ufs_device_wlun;
9563         if (sdp && scsi_device_online(sdp))
9564                 ret = scsi_device_get(sdp);
9565         else
9566                 ret = -ENODEV;
9567         spin_unlock_irqrestore(hba->host->host_lock, flags);
9568
9569         if (ret)
9570                 return ret;
9571
9572         /*
9573          * If scsi commands fail, the scsi mid-layer schedules scsi error-
9574          * handling, which would wait for host to be resumed. Since we know
9575          * we are functional while we are here, skip host resume in error
9576          * handling context.
9577          */
9578         hba->host->eh_noresume = 1;
9579
9580         /*
9581          * This function is generally called from the power management
9582          * callbacks, hence set the RQF_PM flag so that it doesn't resume
9583          * already suspended children.
9584          */
9585         ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9586         if (ret) {
9587                 sdev_printk(KERN_WARNING, sdp,
9588                             "START_STOP failed for power mode: %d, result %x\n",
9589                             pwr_mode, ret);
9590                 if (ret > 0) {
9591                         if (scsi_sense_valid(&sshdr))
9592                                 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9593                         ret = -EIO;
9594                 }
9595         } else {
9596                 hba->curr_dev_pwr_mode = pwr_mode;
9597         }
9598
9599         scsi_device_put(sdp);
9600         hba->host->eh_noresume = 0;
9601         return ret;
9602 }
9603
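/**
 * ufshcd_link_state_transition - put the UniPro link into the requested state
 * @hba: per-adapter instance
 * @req_link_state: target link state (active, Hibern8 or off)
 * @check_for_bkops: if true, the link is not turned off while auto-bkops is
 *                   enabled
 *
 * Return: zero on success, non-zero error value on failure.
 */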
9604 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9605                                         enum uic_link_state req_link_state,
9606                                         bool check_for_bkops)
9607 {
9608         int ret = 0;
9609
9610         if (req_link_state == hba->uic_link_state)
9611                 return 0;
9612
9613         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9614                 ret = ufshcd_uic_hibern8_enter(hba);
9615                 if (!ret) {
9616                         ufshcd_set_link_hibern8(hba);
9617                 } else {
9618                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9619                                         __func__, ret);
9620                         goto out;
9621                 }
9622         }
9623         /*
9624          * If autobkops is enabled, link can't be turned off because
9625          * turning off the link would also turn off the device, except in the
9626          * case of DeepSleep where the device is expected to remain powered.
9627          */
9628         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9629                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
9630                 /*
9631                  * Make sure that the link is in low power mode; we currently do
9632                  * this by putting the link in Hibern8. Another way to put the
9633                  * link in low power mode is to send a DME end point reset to the
9634                  * device and then a DME reset command to the local UniPro, but
9635                  * putting the link in Hibern8 is much faster.
9636                  *
9637                  * Note also that putting the link in Hibern8 is a requirement
9638                  * for entering DeepSleep.
9639                  */
9640                 ret = ufshcd_uic_hibern8_enter(hba);
9641                 if (ret) {
9642                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9643                                         __func__, ret);
9644                         goto out;
9645                 }
9646                 /*
9647                  * Change controller state to "reset state" which
9648                  * should also put the link in off/reset state
9649                  */
9650                 ufshcd_hba_stop(hba);
9651                 /*
9652                  * TODO: Check if we need any delay to make sure that
9653                  * controller is reset
9654                  */
9655                 ufshcd_set_link_off(hba);
9656         }
9657
9658 out:
9659         return ret;
9660 }
9661
9662 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9663 {
9664         bool vcc_off = false;
9665
9666         /*
9667          * It seems some UFS devices may keep drawing more than the sleep current
9668          * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
9669          * To avoid this situation, add a 2ms delay before putting these UFS
9670          * rails in LPM mode.
9671          */
9672         if (!ufshcd_is_link_active(hba) &&
9673             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9674                 usleep_range(2000, 2100);
9675
9676         /*
9677          * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
9678          * save some power.
9679          *
9680          * If the UFS device and the link are in OFF state, all power supplies
9681          * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is not
9682          * required. If the UFS link is inactive (Hibern8 or OFF state) and the
9683          * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
9684          *
9685          * Ignore the error returned by ufshcd_toggle_vreg() as the device is in
9686          * a low power state anyway, which saves some power.
9687          *
9688          * If Write Booster is enabled and the device needs to flush the WB
9689          * buffer OR if bkops status is urgent for WB, keep Vcc on.
9690          */
9691         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9692             !hba->dev_info.is_lu_power_on_wp) {
9693                 ufshcd_setup_vreg(hba, false);
9694                 vcc_off = true;
9695         } else if (!ufshcd_is_ufs_dev_active(hba)) {
9696                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9697                 vcc_off = true;
9698                 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9699                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9700                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9701                 }
9702         }
9703
9704         /*
9705          * Some UFS devices require a delay after the VCC power rail is turned off.
9706          */
9707         if (vcc_off && hba->vreg_info.vcc &&
9708                 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9709                 usleep_range(5000, 5100);
9710 }
9711
9712 #ifdef CONFIG_PM
9713 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9714 {
9715         int ret = 0;
9716
9717         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9718             !hba->dev_info.is_lu_power_on_wp) {
9719                 ret = ufshcd_setup_vreg(hba, true);
9720         } else if (!ufshcd_is_ufs_dev_active(hba)) {
9721                 if (!ufshcd_is_link_active(hba)) {
9722                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9723                         if (ret)
9724                                 goto vcc_disable;
9725                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9726                         if (ret)
9727                                 goto vccq_lpm;
9728                 }
9729                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9730         }
9731         goto out;
9732
9733 vccq_lpm:
9734         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9735 vcc_disable:
9736         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9737 out:
9738         return ret;
9739 }
9740 #endif /* CONFIG_PM */
9741
9742 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9743 {
9744         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9745                 ufshcd_setup_hba_vreg(hba, false);
9746 }
9747
9748 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9749 {
9750         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9751                 ufshcd_setup_hba_vreg(hba, true);
9752 }
9753
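/**
 * __ufshcd_wl_suspend - suspend the UFS device W-LUN
 * @hba: per-adapter instance
 * @pm_op: runtime PM, system PM or shutdown
 *
 * Puts the UFS device into the power mode and the link into the state that
 * correspond to the requested PM level (or power-down/link-off for shutdown),
 * handling bkops and WB buffer flush requirements along the way.
 *
 * Return: zero on success, non-zero error value on failure.
 */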
9754 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9755 {
9756         int ret = 0;
9757         bool check_for_bkops;
9758         enum ufs_pm_level pm_lvl;
9759         enum ufs_dev_pwr_mode req_dev_pwr_mode;
9760         enum uic_link_state req_link_state;
9761
9762         hba->pm_op_in_progress = true;
9763         if (pm_op != UFS_SHUTDOWN_PM) {
9764                 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9765                          hba->rpm_lvl : hba->spm_lvl;
9766                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9767                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9768         } else {
9769                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9770                 req_link_state = UIC_LINK_OFF_STATE;
9771         }
9772
9773         /*
9774          * If we can't transition into any of the low power modes
9775          * just gate the clocks.
9776          */
9777         ufshcd_hold(hba);
9778         hba->clk_gating.is_suspended = true;
9779
9780         if (ufshcd_is_clkscaling_supported(hba))
9781                 ufshcd_clk_scaling_suspend(hba, true);
9782
9783         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9784                         req_link_state == UIC_LINK_ACTIVE_STATE) {
9785                 goto vops_suspend;
9786         }
9787
9788         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9789             (req_link_state == hba->uic_link_state))
9790                 goto enable_scaling;
9791
9792         /* UFS device & link must be active before we enter in this function */
9793         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9794                 ret = -EINVAL;
9795                 goto enable_scaling;
9796         }
9797
9798         if (pm_op == UFS_RUNTIME_PM) {
9799                 if (ufshcd_can_autobkops_during_suspend(hba)) {
9800                         /*
9801                          * The device is idle with no requests in the queue;
9802                          * allow background operations if the bkops status shows
9803                          * that performance might be impacted.
9804                          */
9805                         ret = ufshcd_urgent_bkops(hba);
9806                         if (ret) {
9807                                 /*
9808                                  * If an error is returned in the suspend flow,
9809                                  * I/O will hang. Trigger the error handler and
9810                                  * abort suspend for error recovery.
9811                                  */
9812                                 ufshcd_force_error_recovery(hba);
9813                                 ret = -EBUSY;
9814                                 goto enable_scaling;
9815                         }
9816                 } else {
9817                         /* make sure that auto bkops is disabled */
9818                         ufshcd_disable_auto_bkops(hba);
9819                 }
9820                 /*
9821                  * If the device needs to do BKOPS or a WB buffer flush during
9822                  * Hibern8, keep the device power mode as "active power mode"
9823                  * and keep the VCC supply on.
9824                  */
9825                 hba->dev_info.b_rpm_dev_flush_capable =
9826                         hba->auto_bkops_enabled ||
9827                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9828                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9829                         ufshcd_is_auto_hibern8_enabled(hba))) &&
9830                         ufshcd_wb_need_flush(hba));
9831         }
9832
9833         flush_work(&hba->eeh_work);
9834
9835         ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9836         if (ret)
9837                 goto enable_scaling;
9838
9839         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9840                 if (pm_op != UFS_RUNTIME_PM)
9841                         /* ensure that bkops is disabled */
9842                         ufshcd_disable_auto_bkops(hba);
9843
9844                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9845                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9846                         if (ret && pm_op != UFS_SHUTDOWN_PM) {
9847                                 /*
9848                                  * If an error is returned in the suspend flow,
9849                                  * I/O will hang. Trigger the error handler and
9850                                  * abort suspend for error recovery.
9851                                  */
9852                                 ufshcd_force_error_recovery(hba);
9853                                 ret = -EBUSY;
9854                         }
9855                         if (ret)
9856                                 goto enable_scaling;
9857                 }
9858         }
9859
9860         /*
9861          * In the case of DeepSleep, the device is expected to remain powered
9862          * with the link off, so do not check for bkops.
9863          */
9864         check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9865         ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9866         if (ret && pm_op != UFS_SHUTDOWN_PM) {
9867                 /*
9868                  * If an error is returned in the suspend flow, I/O will hang.
9869                  * Trigger the error handler and abort suspend for error recovery.
9871                  */
9872                 ufshcd_force_error_recovery(hba);
9873                 ret = -EBUSY;
9874         }
9875         if (ret)
9876                 goto set_dev_active;
9877
9878 vops_suspend:
9879         /*
9880          * Call vendor specific suspend callback. As these callbacks may access
9881          * vendor specific host controller register space, call them while the
9882          * host clocks are still on (before the clocks are turned off).
9883          */
9884         ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9885         if (ret)
9886                 goto set_link_active;
9887
9888         cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
9889         goto out;
9890
9891 set_link_active:
9892         /*
9893          * Device hardware reset is required to exit DeepSleep. Also, for
9894          * DeepSleep, the link is off so host reset and restore will be done
9895          * further below.
9896          */
9897         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9898                 ufshcd_device_reset(hba);
9899                 WARN_ON(!ufshcd_is_link_off(hba));
9900         }
9901         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9902                 ufshcd_set_link_active(hba);
9903         else if (ufshcd_is_link_off(hba))
9904                 ufshcd_host_reset_and_restore(hba);
9905 set_dev_active:
9906         /* Can also get here needing to exit DeepSleep */
9907         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9908                 ufshcd_device_reset(hba);
9909                 ufshcd_host_reset_and_restore(hba);
9910         }
9911         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9912                 ufshcd_disable_auto_bkops(hba);
9913 enable_scaling:
9914         if (ufshcd_is_clkscaling_supported(hba))
9915                 ufshcd_clk_scaling_suspend(hba, false);
9916
9917         hba->dev_info.b_rpm_dev_flush_capable = false;
9918 out:
9919         if (hba->dev_info.b_rpm_dev_flush_capable) {
9920                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9921                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9922         }
9923
9924         if (ret) {
9925                 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9926                 hba->clk_gating.is_suspended = false;
9927                 ufshcd_release(hba);
9928         }
9929         hba->pm_op_in_progress = false;
9930         return ret;
9931 }
9932
9933 #ifdef CONFIG_PM
9934 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9935 {
9936         int ret;
9937         enum uic_link_state old_link_state = hba->uic_link_state;
9938
9939         hba->pm_op_in_progress = true;
9940
9941         /*
9942          * Call the vendor specific resume callback. As these callbacks may
9943          * access vendor specific host controller register space, call them
9944          * when the host clocks are ON.
9945          */
9946         ret = ufshcd_vops_resume(hba, pm_op);
9947         if (ret)
9948                 goto out;
9949
9950         /* For DeepSleep, the only supported option is to have the link off */
9951         WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9952
9953         if (ufshcd_is_link_hibern8(hba)) {
9954                 ret = ufshcd_uic_hibern8_exit(hba);
9955                 if (!ret) {
9956                         ufshcd_set_link_active(hba);
9957                 } else {
9958                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9959                                         __func__, ret);
9960                         goto vendor_suspend;
9961                 }
9962         } else if (ufshcd_is_link_off(hba)) {
9963                 /*
9964                  * A full initialization of the host and the device is
9965                  * required since the link was put to off during suspend.
9966                  * Note, in the case of DeepSleep, the device will exit
9967                  * DeepSleep due to device reset.
9968                  */
9969                 ret = ufshcd_reset_and_restore(hba);
9970                 /*
9971                  * ufshcd_reset_and_restore() should have already
9972                  * set the link state as active
9973                  */
9974                 if (ret || !ufshcd_is_link_active(hba))
9975                         goto vendor_suspend;
9976         }
9977
9978         if (!ufshcd_is_ufs_dev_active(hba)) {
9979                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9980                 if (ret)
9981                         goto set_old_link_state;
9982                 ufshcd_set_timestamp_attr(hba);
9983                 schedule_delayed_work(&hba->ufs_rtc_update_work,
9984                                       msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
9985         }
9986
9987         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9988                 ufshcd_enable_auto_bkops(hba);
9989         else
9990                 /*
9991                  * If BKOPS is urgently needed at this moment, keep
9992                  * auto-bkops enabled, else disable it.
9993                  */
9994                 ufshcd_urgent_bkops(hba);
9995
9996         if (hba->ee_usr_mask)
9997                 ufshcd_write_ee_control(hba);
9998
9999         if (ufshcd_is_clkscaling_supported(hba))
10000                 ufshcd_clk_scaling_suspend(hba, false);
10001
10002         if (hba->dev_info.b_rpm_dev_flush_capable) {
10003                 hba->dev_info.b_rpm_dev_flush_capable = false;
10004                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
10005         }
10006
10007         ufshcd_configure_auto_hibern8(hba);
10008
10009         goto out;
10010
10011 set_old_link_state:
10012         ufshcd_link_state_transition(hba, old_link_state, 0);
10013 vendor_suspend:
10014         ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
10015         ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
10016 out:
10017         if (ret)
10018                 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
10019         hba->clk_gating.is_suspended = false;
10020         ufshcd_release(hba);
10021         hba->pm_op_in_progress = false;
10022         return ret;
10023 }
10024
10025 static int ufshcd_wl_runtime_suspend(struct device *dev)
10026 {
10027         struct scsi_device *sdev = to_scsi_device(dev);
10028         struct ufs_hba *hba;
10029         int ret;
10030         ktime_t start = ktime_get();
10031
10032         hba = shost_priv(sdev->host);
10033
10034         ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
10035         if (ret)
10036                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10037
10038         trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
10039                 ktime_to_us(ktime_sub(ktime_get(), start)),
10040                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10041
10042         return ret;
10043 }
10044
10045 static int ufshcd_wl_runtime_resume(struct device *dev)
10046 {
10047         struct scsi_device *sdev = to_scsi_device(dev);
10048         struct ufs_hba *hba;
10049         int ret = 0;
10050         ktime_t start = ktime_get();
10051
10052         hba = shost_priv(sdev->host);
10053
10054         ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
10055         if (ret)
10056                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10057
10058         trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
10059                 ktime_to_us(ktime_sub(ktime_get(), start)),
10060                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10061
10062         return ret;
10063 }
10064 #endif
10065
10066 #ifdef CONFIG_PM_SLEEP
10067 static int ufshcd_wl_suspend(struct device *dev)
10068 {
10069         struct scsi_device *sdev = to_scsi_device(dev);
10070         struct ufs_hba *hba;
10071         int ret = 0;
10072         ktime_t start = ktime_get();
10073
10074         hba = shost_priv(sdev->host);
10075         down(&hba->host_sem);
10076         hba->system_suspending = true;
10077
10078         if (pm_runtime_suspended(dev))
10079                 goto out;
10080
10081         ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
10082         if (ret) {
10083                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__,  ret);
10084                 up(&hba->host_sem);
10085         }
10086
10087 out:
10088         if (!ret)
10089                 hba->is_sys_suspended = true;
10090         trace_ufshcd_wl_suspend(dev_name(dev), ret,
10091                 ktime_to_us(ktime_sub(ktime_get(), start)),
10092                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10093
10094         return ret;
10095 }
10096
10097 static int ufshcd_wl_resume(struct device *dev)
10098 {
10099         struct scsi_device *sdev = to_scsi_device(dev);
10100         struct ufs_hba *hba;
10101         int ret = 0;
10102         ktime_t start = ktime_get();
10103
10104         hba = shost_priv(sdev->host);
10105
10106         if (pm_runtime_suspended(dev))
10107                 goto out;
10108
10109         ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
10110         if (ret)
10111                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10112 out:
10113         trace_ufshcd_wl_resume(dev_name(dev), ret,
10114                 ktime_to_us(ktime_sub(ktime_get(), start)),
10115                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10116         if (!ret)
10117                 hba->is_sys_suspended = false;
10118         hba->system_suspending = false;
10119         up(&hba->host_sem);
10120         return ret;
10121 }
10122 #endif
10123
10124 /**
10125  * ufshcd_suspend - helper function for suspend operations
10126  * @hba: per adapter instance
10127  *
10128  * This function disables IRQs, turns off the clocks and puts the
10129  * vreg and hba-vreg in LPM mode.
10130  *
10131  * Return: 0 upon success; < 0 upon failure.
10132  */
10133 static int ufshcd_suspend(struct ufs_hba *hba)
10134 {
10135         int ret;
10136
10137         if (!hba->is_powered)
10138                 return 0;
10139         /*
10140          * Disable the host IRQ as no host controller transactions are
10141          * expected until resume.
10142          */
10143         ufshcd_disable_irq(hba);
10144         ret = ufshcd_setup_clocks(hba, false);
10145         if (ret) {
10146                 ufshcd_enable_irq(hba);
10147                 return ret;
10148         }
10149         if (ufshcd_is_clkgating_allowed(hba)) {
10150                 hba->clk_gating.state = CLKS_OFF;
10151                 trace_ufshcd_clk_gating(dev_name(hba->dev),
10152                                         hba->clk_gating.state);
10153         }
10154
10155         ufshcd_vreg_set_lpm(hba);
10156         /* Put the host controller in low power mode if possible */
10157         ufshcd_hba_vreg_set_lpm(hba);
10158         ufshcd_pm_qos_update(hba, false);
10159         return ret;
10160 }
10161
10162 #ifdef CONFIG_PM
10163 /**
10164  * ufshcd_resume - helper function for resume operations
10165  * @hba: per adapter instance
10166  *
10167  * This function turns on the regulators, clocks and
10168  * IRQs of the HBA.
10169  *
10170  * Return: 0 for success and non-zero for failure.
10171  */
10172 static int ufshcd_resume(struct ufs_hba *hba)
10173 {
10174         int ret;
10175
10176         if (!hba->is_powered)
10177                 return 0;
10178
10179         ufshcd_hba_vreg_set_hpm(hba);
10180         ret = ufshcd_vreg_set_hpm(hba);
10181         if (ret)
10182                 goto out;
10183
10184         /* Make sure clocks are enabled before accessing controller */
10185         ret = ufshcd_setup_clocks(hba, true);
10186         if (ret)
10187                 goto disable_vreg;
10188
10189         /* enable the host irq as host controller would be active soon */
10190         ufshcd_enable_irq(hba);
10191
10192         goto out;
10193
10194 disable_vreg:
10195         ufshcd_vreg_set_lpm(hba);
10196 out:
10197         if (ret)
10198                 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
10199         return ret;
10200 }
10201 #endif /* CONFIG_PM */
10202
10203 #ifdef CONFIG_PM_SLEEP
10204 /**
10205  * ufshcd_system_suspend - system suspend callback
10206  * @dev: Device associated with the UFS controller.
10207  *
10208  * Executed before putting the system into a sleep state in which the contents
10209  * of main memory are preserved.
10210  *
10211  * Return: 0 for success and non-zero for failure.
10212  */
10213 int ufshcd_system_suspend(struct device *dev)
10214 {
10215         struct ufs_hba *hba = dev_get_drvdata(dev);
10216         int ret = 0;
10217         ktime_t start = ktime_get();
10218
10219         if (pm_runtime_suspended(hba->dev))
10220                 goto out;
10221
10222         ret = ufshcd_suspend(hba);
10223 out:
10224         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
10225                 ktime_to_us(ktime_sub(ktime_get(), start)),
10226                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10227         return ret;
10228 }
10229 EXPORT_SYMBOL(ufshcd_system_suspend);
10230
10231 /**
10232  * ufshcd_system_resume - system resume callback
10233  * @dev: Device associated with the UFS controller.
10234  *
10235  * Executed after waking the system up from a sleep state in which the contents
10236  * of main memory were preserved.
10237  *
10238  * Return: 0 for success and non-zero for failure.
10239  */
10240 int ufshcd_system_resume(struct device *dev)
10241 {
10242         struct ufs_hba *hba = dev_get_drvdata(dev);
10243         ktime_t start = ktime_get();
10244         int ret = 0;
10245
10246         if (pm_runtime_suspended(hba->dev))
10247                 goto out;
10248
10249         ret = ufshcd_resume(hba);
10250
10251 out:
10252         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
10253                 ktime_to_us(ktime_sub(ktime_get(), start)),
10254                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10255
10256         return ret;
10257 }
10258 EXPORT_SYMBOL(ufshcd_system_resume);
10259 #endif /* CONFIG_PM_SLEEP */
10260
10261 #ifdef CONFIG_PM
10262 /**
10263  * ufshcd_runtime_suspend - runtime suspend callback
10264  * @dev: Device associated with the UFS controller.
10265  *
10266  * Check the description of ufshcd_suspend() function for more details.
10267  *
10268  * Return: 0 for success and non-zero for failure.
10269  */
10270 int ufshcd_runtime_suspend(struct device *dev)
10271 {
10272         struct ufs_hba *hba = dev_get_drvdata(dev);
10273         int ret;
10274         ktime_t start = ktime_get();
10275
10276         ret = ufshcd_suspend(hba);
10277
10278         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
10279                 ktime_to_us(ktime_sub(ktime_get(), start)),
10280                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10281         return ret;
10282 }
10283 EXPORT_SYMBOL(ufshcd_runtime_suspend);
10284
10285 /**
10286  * ufshcd_runtime_resume - runtime resume routine
10287  * @dev: Device associated with the UFS controller.
10288  *
10289  * This function brings the controller to the active state. The following
10290  * operations are done in this function:
10291  *
10292  * 1. Turn on all the controller related clocks
10293  * 2. Turn on the VCC rail
10294  *
10295  * Return: 0 upon success; < 0 upon failure.
10296  */
10297 int ufshcd_runtime_resume(struct device *dev)
10298 {
10299         struct ufs_hba *hba = dev_get_drvdata(dev);
10300         int ret;
10301         ktime_t start = ktime_get();
10302
10303         ret = ufshcd_resume(hba);
10304
10305         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
10306                 ktime_to_us(ktime_sub(ktime_get(), start)),
10307                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10308         return ret;
10309 }
10310 EXPORT_SYMBOL(ufshcd_runtime_resume);
10311 #endif /* CONFIG_PM */
10312
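      /*
       * ufshcd_wl_shutdown - shut down via the UFS device WLUN
       * @dev: SCSI device corresponding to the UFS device WLUN
       *
       * Quiesce all SCSI devices, put the UFS device into its shutdown power
       * mode and, if both the device and the link end up powered off, also
       * power down the host controller.
       */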
10313 static void ufshcd_wl_shutdown(struct device *dev)
10314 {
10315         struct scsi_device *sdev = to_scsi_device(dev);
10316         struct ufs_hba *hba = shost_priv(sdev->host);
10317
10318         down(&hba->host_sem);
10319         hba->shutting_down = true;
10320         up(&hba->host_sem);
10321
10322         /* Turn on everything while shutting down */
10323         ufshcd_rpm_get_sync(hba);
10324         scsi_device_quiesce(sdev);
10325         shost_for_each_device(sdev, hba->host) {
10326                 if (sdev == hba->ufs_device_wlun)
10327                         continue;
10328                 scsi_device_quiesce(sdev);
10329         }
10330         __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10331
10332         /*
10333          * Next, turn off the UFS controller and the UFS regulators. Disable
10334          * clocks.
10335          */
10336         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
10337                 ufshcd_suspend(hba);
10338
10339         hba->is_powered = false;
10340 }
10341
10342 /**
10343  * ufshcd_remove - de-allocate the SCSI host and the host memory space
10344  *              data structures
10345  * @hba: per adapter instance
10346  */
10347 void ufshcd_remove(struct ufs_hba *hba)
10348 {
10349         if (hba->ufs_device_wlun)
10350                 ufshcd_rpm_get_sync(hba);
10351         ufs_hwmon_remove(hba);
10352         ufs_bsg_remove(hba);
10353         ufs_sysfs_remove_nodes(hba->dev);
10354         blk_mq_destroy_queue(hba->tmf_queue);
10355         blk_put_queue(hba->tmf_queue);
10356         blk_mq_free_tag_set(&hba->tmf_tag_set);
10357         scsi_remove_host(hba->host);
10358         /* disable interrupts */
10359         ufshcd_disable_intr(hba, hba->intr_mask);
10360         ufshcd_hba_stop(hba);
10361         ufshcd_hba_exit(hba);
10362 }
10363 EXPORT_SYMBOL_GPL(ufshcd_remove);
10364
10365 #ifdef CONFIG_PM_SLEEP
10366 int ufshcd_system_freeze(struct device *dev)
10367 {
10369         return ufshcd_system_suspend(dev);
10371 }
10372 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10373
10374 int ufshcd_system_restore(struct device *dev)
10375 {
10377         struct ufs_hba *hba = dev_get_drvdata(dev);
10378         int ret;
10379
10380         ret = ufshcd_system_resume(dev);
10381         if (ret)
10382                 return ret;
10383
10384         /* Configure UTRL and UTMRL base address registers */
10385         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10386                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10387         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10388                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10389         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10390                         REG_UTP_TASK_REQ_LIST_BASE_L);
10391         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10392                         REG_UTP_TASK_REQ_LIST_BASE_H);
10393         /*
10394          * Make sure that UTRL and UTMRL base address registers
10395          * are updated with the latest queue addresses. Only after
10396          * updating these addresses, we can queue the new commands.
10397          */
10398         mb();
10399
10400         /* Resuming from hibernate, assume that link was OFF */
10401         ufshcd_set_link_off(hba);
10402
10403         return 0;
10405 }
10406 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
10407
10408 int ufshcd_system_thaw(struct device *dev)
10409 {
10410         return ufshcd_system_resume(dev);
10411 }
10412 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10413 #endif /* CONFIG_PM_SLEEP  */
10414
10415 /**
10416  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10417  * @hba: pointer to Host Bus Adapter (HBA)
10418  */
10419 void ufshcd_dealloc_host(struct ufs_hba *hba)
10420 {
10421         scsi_host_put(hba->host);
10422 }
10423 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
10424
10425 /**
10426  * ufshcd_set_dma_mask - Set dma mask based on the controller
10427  *                       addressing capability
10428  * @hba: per adapter instance
10429  *
10430  * Return: 0 for success, non-zero for failure.
10431  */
10432 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10433 {
10434         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10435                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10436                         return 0;
10437         }
10438         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10439 }
10440
10441 /**
10442  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10443  * @dev: pointer to device handle
10444  * @hba_handle: driver private handle
10445  *
10446  * Return: 0 on success, non-zero value on failure.
10447  */
10448 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10449 {
10450         struct Scsi_Host *host;
10451         struct ufs_hba *hba;
10452         int err = 0;
10453
10454         if (!dev) {
10455                 dev_err(dev,
10456                 "Invalid memory reference for dev is NULL\n");
10457                 err = -ENODEV;
10458                 goto out_error;
10459         }
10460
10461         host = scsi_host_alloc(&ufshcd_driver_template,
10462                                 sizeof(struct ufs_hba));
10463         if (!host) {
10464                 dev_err(dev, "scsi_host_alloc failed\n");
10465                 err = -ENOMEM;
10466                 goto out_error;
10467         }
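              /* Expose three blk-mq queue maps: default, read and poll (HCTX_TYPE_POLL + 1). */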
10468         host->nr_maps = HCTX_TYPE_POLL + 1;
10469         hba = shost_priv(host);
10470         hba->host = host;
10471         hba->dev = dev;
10472         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10473         hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10474         ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10475         INIT_LIST_HEAD(&hba->clk_list_head);
10476         spin_lock_init(&hba->outstanding_lock);
10477
10478         *hba_handle = hba;
10479
10480 out_error:
10481         return err;
10482 }
10483 EXPORT_SYMBOL(ufshcd_alloc_host);
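
      /*
       * A minimal sketch of how a platform glue driver's probe routine (with a
       * hypothetical struct platform_device *pdev) might chain
       * ufshcd_alloc_host() above with ufshcd_init() below; the resource
       * lookups and error handling here are illustrative only, not part of
       * this file:
       *
       *        struct ufs_hba *hba;
       *        void __iomem *mmio;
       *        int irq, err;
       *
       *        err = ufshcd_alloc_host(&pdev->dev, &hba);
       *        if (err)
       *                return err;
       *        mmio = devm_platform_ioremap_resource(pdev, 0);
       *        if (IS_ERR(mmio))
       *                return PTR_ERR(mmio);
       *        irq = platform_get_irq(pdev, 0);
       *        if (irq < 0)
       *                return irq;
       *        err = ufshcd_init(hba, mmio, irq);
       */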
10484
10485 /* This function exists because blk_mq_alloc_tag_set() requires this. */
10486 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10487                                      const struct blk_mq_queue_data *qd)
10488 {
10489         WARN_ON_ONCE(true);
10490         return BLK_STS_NOTSUPP;
10491 }
10492
10493 static const struct blk_mq_ops ufshcd_tmf_ops = {
10494         .queue_rq = ufshcd_queue_tmf,
10495 };
10496
10497 /**
10498  * ufshcd_init - Driver initialization routine
10499  * @hba: per-adapter instance
10500  * @mmio_base: base register address
10501  * @irq: Interrupt line of device
10502  *
10503  * Return: 0 on success, non-zero value on failure.
10504  */
10505 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10506 {
10507         int err;
10508         struct Scsi_Host *host = hba->host;
10509         struct device *dev = hba->dev;
10510         char eh_wq_name[sizeof("ufs_eh_wq_00")];
10511
10512         /*
10513          * dev_set_drvdata() must be called before any callbacks are registered
10514          * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10515          * sysfs).
10516          */
10517         dev_set_drvdata(dev, hba);
10518
10519         if (!mmio_base) {
10520                 dev_err(hba->dev,
10521                 "Invalid memory reference for mmio_base is NULL\n");
10522                 err = -ENODEV;
10523                 goto out_error;
10524         }
10525
10526         hba->mmio_base = mmio_base;
10527         hba->irq = irq;
10528         hba->vps = &ufs_hba_vps;
10529
10530         err = ufshcd_hba_init(hba);
10531         if (err)
10532                 goto out_error;
10533
10534         /* Read capabilities registers */
10535         err = ufshcd_hba_capabilities(hba);
10536         if (err)
10537                 goto out_disable;
10538
10539         /* Get UFS version supported by the controller */
10540         hba->ufs_version = ufshcd_get_ufs_version(hba);
10541
10542         /* Get Interrupt bit mask per version */
10543         hba->intr_mask = ufshcd_get_intr_mask(hba);
10544
10545         err = ufshcd_set_dma_mask(hba);
10546         if (err) {
10547                 dev_err(hba->dev, "set dma mask failed\n");
10548                 goto out_disable;
10549         }
10550
10551         /* Allocate memory for host memory space */
10552         err = ufshcd_memory_alloc(hba);
10553         if (err) {
10554                 dev_err(hba->dev, "Memory allocation failed\n");
10555                 goto out_disable;
10556         }
10557
10558         /* Configure LRB */
10559         ufshcd_host_memory_configure(hba);
10560
10561         host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
10562         host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
10563         host->max_id = UFSHCD_MAX_ID;
10564         host->max_lun = UFS_MAX_LUNS;
10565         host->max_channel = UFSHCD_MAX_CHANNEL;
10566         host->unique_id = host->host_no;
10567         host->max_cmd_len = UFS_CDB_SIZE;
10568         host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
10569
10570         /* Use the default RPM autosuspend delay if the host has not set one */
10571         if (host->rpm_autosuspend_delay == 0)
10572                 host->rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS;
10573
10574         hba->max_pwr_info.is_valid = false;
10575
10576         /* Initialize work queues */
10577         snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
10578                  hba->host->host_no);
10579         hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
10580         if (!hba->eh_wq) {
10581                 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
10582                         __func__);
10583                 err = -ENOMEM;
10584                 goto out_disable;
10585         }
10586         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10587         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10588
10589         sema_init(&hba->host_sem, 1);
10590
10591         /* Initialize UIC command mutex */
10592         mutex_init(&hba->uic_cmd_mutex);
10593
10594         /* Initialize mutex for device management commands */
10595         mutex_init(&hba->dev_cmd.lock);
10596
10597         /* Initialize mutex for exception event control */
10598         mutex_init(&hba->ee_ctrl_mutex);
10599
10600         mutex_init(&hba->wb_mutex);
10601         init_rwsem(&hba->clk_scaling_lock);
10602
10603         ufshcd_init_clk_gating(hba);
10604
10605         ufshcd_init_clk_scaling(hba);
10606
10607         /*
10608          * In order to avoid any spurious interrupt immediately after
10609          * registering UFS controller interrupt handler, clear any pending UFS
10610          * interrupt status and disable all the UFS interrupts.
10611          */
10612         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10613                       REG_INTERRUPT_STATUS);
10614         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10615         /*
10616          * Make sure that UFS interrupts are disabled and any pending interrupt
10617          * status is cleared before registering UFS interrupt handler.
10618          */
10619         mb();
10620
10621         /* IRQ registration */
10622         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10623         if (err) {
10624                 dev_err(hba->dev, "request irq failed\n");
10625                 goto out_disable;
10626         } else {
10627                 hba->is_irq_enabled = true;
10628         }
10629
10630         if (!is_mcq_supported(hba)) {
10631                 err = scsi_add_host(host, hba->dev);
10632                 if (err) {
10633                         dev_err(hba->dev, "scsi_add_host failed\n");
10634                         goto out_disable;
10635                 }
10636         }
10637
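              /*
               * The TMF tag set only borrows the block layer's tag allocator
               * for task management slots; no requests are ever dispatched on
               * tmf_queue (see ufshcd_queue_tmf() above).
               */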
10638         hba->tmf_tag_set = (struct blk_mq_tag_set) {
10639                 .nr_hw_queues   = 1,
10640                 .queue_depth    = hba->nutmrs,
10641                 .ops            = &ufshcd_tmf_ops,
10642                 .flags          = BLK_MQ_F_NO_SCHED,
10643         };
10644         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10645         if (err < 0)
10646                 goto out_remove_scsi_host;
10647         hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
10648         if (IS_ERR(hba->tmf_queue)) {
10649                 err = PTR_ERR(hba->tmf_queue);
10650                 goto free_tmf_tag_set;
10651         }
10652         hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10653                                     sizeof(*hba->tmf_rqs), GFP_KERNEL);
10654         if (!hba->tmf_rqs) {
10655                 err = -ENOMEM;
10656                 goto free_tmf_queue;
10657         }
10658
10659         /* Reset the attached device */
10660         ufshcd_device_reset(hba);
10661
10662         ufshcd_init_crypto(hba);
10663
10664         /* Host controller enable */
10665         err = ufshcd_hba_enable(hba);
10666         if (err) {
10667                 dev_err(hba->dev, "Host controller enable failed\n");
10668                 ufshcd_print_evt_hist(hba);
10669                 ufshcd_print_host_state(hba);
10670                 goto free_tmf_queue;
10671         }
10672
10673         /*
10674          * Set the default power management level for runtime and system PM.
10675          * Default power saving mode is to keep UFS link in Hibern8 state
10676          * and UFS device in sleep state.
10677          */
10678         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10679                                                 UFS_SLEEP_PWR_MODE,
10680                                                 UIC_LINK_HIBERN8_STATE);
10681         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10682                                                 UFS_SLEEP_PWR_MODE,
10683                                                 UIC_LINK_HIBERN8_STATE);
10684
10685         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
10686         INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
10687
10688         /* Set the default auto-hibernate idle timer value to 150 ms */
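              /* Scale value 3 selects 1 ms units, so a timer value of 150 gives 150 ms. */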
10689         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10690                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10691                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10692         }
10693
10694         /* Hold auto suspend until async scan completes */
10695         pm_runtime_get_sync(dev);
10696         atomic_set(&hba->scsi_block_reqs_cnt, 0);
10697         /*
10698          * We are assuming that the device was not put into a sleep/power-down
10699          * state during the boot stage, before the kernel started.
10700          * This assumption helps avoid doing link startup twice during
10701          * ufshcd_probe_hba().
10702          */
10703         ufshcd_set_ufs_dev_active(hba);
10704
10705         async_schedule(ufshcd_async_scan, hba);
10706         ufs_sysfs_add_nodes(hba->dev);
10707
10708         device_enable_async_suspend(dev);
10709         ufshcd_pm_qos_init(hba);
10710         return 0;
10711
10712 free_tmf_queue:
10713         blk_mq_destroy_queue(hba->tmf_queue);
10714         blk_put_queue(hba->tmf_queue);
10715 free_tmf_tag_set:
10716         blk_mq_free_tag_set(&hba->tmf_tag_set);
10717 out_remove_scsi_host:
10718         scsi_remove_host(hba->host);
10719 out_disable:
10720         hba->is_irq_enabled = false;
10721         ufshcd_hba_exit(hba);
10722 out_error:
10723         return err;
10724 }
10725 EXPORT_SYMBOL_GPL(ufshcd_init);
10726
10727 void ufshcd_resume_complete(struct device *dev)
10728 {
10729         struct ufs_hba *hba = dev_get_drvdata(dev);
10730
10731         if (hba->complete_put) {
10732                 ufshcd_rpm_put(hba);
10733                 hba->complete_put = false;
10734         }
10735 }
10736 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
10737
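      /*
       * Check whether the device WLUN is already runtime suspended in the same
       * device power mode and link state that system suspend (spm_lvl) would
       * choose, so that __ufshcd_suspend_prepare() can skip the runtime resume.
       */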
10738 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
10739 {
10740         struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
10741         enum ufs_dev_pwr_mode dev_pwr_mode;
10742         enum uic_link_state link_state;
10743         unsigned long flags;
10744         bool res;
10745
10746         spin_lock_irqsave(&dev->power.lock, flags);
10747         dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
10748         link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
10749         res = pm_runtime_suspended(dev) &&
10750               hba->curr_dev_pwr_mode == dev_pwr_mode &&
10751               hba->uic_link_state == link_state &&
10752               !hba->dev_info.b_rpm_dev_flush_capable;
10753         spin_unlock_irqrestore(&dev->power.lock, flags);
10754
10755         return res;
10756 }
10757
10758 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
10759 {
10760         struct ufs_hba *hba = dev_get_drvdata(dev);
10761         int ret;
10762
10763         /*
10764          * The SCSI core assumes that runtime PM and system PM for SCSI
10765          * drivers are the same, so it does not wake up a runtime-suspended
10766          * device for system suspend. UFS does not follow that assumption;
10767          * see ufshcd_resume_complete().
10768          */
10769         if (hba->ufs_device_wlun) {
10770                 /* Prevent runtime suspend */
10771                 ufshcd_rpm_get_noresume(hba);
10772                 /*
10773                  * Check if already runtime suspended in same state as system
10774                  * suspend would be.
10775                  */
10776                 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
10777                         /* RPM state is not ok for SPM, so runtime resume */
10778                         ret = ufshcd_rpm_resume(hba);
10779                         if (ret < 0 && ret != -EACCES) {
10780                                 ufshcd_rpm_put(hba);
10781                                 return ret;
10782                         }
10783                 }
10784                 hba->complete_put = true;
10785         }
10786         return 0;
10787 }
10788 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
10789
10790 int ufshcd_suspend_prepare(struct device *dev)
10791 {
10792         return __ufshcd_suspend_prepare(dev, true);
10793 }
10794 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
10795
10796 #ifdef CONFIG_PM_SLEEP
10797 static int ufshcd_wl_poweroff(struct device *dev)
10798 {
10799         struct scsi_device *sdev = to_scsi_device(dev);
10800         struct ufs_hba *hba = shost_priv(sdev->host);
10801
10802         __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10803         return 0;
10804 }
10805 #endif
10806
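      /*
       * Bind only to the UFS device WLUN and set up runtime PM on its request
       * queue so that the WLUN can be runtime suspended and resumed
       * independently of the regular LUNs.
       */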
10807 static int ufshcd_wl_probe(struct device *dev)
10808 {
10809         struct scsi_device *sdev = to_scsi_device(dev);
10810
10811         if (!is_device_wlun(sdev))
10812                 return -ENODEV;
10813
10814         blk_pm_runtime_init(sdev->request_queue, dev);
10815         pm_runtime_set_autosuspend_delay(dev, 0);
10816         pm_runtime_allow(dev);
10817
10818         return  0;
10819 }
10820
10821 static int ufshcd_wl_remove(struct device *dev)
10822 {
10823         pm_runtime_forbid(dev);
10824         return 0;
10825 }
10826
10827 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
10828 #ifdef CONFIG_PM_SLEEP
10829         .suspend = ufshcd_wl_suspend,
10830         .resume = ufshcd_wl_resume,
10831         .freeze = ufshcd_wl_suspend,
10832         .thaw = ufshcd_wl_resume,
10833         .poweroff = ufshcd_wl_poweroff,
10834         .restore = ufshcd_wl_resume,
10835 #endif
10836         SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
10837 };
10838
10839 static void ufshcd_check_header_layout(void)
10840 {
10841         /*
10842          * gcc compilers before version 10 cannot do constant-folding for
10843          * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
10844          * before.
10845          */
10846         if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
10847                 return;
10848
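              /*
               * Verify at compile time that the C bitfields below land on the
               * byte and bit positions that the UFSHCI transfer request
               * descriptor header defines.
               */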
10849         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10850                                 .cci = 3})[0] != 3);
10851
10852         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10853                                 .ehs_length = 2})[1] != 2);
10854
10855         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10856                                 .enable_crypto = 1})[2]
10857                      != 0x80);
10858
10859         BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
10860                                         .command_type = 5,
10861                                         .data_direction = 3,
10862                                         .interrupt = 1,
10863                                 })[3]) != ((5 << 4) | (3 << 1) | 1));
10864
10865         BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10866                                 .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
10867                 cpu_to_le32(0xdeadbeef));
10868
10869         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10870                                 .ocs = 4})[8] != 4);
10871
10872         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10873                                 .cds = 5})[9] != 5);
10874
10875         BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10876                                 .dunu = cpu_to_le32(0xbadcafe)})[3] !=
10877                 cpu_to_le32(0xbadcafe));
10878
10879         BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10880                              .iid = 0xf })[4] != 0xf0);
10881
10882         BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10883                              .command_set_type = 0xf })[4] != 0xf);
10884 }
10885
10886 /*
10887  * ufs_dev_wlun_template - describes the UFS device WLUN
10888  * ufs-device wlun - used to send PM commands.
10889  * All LUNs are consumers of the ufs-device WLUN.
10890  *
10891  * Currently, no sd driver is present for WLUNs.
10892  * Hence no specific PM operations are performed.
10893  * Per the UFS design, SSU should be sent to the ufs-device WLUN.
10894  * Hence a SCSI driver is registered for UFS WLUNs only.
10895  */
10896 static struct scsi_driver ufs_dev_wlun_template = {
10897         .gendrv = {
10898                 .name = "ufs_device_wlun",
10899                 .owner = THIS_MODULE,
10900                 .probe = ufshcd_wl_probe,
10901                 .remove = ufshcd_wl_remove,
10902                 .pm = &ufshcd_wl_pm_ops,
10903                 .shutdown = ufshcd_wl_shutdown,
10904         },
10905 };
10906
10907 static int __init ufshcd_core_init(void)
10908 {
10909         int ret;
10910
10911         ufshcd_check_header_layout();
10912
10913         ufs_debugfs_init();
10914
10915         ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10916         if (ret)
10917                 ufs_debugfs_exit();
10918         return ret;
10919 }
10920
10921 static void __exit ufshcd_core_exit(void)
10922 {
10923         ufs_debugfs_exit();
10924         scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10925 }
10926
10927 module_init(ufshcd_core_init);
10928 module_exit(ufshcd_core_exit);
10929
10930 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
10931 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10932 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10933 MODULE_SOFTDEP("pre: governor_simpleondemand");
10934 MODULE_LICENSE("GPL");