scsi: ufs: Add quirk to fix abnormal ocs fatal error
linux-block.git: drivers/scsi/ufs/ufshcd.c
1 /*
2  * Universal Flash Storage Host controller driver Core
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2013 Samsung India Software Operations
6  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7  *
8  * Authors:
9  *      Santosh Yaraganavi <santosh.sy@samsung.com>
10  *      Vinayak Holikatti <h.vinayak@samsung.com>
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License
14  * as published by the Free Software Foundation; either version 2
15  * of the License, or (at your option) any later version.
16  * See the COPYING file in the top-level directory or visit
17  * <http://www.gnu.org/licenses/gpl-2.0.html>
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  * GNU General Public License for more details.
23  *
24  * This program is provided "AS IS" and "WITH ALL FAULTS" and
25  * without warranty of any kind. You are solely responsible for
26  * determining the appropriateness of using and distributing
27  * the program and assume all risks associated with your exercise
28  * of rights with respect to the program, including but not limited
29  * to infringement of third party rights, the risks and costs of
30  * program errors, damage to or loss of data, programs or equipment,
31  * and unavailability or interruption of operations. Under no
32  * circumstances will the contributor of this Program be liable for
33  * any damages of any kind arising from your use or distribution of
34  * this program.
35  *
36  * The Linux Foundation chooses to take subject only to the GPLv2
37  * license terms, and distributes only under these terms.
38  */
39
40 #include <linux/async.h>
41 #include <linux/devfreq.h>
42 #include <linux/nls.h>
43 #include <linux/of.h>
44 #include <linux/bitfield.h>
45 #include <linux/blk-pm.h>
46 #include "ufshcd.h"
47 #include "ufs_quirks.h"
48 #include "unipro.h"
49 #include "ufs-sysfs.h"
50 #include "ufs_bsg.h"
51 #include <asm/unaligned.h>
52 #include <linux/blkdev.h>
53
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/ufs.h>
56
57 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
58                                  UTP_TASK_REQ_COMPL |\
59                                  UFSHCD_ERROR_MASK)
60 /* UIC command timeout, unit: ms */
61 #define UIC_CMD_TIMEOUT 500
62
63 /* NOP OUT retries waiting for NOP IN response */
64 #define NOP_OUT_RETRIES    10
65 /* Timeout after 30 msecs if NOP OUT hangs without response */
66 #define NOP_OUT_TIMEOUT    30 /* msecs */
67
68 /* Query request retries */
69 #define QUERY_REQ_RETRIES 3
70 /* Query request timeout */
71 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
72
73 /* Task management command timeout */
74 #define TM_CMD_TIMEOUT  100 /* msecs */
75
76 /* maximum number of retries for a general UIC command  */
77 #define UFS_UIC_COMMAND_RETRIES 3
78
79 /* maximum number of link-startup retries */
80 #define DME_LINKSTARTUP_RETRIES 3
81
82 /* Maximum retries for Hibern8 enter */
83 #define UIC_HIBERN8_ENTER_RETRIES 3
84
85 /* maximum number of reset retries before giving up */
86 #define MAX_HOST_RESET_RETRIES 5
87
88 /* Expose the flag value from utp_upiu_query.value */
89 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
90
91 /* Interrupt aggregation default timeout, unit: 40us */
92 #define INT_AGGR_DEF_TO 0x02
93
94 /* default delay of autosuspend: 2000 ms */
95 #define RPM_AUTOSUSPEND_DELAY_MS 2000
96
97 /* Default delay of RPM device flush delayed work */
98 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
99
100 /* Default value of wait time before gating device ref clock */
101 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
102
103 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
104         ({                                                              \
105                 int _ret;                                               \
106                 if (_on)                                                \
107                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
108                 else                                                    \
109                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
110                 _ret;                                                   \
111         })
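
/*
 * Usage sketch (illustrative, not part of this file): because the macro is a
 * statement expression, it yields the enable/disable return value directly.
 * The vcc regulator below is only an assumed example operand.
 *
 *	ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 *	if (ret)
 *		dev_err(hba->dev, "failed to enable vcc: %d\n", ret);
 */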
112
113 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
114         size_t __len = (len);                                            \
115         print_hex_dump(KERN_ERR, prefix_str,                             \
116                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
117                        16, 4, buf, __len, false);                        \
118 } while (0)
119
120 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
121                      const char *prefix)
122 {
123         u32 *regs;
124         size_t pos;
125
126         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
127                 return -EINVAL;
128
129         regs = kzalloc(len, GFP_ATOMIC);
130         if (!regs)
131                 return -ENOMEM;
132
133         for (pos = 0; pos < len; pos += 4)
134                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
135
136         ufshcd_hex_dump(prefix, regs, len);
137         kfree(regs);
138
139         return 0;
140 }
141 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
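
/*
 * Example (illustrative): a vendor ->dbg_register_dump() hook could reuse
 * ufshcd_dump_regs() to dump a vendor-specific register window; the 0x200
 * offset and 0x40 length below are made-up values.
 *
 *	static void example_dbg_register_dump(struct ufs_hba *hba)
 *	{
 *		ufshcd_dump_regs(hba, 0x200, 0x40, "vendor_regs: ");
 *	}
 */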
142
143 enum {
144         UFSHCD_MAX_CHANNEL      = 0,
145         UFSHCD_MAX_ID           = 1,
146         UFSHCD_CMD_PER_LUN      = 32,
147         UFSHCD_CAN_QUEUE        = 32,
148 };
149
150 /* UFSHCD states */
151 enum {
152         UFSHCD_STATE_RESET,
153         UFSHCD_STATE_ERROR,
154         UFSHCD_STATE_OPERATIONAL,
155         UFSHCD_STATE_EH_SCHEDULED,
156 };
157
158 /* UFSHCD error handling flags */
159 enum {
160         UFSHCD_EH_IN_PROGRESS = (1 << 0),
161 };
162
163 /* UFSHCD UIC layer error flags */
164 enum {
165         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
166         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
167         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
168         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
169         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
170         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
171 };
172
173 #define ufshcd_set_eh_in_progress(h) \
174         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
175 #define ufshcd_eh_in_progress(h) \
176         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
177 #define ufshcd_clear_eh_in_progress(h) \
178         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
179
180 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
181         {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
182         {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
183         {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
184         {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
185         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
186         {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
187 };
188
189 static inline enum ufs_dev_pwr_mode
190 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
191 {
192         return ufs_pm_lvl_states[lvl].dev_state;
193 }
194
195 static inline enum uic_link_state
196 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
197 {
198         return ufs_pm_lvl_states[lvl].link_state;
199 }
200
201 static inline enum ufs_pm_level
202 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
203                                         enum uic_link_state link_state)
204 {
205         enum ufs_pm_level lvl;
206
207         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
208                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
209                         (ufs_pm_lvl_states[lvl].link_state == link_state))
210                         return lvl;
211         }
212
213         /* if no match is found, return level 0 */
214         return UFS_PM_LVL_0;
215 }
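
/*
 * For example, with the ufs_pm_lvl_states[] table above,
 * ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 * UIC_LINK_HIBERN8_STATE) returns UFS_PM_LVL_3, while an unsupported
 * combination falls back to UFS_PM_LVL_0.
 */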
216
217 static struct ufs_dev_fix ufs_fixups[] = {
218         /* UFS cards deviations table */
219         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
220                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
221         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
222                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
223         UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
224                 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
225         UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
226                 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
227         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
228                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
229         UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
230                 UFS_DEVICE_QUIRK_PA_TACTIVATE),
231         UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
232                 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
233         UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
234                 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
235
236         END_FIX
237 };
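
/*
 * A new deviation would be added as one more UFS_FIX() entry before END_FIX,
 * for instance (hypothetical model string and quirk choice):
 *
 *	UFS_FIX(UFS_VENDOR_SKHYNIX, "EXAMPLE-MODEL",
 *		UFS_DEVICE_QUIRK_PA_TACTIVATE),
 */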
238
239 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
240 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
241 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
242 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
243 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
244 static void ufshcd_hba_exit(struct ufs_hba *hba);
245 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
246 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
247                                  bool skip_ref_clk);
248 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
249 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
250 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
251 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
252 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
253 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
254 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
255 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
256 static irqreturn_t ufshcd_intr(int irq, void *__hba);
257 static int ufshcd_change_power_mode(struct ufs_hba *hba,
258                              struct ufs_pa_layer_attr *pwr_mode);
259 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
260 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
261 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
262 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
263 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
264
265 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
266 {
267         return tag >= 0 && tag < hba->nutrs;
268 }
269
270 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
271 {
272         if (!hba->is_irq_enabled) {
273                 enable_irq(hba->irq);
274                 hba->is_irq_enabled = true;
275         }
276 }
277
278 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
279 {
280         if (hba->is_irq_enabled) {
281                 disable_irq(hba->irq);
282                 hba->is_irq_enabled = false;
283         }
284 }
285
286 static inline void ufshcd_wb_config(struct ufs_hba *hba)
287 {
288         int ret;
289
290         if (!ufshcd_is_wb_allowed(hba))
291                 return;
292
293         ret = ufshcd_wb_ctrl(hba, true);
294         if (ret)
295                 dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
296         else
297                 dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
298         ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
299         if (ret)
300                 dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
301                         __func__, ret);
302         ufshcd_wb_toggle_flush(hba, true);
303 }
304
305 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
306 {
307         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
308                 scsi_unblock_requests(hba->host);
309 }
310
311 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
312 {
313         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
314                 scsi_block_requests(hba->host);
315 }
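
/*
 * These two helpers nest via scsi_block_reqs_cnt: each
 * ufshcd_scsi_block_requests() call must be balanced by one
 * ufshcd_scsi_unblock_requests() call before the SCSI midlayer is
 * unblocked again, e.g.:
 *
 *	ufshcd_scsi_block_requests(hba);	- count 0 -> 1, blocks
 *	ufshcd_scsi_block_requests(hba);	- count 1 -> 2, nested
 *	ufshcd_scsi_unblock_requests(hba);	- count 2 -> 1, still blocked
 *	ufshcd_scsi_unblock_requests(hba);	- count 1 -> 0, unblocks
 */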
316
317 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
318                 const char *str)
319 {
320         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
321
322         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
323 }
324
325 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
326                 const char *str)
327 {
328         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
329
330         trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
331 }
332
333 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
334                 const char *str)
335 {
336         int off = (int)tag - hba->nutrs;
337         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
338
339         trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
340                         &descp->input_param1);
341 }
342
343 static void ufshcd_add_command_trace(struct ufs_hba *hba,
344                 unsigned int tag, const char *str)
345 {
346         sector_t lba = -1;
347         u8 opcode = 0;
348         u32 intr, doorbell;
349         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
350         struct scsi_cmnd *cmd = lrbp->cmd;
351         int transfer_len = -1;
352
353         if (!trace_ufshcd_command_enabled()) {
354                 /* trace UPIU W/O tracing command */
355                 if (cmd)
356                         ufshcd_add_cmd_upiu_trace(hba, tag, str);
357                 return;
358         }
359
360         if (cmd) { /* data phase exists */
361                 /* trace UPIU also */
362                 ufshcd_add_cmd_upiu_trace(hba, tag, str);
363                 opcode = cmd->cmnd[0];
364                 if ((opcode == READ_10) || (opcode == WRITE_10)) {
365                         /*
366                          * Currently we only fully trace read(10) and write(10)
367                          * commands
368                          */
369                         if (cmd->request && cmd->request->bio)
370                                 lba = cmd->request->bio->bi_iter.bi_sector;
371                         transfer_len = be32_to_cpu(
372                                 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
373                 }
374         }
375
376         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
377         doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
378         trace_ufshcd_command(dev_name(hba->dev), str, tag,
379                                 doorbell, transfer_len, intr, lba, opcode);
380 }
381
382 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
383 {
384         struct ufs_clk_info *clki;
385         struct list_head *head = &hba->clk_list_head;
386
387         if (list_empty(head))
388                 return;
389
390         list_for_each_entry(clki, head, list) {
391                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
392                                 clki->max_freq)
393                         dev_err(hba->dev, "clk: %s, rate: %u\n",
394                                         clki->name, clki->curr_freq);
395         }
396 }
397
398 static void ufshcd_print_err_hist(struct ufs_hba *hba,
399                                   struct ufs_err_reg_hist *err_hist,
400                                   char *err_name)
401 {
402         int i;
403         bool found = false;
404
405         for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
406                 int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
407
408                 if (err_hist->tstamp[p] == 0)
409                         continue;
410                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
411                         err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
412                 found = true;
413         }
414
415         if (!found)
416                 dev_err(hba->dev, "No record of %s\n", err_name);
417 }
418
419 static void ufshcd_print_host_regs(struct ufs_hba *hba)
420 {
421         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
422         dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
423                 hba->ufs_version, hba->capabilities);
424         dev_err(hba->dev,
425                 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
426                 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
427         dev_err(hba->dev,
428                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
429                 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
430                 hba->ufs_stats.hibern8_exit_cnt);
431
432         ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
433         ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
434         ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
435         ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
436         ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
437         ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
438                               "auto_hibern8_err");
439         ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
440         ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
441                               "link_startup_fail");
442         ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
443         ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
444                               "suspend_fail");
445         ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
446         ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
447         ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
448
449         ufshcd_print_clk_freqs(hba);
450
451         ufshcd_vops_dbg_register_dump(hba);
452 }
453
454 static
455 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
456 {
457         struct ufshcd_lrb *lrbp;
458         int prdt_length;
459         int tag;
460
461         for_each_set_bit(tag, &bitmap, hba->nutrs) {
462                 lrbp = &hba->lrb[tag];
463
464                 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
465                                 tag, ktime_to_us(lrbp->issue_time_stamp));
466                 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
467                                 tag, ktime_to_us(lrbp->compl_time_stamp));
468                 dev_err(hba->dev,
469                         "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
470                         tag, (u64)lrbp->utrd_dma_addr);
471
472                 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
473                                 sizeof(struct utp_transfer_req_desc));
474                 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
475                         (u64)lrbp->ucd_req_dma_addr);
476                 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
477                                 sizeof(struct utp_upiu_req));
478                 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
479                         (u64)lrbp->ucd_rsp_dma_addr);
480                 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
481                                 sizeof(struct utp_upiu_rsp));
482
483                 prdt_length = le16_to_cpu(
484                         lrbp->utr_descriptor_ptr->prd_table_length);
485                 dev_err(hba->dev,
486                         "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
487                         tag, prdt_length,
488                         (u64)lrbp->ucd_prdt_dma_addr);
489
490                 if (pr_prdt)
491                         ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
492                                 sizeof(struct ufshcd_sg_entry) * prdt_length);
493         }
494 }
495
496 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
497 {
498         int tag;
499
500         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
501                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
502
503                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
504                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
505         }
506 }
507
508 static void ufshcd_print_host_state(struct ufs_hba *hba)
509 {
510         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
511         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
512                 hba->outstanding_reqs, hba->outstanding_tasks);
513         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
514                 hba->saved_err, hba->saved_uic_err);
515         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
516                 hba->curr_dev_pwr_mode, hba->uic_link_state);
517         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
518                 hba->pm_op_in_progress, hba->is_sys_suspended);
519         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
520                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
521         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
522         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
523                 hba->eh_flags, hba->req_abort_count);
524         dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
525                 hba->capabilities, hba->caps);
526         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
527                 hba->dev_quirks);
528 }
529
530 /**
531  * ufshcd_print_pwr_info - print power parameters as saved in the hba
532  * power info
533  * @hba: per-adapter instance
534  */
535 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
536 {
537         static const char * const names[] = {
538                 "INVALID MODE",
539                 "FAST MODE",
540                 "SLOW_MODE",
541                 "INVALID MODE",
542                 "FASTAUTO_MODE",
543                 "SLOWAUTO_MODE",
544                 "INVALID MODE",
545         };
546
547         dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
548                  __func__,
549                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
550                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
551                  names[hba->pwr_info.pwr_rx],
552                  names[hba->pwr_info.pwr_tx],
553                  hba->pwr_info.hs_rate);
554 }
555
556 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
557 {
558         if (!us)
559                 return;
560
561         if (us < 10)
562                 udelay(us);
563         else
564                 usleep_range(us, us + tolerance);
565 }
566 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
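
/*
 * For example, ufshcd_delay_us(100, 10) sleeps for roughly 100-110 us using
 * usleep_range(), while ufshcd_delay_us(5, 10) busy-waits with udelay()
 * because the requested delay is under 10 us.
 */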
567
568 /**
569  * ufshcd_wait_for_register - wait for register value to change
570  * @hba: per-adapter interface
571  * @reg: mmio register offset
572  * @mask: mask to apply to the read register value
573  * @val: value to wait for
574  * @interval_us: polling interval in microseconds
575  * @timeout_ms: timeout in milliseconds
576  *
577  * Return:
578  * -ETIMEDOUT on error, zero on success.
579  */
580 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
581                                 u32 val, unsigned long interval_us,
582                                 unsigned long timeout_ms)
583 {
584         int err = 0;
585         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
586
587         /* ignore bits that we don't intend to wait on */
588         val = val & mask;
589
590         while ((ufshcd_readl(hba, reg) & mask) != val) {
591                 usleep_range(interval_us, interval_us + 50);
592                 if (time_after(jiffies, timeout)) {
593                         if ((ufshcd_readl(hba, reg) & mask) != val)
594                                 err = -ETIMEDOUT;
595                         break;
596                 }
597         }
598
599         return err;
600 }
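
/*
 * Usage sketch (illustrative values; "tag" is an assumed local): wait up to
 * 100 ms, polling every 1000 us, for one transfer request doorbell bit to
 * clear:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 1000, 100);
 *	if (err)
 *		dev_err(hba->dev, "doorbell %d did not clear\n", tag);
 */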
601
602 /**
603  * ufshcd_get_intr_mask - Get the interrupt bit mask
604  * @hba: Pointer to adapter instance
605  *
606  * Returns interrupt bit mask per version
607  */
608 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
609 {
610         u32 intr_mask = 0;
611
612         switch (hba->ufs_version) {
613         case UFSHCI_VERSION_10:
614                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
615                 break;
616         case UFSHCI_VERSION_11:
617         case UFSHCI_VERSION_20:
618                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
619                 break;
620         case UFSHCI_VERSION_21:
621         default:
622                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
623                 break;
624         }
625
626         return intr_mask;
627 }
628
629 /**
630  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
631  * @hba: Pointer to adapter instance
632  *
633  * Returns UFSHCI version supported by the controller
634  */
635 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
636 {
637         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
638                 return ufshcd_vops_get_ufs_hci_version(hba);
639
640         return ufshcd_readl(hba, REG_UFS_VERSION);
641 }
642
643 /**
644  * ufshcd_is_device_present - Check if any device is connected to
645  *                            the host controller
646  * @hba: pointer to adapter instance
647  *
648  * Returns true if device present, false if no device detected
649  */
650 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
651 {
652         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
653                                                 DEVICE_PRESENT) ? true : false;
654 }
655
656 /**
657  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
658  * @lrbp: pointer to local command reference block
659  *
660  * This function is used to get the OCS field from UTRD
661  * Returns the OCS field in the UTRD
662  */
663 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
664 {
665         return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
666 }
667
668 /**
669  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
670  * @hba: per adapter instance
671  * @pos: position of the bit to be cleared
672  */
673 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
674 {
675         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
676                 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
677         else
678                 ufshcd_writel(hba, ~(1 << pos),
679                                 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
680 }
681
682 /**
683  * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
684  * @hba: per adapter instance
685  * @pos: position of the bit to be cleared
686  */
687 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
688 {
689         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
690                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
691         else
692                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
693 }
694
695 /**
696  * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
697  * @hba: per adapter instance
698  * @tag: position of the bit to be cleared
699  */
700 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
701 {
702         __clear_bit(tag, &hba->outstanding_reqs);
703 }
704
705 /**
706  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
707  * @reg: Register value of host controller status
708  *
709  * Returns 0 on success and a positive value on failure
710  */
711 static inline int ufshcd_get_lists_status(u32 reg)
712 {
713         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
714 }
715
716 /**
717  * ufshcd_get_uic_cmd_result - Get the UIC command result
718  * @hba: Pointer to adapter instance
719  *
720  * This function gets the result of UIC command completion
721  * Returns 0 on success, non zero value on error
722  */
723 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
724 {
725         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
726                MASK_UIC_COMMAND_RESULT;
727 }
728
729 /**
730  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
731  * @hba: Pointer to adapter instance
732  *
733  * This function reads UIC command argument3
734  * Returns the value of UIC command argument3 (the requested attribute value)
735  */
736 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
737 {
738         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
739 }
740
741 /**
742  * ufshcd_get_req_rsp - returns the TR response transaction type
743  * @ucd_rsp_ptr: pointer to response UPIU
744  */
745 static inline int
746 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
747 {
748         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
749 }
750
751 /**
752  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
753  * @ucd_rsp_ptr: pointer to response UPIU
754  *
755  * This function gets the response status and scsi_status from response UPIU
756  * Returns the response result code.
757  */
758 static inline int
759 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
760 {
761         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
762 }
763
764 /**
765  * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
766  *                              from response UPIU
767  * @ucd_rsp_ptr: pointer to response UPIU
768  *
769  * Return the data segment length.
770  */
771 static inline unsigned int
772 ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
773 {
774         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
775                 MASK_RSP_UPIU_DATA_SEG_LEN;
776 }
777
778 /**
779  * ufshcd_is_exception_event - Check if the device raised an exception event
780  * @ucd_rsp_ptr: pointer to response UPIU
781  *
782  * The function checks if the device raised an exception event indicated in
783  * the Device Information field of response UPIU.
784  *
785  * Returns true if exception is raised, false otherwise.
786  */
787 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
788 {
789         return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
790                         MASK_RSP_EXCEPTION_EVENT ? true : false;
791 }
792
793 /**
794  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
795  * @hba: per adapter instance
796  */
797 static inline void
798 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
799 {
800         ufshcd_writel(hba, INT_AGGR_ENABLE |
801                       INT_AGGR_COUNTER_AND_TIMER_RESET,
802                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
803 }
804
805 /**
806  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
807  * @hba: per adapter instance
808  * @cnt: Interrupt aggregation counter threshold
809  * @tmout: Interrupt aggregation timeout value
810  */
811 static inline void
812 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
813 {
814         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
815                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
816                       INT_AGGR_TIMEOUT_VAL(tmout),
817                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
818 }
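
/*
 * Example (illustrative): with INT_AGGR_DEF_TO = 0x02 and a 40 us timeout
 * granularity, a configuration like the one below arms aggregation with a
 * counter threshold of nutrs - 1 and an 80 us (2 x 40 us) timeout:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */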
819
820 /**
821  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
822  * @hba: per adapter instance
823  */
824 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
825 {
826         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
827 }
828
829 /**
830  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
831  *                      When the run-stop registers are set to 1, they indicate
832  *                      to the host controller that it can process requests.
833  * @hba: per adapter instance
834  */
835 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
836 {
837         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
838                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
839         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
840                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
841 }
842
843 /**
844  * ufshcd_hba_start - Start controller initialization sequence
845  * @hba: per adapter instance
846  */
847 static inline void ufshcd_hba_start(struct ufs_hba *hba)
848 {
849         ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
850 }
851
852 /**
853  * ufshcd_is_hba_active - Get controller state
854  * @hba: per adapter instance
855  *
856  * Returns false if controller is active, true otherwise
857  */
858 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
859 {
860         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
861                 ? false : true;
862 }
863
864 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
865 {
866         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
867         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
868             (hba->ufs_version == UFSHCI_VERSION_11))
869                 return UFS_UNIPRO_VER_1_41;
870         else
871                 return UFS_UNIPRO_VER_1_6;
872 }
873 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
874
875 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
876 {
877         /*
878          * If both the host and the device support UniPro ver1.6 or later,
879          * PA layer parameter tuning happens during link startup itself.
880          *
881          * We can manually tune PA layer parameters if either host or device
882          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
883          * logic simple, we will only do manual tuning if local unipro version
884          * doesn't support ver1.6 or later.
885          */
886         if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
887                 return true;
888         else
889                 return false;
890 }
891
892 /**
893  * ufshcd_set_clk_freq - set UFS controller clock frequencies
894  * @hba: per adapter instance
895  * @scale_up: If true, set the max possible frequency, otherwise set the low frequency
896  *
897  * Returns 0 if successful
898  * Returns < 0 for any other errors
899  */
900 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
901 {
902         int ret = 0;
903         struct ufs_clk_info *clki;
904         struct list_head *head = &hba->clk_list_head;
905
906         if (list_empty(head))
907                 goto out;
908
909         list_for_each_entry(clki, head, list) {
910                 if (!IS_ERR_OR_NULL(clki->clk)) {
911                         if (scale_up && clki->max_freq) {
912                                 if (clki->curr_freq == clki->max_freq)
913                                         continue;
914
915                                 ret = clk_set_rate(clki->clk, clki->max_freq);
916                                 if (ret) {
917                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
918                                                 __func__, clki->name,
919                                                 clki->max_freq, ret);
920                                         break;
921                                 }
922                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
923                                                 "scaled up", clki->name,
924                                                 clki->curr_freq,
925                                                 clki->max_freq);
926
927                                 clki->curr_freq = clki->max_freq;
928
929                         } else if (!scale_up && clki->min_freq) {
930                                 if (clki->curr_freq == clki->min_freq)
931                                         continue;
932
933                                 ret = clk_set_rate(clki->clk, clki->min_freq);
934                                 if (ret) {
935                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
936                                                 __func__, clki->name,
937                                                 clki->min_freq, ret);
938                                         break;
939                                 }
940                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
941                                                 "scaled down", clki->name,
942                                                 clki->curr_freq,
943                                                 clki->min_freq);
944                                 clki->curr_freq = clki->min_freq;
945                         }
946                 }
947                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
948                                 clki->name, clk_get_rate(clki->clk));
949         }
950
951 out:
952         return ret;
953 }
954
955 /**
956  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
957  * @hba: per adapter instance
958  * @scale_up: True if scaling up and false if scaling down
959  *
960  * Returns 0 if successful
961  * Returns < 0 for any other errors
962  */
963 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
964 {
965         int ret = 0;
966         ktime_t start = ktime_get();
967
968         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
969         if (ret)
970                 goto out;
971
972         ret = ufshcd_set_clk_freq(hba, scale_up);
973         if (ret)
974                 goto out;
975
976         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
977         if (ret)
978                 ufshcd_set_clk_freq(hba, !scale_up);
979
980 out:
981         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
982                         (scale_up ? "up" : "down"),
983                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
984         return ret;
985 }
986
987 /**
988  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
989  * @hba: per adapter instance
990  * @scale_up: True if scaling up and false if scaling down
991  *
992  * Returns true if scaling is required, false otherwise.
993  */
994 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
995                                                bool scale_up)
996 {
997         struct ufs_clk_info *clki;
998         struct list_head *head = &hba->clk_list_head;
999
1000         if (list_empty(head))
1001                 return false;
1002
1003         list_for_each_entry(clki, head, list) {
1004                 if (!IS_ERR_OR_NULL(clki->clk)) {
1005                         if (scale_up && clki->max_freq) {
1006                                 if (clki->curr_freq == clki->max_freq)
1007                                         continue;
1008                                 return true;
1009                         } else if (!scale_up && clki->min_freq) {
1010                                 if (clki->curr_freq == clki->min_freq)
1011                                         continue;
1012                                 return true;
1013                         }
1014                 }
1015         }
1016
1017         return false;
1018 }
1019
1020 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1021                                         u64 wait_timeout_us)
1022 {
1023         unsigned long flags;
1024         int ret = 0;
1025         u32 tm_doorbell;
1026         u32 tr_doorbell;
1027         bool timeout = false, do_last_check = false;
1028         ktime_t start;
1029
1030         ufshcd_hold(hba, false);
1031         spin_lock_irqsave(hba->host->host_lock, flags);
1032         /*
1033          * Wait for all the outstanding tasks/transfer requests.
1034          * Verify by checking the doorbell registers are clear.
1035          */
1036         start = ktime_get();
1037         do {
1038                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1039                         ret = -EBUSY;
1040                         goto out;
1041                 }
1042
1043                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1044                 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1045                 if (!tm_doorbell && !tr_doorbell) {
1046                         timeout = false;
1047                         break;
1048                 } else if (do_last_check) {
1049                         break;
1050                 }
1051
1052                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1053                 schedule();
1054                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1055                     wait_timeout_us) {
1056                         timeout = true;
1057                         /*
1058                          * We might have been scheduled out for a long time,
1059                          * so check whether the doorbells have cleared by
1060                          * now or not.
1061                          */
1062                         do_last_check = true;
1063                 }
1064                 spin_lock_irqsave(hba->host->host_lock, flags);
1065         } while (tm_doorbell || tr_doorbell);
1066
1067         if (timeout) {
1068                 dev_err(hba->dev,
1069                         "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1070                         __func__, tm_doorbell, tr_doorbell);
1071                 ret = -EBUSY;
1072         }
1073 out:
1074         spin_unlock_irqrestore(hba->host->host_lock, flags);
1075         ufshcd_release(hba);
1076         return ret;
1077 }
1078
1079 /**
1080  * ufshcd_scale_gear - scale up/down UFS gear
1081  * @hba: per adapter instance
1082  * @scale_up: True for scaling up gear and false for scaling down
1083  *
1084  * Returns 0 for success,
1085  * Returns -EBUSY if scaling can't happen at this time
1086  * Returns non-zero for any other errors
1087  */
1088 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1089 {
1090         #define UFS_MIN_GEAR_TO_SCALE_DOWN      UFS_HS_G1
1091         int ret = 0;
1092         struct ufs_pa_layer_attr new_pwr_info;
1093
1094         if (scale_up) {
1095                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1096                        sizeof(struct ufs_pa_layer_attr));
1097         } else {
1098                 memcpy(&new_pwr_info, &hba->pwr_info,
1099                        sizeof(struct ufs_pa_layer_attr));
1100
1101                 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1102                     || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1103                         /* save the current power mode */
1104                         memcpy(&hba->clk_scaling.saved_pwr_info.info,
1105                                 &hba->pwr_info,
1106                                 sizeof(struct ufs_pa_layer_attr));
1107
1108                         /* scale down gear */
1109                         new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1110                         new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1111                 }
1112         }
1113
1114         /* check if the power mode needs to be changed */
1115         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1116         if (ret)
1117                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1118                         __func__, ret,
1119                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1120                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1121
1122         return ret;
1123 }
1124
1125 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1126 {
1127         #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
1128         int ret = 0;
1129         /*
1130          * make sure that there are no outstanding requests when
1131          * clock scaling is in progress
1132          */
1133         ufshcd_scsi_block_requests(hba);
1134         down_write(&hba->clk_scaling_lock);
1135         if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1136                 ret = -EBUSY;
1137                 up_write(&hba->clk_scaling_lock);
1138                 ufshcd_scsi_unblock_requests(hba);
1139         }
1140
1141         return ret;
1142 }
1143
1144 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1145 {
1146         up_write(&hba->clk_scaling_lock);
1147         ufshcd_scsi_unblock_requests(hba);
1148 }
1149
1150 /**
1151  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1152  * @hba: per adapter instance
1153  * @scale_up: True for scaling up and false for scaling down
1154  *
1155  * Returns 0 for success,
1156  * Returns -EBUSY if scaling can't happen at this time
1157  * Returns non-zero for any other errors
1158  */
1159 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1160 {
1161         int ret = 0;
1162
1163         /* let's not get into low power until clock scaling is completed */
1164         ufshcd_hold(hba, false);
1165
1166         ret = ufshcd_clock_scaling_prepare(hba);
1167         if (ret)
1168                 goto out;
1169
1170         /* scale down the gear before scaling down clocks */
1171         if (!scale_up) {
1172                 ret = ufshcd_scale_gear(hba, false);
1173                 if (ret)
1174                         goto out_unprepare;
1175         }
1176
1177         ret = ufshcd_scale_clks(hba, scale_up);
1178         if (ret) {
1179                 if (!scale_up)
1180                         ufshcd_scale_gear(hba, true);
1181                 goto out_unprepare;
1182         }
1183
1184         /* scale up the gear after scaling up clocks */
1185         if (scale_up) {
1186                 ret = ufshcd_scale_gear(hba, true);
1187                 if (ret) {
1188                         ufshcd_scale_clks(hba, false);
1189                         goto out_unprepare;
1190                 }
1191         }
1192
1193         /* Enable Write Booster if we have scaled up else disable it */
1194         up_write(&hba->clk_scaling_lock);
1195         ufshcd_wb_ctrl(hba, scale_up);
1196         down_write(&hba->clk_scaling_lock);
1197
1198 out_unprepare:
1199         ufshcd_clock_scaling_unprepare(hba);
1200 out:
1201         ufshcd_release(hba);
1202         return ret;
1203 }
1204
1205 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1206 {
1207         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1208                                            clk_scaling.suspend_work);
1209         unsigned long irq_flags;
1210
1211         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1212         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1213                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1214                 return;
1215         }
1216         hba->clk_scaling.is_suspended = true;
1217         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1218
1219         __ufshcd_suspend_clkscaling(hba);
1220 }
1221
1222 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1223 {
1224         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1225                                            clk_scaling.resume_work);
1226         unsigned long irq_flags;
1227
1228         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1229         if (!hba->clk_scaling.is_suspended) {
1230                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1231                 return;
1232         }
1233         hba->clk_scaling.is_suspended = false;
1234         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1235
1236         devfreq_resume_device(hba->devfreq);
1237 }
1238
1239 static int ufshcd_devfreq_target(struct device *dev,
1240                                 unsigned long *freq, u32 flags)
1241 {
1242         int ret = 0;
1243         struct ufs_hba *hba = dev_get_drvdata(dev);
1244         ktime_t start;
1245         bool scale_up, sched_clk_scaling_suspend_work = false;
1246         struct list_head *clk_list = &hba->clk_list_head;
1247         struct ufs_clk_info *clki;
1248         unsigned long irq_flags;
1249
1250         if (!ufshcd_is_clkscaling_supported(hba))
1251                 return -EINVAL;
1252
1253         clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1254         /* Override with the closest supported frequency */
1255         *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1256         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1257         if (ufshcd_eh_in_progress(hba)) {
1258                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1259                 return 0;
1260         }
1261
1262         if (!hba->clk_scaling.active_reqs)
1263                 sched_clk_scaling_suspend_work = true;
1264
1265         if (list_empty(clk_list)) {
1266                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1267                 goto out;
1268         }
1269
1270         /* Decide based on the rounded-off frequency and update */
1271         scale_up = (*freq == clki->max_freq) ? true : false;
1272         if (!scale_up)
1273                 *freq = clki->min_freq;
1274         /* Update the frequency */
1275         if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1276                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1277                 ret = 0;
1278                 goto out; /* no state change required */
1279         }
1280         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1281
1282         start = ktime_get();
1283         ret = ufshcd_devfreq_scale(hba, scale_up);
1284
1285         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1286                 (scale_up ? "up" : "down"),
1287                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1288
1289 out:
1290         if (sched_clk_scaling_suspend_work)
1291                 queue_work(hba->clk_scaling.workq,
1292                            &hba->clk_scaling.suspend_work);
1293
1294         return ret;
1295 }
1296
1297 static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1298 {
1299         int *busy = priv;
1300
1301         WARN_ON_ONCE(reserved);
1302         (*busy)++;
1303         return false;
1304 }
1305
1306 /* Whether or not any tag is in use by a request that is in progress. */
1307 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1308 {
1309         struct request_queue *q = hba->cmd_queue;
1310         int busy = 0;
1311
1312         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1313         return busy;
1314 }
1315
1316 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1317                 struct devfreq_dev_status *stat)
1318 {
1319         struct ufs_hba *hba = dev_get_drvdata(dev);
1320         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1321         unsigned long flags;
1322         struct list_head *clk_list = &hba->clk_list_head;
1323         struct ufs_clk_info *clki;
1324
1325         if (!ufshcd_is_clkscaling_supported(hba))
1326                 return -EINVAL;
1327
1328         memset(stat, 0, sizeof(*stat));
1329
1330         spin_lock_irqsave(hba->host->host_lock, flags);
1331         if (!scaling->window_start_t)
1332                 goto start_window;
1333
1334         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1335         /*
1336          * If the current frequency is 0, the ondemand governor assumes that
1337          * no initial frequency has been set and always requests the maximum
1338          * frequency.
1339          */
1340         stat->current_frequency = clki->curr_freq;
1341         if (scaling->is_busy_started)
1342                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1343                                         scaling->busy_start_t));
1344
1345         stat->total_time = jiffies_to_usecs((long)jiffies -
1346                                 (long)scaling->window_start_t);
1347         stat->busy_time = scaling->tot_busy_t;
1348 start_window:
1349         scaling->window_start_t = jiffies;
1350         scaling->tot_busy_t = 0;
1351
1352         if (hba->outstanding_reqs) {
1353                 scaling->busy_start_t = ktime_get();
1354                 scaling->is_busy_started = true;
1355         } else {
1356                 scaling->busy_start_t = 0;
1357                 scaling->is_busy_started = false;
1358         }
1359         spin_unlock_irqrestore(hba->host->host_lock, flags);
1360         return 0;
1361 }
1362
1363 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1364 {
1365         struct list_head *clk_list = &hba->clk_list_head;
1366         struct ufs_clk_info *clki;
1367         struct devfreq *devfreq;
1368         int ret;
1369
1370         /* Skip devfreq if we don't have any clocks in the list */
1371         if (list_empty(clk_list))
1372                 return 0;
1373
1374         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1375         dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1376         dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1377
1378         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1379                                          &hba->vps->ondemand_data);
1380         devfreq = devfreq_add_device(hba->dev,
1381                         &hba->vps->devfreq_profile,
1382                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1383                         &hba->vps->ondemand_data);
1384         if (IS_ERR(devfreq)) {
1385                 ret = PTR_ERR(devfreq);
1386                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1387
1388                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1389                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1390                 return ret;
1391         }
1392
1393         hba->devfreq = devfreq;
1394
1395         return 0;
1396 }
1397
1398 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1399 {
1400         struct list_head *clk_list = &hba->clk_list_head;
1401         struct ufs_clk_info *clki;
1402
1403         if (!hba->devfreq)
1404                 return;
1405
1406         devfreq_remove_device(hba->devfreq);
1407         hba->devfreq = NULL;
1408
1409         clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1410         dev_pm_opp_remove(hba->dev, clki->min_freq);
1411         dev_pm_opp_remove(hba->dev, clki->max_freq);
1412 }
1413
1414 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1415 {
1416         unsigned long flags;
1417
1418         devfreq_suspend_device(hba->devfreq);
1419         spin_lock_irqsave(hba->host->host_lock, flags);
1420         hba->clk_scaling.window_start_t = 0;
1421         spin_unlock_irqrestore(hba->host->host_lock, flags);
1422 }
1423
1424 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1425 {
1426         unsigned long flags;
1427         bool suspend = false;
1428
1429         if (!ufshcd_is_clkscaling_supported(hba))
1430                 return;
1431
1432         spin_lock_irqsave(hba->host->host_lock, flags);
1433         if (!hba->clk_scaling.is_suspended) {
1434                 suspend = true;
1435                 hba->clk_scaling.is_suspended = true;
1436         }
1437         spin_unlock_irqrestore(hba->host->host_lock, flags);
1438
1439         if (suspend)
1440                 __ufshcd_suspend_clkscaling(hba);
1441 }
1442
1443 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1444 {
1445         unsigned long flags;
1446         bool resume = false;
1447
1448         if (!ufshcd_is_clkscaling_supported(hba))
1449                 return;
1450
1451         spin_lock_irqsave(hba->host->host_lock, flags);
1452         if (hba->clk_scaling.is_suspended) {
1453                 resume = true;
1454                 hba->clk_scaling.is_suspended = false;
1455         }
1456         spin_unlock_irqrestore(hba->host->host_lock, flags);
1457
1458         if (resume)
1459                 devfreq_resume_device(hba->devfreq);
1460 }
1461
1462 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1463                 struct device_attribute *attr, char *buf)
1464 {
1465         struct ufs_hba *hba = dev_get_drvdata(dev);
1466
1467         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1468 }
1469
1470 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1471                 struct device_attribute *attr, const char *buf, size_t count)
1472 {
1473         struct ufs_hba *hba = dev_get_drvdata(dev);
1474         u32 value;
1475         int err;
1476
1477         if (kstrtou32(buf, 0, &value))
1478                 return -EINVAL;
1479
1480         value = !!value;
1481         if (value == hba->clk_scaling.is_allowed)
1482                 goto out;
1483
1484         pm_runtime_get_sync(hba->dev);
1485         ufshcd_hold(hba, false);
1486
1487         cancel_work_sync(&hba->clk_scaling.suspend_work);
1488         cancel_work_sync(&hba->clk_scaling.resume_work);
1489
1490         hba->clk_scaling.is_allowed = value;
1491
1492         if (value) {
1493                 ufshcd_resume_clkscaling(hba);
1494         } else {
1495                 ufshcd_suspend_clkscaling(hba);
1496                 err = ufshcd_devfreq_scale(hba, true);
1497                 if (err)
1498                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1499                                         __func__, err);
1500         }
1501
1502         ufshcd_release(hba);
1503         pm_runtime_put_sync(hba->dev);
1504 out:
1505         return count;
1506 }
1507
1508 static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1509 {
1510         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1511         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1512         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1513         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1514         hba->clk_scaling.enable_attr.attr.mode = 0644;
1515         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1516                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1517 }
1518
1519 static void ufshcd_ungate_work(struct work_struct *work)
1520 {
1521         int ret;
1522         unsigned long flags;
1523         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1524                         clk_gating.ungate_work);
1525
1526         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1527
1528         spin_lock_irqsave(hba->host->host_lock, flags);
1529         if (hba->clk_gating.state == CLKS_ON) {
1530                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1531                 goto unblock_reqs;
1532         }
1533
1534         spin_unlock_irqrestore(hba->host->host_lock, flags);
1535         ufshcd_setup_clocks(hba, true);
1536
1537         ufshcd_enable_irq(hba);
1538
1539         /* Exit from hibern8 */
1540         if (ufshcd_can_hibern8_during_gating(hba)) {
1541                 /* Prevent gating in this path */
1542                 hba->clk_gating.is_suspended = true;
1543                 if (ufshcd_is_link_hibern8(hba)) {
1544                         ret = ufshcd_uic_hibern8_exit(hba);
1545                         if (ret)
1546                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1547                                         __func__, ret);
1548                         else
1549                                 ufshcd_set_link_active(hba);
1550                 }
1551                 hba->clk_gating.is_suspended = false;
1552         }
1553 unblock_reqs:
1554         ufshcd_scsi_unblock_requests(hba);
1555 }
1556
1557 /**
1558  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1559  * Also, exit from hibern8 mode and set the link as active.
1560  * @hba: per adapter instance
1561  * @async: This indicates whether caller should ungate clocks asynchronously.
1562  */
1563 int ufshcd_hold(struct ufs_hba *hba, bool async)
1564 {
1565         int rc = 0;
1566         unsigned long flags;
1567
1568         if (!ufshcd_is_clkgating_allowed(hba))
1569                 goto out;
1570         spin_lock_irqsave(hba->host->host_lock, flags);
1571         hba->clk_gating.active_reqs++;
1572
1573         if (ufshcd_eh_in_progress(hba)) {
1574                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1575                 return 0;
1576         }
1577
1578 start:
1579         switch (hba->clk_gating.state) {
1580         case CLKS_ON:
1581                 /*
1582                  * Wait for the ungate work to complete if in progress.
1583                  * Though the clocks may be in the ON state, the link could
1584                  * still be in hibern8 state if hibern8 is allowed
1585                  * during clock gating.
1586                  * Make sure we exit hibern8 state in addition to the
1587                  * clocks being ON.
1588                  */
1589                 if (ufshcd_can_hibern8_during_gating(hba) &&
1590                     ufshcd_is_link_hibern8(hba)) {
1591                         if (async) {
1592                                 rc = -EAGAIN;
1593                                 hba->clk_gating.active_reqs--;
1594                                 break;
1595                         }
1596                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1597                         flush_work(&hba->clk_gating.ungate_work);
1598                         spin_lock_irqsave(hba->host->host_lock, flags);
1599                         goto start;
1600                 }
1601                 break;
1602         case REQ_CLKS_OFF:
1603                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1604                         hba->clk_gating.state = CLKS_ON;
1605                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1606                                                 hba->clk_gating.state);
1607                         break;
1608                 }
1609                 /*
1610                  * If we are here, it means gating work is either done or
1611                  * currently running. Hence, fall through to cancel gating
1612                  * work and to enable clocks.
1613                  */
1614                 /* fallthrough */
1615         case CLKS_OFF:
1616                 ufshcd_scsi_block_requests(hba);
1617                 hba->clk_gating.state = REQ_CLKS_ON;
1618                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1619                                         hba->clk_gating.state);
1620                 queue_work(hba->clk_gating.clk_gating_workq,
1621                            &hba->clk_gating.ungate_work);
1622                 /*
1623                  * fall through to check if we should wait for this
1624                  * work to be done or not.
1625                  */
1626                 /* fallthrough */
1627         case REQ_CLKS_ON:
1628                 if (async) {
1629                         rc = -EAGAIN;
1630                         hba->clk_gating.active_reqs--;
1631                         break;
1632                 }
1633
1634                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1635                 flush_work(&hba->clk_gating.ungate_work);
1636                 /* Make sure state is CLKS_ON before returning */
1637                 spin_lock_irqsave(hba->host->host_lock, flags);
1638                 goto start;
1639         default:
1640                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1641                                 __func__, hba->clk_gating.state);
1642                 break;
1643         }
1644         spin_unlock_irqrestore(hba->host->host_lock, flags);
1645 out:
1646         return rc;
1647 }
1648 EXPORT_SYMBOL_GPL(ufshcd_hold);
1649
1650 static void ufshcd_gate_work(struct work_struct *work)
1651 {
1652         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1653                         clk_gating.gate_work.work);
1654         unsigned long flags;
1655
1656         spin_lock_irqsave(hba->host->host_lock, flags);
1657         /*
1658          * If a request to cancel this work has come in, the gating state
1659          * will already be marked as REQ_CLKS_ON. In that case save time by
1660          * skipping the gating work and exiting after changing the clock
1661          * state to CLKS_ON.
1662          */
1663         if (hba->clk_gating.is_suspended ||
1664                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1665                 hba->clk_gating.state = CLKS_ON;
1666                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1667                                         hba->clk_gating.state);
1668                 goto rel_lock;
1669         }
1670
1671         if (hba->clk_gating.active_reqs
1672                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1673                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1674                 || hba->active_uic_cmd || hba->uic_async_done)
1675                 goto rel_lock;
1676
1677         spin_unlock_irqrestore(hba->host->host_lock, flags);
1678
1679         /* put the link into hibern8 mode before turning off clocks */
1680         if (ufshcd_can_hibern8_during_gating(hba)) {
1681                 if (ufshcd_uic_hibern8_enter(hba)) {
1682                         hba->clk_gating.state = CLKS_ON;
1683                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1684                                                 hba->clk_gating.state);
1685                         goto out;
1686                 }
1687                 ufshcd_set_link_hibern8(hba);
1688         }
1689
1690         ufshcd_disable_irq(hba);
1691
1692         if (!ufshcd_is_link_active(hba))
1693                 ufshcd_setup_clocks(hba, false);
1694         else
1695                 /* If link is active, device ref_clk can't be switched off */
1696                 __ufshcd_setup_clocks(hba, false, true);
1697
1698         /*
1699          * If a request to cancel this work has come in, the gating state
1700          * will already be marked as REQ_CLKS_ON. In that case keep the state
1701          * as REQ_CLKS_ON, which still implies that clocks are off and a
1702          * request to turn them on is pending. This keeps the state machine
1703          * intact and ultimately prevents the cancel work from being done
1704          * multiple times when new requests arrive before the current cancel
1705          * work is finished.
1706          */
1707         spin_lock_irqsave(hba->host->host_lock, flags);
1708         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1709                 hba->clk_gating.state = CLKS_OFF;
1710                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1711                                         hba->clk_gating.state);
1712         }
1713 rel_lock:
1714         spin_unlock_irqrestore(hba->host->host_lock, flags);
1715 out:
1716         return;
1717 }
1718
1719 /* host lock must be held before calling this variant */
1720 static void __ufshcd_release(struct ufs_hba *hba)
1721 {
1722         if (!ufshcd_is_clkgating_allowed(hba))
1723                 return;
1724
1725         hba->clk_gating.active_reqs--;
1726
1727         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1728                 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1729                 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1730                 || hba->active_uic_cmd || hba->uic_async_done
1731                 || ufshcd_eh_in_progress(hba))
1732                 return;
1733
1734         hba->clk_gating.state = REQ_CLKS_OFF;
1735         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1736         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1737                            &hba->clk_gating.gate_work,
1738                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1739 }
1740
1741 void ufshcd_release(struct ufs_hba *hba)
1742 {
1743         unsigned long flags;
1744
1745         spin_lock_irqsave(hba->host->host_lock, flags);
1746         __ufshcd_release(hba);
1747         spin_unlock_irqrestore(hba->host->host_lock, flags);
1748 }
1749 EXPORT_SYMBOL_GPL(ufshcd_release);
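
/*
 * Illustrative sketch, not part of the driver: a typical caller brackets
 * controller accesses with ufshcd_hold()/ufshcd_release() so that
 * ufshcd_gate_work() cannot gate the clocks underneath it. The helper name
 * is hypothetical; only the hold/release pairing is the point here.
 */
static int __maybe_unused example_read_hci_version(struct ufs_hba *hba,
                                                   u32 *ver)
{
        int err;

        /* Synchronous hold: ungates clocks and exits hibern8 if needed */
        err = ufshcd_hold(hba, false);
        if (err)
                return err;

        *ver = ufshcd_readl(hba, REG_UFS_VERSION);

        /* Drop the reference; gating re-arms after clkgate_delay_ms */
        ufshcd_release(hba);
        return 0;
}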
1750
1751 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1752                 struct device_attribute *attr, char *buf)
1753 {
1754         struct ufs_hba *hba = dev_get_drvdata(dev);
1755
1756         return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1757 }
1758
1759 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1760                 struct device_attribute *attr, const char *buf, size_t count)
1761 {
1762         struct ufs_hba *hba = dev_get_drvdata(dev);
1763         unsigned long flags, value;
1764
1765         if (kstrtoul(buf, 0, &value))
1766                 return -EINVAL;
1767
1768         spin_lock_irqsave(hba->host->host_lock, flags);
1769         hba->clk_gating.delay_ms = value;
1770         spin_unlock_irqrestore(hba->host->host_lock, flags);
1771         return count;
1772 }
1773
1774 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1775                 struct device_attribute *attr, char *buf)
1776 {
1777         struct ufs_hba *hba = dev_get_drvdata(dev);
1778
1779         return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1780 }
1781
1782 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1783                 struct device_attribute *attr, const char *buf, size_t count)
1784 {
1785         struct ufs_hba *hba = dev_get_drvdata(dev);
1786         unsigned long flags;
1787         u32 value;
1788
1789         if (kstrtou32(buf, 0, &value))
1790                 return -EINVAL;
1791
1792         value = !!value;
1793         if (value == hba->clk_gating.is_enabled)
1794                 goto out;
1795
1796         if (value) {
1797                 ufshcd_release(hba);
1798         } else {
1799                 spin_lock_irqsave(hba->host->host_lock, flags);
1800                 hba->clk_gating.active_reqs++;
1801                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1802         }
1803
1804         hba->clk_gating.is_enabled = value;
1805 out:
1806         return count;
1807 }
1808
1809 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1810 {
1811         char wq_name[sizeof("ufs_clkscaling_00")];
1812
1813         if (!ufshcd_is_clkscaling_supported(hba))
1814                 return;
1815
1816         INIT_WORK(&hba->clk_scaling.suspend_work,
1817                   ufshcd_clk_scaling_suspend_work);
1818         INIT_WORK(&hba->clk_scaling.resume_work,
1819                   ufshcd_clk_scaling_resume_work);
1820
1821         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1822                  hba->host->host_no);
1823         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1824
1825         ufshcd_clkscaling_init_sysfs(hba);
1826 }
1827
1828 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1829 {
1830         if (!ufshcd_is_clkscaling_supported(hba))
1831                 return;
1832
1833         destroy_workqueue(hba->clk_scaling.workq);
1834         ufshcd_devfreq_remove(hba);
1835 }
1836
1837 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1838 {
1839         char wq_name[sizeof("ufs_clk_gating_00")];
1840
1841         if (!ufshcd_is_clkgating_allowed(hba))
1842                 return;
1843
1844         hba->clk_gating.delay_ms = 150;
1845         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1846         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1847
1848         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1849                  hba->host->host_no);
1850         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1851                                                            WQ_MEM_RECLAIM);
1852
1853         hba->clk_gating.is_enabled = true;
1854
1855         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1856         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1857         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1858         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1859         hba->clk_gating.delay_attr.attr.mode = 0644;
1860         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1861                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1862
1863         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1864         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1865         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1866         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1867         hba->clk_gating.enable_attr.attr.mode = 0644;
1868         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1869                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1870 }
1871
1872 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1873 {
1874         if (!ufshcd_is_clkgating_allowed(hba))
1875                 return;
1876         device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1877         device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1878         cancel_work_sync(&hba->clk_gating.ungate_work);
1879         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1880         destroy_workqueue(hba->clk_gating.clk_gating_workq);
1881 }
1882
1883 /* Must be called with host lock acquired */
1884 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1885 {
1886         bool queue_resume_work = false;
1887
1888         if (!ufshcd_is_clkscaling_supported(hba))
1889                 return;
1890
1891         if (!hba->clk_scaling.active_reqs++)
1892                 queue_resume_work = true;
1893
1894         if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1895                 return;
1896
1897         if (queue_resume_work)
1898                 queue_work(hba->clk_scaling.workq,
1899                            &hba->clk_scaling.resume_work);
1900
1901         if (!hba->clk_scaling.window_start_t) {
1902                 hba->clk_scaling.window_start_t = jiffies;
1903                 hba->clk_scaling.tot_busy_t = 0;
1904                 hba->clk_scaling.is_busy_started = false;
1905         }
1906
1907         if (!hba->clk_scaling.is_busy_started) {
1908                 hba->clk_scaling.busy_start_t = ktime_get();
1909                 hba->clk_scaling.is_busy_started = true;
1910         }
1911 }
1912
1913 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1914 {
1915         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1916
1917         if (!ufshcd_is_clkscaling_supported(hba))
1918                 return;
1919
1920         if (!hba->outstanding_reqs && scaling->is_busy_started) {
1921                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1922                                         scaling->busy_start_t));
1923                 scaling->busy_start_t = 0;
1924                 scaling->is_busy_started = false;
1925         }
1926 }
1927 /**
1928  * ufshcd_send_command - Send SCSI or device management commands
1929  * @hba: per adapter instance
1930  * @task_tag: Task tag of the command
1931  */
1932 static inline
1933 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1934 {
1935         hba->lrb[task_tag].issue_time_stamp = ktime_get();
1936         hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1937         ufshcd_add_command_trace(hba, task_tag, "send");
1938         ufshcd_clk_scaling_start_busy(hba);
1939         __set_bit(task_tag, &hba->outstanding_reqs);
1940         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1941         /* Make sure that doorbell is committed immediately */
1942         wmb();
1943 }
1944
1945 /**
1946  * ufshcd_copy_sense_data - Copy sense data in case of check condition
1947  * @lrbp: pointer to local reference block
1948  */
1949 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1950 {
1951         int len;
1952         if (lrbp->sense_buffer &&
1953             ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1954                 int len_to_copy;
1955
1956                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1957                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
1958
1959                 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1960                        len_to_copy);
1961         }
1962 }
1963
1964 /**
1965  * ufshcd_copy_query_response() - Copy the Query Response and the data
1966  * descriptor
1967  * @hba: per adapter instance
1968  * @lrbp: pointer to local reference block
1969  */
1970 static
1971 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1972 {
1973         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1974
1975         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1976
1977         /* Get the descriptor */
1978         if (hba->dev_cmd.query.descriptor &&
1979             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1980                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1981                                 GENERAL_UPIU_REQUEST_SIZE;
1982                 u16 resp_len;
1983                 u16 buf_len;
1984
1985                 /* data segment length */
1986                 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1987                                                 MASK_QUERY_DATA_SEG_LEN;
1988                 buf_len = be16_to_cpu(
1989                                 hba->dev_cmd.query.request.upiu_req.length);
1990                 if (likely(buf_len >= resp_len)) {
1991                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1992                 } else {
1993                         dev_warn(hba->dev,
1994                                  "%s: rsp size %d is bigger than buffer size %d",
1995                                  __func__, resp_len, buf_len);
1996                         return -EINVAL;
1997                 }
1998         }
1999
2000         return 0;
2001 }
2002
2003 /**
2004  * ufshcd_hba_capabilities - Read controller capabilities
2005  * @hba: per adapter instance
2006  */
2007 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2008 {
2009         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2010
2011         /* nutrs and nutmrs are 0 based values */
2012         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2013         hba->nutmrs =
2014         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2015 }
2016
2017 /**
2018  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2019  *                            to accept UIC commands
2020  * @hba: per adapter instance
2021  * Return true on success, else false
2022  */
2023 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2024 {
2025         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2026                 return true;
2027         else
2028                 return false;
2029 }
2030
2031 /**
2032  * ufshcd_get_upmcrs - Get the power mode change request status
2033  * @hba: Pointer to adapter instance
2034  *
2035  * This function gets the UPMCRS field of HCS register
2036  * Returns value of UPMCRS field
2037  */
2038 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2039 {
2040         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2041 }
2042
2043 /**
2044  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2045  * @hba: per adapter instance
2046  * @uic_cmd: UIC command
2047  *
2048  * Mutex must be held.
2049  */
2050 static inline void
2051 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2052 {
2053         WARN_ON(hba->active_uic_cmd);
2054
2055         hba->active_uic_cmd = uic_cmd;
2056
2057         /* Write Args */
2058         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2059         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2060         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2061
2062         /* Write UIC Cmd */
2063         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2064                       REG_UIC_COMMAND);
2065 }
2066
2067 /**
2068  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2069  * @hba: per adapter instance
2070  * @uic_cmd: UIC command
2071  *
2072  * Must be called with mutex held.
2073  * Returns 0 only on success.
2074  */
2075 static int
2076 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2077 {
2078         int ret;
2079         unsigned long flags;
2080
2081         if (wait_for_completion_timeout(&uic_cmd->done,
2082                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2083                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2084         else
2085                 ret = -ETIMEDOUT;
2086
2087         spin_lock_irqsave(hba->host->host_lock, flags);
2088         hba->active_uic_cmd = NULL;
2089         spin_unlock_irqrestore(hba->host->host_lock, flags);
2090
2091         return ret;
2092 }
2093
2094 /**
2095  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2096  * @hba: per adapter instance
2097  * @uic_cmd: UIC command
2098  * @completion: initialize the completion only if this is set to true
2099  *
2100  * Identical to ufshcd_send_uic_cmd() except that it does not acquire the
2101  * mutex itself; must be called with the mutex held and host_lock locked.
2102  * Returns 0 only on success.
2103  */
2104 static int
2105 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2106                       bool completion)
2107 {
2108         if (!ufshcd_ready_for_uic_cmd(hba)) {
2109                 dev_err(hba->dev,
2110                         "Controller not ready to accept UIC commands\n");
2111                 return -EIO;
2112         }
2113
2114         if (completion)
2115                 init_completion(&uic_cmd->done);
2116
2117         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2118
2119         return 0;
2120 }
2121
2122 /**
2123  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2124  * @hba: per adapter instance
2125  * @uic_cmd: UIC command
2126  *
2127  * Returns 0 only on success.
2128  */
2129 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2130 {
2131         int ret;
2132         unsigned long flags;
2133
2134         ufshcd_hold(hba, false);
2135         mutex_lock(&hba->uic_cmd_mutex);
2136         ufshcd_add_delay_before_dme_cmd(hba);
2137
2138         spin_lock_irqsave(hba->host->host_lock, flags);
2139         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2140         spin_unlock_irqrestore(hba->host->host_lock, flags);
2141         if (!ret)
2142                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2143
2144         mutex_unlock(&hba->uic_cmd_mutex);
2145
2146         ufshcd_release(hba);
2147         return ret;
2148 }
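
/*
 * Illustrative sketch, not part of the driver: composing a UIC command and
 * sending it through ufshcd_send_uic_cmd(). This mirrors what the DME helper
 * routines in this file do; the helper name is hypothetical and error
 * handling is kept to a minimum.
 */
static int __maybe_unused example_dme_peek(struct ufs_hba *hba, u32 attr_sel,
                                           u32 *mib_val)
{
        struct uic_command uic_cmd = {
                .command = UIC_CMD_DME_GET,
                .argument1 = attr_sel,  /* e.g. UIC_ARG_MIB(attr) */
        };
        int ret;

        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (!ret && mib_val)
                *mib_val = uic_cmd.argument3;   /* DME_GET result value */
        return ret;
}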
2149
2150 /**
2151  * ufshcd_map_sg - Map scatter-gather list to prdt
2152  * @hba: per adapter instance
2153  * @lrbp: pointer to local reference block
2154  *
2155  * Returns 0 in case of success, non-zero value in case of failure
2156  */
2157 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2158 {
2159         struct ufshcd_sg_entry *prd_table;
2160         struct scatterlist *sg;
2161         struct scsi_cmnd *cmd;
2162         int sg_segments;
2163         int i;
2164
2165         cmd = lrbp->cmd;
2166         sg_segments = scsi_dma_map(cmd);
2167         if (sg_segments < 0)
2168                 return sg_segments;
2169
2170         if (sg_segments) {
2171
2172                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2173                         lrbp->utr_descriptor_ptr->prd_table_length =
2174                                 cpu_to_le16((sg_segments *
2175                                         sizeof(struct ufshcd_sg_entry)));
2176                 else
2177                         lrbp->utr_descriptor_ptr->prd_table_length =
2178                                 cpu_to_le16((u16) (sg_segments));
2179
2180                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2181
2182                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2183                         prd_table[i].size  =
2184                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2185                         prd_table[i].base_addr =
2186                                 cpu_to_le32(lower_32_bits(sg->dma_address));
2187                         prd_table[i].upper_addr =
2188                                 cpu_to_le32(upper_32_bits(sg->dma_address));
2189                         prd_table[i].reserved = 0;
2190                 }
2191         } else {
2192                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2193         }
2194
2195         return 0;
2196 }
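
/*
 * Illustrative sketch, not part of the driver: how a single PRDT entry is
 * encoded by ufshcd_map_sg() above. The byte-count field is zero based, so a
 * 4 KiB segment is stored as 4095, and the 64-bit DMA address is split into
 * two little-endian 32-bit halves. The helper name is hypothetical.
 */
static void __maybe_unused example_fill_prd_entry(struct ufshcd_sg_entry *prd,
                                                  dma_addr_t addr, u32 len)
{
        prd->size = cpu_to_le32(len - 1);       /* zero-based byte count */
        prd->base_addr = cpu_to_le32(lower_32_bits(addr));
        prd->upper_addr = cpu_to_le32(upper_32_bits(addr));
        prd->reserved = 0;
}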
2197
2198 /**
2199  * ufshcd_enable_intr - enable interrupts
2200  * @hba: per adapter instance
2201  * @intrs: interrupt bits
2202  */
2203 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2204 {
2205         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2206
2207         if (hba->ufs_version == UFSHCI_VERSION_10) {
2208                 u32 rw;
2209                 rw = set & INTERRUPT_MASK_RW_VER_10;
2210                 set = rw | ((set ^ intrs) & intrs);
2211         } else {
2212                 set |= intrs;
2213         }
2214
2215         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2216 }
2217
2218 /**
2219  * ufshcd_disable_intr - disable interrupts
2220  * @hba: per adapter instance
2221  * @intrs: interrupt bits
2222  */
2223 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2224 {
2225         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2226
2227         if (hba->ufs_version == UFSHCI_VERSION_10) {
2228                 u32 rw;
2229                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2230                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2231                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2232
2233         } else {
2234                 set &= ~intrs;
2235         }
2236
2237         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2238 }
2239
2240 /**
2241  * ufshcd_prepare_req_desc_hdr() - Fills the request header
2242  * descriptor according to the request
2243  * @lrbp: pointer to local reference block
2244  * @upiu_flags: flags required in the header
2245  * @cmd_dir: request's data direction
2246  */
2247 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2248                         u32 *upiu_flags, enum dma_data_direction cmd_dir)
2249 {
2250         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2251         u32 data_direction;
2252         u32 dword_0;
2253
2254         if (cmd_dir == DMA_FROM_DEVICE) {
2255                 data_direction = UTP_DEVICE_TO_HOST;
2256                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2257         } else if (cmd_dir == DMA_TO_DEVICE) {
2258                 data_direction = UTP_HOST_TO_DEVICE;
2259                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2260         } else {
2261                 data_direction = UTP_NO_DATA_TRANSFER;
2262                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2263         }
2264
2265         dword_0 = data_direction | (lrbp->command_type
2266                                 << UPIU_COMMAND_TYPE_OFFSET);
2267         if (lrbp->intr_cmd)
2268                 dword_0 |= UTP_REQ_DESC_INT_CMD;
2269
2270         /* Transfer request descriptor header fields */
2271         req_desc->header.dword_0 = cpu_to_le32(dword_0);
2272         /* dword_1 is reserved, hence it is set to 0 */
2273         req_desc->header.dword_1 = 0;
2274         /*
2275          * assigning invalid value for command status. Controller
2276          * updates OCS on command completion, with the command
2277          * status
2278          */
2279         req_desc->header.dword_2 =
2280                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2281         /* dword_3 is reserved, hence it is set to 0 */
2282         req_desc->header.dword_3 = 0;
2283
2284         req_desc->prd_table_length = 0;
2285 }
2286
2287 /**
2288  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
2289  * for SCSI commands
2290  * @lrbp: local reference block pointer
2291  * @upiu_flags: flags
2292  */
2293 static
2294 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2295 {
2296         struct scsi_cmnd *cmd = lrbp->cmd;
2297         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2298         unsigned short cdb_len;
2299
2300         /* command descriptor fields */
2301         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2302                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
2303                                 lrbp->lun, lrbp->task_tag);
2304         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2305                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2306
2307         /* Total EHS length and Data segment length will be zero */
2308         ucd_req_ptr->header.dword_2 = 0;
2309
2310         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2311
2312         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2313         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2314         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2315
2316         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2317 }
2318
2319 /**
2320  * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
2321  * for query requests
2322  * @hba: UFS hba
2323  * @lrbp: local reference block pointer
2324  * @upiu_flags: flags
2325  */
2326 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2327                                 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2328 {
2329         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2330         struct ufs_query *query = &hba->dev_cmd.query;
2331         u16 len = be16_to_cpu(query->request.upiu_req.length);
2332
2333         /* Query request header */
2334         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2335                         UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2336                         lrbp->lun, lrbp->task_tag);
2337         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2338                         0, query->request.query_func, 0, 0);
2339
2340         /* Data segment length is only needed for WRITE_DESC */
2341         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2342                 ucd_req_ptr->header.dword_2 =
2343                         UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2344         else
2345                 ucd_req_ptr->header.dword_2 = 0;
2346
2347         /* Copy the Query Request buffer as is */
2348         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2349                         QUERY_OSF_SIZE);
2350
2351         /* Copy the Descriptor */
2352         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2353                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2354
2355         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2356 }
2357
2358 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2359 {
2360         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2361
2362         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2363
2364         /* command descriptor fields */
2365         ucd_req_ptr->header.dword_0 =
2366                 UPIU_HEADER_DWORD(
2367                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2368         /* clear rest of the fields of basic header */
2369         ucd_req_ptr->header.dword_1 = 0;
2370         ucd_req_ptr->header.dword_2 = 0;
2371
2372         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2373 }
2374
2375 /**
2376  * ufshcd_comp_devman_upiu - Compose a UFS Protocol Information Unit (UPIU)
2377  *                           for device management purposes
2378  * @hba: per adapter instance
2379  * @lrbp: pointer to local reference block
2380  */
2381 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2382 {
2383         u32 upiu_flags;
2384         int ret = 0;
2385
2386         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2387             (hba->ufs_version == UFSHCI_VERSION_11))
2388                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2389         else
2390                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2391
2392         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2393         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2394                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2395         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2396                 ufshcd_prepare_utp_nop_upiu(lrbp);
2397         else
2398                 ret = -EINVAL;
2399
2400         return ret;
2401 }
2402
2403 /**
2404  * ufshcd_comp_scsi_upiu - Compose a UFS Protocol Information Unit (UPIU)
2405  *                         for SCSI purposes
2406  * @hba: per adapter instance
2407  * @lrbp: pointer to local reference block
2408  */
2409 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2410 {
2411         u32 upiu_flags;
2412         int ret = 0;
2413
2414         if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2415             (hba->ufs_version == UFSHCI_VERSION_11))
2416                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2417         else
2418                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2419
2420         if (likely(lrbp->cmd)) {
2421                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2422                                                 lrbp->cmd->sc_data_direction);
2423                 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2424         } else {
2425                 ret = -EINVAL;
2426         }
2427
2428         return ret;
2429 }
2430
2431 /**
2432  * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2433  * @upiu_wlun_id: UPIU W-LUN id
2434  *
2435  * Returns SCSI W-LUN id
2436  */
2437 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2438 {
2439         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2440 }
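
/*
 * Worked example (assuming the usual header values UFS_UPIU_WLUN_ID == BIT(7)
 * and SCSI_W_LUN_BASE == 0xc100): the UFS device W-LUN 0xd0 maps to the SCSI
 * W-LUN 0xc150, i.e. (0xd0 & ~0x80) | 0xc100.
 */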
2441
2442 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2443 {
2444         struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
2445         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2446         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2447                 i * sizeof(struct utp_transfer_cmd_desc);
2448         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2449                                        response_upiu);
2450         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2451
2452         lrb->utr_descriptor_ptr = utrdlp + i;
2453         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2454                 i * sizeof(struct utp_transfer_req_desc);
2455         lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
2456         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2457         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2458         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2459         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2460         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2461 }
2462
2463 /**
2464  * ufshcd_queuecommand - main entry point for SCSI requests
2465  * @host: SCSI host pointer
2466  * @cmd: command from SCSI Midlayer
2467  *
2468  * Returns 0 for success, non-zero in case of failure
2469  */
2470 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2471 {
2472         struct ufshcd_lrb *lrbp;
2473         struct ufs_hba *hba;
2474         unsigned long flags;
2475         int tag;
2476         int err = 0;
2477
2478         hba = shost_priv(host);
2479
2480         tag = cmd->request->tag;
2481         if (!ufshcd_valid_tag(hba, tag)) {
2482                 dev_err(hba->dev,
2483                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2484                         __func__, tag, cmd, cmd->request);
2485                 BUG();
2486         }
2487
2488         if (!down_read_trylock(&hba->clk_scaling_lock))
2489                 return SCSI_MLQUEUE_HOST_BUSY;
2490
2491         spin_lock_irqsave(hba->host->host_lock, flags);
2492         switch (hba->ufshcd_state) {
2493         case UFSHCD_STATE_OPERATIONAL:
2494                 break;
2495         case UFSHCD_STATE_EH_SCHEDULED:
2496         case UFSHCD_STATE_RESET:
2497                 err = SCSI_MLQUEUE_HOST_BUSY;
2498                 goto out_unlock;
2499         case UFSHCD_STATE_ERROR:
2500                 set_host_byte(cmd, DID_ERROR);
2501                 cmd->scsi_done(cmd);
2502                 goto out_unlock;
2503         default:
2504                 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2505                                 __func__, hba->ufshcd_state);
2506                 set_host_byte(cmd, DID_BAD_TARGET);
2507                 cmd->scsi_done(cmd);
2508                 goto out_unlock;
2509         }
2510
2511         /* if error handling is in progress, don't issue commands */
2512         if (ufshcd_eh_in_progress(hba)) {
2513                 set_host_byte(cmd, DID_ERROR);
2514                 cmd->scsi_done(cmd);
2515                 goto out_unlock;
2516         }
2517         spin_unlock_irqrestore(hba->host->host_lock, flags);
2518
2519         hba->req_abort_count = 0;
2520
2521         err = ufshcd_hold(hba, true);
2522         if (err) {
2523                 err = SCSI_MLQUEUE_HOST_BUSY;
2524                 goto out;
2525         }
2526         WARN_ON(hba->clk_gating.state != CLKS_ON);
2527
2528         lrbp = &hba->lrb[tag];
2529
2530         WARN_ON(lrbp->cmd);
2531         lrbp->cmd = cmd;
2532         lrbp->sense_bufflen = UFS_SENSE_SIZE;
2533         lrbp->sense_buffer = cmd->sense_buffer;
2534         lrbp->task_tag = tag;
2535         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2536         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2537         lrbp->req_abort_skip = false;
2538
2539         ufshcd_comp_scsi_upiu(hba, lrbp);
2540
2541         err = ufshcd_map_sg(hba, lrbp);
2542         if (err) {
2543                 lrbp->cmd = NULL;
2544                 ufshcd_release(hba);
2545                 goto out;
2546         }
2547         /* Make sure descriptors are ready before ringing the doorbell */
2548         wmb();
2549
2550         /* issue command to the controller */
2551         spin_lock_irqsave(hba->host->host_lock, flags);
2552         ufshcd_vops_setup_xfer_req(hba, tag, true);
2553         ufshcd_send_command(hba, tag);
2554 out_unlock:
2555         spin_unlock_irqrestore(hba->host->host_lock, flags);
2556 out:
2557         up_read(&hba->clk_scaling_lock);
2558         return err;
2559 }
2560
2561 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2562                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2563 {
2564         lrbp->cmd = NULL;
2565         lrbp->sense_bufflen = 0;
2566         lrbp->sense_buffer = NULL;
2567         lrbp->task_tag = tag;
2568         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2569         lrbp->intr_cmd = true; /* No interrupt aggregation */
2570         hba->dev_cmd.type = cmd_type;
2571
2572         return ufshcd_comp_devman_upiu(hba, lrbp);
2573 }
2574
2575 static int
2576 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2577 {
2578         int err = 0;
2579         unsigned long flags;
2580         u32 mask = 1 << tag;
2581
2582         /* clear outstanding transaction before retry */
2583         spin_lock_irqsave(hba->host->host_lock, flags);
2584         ufshcd_utrl_clear(hba, tag);
2585         spin_unlock_irqrestore(hba->host->host_lock, flags);
2586
2587         /*
2588          * Wait for the h/w to clear the corresponding bit in the doorbell.
2589          * Max. wait is 1 sec.
2590          */
2591         err = ufshcd_wait_for_register(hba,
2592                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
2593                         mask, ~mask, 1000, 1000);
2594
2595         return err;
2596 }
2597
2598 static int
2599 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2600 {
2601         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2602
2603         /* Get the UPIU response */
2604         query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2605                                 UPIU_RSP_CODE_OFFSET;
2606         return query_res->response;
2607 }
2608
2609 /**
2610  * ufshcd_dev_cmd_completion() - handles device management command responses
2611  * @hba: per adapter instance
2612  * @lrbp: pointer to local reference block
2613  */
2614 static int
2615 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2616 {
2617         int resp;
2618         int err = 0;
2619
2620         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2621         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2622
2623         switch (resp) {
2624         case UPIU_TRANSACTION_NOP_IN:
2625                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2626                         err = -EINVAL;
2627                         dev_err(hba->dev, "%s: unexpected response %x\n",
2628                                         __func__, resp);
2629                 }
2630                 break;
2631         case UPIU_TRANSACTION_QUERY_RSP:
2632                 err = ufshcd_check_query_response(hba, lrbp);
2633                 if (!err)
2634                         err = ufshcd_copy_query_response(hba, lrbp);
2635                 break;
2636         case UPIU_TRANSACTION_REJECT_UPIU:
2637                 /* TODO: handle Reject UPIU Response */
2638                 err = -EPERM;
2639                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2640                                 __func__);
2641                 break;
2642         default:
2643                 err = -EINVAL;
2644                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2645                                 __func__, resp);
2646                 break;
2647         }
2648
2649         return err;
2650 }
2651
2652 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2653                 struct ufshcd_lrb *lrbp, int max_timeout)
2654 {
2655         int err = 0;
2656         unsigned long time_left;
2657         unsigned long flags;
2658
2659         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2660                         msecs_to_jiffies(max_timeout));
2661
2662         /* Make sure descriptors are ready before ringing the doorbell */
2663         wmb();
2664         spin_lock_irqsave(hba->host->host_lock, flags);
2665         hba->dev_cmd.complete = NULL;
2666         if (likely(time_left)) {
2667                 err = ufshcd_get_tr_ocs(lrbp);
2668                 if (!err)
2669                         err = ufshcd_dev_cmd_completion(hba, lrbp);
2670         }
2671         spin_unlock_irqrestore(hba->host->host_lock, flags);
2672
2673         if (!time_left) {
2674                 err = -ETIMEDOUT;
2675                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
2676                         __func__, lrbp->task_tag);
2677                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2678                         /* successfully cleared the command, retry if needed */
2679                         err = -EAGAIN;
2680                 /*
2681                  * In case of an error, after clearing the doorbell,
2682                  * we also need to clear the corresponding bit in the
2683                  * hba's outstanding_reqs field.
2684                  */
2685                 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2686         }
2687
2688         return err;
2689 }
2690
2691 /**
2692  * ufshcd_exec_dev_cmd - API for sending device management requests
2693  * @hba: UFS hba
2694  * @cmd_type: specifies the type (NOP, Query...)
2695  * @timeout: timeout in milliseconds
2696  *
2697  * NOTE: Since there is only one available tag for device management commands,
2698  * it is expected you hold the hba->dev_cmd.lock mutex.
2699  */
2700 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2701                 enum dev_cmd_type cmd_type, int timeout)
2702 {
2703         struct request_queue *q = hba->cmd_queue;
2704         struct request *req;
2705         struct ufshcd_lrb *lrbp;
2706         int err;
2707         int tag;
2708         struct completion wait;
2709         unsigned long flags;
2710
2711         down_read(&hba->clk_scaling_lock);
2712
2713         /*
2714          * Get a free slot; blk_get_request() sleeps if no tag is
2715          * available, but the maximum wait time is bounded by the SCSI
2716          * request timeout.
2717          */
2718         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2719         if (IS_ERR(req)) {
2720                 err = PTR_ERR(req);
2721                 goto out_unlock;
2722         }
2723         tag = req->tag;
2724         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2725
2726         init_completion(&wait);
2727         lrbp = &hba->lrb[tag];
2728         WARN_ON(lrbp->cmd);
2729         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2730         if (unlikely(err))
2731                 goto out_put_tag;
2732
2733         hba->dev_cmd.complete = &wait;
2734
2735         ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2736         /* Make sure descriptors are ready before ringing the doorbell */
2737         wmb();
2738         spin_lock_irqsave(hba->host->host_lock, flags);
2739         ufshcd_vops_setup_xfer_req(hba, tag, false);
2740         ufshcd_send_command(hba, tag);
2741         spin_unlock_irqrestore(hba->host->host_lock, flags);
2742
2743         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2744
2745         ufshcd_add_query_upiu_trace(hba, tag,
2746                         err ? "query_complete_err" : "query_complete");
2747
2748 out_put_tag:
2749         blk_put_request(req);
2750 out_unlock:
2751         up_read(&hba->clk_scaling_lock);
2752         return err;
2753 }
2754
2755 /**
2756  * ufshcd_init_query() - init the query response and request parameters
2757  * @hba: per-adapter instance
2758  * @request: address of the request pointer to be initialized
2759  * @response: address of the response pointer to be initialized
2760  * @opcode: operation to perform
2761  * @idn: flag idn to access
2762  * @index: LU number to access
2763  * @selector: query/flag/descriptor further identification
2764  */
2765 static inline void ufshcd_init_query(struct ufs_hba *hba,
2766                 struct ufs_query_req **request, struct ufs_query_res **response,
2767                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2768 {
2769         *request = &hba->dev_cmd.query.request;
2770         *response = &hba->dev_cmd.query.response;
2771         memset(*request, 0, sizeof(struct ufs_query_req));
2772         memset(*response, 0, sizeof(struct ufs_query_res));
2773         (*request)->upiu_req.opcode = opcode;
2774         (*request)->upiu_req.idn = idn;
2775         (*request)->upiu_req.index = index;
2776         (*request)->upiu_req.selector = selector;
2777 }
2778
2779 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2780         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
2781 {
2782         int ret;
2783         int retries;
2784
2785         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2786                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
2787                 if (ret)
2788                         dev_dbg(hba->dev,
2789                                 "%s: failed with error %d, retries %d\n",
2790                                 __func__, ret, retries);
2791                 else
2792                         break;
2793         }
2794
2795         if (ret)
2796                 dev_err(hba->dev,
2797                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2798                         __func__, opcode, idn, ret, retries);
2799         return ret;
2800 }
2801
2802 /**
2803  * ufshcd_query_flag() - API function for sending flag query requests
2804  * @hba: per-adapter instance
2805  * @opcode: flag query to perform
2806  * @idn: flag idn to access
2807  * @index: flag index to access
2808  * @flag_res: the flag value after the query request completes
2809  *
2810  * Returns 0 for success, non-zero in case of failure
2811  */
2812 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2813                         enum flag_idn idn, u8 index, bool *flag_res)
2814 {
2815         struct ufs_query_req *request = NULL;
2816         struct ufs_query_res *response = NULL;
2817         int err, selector = 0;
2818         int timeout = QUERY_REQ_TIMEOUT;
2819
2820         BUG_ON(!hba);
2821
2822         ufshcd_hold(hba, false);
2823         mutex_lock(&hba->dev_cmd.lock);
2824         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2825                         selector);
2826
2827         switch (opcode) {
2828         case UPIU_QUERY_OPCODE_SET_FLAG:
2829         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2830         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2831                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2832                 break;
2833         case UPIU_QUERY_OPCODE_READ_FLAG:
2834                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2835                 if (!flag_res) {
2836                         /* No dummy reads */
2837                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
2838                                         __func__);
2839                         err = -EINVAL;
2840                         goto out_unlock;
2841                 }
2842                 break;
2843         default:
2844                 dev_err(hba->dev,
2845                         "%s: Expected query flag opcode but got = %d\n",
2846                         __func__, opcode);
2847                 err = -EINVAL;
2848                 goto out_unlock;
2849         }
2850
2851         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2852
2853         if (err) {
2854                 dev_err(hba->dev,
2855                         "%s: Sending flag query for idn %d failed, err = %d\n",
2856                         __func__, idn, err);
2857                 goto out_unlock;
2858         }
2859
2860         if (flag_res)
2861                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2862                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2863
2864 out_unlock:
2865         mutex_unlock(&hba->dev_cmd.lock);
2866         ufshcd_release(hba);
2867         return err;
2868 }
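
/*
 * Illustrative sketch, not part of the driver: reading a device flag with the
 * API above. fDeviceInit is used only as a familiar example and the helper
 * name is hypothetical; callers in this file typically use
 * ufshcd_query_flag_retry() instead.
 */
static int __maybe_unused example_read_fdeviceinit(struct ufs_hba *hba,
                                                   bool *is_set)
{
        return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                 QUERY_FLAG_IDN_FDEVICEINIT, 0, is_set);
}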
2869
2870 /**
2871  * ufshcd_query_attr - API function for sending attribute requests
2872  * @hba: per-adapter instance
2873  * @opcode: attribute opcode
2874  * @idn: attribute idn to access
2875  * @index: index field
2876  * @selector: selector field
2877  * @attr_val: the attribute value after the query request completes
2878  *
2879  * Returns 0 for success, non-zero in case of failure
2880  */
2881 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2882                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2883 {
2884         struct ufs_query_req *request = NULL;
2885         struct ufs_query_res *response = NULL;
2886         int err;
2887
2888         BUG_ON(!hba);
2889
2890         ufshcd_hold(hba, false);
2891         if (!attr_val) {
2892                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2893                                 __func__, opcode);
2894                 err = -EINVAL;
2895                 goto out;
2896         }
2897
2898         mutex_lock(&hba->dev_cmd.lock);
2899         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2900                         selector);
2901
2902         switch (opcode) {
2903         case UPIU_QUERY_OPCODE_WRITE_ATTR:
2904                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2905                 request->upiu_req.value = cpu_to_be32(*attr_val);
2906                 break;
2907         case UPIU_QUERY_OPCODE_READ_ATTR:
2908                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2909                 break;
2910         default:
2911                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2912                                 __func__, opcode);
2913                 err = -EINVAL;
2914                 goto out_unlock;
2915         }
2916
2917         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2918
2919         if (err) {
2920                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2921                                 __func__, opcode, idn, index, err);
2922                 goto out_unlock;
2923         }
2924
2925         *attr_val = be32_to_cpu(response->upiu_res.value);
2926
2927 out_unlock:
2928         mutex_unlock(&hba->dev_cmd.lock);
2929 out:
2930         ufshcd_release(hba);
2931         return err;
2932 }
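
/*
 * Editor's illustrative sketch, not part of the original driver: for a
 * WRITE_ATTR query the caller passes the new value through *attr_val, while a
 * READ_ATTR query returns the device value through the same pointer. The
 * helper name is hypothetical and the attribute IDN is left to the caller,
 * since not every attribute is writable.
 */
static int __maybe_unused ufshcd_example_write_attr(struct ufs_hba *hba,
                                                    enum attr_idn idn, u32 val)
{
        return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, idn,
                                 0, 0, &val);
}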
2933
2934 /**
2935  * ufshcd_query_attr_retry() - API function for sending query
2936  * attribute with retries
2937  * @hba: per-adapter instance
2938  * @opcode: attribute opcode
2939  * @idn: attribute idn to access
2940  * @index: index field
2941  * @selector: selector field
2942  * @attr_val: the attribute value after the query request
2943  * completes
2944  *
2945  * Returns 0 for success, non-zero in case of failure
2946 */
2947 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2948         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2949         u32 *attr_val)
2950 {
2951         int ret = 0;
2952         u32 retries;
2953
2954         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2955                 ret = ufshcd_query_attr(hba, opcode, idn, index,
2956                                                 selector, attr_val);
2957                 if (ret)
2958                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2959                                 __func__, ret, retries);
2960                 else
2961                         break;
2962         }
2963
2964         if (ret)
2965                 dev_err(hba->dev,
2966                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2967                         __func__, idn, ret, QUERY_REQ_RETRIES);
2968         return ret;
2969 }
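
/*
 * Editor's illustrative sketch, not part of the original driver: reading an
 * attribute through the retrying wrapper. The helper name is hypothetical;
 * bRefClkGatingWait is chosen only because its IDN is used later in this
 * file. On success the attribute value is returned in *val.
 */
static int __maybe_unused ufshcd_example_read_attr(struct ufs_hba *hba,
                                                   u32 *val)
{
        return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                                       QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME,
                                       0, 0, val);
}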
2970
2971 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2972                         enum query_opcode opcode, enum desc_idn idn, u8 index,
2973                         u8 selector, u8 *desc_buf, int *buf_len)
2974 {
2975         struct ufs_query_req *request = NULL;
2976         struct ufs_query_res *response = NULL;
2977         int err;
2978
2979         BUG_ON(!hba);
2980
2981         ufshcd_hold(hba, false);
2982         if (!desc_buf) {
2983                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2984                                 __func__, opcode);
2985                 err = -EINVAL;
2986                 goto out;
2987         }
2988
2989         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2990                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2991                                 __func__, *buf_len);
2992                 err = -EINVAL;
2993                 goto out;
2994         }
2995
2996         mutex_lock(&hba->dev_cmd.lock);
2997         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2998                         selector);
2999         hba->dev_cmd.query.descriptor = desc_buf;
3000         request->upiu_req.length = cpu_to_be16(*buf_len);
3001
3002         switch (opcode) {
3003         case UPIU_QUERY_OPCODE_WRITE_DESC:
3004                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3005                 break;
3006         case UPIU_QUERY_OPCODE_READ_DESC:
3007                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3008                 break;
3009         default:
3010                 dev_err(hba->dev,
3011                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3012                                 __func__, opcode);
3013                 err = -EINVAL;
3014                 goto out_unlock;
3015         }
3016
3017         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3018
3019         if (err) {
3020                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3021                                 __func__, opcode, idn, index, err);
3022                 goto out_unlock;
3023         }
3024
3025         *buf_len = be16_to_cpu(response->upiu_res.length);
3026
3027 out_unlock:
3028         hba->dev_cmd.query.descriptor = NULL;
3029         mutex_unlock(&hba->dev_cmd.lock);
3030 out:
3031         ufshcd_release(hba);
3032         return err;
3033 }
3034
3035 /**
3036  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3037  * @hba: per-adapter instance
3038  * @opcode: attribute opcode
3039  * @idn: attribute idn to access
3040  * @index: index field
3041  * @selector: selector field
3042  * @desc_buf: the buffer that contains the descriptor
3043  * @buf_len: length parameter passed to the device
3044  *
3045  * Returns 0 for success, non-zero in case of failure.
3046  * The buf_len parameter will contain, on return, the length parameter
3047  * received on the response.
3048  */
3049 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3050                                   enum query_opcode opcode,
3051                                   enum desc_idn idn, u8 index,
3052                                   u8 selector,
3053                                   u8 *desc_buf, int *buf_len)
3054 {
3055         int err;
3056         int retries;
3057
3058         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3059                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3060                                                 selector, desc_buf, buf_len);
3061                 if (!err || err == -EINVAL)
3062                         break;
3063         }
3064
3065         return err;
3066 }
3067
3068 /**
3069  * ufshcd_read_desc_length - read the specified descriptor length from header
3070  * @hba: Pointer to adapter instance
3071  * @desc_id: descriptor idn value
3072  * @desc_index: descriptor index
3073  * @desc_length: pointer to variable to read the length of descriptor
3074  *
3075  * Return 0 in case of success, non-zero otherwise
3076  */
3077 static int ufshcd_read_desc_length(struct ufs_hba *hba,
3078         enum desc_idn desc_id,
3079         int desc_index,
3080         int *desc_length)
3081 {
3082         int ret;
3083         u8 header[QUERY_DESC_HDR_SIZE];
3084         int header_len = QUERY_DESC_HDR_SIZE;
3085
3086         if (desc_id >= QUERY_DESC_IDN_MAX)
3087                 return -EINVAL;
3088
3089         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3090                                         desc_id, desc_index, 0, header,
3091                                         &header_len);
3092
3093         if (ret) {
3094                 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3095                         __func__, desc_id);
3096                 return ret;
3097         } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3098                 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3099                         __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3100                         desc_id);
3101                 ret = -EINVAL;
3102         }
3103
3104         *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3105         return ret;
3106
3107 }
3108
3109 /**
3110  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3111  * @hba: Pointer to adapter instance
3112  * @desc_id: descriptor idn value
3113  * @desc_len: mapped desc length (out)
3114  *
3115  * Return 0 in case of success, non-zero otherwise
3116  */
3117 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3118         enum desc_idn desc_id, int *desc_len)
3119 {
3120         switch (desc_id) {
3121         case QUERY_DESC_IDN_DEVICE:
3122                 *desc_len = hba->desc_size.dev_desc;
3123                 break;
3124         case QUERY_DESC_IDN_POWER:
3125                 *desc_len = hba->desc_size.pwr_desc;
3126                 break;
3127         case QUERY_DESC_IDN_GEOMETRY:
3128                 *desc_len = hba->desc_size.geom_desc;
3129                 break;
3130         case QUERY_DESC_IDN_CONFIGURATION:
3131                 *desc_len = hba->desc_size.conf_desc;
3132                 break;
3133         case QUERY_DESC_IDN_UNIT:
3134                 *desc_len = hba->desc_size.unit_desc;
3135                 break;
3136         case QUERY_DESC_IDN_INTERCONNECT:
3137                 *desc_len = hba->desc_size.interc_desc;
3138                 break;
3139         case QUERY_DESC_IDN_STRING:
3140                 *desc_len = QUERY_DESC_MAX_SIZE;
3141                 break;
3142         case QUERY_DESC_IDN_HEALTH:
3143                 *desc_len = hba->desc_size.hlth_desc;
3144                 break;
3145         case QUERY_DESC_IDN_RFU_0:
3146         case QUERY_DESC_IDN_RFU_1:
3147                 *desc_len = 0;
3148                 break;
3149         default:
3150                 *desc_len = 0;
3151                 return -EINVAL;
3152         }
3153         return 0;
3154 }
3155 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3156
3157 /**
3158  * ufshcd_read_desc_param - read the specified descriptor parameter
3159  * @hba: Pointer to adapter instance
3160  * @desc_id: descriptor idn value
3161  * @desc_index: descriptor index
3162  * @param_offset: offset of the parameter to read
3163  * @param_read_buf: pointer to buffer where parameter would be read
3164  * @param_size: sizeof(param_read_buf)
3165  *
3166  * Return 0 in case of success, non-zero otherwise
3167  */
3168 int ufshcd_read_desc_param(struct ufs_hba *hba,
3169                            enum desc_idn desc_id,
3170                            int desc_index,
3171                            u8 param_offset,
3172                            u8 *param_read_buf,
3173                            u8 param_size)
3174 {
3175         int ret;
3176         u8 *desc_buf;
3177         int buff_len;
3178         bool is_kmalloc = true;
3179
3180         /* Safety check */
3181         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3182                 return -EINVAL;
3183
3184         /* Get the max length of descriptor from structure filled up at probe
3185          * time.
3186          */
3187         ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3188
3189         /* Sanity checks */
3190         if (ret || !buff_len) {
3191                 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3192                         __func__);
3193                 return ret;
3194         }
3195
3196         /* Check whether we need temp memory */
3197         if (param_offset != 0 || param_size < buff_len) {
3198                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3199                 if (!desc_buf)
3200                         return -ENOMEM;
3201         } else {
3202                 desc_buf = param_read_buf;
3203                 is_kmalloc = false;
3204         }
3205
3206         /* Request for full descriptor */
3207         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3208                                         desc_id, desc_index, 0,
3209                                         desc_buf, &buff_len);
3210
3211         if (ret) {
3212                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3213                         __func__, desc_id, desc_index, param_offset, ret);
3214                 goto out;
3215         }
3216
3217         /* Sanity check */
3218         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3219                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3220                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3221                 ret = -EINVAL;
3222                 goto out;
3223         }
3224
3225         /* Check whether we will not copy more data than available */
3226         if (is_kmalloc && param_size > buff_len)
3227                 param_size = buff_len;
3228
3229         if (is_kmalloc)
3230                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3231 out:
3232         if (is_kmalloc)
3233                 kfree(desc_buf);
3234         return ret;
3235 }
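
/*
 * Editor's illustrative sketch, not part of the original driver: reading a
 * single byte of the device descriptor with ufshcd_read_desc_param(). The
 * helper name is hypothetical; QUERY_DESC_LENGTH_OFFSET (the bLength field at
 * offset 0) is used only for illustration. Because the requested size is
 * smaller than the full descriptor, the function allocates a temporary buffer
 * internally and copies back just the requested byte.
 */
static int __maybe_unused ufshcd_example_read_dev_desc_len(struct ufs_hba *hba,
                                                           u8 *len)
{
        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
                                      QUERY_DESC_LENGTH_OFFSET, len,
                                      sizeof(*len));
}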
3236
3237 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3238                                    enum desc_idn desc_id,
3239                                    int desc_index,
3240                                    void *buf,
3241                                    u32 size)
3242 {
3243         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3244 }
3245
3246
3247 /**
3248  * struct uc_string_id - unicode string
3249  *
3250  * @len: size of this descriptor inclusive
3251  * @type: descriptor type
3252  * @uc: unicode string character
3253  */
3254 struct uc_string_id {
3255         u8 len;
3256         u8 type;
3257         wchar_t uc[];
3258 } __packed;
3259
3260 /* replace non-printable or non-ASCII characters with spaces */
3261 static inline char ufshcd_remove_non_printable(u8 ch)
3262 {
3263         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3264 }
3265
3266 /**
3267  * ufshcd_read_string_desc - read string descriptor
3268  * @hba: pointer to adapter instance
3269  * @desc_index: descriptor index
3270  * @buf: pointer to buffer where descriptor would be read,
3271  *       the caller should free the memory.
3272  * @ascii: if true convert from unicode to ascii characters
3273  *         null terminated string.
3274  *
3275  * Return:
3276  * *      string size on success.
3277  * *      -ENOMEM: on allocation failure
3278  * *      -EINVAL: on a wrong parameter
3279  */
3280 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3281                             u8 **buf, bool ascii)
3282 {
3283         struct uc_string_id *uc_str;
3284         u8 *str;
3285         int ret;
3286
3287         if (!buf)
3288                 return -EINVAL;
3289
3290         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3291         if (!uc_str)
3292                 return -ENOMEM;
3293
3294         ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3295                                desc_index, uc_str,
3296                                QUERY_DESC_MAX_SIZE);
3297         if (ret < 0) {
3298                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3299                         QUERY_REQ_RETRIES, ret);
3300                 str = NULL;
3301                 goto out;
3302         }
3303
3304         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3305                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3306                 str = NULL;
3307                 ret = 0;
3308                 goto out;
3309         }
3310
3311         if (ascii) {
3312                 ssize_t ascii_len;
3313                 int i;
3314                 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3315                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3316                 str = kzalloc(ascii_len, GFP_KERNEL);
3317                 if (!str) {
3318                         ret = -ENOMEM;
3319                         goto out;
3320                 }
3321
3322                 /*
3323                  * the descriptor contains string in UTF16 format
3324                  * we need to convert to utf-8 so it can be displayed
3325                  */
3326                 ret = utf16s_to_utf8s(uc_str->uc,
3327                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3328                                       UTF16_BIG_ENDIAN, str, ascii_len);
3329
3330                 /* replace non-printable or non-ASCII characters with spaces */
3331                 for (i = 0; i < ret; i++)
3332                         str[i] = ufshcd_remove_non_printable(str[i]);
3333
3334                 str[ret++] = '\0';
3335
3336         } else {
3337                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3338                 if (!str) {
3339                         ret = -ENOMEM;
3340                         goto out;
3341                 }
3342                 ret = uc_str->len;
3343         }
3344 out:
3345         *buf = str;
3346         kfree(uc_str);
3347         return ret;
3348 }
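
/*
 * Editor's illustrative sketch, not part of the original driver: typical use
 * of ufshcd_read_string_desc() with ASCII conversion. The helper name and the
 * way desc_index is obtained are hypothetical; in practice the index comes
 * from a field of the device descriptor. On success the caller owns the
 * returned buffer and must kfree() it.
 */
static int __maybe_unused ufshcd_example_read_string(struct ufs_hba *hba,
                                                     u8 desc_index)
{
        u8 *str = NULL;
        int len;

        len = ufshcd_read_string_desc(hba, desc_index, &str, true);
        if (len < 0)
                return len;

        if (str)
                dev_dbg(hba->dev, "string descriptor %u: %s\n",
                        desc_index, str);

        kfree(str);
        return 0;
}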
3349
3350 /**
3351  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3352  * @hba: Pointer to adapter instance
3353  * @lun: lun id
3354  * @param_offset: offset of the parameter to read
3355  * @param_read_buf: pointer to buffer where parameter would be read
3356  * @param_size: sizeof(param_read_buf)
3357  *
3358  * Return 0 in case of success, non-zero otherwise
3359  */
3360 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3361                                               int lun,
3362                                               enum unit_desc_param param_offset,
3363                                               u8 *param_read_buf,
3364                                               u32 param_size)
3365 {
3366         /*
3367          * Unit descriptors are only available for general purpose LUs (LUN id
3368          * from 0 to 7) and RPMB Well known LU.
3369          */
3370         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3371                 return -EOPNOTSUPP;
3372
3373         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3374                                       param_offset, param_read_buf, param_size);
3375 }
3376
3377 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3378 {
3379         int err = 0;
3380         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3381
3382         if (hba->dev_info.wspecversion >= 0x300) {
3383                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3384                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3385                                 &gating_wait);
3386                 if (err)
3387                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3388                                          err, gating_wait);
3389
3390                 if (gating_wait == 0) {
3391                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3392                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3393                                          gating_wait);
3394                 }
3395
3396                 hba->dev_info.clk_gating_wait_us = gating_wait;
3397         }
3398
3399         return err;
3400 }
3401
3402 /**
3403  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3404  * @hba: per adapter instance
3405  *
3406  * 1. Allocate DMA memory for Command Descriptor array
3407  *      Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3408  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3409  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3410  *      (UTMRDL)
3411  * 4. Allocate memory for local reference block(lrb).
3412  *
3413  * Returns 0 for success, non-zero in case of failure
3414  */
3415 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3416 {
3417         size_t utmrdl_size, utrdl_size, ucdl_size;
3418
3419         /* Allocate memory for UTP command descriptors */
3420         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3421         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3422                                                   ucdl_size,
3423                                                   &hba->ucdl_dma_addr,
3424                                                   GFP_KERNEL);
3425
3426         /*
3427          * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3428          * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3429          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3430          * be aligned to 128 bytes as well
3431          */
3432         if (!hba->ucdl_base_addr ||
3433             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3434                 dev_err(hba->dev,
3435                         "Command Descriptor Memory allocation failed\n");
3436                 goto out;
3437         }
3438
3439         /*
3440          * Allocate memory for UTP Transfer descriptors
3441          * UFSHCI requires 1024 byte alignment of UTRD
3442          */
3443         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3444         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3445                                                    utrdl_size,
3446                                                    &hba->utrdl_dma_addr,
3447                                                    GFP_KERNEL);
3448         if (!hba->utrdl_base_addr ||
3449             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3450                 dev_err(hba->dev,
3451                         "Transfer Descriptor Memory allocation failed\n");
3452                 goto out;
3453         }
3454
3455         /*
3456          * Allocate memory for UTP Task Management descriptors
3457          * UFSHCI requires 1024 byte alignment of UTMRD
3458          */
3459         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3460         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3461                                                     utmrdl_size,
3462                                                     &hba->utmrdl_dma_addr,
3463                                                     GFP_KERNEL);
3464         if (!hba->utmrdl_base_addr ||
3465             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3466                 dev_err(hba->dev,
3467                 "Task Management Descriptor Memory allocation failed\n");
3468                 goto out;
3469         }
3470
3471         /* Allocate memory for local reference block */
3472         hba->lrb = devm_kcalloc(hba->dev,
3473                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3474                                 GFP_KERNEL);
3475         if (!hba->lrb) {
3476                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3477                 goto out;
3478         }
3479         return 0;
3480 out:
3481         return -ENOMEM;
3482 }
3483
3484 /**
3485  * ufshcd_host_memory_configure - configure local reference block with
3486  *                              memory offsets
3487  * @hba: per adapter instance
3488  *
3489  * Configure Host memory space
3490  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3491  * address.
3492  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3493  * and PRDT offset.
3494  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3495  * into local reference block.
3496  */
3497 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3498 {
3499         struct utp_transfer_req_desc *utrdlp;
3500         dma_addr_t cmd_desc_dma_addr;
3501         dma_addr_t cmd_desc_element_addr;
3502         u16 response_offset;
3503         u16 prdt_offset;
3504         int cmd_desc_size;
3505         int i;
3506
3507         utrdlp = hba->utrdl_base_addr;
3508
3509         response_offset =
3510                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3511         prdt_offset =
3512                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3513
3514         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3515         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3516
3517         for (i = 0; i < hba->nutrs; i++) {
3518                 /* Configure UTRD with command descriptor base address */
3519                 cmd_desc_element_addr =
3520                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3521                 utrdlp[i].command_desc_base_addr_lo =
3522                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3523                 utrdlp[i].command_desc_base_addr_hi =
3524                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3525
3526                 /* Response upiu and prdt offset should be in double words */
3527                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3528                         utrdlp[i].response_upiu_offset =
3529                                 cpu_to_le16(response_offset);
3530                         utrdlp[i].prd_table_offset =
3531                                 cpu_to_le16(prdt_offset);
3532                         utrdlp[i].response_upiu_length =
3533                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3534                 } else {
3535                         utrdlp[i].response_upiu_offset =
3536                                 cpu_to_le16(response_offset >> 2);
3537                         utrdlp[i].prd_table_offset =
3538                                 cpu_to_le16(prdt_offset >> 2);
3539                         utrdlp[i].response_upiu_length =
3540                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3541                 }
3542
3543                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3544         }
3545 }
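
/*
 * Editor's note, not part of the original driver: unless the
 * UFSHCD_QUIRK_PRDT_BYTE_GRAN quirk is set, the response UPIU and PRDT
 * offsets/lengths programmed above are expressed in 32-bit double words,
 * i.e. the byte value is shifted right by two. As a worked example, a byte
 * offset of 512 would be programmed as 512 >> 2 = 128 double words.
 */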
3546
3547 /**
3548  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3549  * @hba: per adapter instance
3550  *
3551  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3552  * in order to initialize the Unipro link startup procedure.
3553  * Once the Unipro links are up, the device connected to the controller
3554  * is detected.
3555  *
3556  * Returns 0 on success, non-zero value on failure
3557  */
3558 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3559 {
3560         struct uic_command uic_cmd = {0};
3561         int ret;
3562
3563         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3564
3565         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3566         if (ret)
3567                 dev_dbg(hba->dev,
3568                         "dme-link-startup: error code %d\n", ret);
3569         return ret;
3570 }
3571 /**
3572  * ufshcd_dme_reset - UIC command for DME_RESET
3573  * @hba: per adapter instance
3574  *
3575  * DME_RESET command is issued in order to reset UniPro stack.
3576  * This function now deals with cold reset.
3577  *
3578  * Returns 0 on success, non-zero value on failure
3579  */
3580 static int ufshcd_dme_reset(struct ufs_hba *hba)
3581 {
3582         struct uic_command uic_cmd = {0};
3583         int ret;
3584
3585         uic_cmd.command = UIC_CMD_DME_RESET;
3586
3587         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3588         if (ret)
3589                 dev_err(hba->dev,
3590                         "dme-reset: error code %d\n", ret);
3591
3592         return ret;
3593 }
3594
3595 /**
3596  * ufshcd_dme_enable - UIC command for DME_ENABLE
3597  * @hba: per adapter instance
3598  *
3599  * DME_ENABLE command is issued in order to enable UniPro stack.
3600  *
3601  * Returns 0 on success, non-zero value on failure
3602  */
3603 static int ufshcd_dme_enable(struct ufs_hba *hba)
3604 {
3605         struct uic_command uic_cmd = {0};
3606         int ret;
3607
3608         uic_cmd.command = UIC_CMD_DME_ENABLE;
3609
3610         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3611         if (ret)
3612                 dev_err(hba->dev,
3613                         "dme-enable: error code %d\n", ret);
3614
3615         return ret;
3616 }
3617
3618 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3619 {
3620         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
3621         unsigned long min_sleep_time_us;
3622
3623         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3624                 return;
3625
3626         /*
3627          * last_dme_cmd_tstamp will be 0 only for 1st call to
3628          * this function
3629          */
3630         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3631                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3632         } else {
3633                 unsigned long delta =
3634                         (unsigned long) ktime_to_us(
3635                                 ktime_sub(ktime_get(),
3636                                 hba->last_dme_cmd_tstamp));
3637
3638                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3639                         min_sleep_time_us =
3640                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3641                 else
3642                         return; /* no more delay required */
3643         }
3644
3645         /* allow sleep for extra 50us if needed */
3646         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3647 }
3648
3649 /**
3650  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3651  * @hba: per adapter instance
3652  * @attr_sel: uic command argument1
3653  * @attr_set: attribute set type as uic command argument2
3654  * @mib_val: setting value as uic command argument3
3655  * @peer: indicate whether peer or local
3656  *
3657  * Returns 0 on success, non-zero value on failure
3658  */
3659 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3660                         u8 attr_set, u32 mib_val, u8 peer)
3661 {
3662         struct uic_command uic_cmd = {0};
3663         static const char *const action[] = {
3664                 "dme-set",
3665                 "dme-peer-set"
3666         };
3667         const char *set = action[!!peer];
3668         int ret;
3669         int retries = UFS_UIC_COMMAND_RETRIES;
3670
3671         uic_cmd.command = peer ?
3672                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3673         uic_cmd.argument1 = attr_sel;
3674         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3675         uic_cmd.argument3 = mib_val;
3676
3677         do {
3678                 /* for peer attributes we retry upon failure */
3679                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3680                 if (ret)
3681                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3682                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3683         } while (ret && peer && --retries);
3684
3685         if (ret)
3686                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3687                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3688                         UFS_UIC_COMMAND_RETRIES - retries);
3689
3690         return ret;
3691 }
3692 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3693
3694 /**
3695  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3696  * @hba: per adapter instance
3697  * @attr_sel: uic command argument1
3698  * @mib_val: the value of the attribute as returned by the UIC command
3699  * @peer: indicate whether peer or local
3700  *
3701  * Returns 0 on success, non-zero value on failure
3702  */
3703 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3704                         u32 *mib_val, u8 peer)
3705 {
3706         struct uic_command uic_cmd = {0};
3707         static const char *const action[] = {
3708                 "dme-get",
3709                 "dme-peer-get"
3710         };
3711         const char *get = action[!!peer];
3712         int ret;
3713         int retries = UFS_UIC_COMMAND_RETRIES;
3714         struct ufs_pa_layer_attr orig_pwr_info;
3715         struct ufs_pa_layer_attr temp_pwr_info;
3716         bool pwr_mode_change = false;
3717
3718         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3719                 orig_pwr_info = hba->pwr_info;
3720                 temp_pwr_info = orig_pwr_info;
3721
3722                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3723                     orig_pwr_info.pwr_rx == FAST_MODE) {
3724                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3725                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3726                         pwr_mode_change = true;
3727                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3728                     orig_pwr_info.pwr_rx == SLOW_MODE) {
3729                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3730                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3731                         pwr_mode_change = true;
3732                 }
3733                 if (pwr_mode_change) {
3734                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3735                         if (ret)
3736                                 goto out;
3737                 }
3738         }
3739
3740         uic_cmd.command = peer ?
3741                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3742         uic_cmd.argument1 = attr_sel;
3743
3744         do {
3745                 /* for peer attributes we retry upon failure */
3746                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3747                 if (ret)
3748                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3749                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
3750         } while (ret && peer && --retries);
3751
3752         if (ret)
3753                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3754                         get, UIC_GET_ATTR_ID(attr_sel),
3755                         UFS_UIC_COMMAND_RETRIES - retries);
3756
3757         if (mib_val && !ret)
3758                 *mib_val = uic_cmd.argument3;
3759
3760         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3761             && pwr_mode_change)
3762                 ufshcd_change_power_mode(hba, &orig_pwr_info);
3763 out:
3764         return ret;
3765 }
3766 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
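
/*
 * Editor's illustrative sketch, not part of the original driver: the
 * ufshcd_dme_get()/ufshcd_dme_peer_get() convenience wrappers (used elsewhere
 * in this file) sit on top of ufshcd_dme_get_attr(). The helper name is
 * hypothetical; the attributes are the same ones read later in
 * ufshcd_get_max_pwr_mode().
 */
static int __maybe_unused ufshcd_example_dme_get(struct ufs_hba *hba)
{
        u32 rx_lanes = 0, peer_rx_hs_gear = 0;
        int ret;

        /* local (host side) attribute */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
                             &rx_lanes);
        if (ret)
                return ret;

        /* peer (device side) attribute */
        return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
                                   &peer_rx_hs_gear);
}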
3767
3768 /**
3769  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
3770  * state) and waits for it to take effect.
3771  *
3772  * @hba: per adapter instance
3773  * @cmd: UIC command to execute
3774  *
3775  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3776  * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
3777  * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
3778  * and device UniPro link, and hence their final completion would be indicated by
3779  * addition to normal UIC command completion Status (UCCS). This function only
3780  * returns after the relevant status bits indicate the completion.
3781  *
3782  * Returns 0 on success, non-zero value on failure
3783  */
3784 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3785 {
3786         struct completion uic_async_done;
3787         unsigned long flags;
3788         u8 status;
3789         int ret;
3790         bool reenable_intr = false;
3791
3792         mutex_lock(&hba->uic_cmd_mutex);
3793         init_completion(&uic_async_done);
3794         ufshcd_add_delay_before_dme_cmd(hba);
3795
3796         spin_lock_irqsave(hba->host->host_lock, flags);
3797         hba->uic_async_done = &uic_async_done;
3798         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3799                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3800                 /*
3801                  * Make sure UIC command completion interrupt is disabled before
3802                  * issuing UIC command.
3803                  */
3804                 wmb();
3805                 reenable_intr = true;
3806         }
3807         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3808         spin_unlock_irqrestore(hba->host->host_lock, flags);
3809         if (ret) {
3810                 dev_err(hba->dev,
3811                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3812                         cmd->command, cmd->argument3, ret);
3813                 goto out;
3814         }
3815
3816         if (!wait_for_completion_timeout(hba->uic_async_done,
3817                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3818                 dev_err(hba->dev,
3819                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3820                         cmd->command, cmd->argument3);
3821                 ret = -ETIMEDOUT;
3822                 goto out;
3823         }
3824
3825         status = ufshcd_get_upmcrs(hba);
3826         if (status != PWR_LOCAL) {
3827                 dev_err(hba->dev,
3828                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3829                         cmd->command, status);
3830                 ret = (status != PWR_OK) ? status : -1;
3831         }
3832 out:
3833         if (ret) {
3834                 ufshcd_print_host_state(hba);
3835                 ufshcd_print_pwr_info(hba);
3836                 ufshcd_print_host_regs(hba);
3837         }
3838
3839         spin_lock_irqsave(hba->host->host_lock, flags);
3840         hba->active_uic_cmd = NULL;
3841         hba->uic_async_done = NULL;
3842         if (reenable_intr)
3843                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3844         spin_unlock_irqrestore(hba->host->host_lock, flags);
3845         mutex_unlock(&hba->uic_cmd_mutex);
3846
3847         return ret;
3848 }
3849
3850 /**
3851  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3852  *                              using DME_SET primitives.
3853  * @hba: per adapter instance
3854  * @mode: power mode value
3855  *
3856  * Returns 0 on success, non-zero value on failure
3857  */
3858 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3859 {
3860         struct uic_command uic_cmd = {0};
3861         int ret;
3862
3863         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3864                 ret = ufshcd_dme_set(hba,
3865                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3866                 if (ret) {
3867                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3868                                                 __func__, ret);
3869                         goto out;
3870                 }
3871         }
3872
3873         uic_cmd.command = UIC_CMD_DME_SET;
3874         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3875         uic_cmd.argument3 = mode;
3876         ufshcd_hold(hba, false);
3877         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3878         ufshcd_release(hba);
3879
3880 out:
3881         return ret;
3882 }
3883
3884 int ufshcd_link_recovery(struct ufs_hba *hba)
3885 {
3886         int ret;
3887         unsigned long flags;
3888
3889         spin_lock_irqsave(hba->host->host_lock, flags);
3890         hba->ufshcd_state = UFSHCD_STATE_RESET;
3891         ufshcd_set_eh_in_progress(hba);
3892         spin_unlock_irqrestore(hba->host->host_lock, flags);
3893
3894         /* Reset the attached device */
3895         ufshcd_vops_device_reset(hba);
3896
3897         ret = ufshcd_host_reset_and_restore(hba);
3898
3899         spin_lock_irqsave(hba->host->host_lock, flags);
3900         if (ret)
3901                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3902         ufshcd_clear_eh_in_progress(hba);
3903         spin_unlock_irqrestore(hba->host->host_lock, flags);
3904
3905         if (ret)
3906                 dev_err(hba->dev, "%s: link recovery failed, err %d",
3907                         __func__, ret);
3908
3909         return ret;
3910 }
3911 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
3912
3913 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3914 {
3915         int ret;
3916         struct uic_command uic_cmd = {0};
3917         ktime_t start = ktime_get();
3918
3919         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3920
3921         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3922         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3923         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3924                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3925
3926         if (ret) {
3927                 int err;
3928
3929                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3930                         __func__, ret);
3931
3932                 /*
3933                  * If link recovery fails then return error code returned from
3934                  * ufshcd_link_recovery().
3935                  * If link recovery succeeds then return -EAGAIN to attempt
3936                  * hibern8 enter retry again.
3937                  */
3938                 err = ufshcd_link_recovery(hba);
3939                 if (err) {
3940                         dev_err(hba->dev, "%s: link recovery failed", __func__);
3941                         ret = err;
3942                 } else {
3943                         ret = -EAGAIN;
3944                 }
3945         } else
3946                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3947                                                                 POST_CHANGE);
3948
3949         return ret;
3950 }
3951
3952 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3953 {
3954         int ret = 0, retries;
3955
3956         for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3957                 ret = __ufshcd_uic_hibern8_enter(hba);
3958                 if (!ret)
3959                         goto out;
3960         }
3961 out:
3962         return ret;
3963 }
3964
3965 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3966 {
3967         struct uic_command uic_cmd = {0};
3968         int ret;
3969         ktime_t start = ktime_get();
3970
3971         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3972
3973         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3974         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3975         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3976                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3977
3978         if (ret) {
3979                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3980                         __func__, ret);
3981                 ret = ufshcd_link_recovery(hba);
3982         } else {
3983                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3984                                                                 POST_CHANGE);
3985                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3986                 hba->ufs_stats.hibern8_exit_cnt++;
3987         }
3988
3989         return ret;
3990 }
3991 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
3992
3993 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
3994 {
3995         unsigned long flags;
3996         bool update = false;
3997
3998         if (!ufshcd_is_auto_hibern8_supported(hba))
3999                 return;
4000
4001         spin_lock_irqsave(hba->host->host_lock, flags);
4002         if (hba->ahit != ahit) {
4003                 hba->ahit = ahit;
4004                 update = true;
4005         }
4006         spin_unlock_irqrestore(hba->host->host_lock, flags);
4007
4008         if (update && !pm_runtime_suspended(hba->dev)) {
4009                 pm_runtime_get_sync(hba->dev);
4010                 ufshcd_hold(hba, false);
4011                 ufshcd_auto_hibern8_enable(hba);
4012                 ufshcd_release(hba);
4013                 pm_runtime_put(hba->dev);
4014         }
4015 }
4016 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4017
4018 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4019 {
4020         unsigned long flags;
4021
4022         if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
4023                 return;
4024
4025         spin_lock_irqsave(hba->host->host_lock, flags);
4026         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4027         spin_unlock_irqrestore(hba->host->host_lock, flags);
4028 }
4029
4030  /**
4031  * ufshcd_init_pwr_info - setting the POR (power on reset)
4032  * values in hba power info
4033  * @hba: per-adapter instance
4034  */
4035 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4036 {
4037         hba->pwr_info.gear_rx = UFS_PWM_G1;
4038         hba->pwr_info.gear_tx = UFS_PWM_G1;
4039         hba->pwr_info.lane_rx = 1;
4040         hba->pwr_info.lane_tx = 1;
4041         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4042         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4043         hba->pwr_info.hs_rate = 0;
4044 }
4045
4046 /**
4047  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4048  * @hba: per-adapter instance
4049  */
4050 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4051 {
4052         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4053
4054         if (hba->max_pwr_info.is_valid)
4055                 return 0;
4056
4057         pwr_info->pwr_tx = FAST_MODE;
4058         pwr_info->pwr_rx = FAST_MODE;
4059         pwr_info->hs_rate = PA_HS_MODE_B;
4060
4061         /* Get the connected lane count */
4062         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4063                         &pwr_info->lane_rx);
4064         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4065                         &pwr_info->lane_tx);
4066
4067         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4068                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4069                                 __func__,
4070                                 pwr_info->lane_rx,
4071                                 pwr_info->lane_tx);
4072                 return -EINVAL;
4073         }
4074
4075         /*
4076          * First, get the maximum gears of HS speed.
4077          * If a zero value, it means there is no HSGEAR capability.
4078          * Then, get the maximum gears of PWM speed.
4079          */
4080         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4081         if (!pwr_info->gear_rx) {
4082                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4083                                 &pwr_info->gear_rx);
4084                 if (!pwr_info->gear_rx) {
4085                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4086                                 __func__, pwr_info->gear_rx);
4087                         return -EINVAL;
4088                 }
4089                 pwr_info->pwr_rx = SLOW_MODE;
4090         }
4091
4092         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4093                         &pwr_info->gear_tx);
4094         if (!pwr_info->gear_tx) {
4095                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4096                                 &pwr_info->gear_tx);
4097                 if (!pwr_info->gear_tx) {
4098                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4099                                 __func__, pwr_info->gear_tx);
4100                         return -EINVAL;
4101                 }
4102                 pwr_info->pwr_tx = SLOW_MODE;
4103         }
4104
4105         hba->max_pwr_info.is_valid = true;
4106         return 0;
4107 }
4108
4109 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4110                              struct ufs_pa_layer_attr *pwr_mode)
4111 {
4112         int ret;
4113
4114         /* if already configured to the requested pwr_mode */
4115         if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4116             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4117             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4118             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4119             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4120             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4121             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4122                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4123                 return 0;
4124         }
4125
4126         /*
4127          * Configure attributes for power mode change with below.
4128          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4129          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4130          * - PA_HSSERIES
4131          */
4132         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4133         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4134                         pwr_mode->lane_rx);
4135         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4136                         pwr_mode->pwr_rx == FAST_MODE)
4137                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4138         else
4139                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4140
4141         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4142         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4143                         pwr_mode->lane_tx);
4144         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4145                         pwr_mode->pwr_tx == FAST_MODE)
4146                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4147         else
4148                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4149
4150         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4151             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4152             pwr_mode->pwr_rx == FAST_MODE ||
4153             pwr_mode->pwr_tx == FAST_MODE)
4154                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4155                                                 pwr_mode->hs_rate);
4156
4157         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4158                         DL_FC0ProtectionTimeOutVal_Default);
4159         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4160                         DL_TC0ReplayTimeOutVal_Default);
4161         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4162                         DL_AFC0ReqTimeOutVal_Default);
4163         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4164                         DL_FC1ProtectionTimeOutVal_Default);
4165         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4166                         DL_TC1ReplayTimeOutVal_Default);
4167         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4168                         DL_AFC1ReqTimeOutVal_Default);
4169
4170         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4171                         DL_FC0ProtectionTimeOutVal_Default);
4172         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4173                         DL_TC0ReplayTimeOutVal_Default);
4174         ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4175                         DL_AFC0ReqTimeOutVal_Default);
4176
4177         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4178                         | pwr_mode->pwr_tx);
4179
4180         if (ret) {
4181                 dev_err(hba->dev,
4182                         "%s: power mode change failed %d\n", __func__, ret);
4183         } else {
4184                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4185                                                                 pwr_mode);
4186
4187                 memcpy(&hba->pwr_info, pwr_mode,
4188                         sizeof(struct ufs_pa_layer_attr));
4189         }
4190
4191         return ret;
4192 }
4193
4194 /**
4195  * ufshcd_config_pwr_mode - configure a new power mode
4196  * @hba: per-adapter instance
4197  * @desired_pwr_mode: desired power configuration
4198  */
4199 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4200                 struct ufs_pa_layer_attr *desired_pwr_mode)
4201 {
4202         struct ufs_pa_layer_attr final_params = { 0 };
4203         int ret;
4204
4205         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4206                                         desired_pwr_mode, &final_params);
4207
4208         if (ret)
4209                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4210
4211         ret = ufshcd_change_power_mode(hba, &final_params);
4212
4213         return ret;
4214 }
4215 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
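
/*
 * Editor's illustrative sketch, not part of the original driver: requesting
 * the maximum negotiated power mode through ufshcd_config_pwr_mode(). The
 * helper name is hypothetical; it mirrors what the probe path effectively
 * does after ufshcd_get_max_pwr_mode() has filled hba->max_pwr_info.
 */
static int __maybe_unused ufshcd_example_set_max_pwr_mode(struct ufs_hba *hba)
{
        int ret;

        ret = ufshcd_get_max_pwr_mode(hba);
        if (ret)
                return ret;

        return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}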
4216
4217 /**
4218  * ufshcd_complete_dev_init() - checks device readiness
4219  * @hba: per-adapter instance
4220  *
4221  * Set fDeviceInit flag and poll until device toggles it.
4222  */
4223 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4224 {
4225         int i;
4226         int err;
4227         bool flag_res = true;
4228
4229         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4230                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4231         if (err) {
4232                 dev_err(hba->dev,
4233                         "%s setting fDeviceInit flag failed with error %d\n",
4234                         __func__, err);
4235                 goto out;
4236         }
4237
4238         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4239         for (i = 0; i < 1000 && !err && flag_res; i++)
4240                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4241                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4242
4243         if (err)
4244                 dev_err(hba->dev,
4245                         "%s reading fDeviceInit flag failed with error %d\n",
4246                         __func__, err);
4247         else if (flag_res)
4248                 dev_err(hba->dev,
4249                         "%s fDeviceInit was not cleared by the device\n",
4250                         __func__);
4251
4252 out:
4253         return err;
4254 }
4255
4256 /**
4257  * ufshcd_make_hba_operational - Make UFS controller operational
4258  * @hba: per adapter instance
4259  *
4260  * To bring UFS host controller to operational state,
4261  * 1. Enable required interrupts
4262  * 2. Configure interrupt aggregation
4263  * 3. Program UTRL and UTMRL base address
4264  * 4. Configure run-stop-registers
4265  *
4266  * Returns 0 on success, non-zero value on failure
4267  */
4268 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4269 {
4270         int err = 0;
4271         u32 reg;
4272
4273         /* Enable required interrupts */
4274         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4275
4276         /* Configure interrupt aggregation */
4277         if (ufshcd_is_intr_aggr_allowed(hba))
4278                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4279         else
4280                 ufshcd_disable_intr_aggr(hba);
4281
4282         /* Configure UTRL and UTMRL base address registers */
4283         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4284                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4285         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4286                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4287         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4288                         REG_UTP_TASK_REQ_LIST_BASE_L);
4289         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4290                         REG_UTP_TASK_REQ_LIST_BASE_H);
4291
4292         /*
4293          * Make sure base address and interrupt setup are updated before
4294          * enabling the run/stop registers below.
4295          */
4296         wmb();
4297
4298         /*
4299          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4300          */
4301         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4302         if (!(ufshcd_get_lists_status(reg))) {
4303                 ufshcd_enable_run_stop_reg(hba);
4304         } else {
4305                 dev_err(hba->dev,
4306                         "Host controller not ready to process requests");
4307                 err = -EIO;
4308                 goto out;
4309         }
4310
4311 out:
4312         return err;
4313 }
4314 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4315
4316 /**
4317  * ufshcd_hba_stop - Send controller to reset state
4318  * @hba: per adapter instance
4319  */
4320 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
4321 {
4322         unsigned long flags;
4323         int err;
4324
4325         /*
4326          * Obtain the host lock to prevent the controller from being disabled
4327          * while the UFS interrupt handler is active on another CPU.
4328          */
4329         spin_lock_irqsave(hba->host->host_lock, flags);
4330         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4331         spin_unlock_irqrestore(hba->host->host_lock, flags);
4332
4333         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4334                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4335                                         10, 1);
4336         if (err)
4337                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4338 }
4339
4340 /**
4341  * ufshcd_hba_execute_hce - initialize the controller
4342  * @hba: per adapter instance
4343  *
4344  * The controller resets itself and the controller firmware initialization
4345  * sequence kicks off. When the controller is ready it sets
4346  * the Host Controller Enable bit to 1.
4347  *
4348  * Returns 0 on success, non-zero value on failure
4349  */
4350 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4351 {
4352         int retry;
4353
4354         if (!ufshcd_is_hba_active(hba))
4355                 /* change controller state to "reset state" */
4356                 ufshcd_hba_stop(hba);
4357
4358         /* UniPro link is disabled at this point */
4359         ufshcd_set_link_off(hba);
4360
4361         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4362
4363         /* start controller initialization sequence */
4364         ufshcd_hba_start(hba);
4365
4366         /*
4367          * To initialize a UFS host controller, the HCE bit must be set to 1.
4368          * During initialization the HCE bit value changes from 1->0->1.
4369          * When the host controller completes the initialization sequence,
4370          * it sets the HCE bit back to 1. The same HCE bit is read back
4371          * to check if the controller has completed initialization.
4372          * Without this delay, the stale HCE = 1 written by the previous
4373          * instruction might be read back before the controller has cleared it.
4374          * This delay can be changed based on the controller.
4375          */
4376         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4377
4378         /* wait for the host controller to complete initialization */
4379         retry = 50;
4380         while (ufshcd_is_hba_active(hba)) {
4381                 if (retry) {
4382                         retry--;
4383                 } else {
4384                         dev_err(hba->dev,
4385                                 "Controller enable failed\n");
4386                         return -EIO;
4387                 }
4388                 usleep_range(1000, 1100);
4389         }
4390
4391         /* enable UIC related interrupts */
4392         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4393
4394         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4395
4396         return 0;
4397 }
4398
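/**
 * ufshcd_hba_enable - enable the UFS host controller
 * @hba: per adapter instance
 *
 * For hosts with the UFSHCI_QUIRK_BROKEN_HCE quirk, the controller is
 * brought up with DME reset/enable commands instead of toggling the HCE
 * bit; all other hosts go through the standard HCE sequence in
 * ufshcd_hba_execute_hce().
 *
 * Returns 0 on success, non-zero value on failure.
 */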
4399 int ufshcd_hba_enable(struct ufs_hba *hba)
4400 {
4401         int ret;
4402
4403         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4404                 ufshcd_set_link_off(hba);
4405                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4406
4407                 /* enable UIC related interrupts */
4408                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4409                 ret = ufshcd_dme_reset(hba);
4410                 if (!ret) {
4411                         ret = ufshcd_dme_enable(hba);
4412                         if (!ret)
4413                                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4414                         if (ret)
4415                                 dev_err(hba->dev,
4416                                         "Host controller enable failed with non-hce\n");
4417                 }
4418         } else {
4419                 ret = ufshcd_hba_execute_hce(hba);
4420         }
4421
4422         return ret;
4423 }
4424 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4425
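/**
 * ufshcd_disable_tx_lcc - disable LCC on all connected TX data lanes
 * @hba: per adapter instance
 * @peer: true to program the peer (device) side, false for the local side
 *
 * Reads the number of connected TX data lanes and clears TX_LCC_ENABLE on
 * each of them, either locally or on the peer, via DME set commands.
 */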
4426 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4427 {
4428         int tx_lanes = 0, i, err = 0;
4429
4430         if (!peer)
4431                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4432                                &tx_lanes);
4433         else
4434                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4435                                     &tx_lanes);
4436         for (i = 0; i < tx_lanes; i++) {
4437                 if (!peer)
4438                         err = ufshcd_dme_set(hba,
4439                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4440                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4441                                         0);
4442                 else
4443                         err = ufshcd_dme_peer_set(hba,
4444                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4445                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4446                                         0);
4447                 if (err) {
4448                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4449                                 __func__, peer, i, err);
4450                         break;
4451                 }
4452         }
4453
4454         return err;
4455 }
4456
4457 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4458 {
4459         return ufshcd_disable_tx_lcc(hba, true);
4460 }
4461
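/**
 * ufshcd_update_reg_hist - record an error register value in the history
 * @reg_hist: error history structure to update
 * @reg: register value to record
 *
 * Stores the value along with a timestamp and advances the write position,
 * wrapping around after UFS_ERR_REG_HIST_LENGTH entries.
 */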
4462 void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4463                             u32 reg)
4464 {
4465         reg_hist->reg[reg_hist->pos] = reg;
4466         reg_hist->tstamp[reg_hist->pos] = ktime_get();
4467         reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4468 }
4469 EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist);
4470
4471 /**
4472  * ufshcd_link_startup - Initialize unipro link startup
4473  * @hba: per adapter instance
4474  *
4475  * Returns 0 for success, non-zero in case of failure
4476  */
4477 static int ufshcd_link_startup(struct ufs_hba *hba)
4478 {
4479         int ret;
4480         int retries = DME_LINKSTARTUP_RETRIES;
4481         bool link_startup_again = false;
4482
4483         /*
4484          * If the UFS device isn't active then we will have to issue link startup
4485          * 2 times to make sure the device state moves to active.
4486          */
4487         if (!ufshcd_is_ufs_dev_active(hba))
4488                 link_startup_again = true;
4489
4490 link_startup:
4491         do {
4492                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4493
4494                 ret = ufshcd_dme_link_startup(hba);
4495
4496                 /* check if device is detected by inter-connect layer */
4497                 if (!ret && !ufshcd_is_device_present(hba)) {
4498                         ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4499                                                0);
4500                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4501                         ret = -ENXIO;
4502                         goto out;
4503                 }
4504
4505                 /*
4506                  * DME link lost indication is only received when link is up,
4507                  * but we can't be sure if the link is up until link startup
4508                  * succeeds. So reset the local Uni-Pro and try again.
4509                  */
4510                 if (ret && ufshcd_hba_enable(hba)) {
4511                         ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4512                                                (u32)ret);
4513                         goto out;
4514                 }
4515         } while (ret && retries--);
4516
4517         if (ret) {
4518                 /* failed to get the link up... retire */
4519                 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4520                                        (u32)ret);
4521                 goto out;
4522         }
4523
4524         if (link_startup_again) {
4525                 link_startup_again = false;
4526                 retries = DME_LINKSTARTUP_RETRIES;
4527                 goto link_startup;
4528         }
4529
4530         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4531         ufshcd_init_pwr_info(hba);
4532         ufshcd_print_pwr_info(hba);
4533
4534         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4535                 ret = ufshcd_disable_device_tx_lcc(hba);
4536                 if (ret)
4537                         goto out;
4538         }
4539
4540         /* Include any host controller configuration via UIC commands */
4541         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4542         if (ret)
4543                 goto out;
4544
4545         ret = ufshcd_make_hba_operational(hba);
4546 out:
4547         if (ret) {
4548                 dev_err(hba->dev, "link startup failed %d\n", ret);
4549                 ufshcd_print_host_state(hba);
4550                 ufshcd_print_pwr_info(hba);
4551                 ufshcd_print_host_regs(hba);
4552         }
4553         return ret;
4554 }
4555
4556 /**
4557  * ufshcd_verify_dev_init() - Verify device initialization
4558  * @hba: per-adapter instance
4559  *
4560  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4561  * device Transport Protocol (UTP) layer is ready after a reset.
4562  * If the UTP layer at the device side is not initialized, it may
4563  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4564  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4565  */
4566 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4567 {
4568         int err = 0;
4569         int retries;
4570
4571         ufshcd_hold(hba, false);
4572         mutex_lock(&hba->dev_cmd.lock);
4573         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4574                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4575                                                NOP_OUT_TIMEOUT);
4576
4577                 if (!err || err == -ETIMEDOUT)
4578                         break;
4579
4580                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4581         }
4582         mutex_unlock(&hba->dev_cmd.lock);
4583         ufshcd_release(hba);
4584
4585         if (err)
4586                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4587         return err;
4588 }
4589
4590 /**
4591  * ufshcd_set_queue_depth - set lun queue depth
4592  * @sdev: pointer to SCSI device
4593  *
4594  * Read bLUQueueDepth value and activate scsi tagged command
4595  * queueing. For WLUN, queue depth is set to 1. For best-effort
4596  * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4597  * number of requests the host can queue.
4598  */
4599 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4600 {
4601         int ret = 0;
4602         u8 lun_qdepth;
4603         struct ufs_hba *hba;
4604
4605         hba = shost_priv(sdev->host);
4606
4607         lun_qdepth = hba->nutrs;
4608         ret = ufshcd_read_unit_desc_param(hba,
4609                                           ufshcd_scsi_to_upiu_lun(sdev->lun),
4610                                           UNIT_DESC_PARAM_LU_Q_DEPTH,
4611                                           &lun_qdepth,
4612                                           sizeof(lun_qdepth));
4613
4614         /* Some WLUNs don't support unit descriptors */
4615         if (ret == -EOPNOTSUPP)
4616                 lun_qdepth = 1;
4617         else if (!lun_qdepth)
4618                 /* eventually, we can figure out the real queue depth */
4619                 lun_qdepth = hba->nutrs;
4620         else
4621                 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4622
4623         dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4624                         __func__, lun_qdepth);
4625         scsi_change_queue_depth(sdev, lun_qdepth);
4626 }
4627
4628 /*
4629  * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4630  * @hba: per-adapter instance
4631  * @lun: UFS device lun id
4632  * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4633  *
4634  * Returns 0 in case of success and the b_lu_write_protect status is returned
4635  * in the @b_lu_write_protect parameter.
4636  * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4637  * Returns -EINVAL in case of invalid parameters passed to this function.
4638  */
4639 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4640                             u8 lun,
4641                             u8 *b_lu_write_protect)
4642 {
4643         int ret;
4644
4645         if (!b_lu_write_protect)
4646                 ret = -EINVAL;
4647         /*
4648          * According to UFS device spec, RPMB LU can't be write
4649          * protected so skip reading bLUWriteProtect parameter for
4650          * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4651          */
4652         else if (lun >= hba->dev_info.max_lu_supported)
4653                 ret = -ENOTSUPP;
4654         else
4655                 ret = ufshcd_read_unit_desc_param(hba,
4656                                           lun,
4657                                           UNIT_DESC_PARAM_LU_WR_PROTECT,
4658                                           b_lu_write_protect,
4659                                           sizeof(*b_lu_write_protect));
4660         return ret;
4661 }
4662
4663 /**
4664  * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4665  * status
4666  * @hba: per-adapter instance
4667  * @sdev: pointer to SCSI device
4668  *
4669  */
4670 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4671                                                     struct scsi_device *sdev)
4672 {
4673         if (hba->dev_info.f_power_on_wp_en &&
4674             !hba->dev_info.is_lu_power_on_wp) {
4675                 u8 b_lu_write_protect;
4676
4677                 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4678                                       &b_lu_write_protect) &&
4679                     (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4680                         hba->dev_info.is_lu_power_on_wp = true;
4681         }
4682 }
4683
4684 /**
4685  * ufshcd_slave_alloc - handle initial SCSI device configurations
4686  * @sdev: pointer to SCSI device
4687  *
4688  * Returns success
4689  */
4690 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4691 {
4692         struct ufs_hba *hba;
4693
4694         hba = shost_priv(sdev->host);
4695
4696         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4697         sdev->use_10_for_ms = 1;
4698
4699         /* DBD field should be set to 1 in mode sense(10) */
4700         sdev->set_dbd_for_ms = 1;
4701
4702         /* allow SCSI layer to restart the device in case of errors */
4703         sdev->allow_restart = 1;
4704
4705         /* REPORT SUPPORTED OPERATION CODES is not supported */
4706         sdev->no_report_opcodes = 1;
4707
4708         /* WRITE_SAME command is not supported */
4709         sdev->no_write_same = 1;
4710
4711         ufshcd_set_queue_depth(sdev);
4712
4713         ufshcd_get_lu_power_on_wp_status(hba, sdev);
4714
4715         return 0;
4716 }
4717
4718 /**
4719  * ufshcd_change_queue_depth - change queue depth
4720  * @sdev: pointer to SCSI device
4721  * @depth: required depth to set
4722  *
4723  * Change queue depth and make sure the max. limits are not crossed.
4724  */
4725 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4726 {
4727         struct ufs_hba *hba = shost_priv(sdev->host);
4728
4729         if (depth > hba->nutrs)
4730                 depth = hba->nutrs;
4731         return scsi_change_queue_depth(sdev, depth);
4732 }
4733
4734 /**
4735  * ufshcd_slave_configure - adjust SCSI device configurations
4736  * @sdev: pointer to SCSI device
4737  */
4738 static int ufshcd_slave_configure(struct scsi_device *sdev)
4739 {
4740         struct ufs_hba *hba = shost_priv(sdev->host);
4741         struct request_queue *q = sdev->request_queue;
4742
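        /* Pad transfer lengths to the PRDT data byte count granularity */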
4743         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4744
4745         if (ufshcd_is_rpm_autosuspend_allowed(hba))
4746                 sdev->rpm_autosuspend = 1;
4747
4748         return 0;
4749 }
4750
4751 /**
4752  * ufshcd_slave_destroy - remove SCSI device configurations
4753  * @sdev: pointer to SCSI device
4754  */
4755 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4756 {
4757         struct ufs_hba *hba;
4758
4759         hba = shost_priv(sdev->host);
4760         /* Drop the reference as it won't be needed anymore */
4761         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4762                 unsigned long flags;
4763
4764                 spin_lock_irqsave(hba->host->host_lock, flags);
4765                 hba->sdev_ufs_device = NULL;
4766                 spin_unlock_irqrestore(hba->host->host_lock, flags);
4767         }
4768 }
4769
4770 /**
4771  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4772  * @lrbp: pointer to local reference block of completed command
4773  * @scsi_status: SCSI command status
4774  *
4775  * Returns value based on SCSI command status
4776  */
4777 static inline int
4778 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4779 {
4780         int result = 0;
4781
4782         switch (scsi_status) {
4783         case SAM_STAT_CHECK_CONDITION:
4784                 ufshcd_copy_sense_data(lrbp);
4785                 /* fallthrough */
4786         case SAM_STAT_GOOD:
4787                 result |= DID_OK << 16 |
4788                           COMMAND_COMPLETE << 8 |
4789                           scsi_status;
4790                 break;
4791         case SAM_STAT_TASK_SET_FULL:
4792         case SAM_STAT_BUSY:
4793         case SAM_STAT_TASK_ABORTED:
4794                 ufshcd_copy_sense_data(lrbp);
4795                 result |= scsi_status;
4796                 break;
4797         default:
4798                 result |= DID_ERROR << 16;
4799                 break;
4800         } /* end of switch */
4801
4802         return result;
4803 }
4804
4805 /**
4806  * ufshcd_transfer_rsp_status - Get overall status of the response
4807  * @hba: per adapter instance
4808  * @lrbp: pointer to local reference block of completed command
4809  *
4810  * Returns result of the command to notify SCSI midlayer
4811  */
4812 static inline int
4813 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4814 {
4815         int result = 0;
4816         int scsi_status;
4817         int ocs;
4818
4819         /* overall command status of utrd */
4820         ocs = ufshcd_get_tr_ocs(lrbp);
4821
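        /*
         * Hosts with the UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR quirk may
         * report a fatal OCS even though the device returned a valid
         * response UPIU. In that case trust the response result field
         * and treat the OCS as success.
         */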
4822         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
4823                 if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
4824                                         MASK_RSP_UPIU_RESULT)
4825                         ocs = OCS_SUCCESS;
4826         }
4827
4828         switch (ocs) {
4829         case OCS_SUCCESS:
4830                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4831                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4832                 switch (result) {
4833                 case UPIU_TRANSACTION_RESPONSE:
4834                         /*
4835                          * get the response UPIU result to extract
4836                          * the SCSI command status
4837                          */
4838                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4839
4840                         /*
4841                          * get the result based on SCSI status response
4842                          * to notify the SCSI midlayer of the command status
4843                          */
4844                         scsi_status = result & MASK_SCSI_STATUS;
4845                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4846
4847                         /*
4848                          * Currently we only support BKOPs exception
4849                          * events, so BKOPs exception events can be ignored
4850                          * during power management callbacks. A BKOPs exception
4851                          * event is not expected to be raised in the runtime
4852                          * suspend callback as urgent bkops is allowed there.
4853                          * During system suspend, we are anyway forcefully
4854                          * disabling the bkops and if urgent bkops is needed
4855                          * it will be enabled on system resume. Long term
4856                          * solution could be to abort the system suspend if
4857                          * UFS device needs urgent BKOPs.
4858                          */
4859                         if (!hba->pm_op_in_progress &&
4860                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
4861                             schedule_work(&hba->eeh_work)) {
4862                                 /*
4863                                  * Prevent suspend once eeh_work is scheduled
4864                                  * to avoid deadlock between ufshcd_suspend
4865                                  * and exception event handler.
4866                                  */
4867                                 pm_runtime_get_noresume(hba->dev);
4868                         }
4869                         break;
4870                 case UPIU_TRANSACTION_REJECT_UPIU:
4871                         /* TODO: handle Reject UPIU Response */
4872                         result = DID_ERROR << 16;
4873                         dev_err(hba->dev,
4874                                 "Reject UPIU not fully implemented\n");
4875                         break;
4876                 default:
4877                         dev_err(hba->dev,
4878                                 "Unexpected request response code = %x\n",
4879                                 result);
4880                         result = DID_ERROR << 16;
4881                         break;
4882                 }
4883                 break;
4884         case OCS_ABORTED:
4885                 result |= DID_ABORT << 16;
4886                 break;
4887         case OCS_INVALID_COMMAND_STATUS:
4888                 result |= DID_REQUEUE << 16;
4889                 break;
4890         case OCS_INVALID_CMD_TABLE_ATTR:
4891         case OCS_INVALID_PRDT_ATTR:
4892         case OCS_MISMATCH_DATA_BUF_SIZE:
4893         case OCS_MISMATCH_RESP_UPIU_SIZE:
4894         case OCS_PEER_COMM_FAILURE:
4895         case OCS_FATAL_ERROR:
4896         default:
4897                 result |= DID_ERROR << 16;
4898                 dev_err(hba->dev,
4899                                 "OCS error from controller = %x for tag %d\n",
4900                                 ocs, lrbp->task_tag);
4901                 ufshcd_print_host_regs(hba);
4902                 ufshcd_print_host_state(hba);
4903                 break;
4904         } /* end of switch */
4905
4906         if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
4907                 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4908         return result;
4909 }
4910
4911 /**
4912  * ufshcd_uic_cmd_compl - handle completion of uic command
4913  * @hba: per adapter instance
4914  * @intr_status: interrupt status generated by the controller
4915  *
4916  * Returns
4917  *  IRQ_HANDLED - If interrupt is valid
4918  *  IRQ_NONE    - If invalid interrupt
4919  */
4920 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4921 {
4922         irqreturn_t retval = IRQ_NONE;
4923
4924         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4925                 hba->active_uic_cmd->argument2 |=
4926                         ufshcd_get_uic_cmd_result(hba);
4927                 hba->active_uic_cmd->argument3 =
4928                         ufshcd_get_dme_attr_val(hba);
4929                 complete(&hba->active_uic_cmd->done);
4930                 retval = IRQ_HANDLED;
4931         }
4932
4933         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
4934                 complete(hba->uic_async_done);
4935                 retval = IRQ_HANDLED;
4936         }
4937         return retval;
4938 }
4939
4940 /**
4941  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4942  * @hba: per adapter instance
4943  * @completed_reqs: requests to complete
4944  */
4945 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4946                                         unsigned long completed_reqs)
4947 {
4948         struct ufshcd_lrb *lrbp;
4949         struct scsi_cmnd *cmd;
4950         int result;
4951         int index;
4952
4953         for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4954                 lrbp = &hba->lrb[index];
4955                 cmd = lrbp->cmd;
4956                 if (cmd) {
4957                         ufshcd_add_command_trace(hba, index, "complete");
4958                         result = ufshcd_transfer_rsp_status(hba, lrbp);
4959                         scsi_dma_unmap(cmd);
4960                         cmd->result = result;
4961                         /* Mark completed command as NULL in LRB */
4962                         lrbp->cmd = NULL;
4963                         lrbp->compl_time_stamp = ktime_get();
4964                         /* Do not touch lrbp after scsi done */
4965                         cmd->scsi_done(cmd);
4966                         __ufshcd_release(hba);
4967                 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4968                         lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4969                         lrbp->compl_time_stamp = ktime_get();
4970                         if (hba->dev_cmd.complete) {
4971                                 ufshcd_add_command_trace(hba, index,
4972                                                 "dev_complete");
4973                                 complete(hba->dev_cmd.complete);
4974                         }
4975                 }
4976                 if (ufshcd_is_clkscaling_supported(hba))
4977                         hba->clk_scaling.active_reqs--;
4978         }
4979
4980         /* clear corresponding bits of completed commands */
4981         hba->outstanding_reqs ^= completed_reqs;
4982
4983         ufshcd_clk_scaling_update_busy(hba);
4984 }
4985
4986 /**
4987  * ufshcd_transfer_req_compl - handle SCSI and query command completion
4988  * @hba: per adapter instance
4989  *
4990  * Returns
4991  *  IRQ_HANDLED - If interrupt is valid
4992  *  IRQ_NONE    - If invalid interrupt
4993  */
4994 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
4995 {
4996         unsigned long completed_reqs;
4997         u32 tr_doorbell;
4998
4999         /* Resetting interrupt aggregation counters first and reading the
5000          * DOOR_BELL afterward allows us to handle all the completed requests.
5001          * In order to prevent starvation of other interrupts the DB is read once
5002          * after reset. The downside of this solution is the possibility of a
5003          * false interrupt if the device completes another request after resetting
5004          * aggregation and before reading the DB.
5005          */
5006         if (ufshcd_is_intr_aggr_allowed(hba) &&
5007             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5008                 ufshcd_reset_intr_aggr(hba);
5009
5010         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
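        /*
         * Requests that we issued (set in outstanding_reqs) but whose
         * doorbell bits the controller has already cleared are the ones
         * that completed.
         */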
5011         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5012
5013         if (completed_reqs) {
5014                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5015                 return IRQ_HANDLED;
5016         } else {
5017                 return IRQ_NONE;
5018         }
5019 }
5020
5021 /**
5022  * ufshcd_disable_ee - disable exception event
5023  * @hba: per-adapter instance
5024  * @mask: exception event to disable
5025  *
5026  * Disables exception event in the device so that the EVENT_ALERT
5027  * bit is not set.
5028  *
5029  * Returns zero on success, non-zero error value on failure.
5030  */
5031 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5032 {
5033         int err = 0;
5034         u32 val;
5035
5036         if (!(hba->ee_ctrl_mask & mask))
5037                 goto out;
5038
5039         val = hba->ee_ctrl_mask & ~mask;
5040         val &= MASK_EE_STATUS;
5041         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5042                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5043         if (!err)
5044                 hba->ee_ctrl_mask &= ~mask;
5045 out:
5046         return err;
5047 }
5048
5049 /**
5050  * ufshcd_enable_ee - enable exception event
5051  * @hba: per-adapter instance
5052  * @mask: exception event to enable
5053  *
5054  * Enable corresponding exception event in the device to allow
5055  * device to alert host in critical scenarios.
5056  *
5057  * Returns zero on success, non-zero error value on failure.
5058  */
5059 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5060 {
5061         int err = 0;
5062         u32 val;
5063
5064         if (hba->ee_ctrl_mask & mask)
5065                 goto out;
5066
5067         val = hba->ee_ctrl_mask | mask;
5068         val &= MASK_EE_STATUS;
5069         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5070                         QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5071         if (!err)
5072                 hba->ee_ctrl_mask |= mask;
5073 out:
5074         return err;
5075 }
5076
5077 /**
5078  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5079  * @hba: per-adapter instance
5080  *
5081  * Allow device to manage background operations on its own. Enabling
5082  * this might lead to inconsistent latencies during normal data transfers
5083  * as the device is allowed to manage its own way of handling background
5084  * operations.
5085  *
5086  * Returns zero on success, non-zero on failure.
5087  */
5088 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5089 {
5090         int err = 0;
5091
5092         if (hba->auto_bkops_enabled)
5093                 goto out;
5094
5095         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5096                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5097         if (err) {
5098                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5099                                 __func__, err);
5100                 goto out;
5101         }
5102
5103         hba->auto_bkops_enabled = true;
5104         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5105
5106         /* No need of URGENT_BKOPS exception from the device */
5107         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5108         if (err)
5109                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5110                                 __func__, err);
5111 out:
5112         return err;
5113 }
5114
5115 /**
5116  * ufshcd_disable_auto_bkops - block device in doing background operations
5117  * @hba: per-adapter instance
5118  *
5119  * Disabling background operations improves command response latency but
5120  * has the drawback of the device moving into a critical state where it is
5121  * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5122  * host is idle so that BKOPS are managed effectively without any negative
5123  * impacts.
5124  *
5125  * Returns zero on success, non-zero on failure.
5126  */
5127 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5128 {
5129         int err = 0;
5130
5131         if (!hba->auto_bkops_enabled)
5132                 goto out;
5133
5134         /*
5135          * If host assisted BKOPs is to be enabled, make sure
5136          * urgent bkops exception is allowed.
5137          */
5138         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5139         if (err) {
5140                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5141                                 __func__, err);
5142                 goto out;
5143         }
5144
5145         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5146                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5147         if (err) {
5148                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5149                                 __func__, err);
5150                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5151                 goto out;
5152         }
5153
5154         hba->auto_bkops_enabled = false;
5155         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5156         hba->is_urgent_bkops_lvl_checked = false;
5157 out:
5158         return err;
5159 }
5160
5161 /**
5162  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5163  * @hba: per adapter instance
5164  *
5165  * After a device reset the device may toggle the BKOPS_EN flag
5166  * to its default value. The s/w tracking variables should be updated
5167  * as well. This function changes the auto-bkops state based on
5168  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5169  */
5170 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5171 {
5172         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5173                 hba->auto_bkops_enabled = false;
5174                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5175                 ufshcd_enable_auto_bkops(hba);
5176         } else {
5177                 hba->auto_bkops_enabled = true;
5178                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5179                 ufshcd_disable_auto_bkops(hba);
5180         }
5181         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5182         hba->is_urgent_bkops_lvl_checked = false;
5183 }
5184
5185 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5186 {
5187         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5188                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5189 }
5190
5191 /**
5192  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5193  * @hba: per-adapter instance
5194  * @status: bkops_status value
5195  *
5196  * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5197  * flag in the device to permit background operations if the device
5198  * bkops_status is greater than or equal to the "status" argument passed to
5199  * this function; disable it otherwise.
5200  *
5201  * Returns 0 for success, non-zero in case of failure.
5202  *
5203  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5204  * to know whether auto bkops is enabled or disabled after this function
5205  * returns control to it.
5206  */
5207 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5208                              enum bkops_status status)
5209 {
5210         int err;
5211         u32 curr_status = 0;
5212
5213         err = ufshcd_get_bkops_status(hba, &curr_status);
5214         if (err) {
5215                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5216                                 __func__, err);
5217                 goto out;
5218         } else if (curr_status > BKOPS_STATUS_MAX) {
5219                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5220                                 __func__, curr_status);
5221                 err = -EINVAL;
5222                 goto out;
5223         }
5224
5225         if (curr_status >= status)
5226                 err = ufshcd_enable_auto_bkops(hba);
5227         else
5228                 err = ufshcd_disable_auto_bkops(hba);
5229 out:
5230         return err;
5231 }
5232
5233 /**
5234  * ufshcd_urgent_bkops - handle urgent bkops exception event
5235  * @hba: per-adapter instance
5236  *
5237  * Enable fBackgroundOpsEn flag in the device to permit background
5238  * operations.
5239  *
5240  * Returns 0 if BKOPs is enabled, 1 if BKOPs is not enabled,
5241  * and a negative error value for any other failure.
5242  */
5243 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5244 {
5245         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5246 }
5247
5248 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5249 {
5250         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5251                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5252 }
5253
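/*
 * Handle an urgent BKOPS exception event: if the device's reported BKOPS
 * status is below the current urgent level, adopt that status as the new
 * urgent level, then enable auto-bkops.
 */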
5254 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5255 {
5256         int err;
5257         u32 curr_status = 0;
5258
5259         if (hba->is_urgent_bkops_lvl_checked)
5260                 goto enable_auto_bkops;
5261
5262         err = ufshcd_get_bkops_status(hba, &curr_status);
5263         if (err) {
5264                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5265                                 __func__, err);
5266                 goto out;
5267         }
5268
5269         /*
5270          * We are seeing that some devices are raising the urgent bkops
5271          * exception events even when BKOPS status doesn't indicate performance
5272          * impacted or critical. Handle these devices by determining their urgent
5273          * bkops status at runtime.
5274          */
5275         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5276                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5277                                 __func__, curr_status);
5278                 /* update the current status as the urgent bkops level */
5279                 hba->urgent_bkops_lvl = curr_status;
5280                 hba->is_urgent_bkops_lvl_checked = true;
5281         }
5282
5283 enable_auto_bkops:
5284         err = ufshcd_enable_auto_bkops(hba);
5285 out:
5286         if (err < 0)
5287                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5288                                 __func__, err);
5289 }
5290
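/**
 * ufshcd_wb_ctrl - enable/disable WriteBooster on the device
 * @hba: per-adapter instance
 * @enable: true to set the WriteBooster enable flag (QUERY_FLAG_IDN_WB_EN),
 *          false to clear it
 *
 * Does nothing if WriteBooster is not allowed on this host or if the flag
 * is already in the requested state.
 */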
5291 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5292 {
5293         int ret;
5294         u8 index;
5295         enum query_opcode opcode;
5296
5297         if (!ufshcd_is_wb_allowed(hba))
5298                 return 0;
5299
5300         if (!(enable ^ hba->wb_enabled))
5301                 return 0;
5302         if (enable)
5303                 opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5304         else
5305                 opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5306
5307         index = ufshcd_wb_get_query_index(hba);
5308         ret = ufshcd_query_flag_retry(hba, opcode,
5309                                       QUERY_FLAG_IDN_WB_EN, index, NULL);
5310         if (ret) {
5311                 dev_err(hba->dev, "%s write booster %s failed %d\n",
5312                         __func__, enable ? "enable" : "disable", ret);
5313                 return ret;
5314         }
5315
5316         hba->wb_enabled = enable;
5317         dev_dbg(hba->dev, "%s write booster %s %d\n",
5318                         __func__, enable ? "enable" : "disable", ret);
5319
5320         return ret;
5321 }
5322
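/*
 * Set or clear the flag that lets the device flush its WriteBooster buffer
 * while the link is in hibern8 (QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8).
 */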
5323 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5324 {
5325         int val;
5326         u8 index;
5327
5328         if (set)
5329                 val =  UPIU_QUERY_OPCODE_SET_FLAG;
5330         else
5331                 val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5332
5333         index = ufshcd_wb_get_query_index(hba);
5334         return ufshcd_query_flag_retry(hba, val,
5335                                 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5336                                 index, NULL);
5337 }
5338
5339 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5340 {
5341         if (enable)
5342                 ufshcd_wb_buf_flush_enable(hba);
5343         else
5344                 ufshcd_wb_buf_flush_disable(hba);
5345
5346 }
5347
5348 static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5349 {
5350         int ret;
5351         u8 index;
5352
5353         if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5354                 return 0;
5355
5356         index = ufshcd_wb_get_query_index(hba);
5357         ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5358                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5359                                       index, NULL);
5360         if (ret)
5361                 dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5362                         __func__, ret);
5363         else
5364                 hba->wb_buf_flush_enabled = true;
5365
5366         dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5367         return ret;
5368 }
5369
5370 static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5371 {
5372         int ret;
5373         u8 index;
5374
5375         if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5376                 return 0;
5377
5378         index = ufshcd_wb_get_query_index(hba);
5379         ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5380                                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5381                                       index, NULL);
5382         if (ret) {
5383                 dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5384                          __func__, ret);
5385         } else {
5386                 hba->wb_buf_flush_enabled = false;
5387                 dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5388         }
5389
5390         return ret;
5391 }
5392
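/*
 * With the "preserve user-space" WriteBooster configuration, keep VCC on
 * only when the current buffer still holds data and the available buffer
 * has dropped below the flush threshold.
 */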
5393 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5394                                                 u32 avail_buf)
5395 {
5396         u32 cur_buf;
5397         int ret;
5398         u8 index;
5399
5400         index = ufshcd_wb_get_query_index(hba);
5401         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5402                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5403                                               index, 0, &cur_buf);
5404         if (ret) {
5405                 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5406                         __func__, ret);
5407                 return false;
5408         }
5409
5410         if (!cur_buf) {
5411                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5412                          cur_buf);
5413                 return false;
5414         }
5415         /* Keep flushing while the available buffer is below the flush threshold */
5416         if (avail_buf < hba->vps->wb_flush_threshold)
5417                 return true;
5418
5419         return false;
5420 }
5421
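/*
 * Decide whether the WriteBooster buffer still needs flushing (and hence
 * whether VCC should be kept on) based on the available buffer size and,
 * when user-space is preserved, on the current buffer size as well.
 */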
5422 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5423 {
5424         int ret;
5425         u32 avail_buf;
5426         u8 index;
5427
5428         if (!ufshcd_is_wb_allowed(hba))
5429                 return false;
5430         /*
5431          * The ufs device needs the vcc to be ON to flush.
5432          * With user-space reduction enabled, it's enough to decide on flushing
5433          * by checking only the available buffer. The threshold
5434          * defined here is > 90% full.
5435          * With user-space preservation enabled, the current buffer
5436          * should be checked too, because the WB buffer size can shrink
5437          * as the disk fills up. This info is provided by the current
5438          * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5439          * keeping vcc on when the current buffer is empty.
5440          */
5441         index = ufshcd_wb_get_query_index(hba);
5442         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5443                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5444                                       index, 0, &avail_buf);
5445         if (ret) {
5446                 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5447                          __func__, ret);
5448                 return false;
5449         }
5450
5451         if (!hba->dev_info.b_presrv_uspc_en) {
5452                 if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5453                         return true;
5454                 return false;
5455         }
5456
5457         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5458 }
5459
5460 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5461 {
5462         struct ufs_hba *hba = container_of(to_delayed_work(work),
5463                                            struct ufs_hba,
5464                                            rpm_dev_flush_recheck_work);
5465         /*
5466          * To prevent unnecessary VCC power drain after device finishes
5467          * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5468          * after a certain delay to recheck the threshold by next runtime
5469          * suspend.
5470          */
5471         pm_runtime_get_sync(hba->dev);
5472         pm_runtime_put_sync(hba->dev);
5473 }
5474
5475 /**
5476  * ufshcd_exception_event_handler - handle exceptions raised by device
5477  * @work: pointer to work data
5478  *
5479  * Read bExceptionEventStatus attribute from the device and handle the
5480  * exception event accordingly.
5481  */
5482 static void ufshcd_exception_event_handler(struct work_struct *work)
5483 {
5484         struct ufs_hba *hba;
5485         int err;
5486         u32 status = 0;
5487         hba = container_of(work, struct ufs_hba, eeh_work);
5488
5489         pm_runtime_get_sync(hba->dev);
5490         ufshcd_scsi_block_requests(hba);
5491         err = ufshcd_get_ee_status(hba, &status);
5492         if (err) {
5493                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5494                                 __func__, err);
5495                 goto out;
5496         }
5497
5498         status &= hba->ee_ctrl_mask;
5499
5500         if (status & MASK_EE_URGENT_BKOPS)
5501                 ufshcd_bkops_exception_event_handler(hba);
5502
5503 out:
5504         ufshcd_scsi_unblock_requests(hba);
5505         /*
5506          * pm_runtime_get_noresume is called while scheduling
5507          * eeh_work to avoid suspend racing with exception work.
5508          * Hence decrement usage counter using pm_runtime_put_noidle
5509          * to allow suspend on completion of exception event handler.
5510          */
5511         pm_runtime_put_noidle(hba->dev);
5512         pm_runtime_put(hba->dev);
5513         return;
5514 }
5515
5516 /* Complete requests that have door-bell cleared */
5517 static void ufshcd_complete_requests(struct ufs_hba *hba)
5518 {
5519         ufshcd_transfer_req_compl(hba);
5520         ufshcd_tmc_handler(hba);
5521 }
5522
5523 /**
5524  * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5525  * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
5526  *                              is required to recover from the DL NAC errors.
5527  *
5528  * Returns true if error handling is required, false otherwise
5529  */
5530 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5531 {
5532         unsigned long flags;
5533         bool err_handling = true;
5534
5535         spin_lock_irqsave(hba->host->host_lock, flags);
5536         /*
5537          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5538          * device fatal error and/or DL NAC & REPLAY timeout errors.
5539          */
5540         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5541                 goto out;
5542
5543         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5544             ((hba->saved_err & UIC_ERROR) &&
5545              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5546                 goto out;
5547
5548         if ((hba->saved_err & UIC_ERROR) &&
5549             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5550                 int err;
5551                 /*
5552                  * wait for 50ms to see if we can get any other errors or not.
5553                  */
5554                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5555                 msleep(50);
5556                 spin_lock_irqsave(hba->host->host_lock, flags);
5557
5558                 /*
5559                  * now check if we have got any other severe errors besides
5560                  * the DL NAC error.
5561                  */
5562                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5563                     ((hba->saved_err & UIC_ERROR) &&
5564                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5565                         goto out;
5566
5567                 /*
5568                  * As DL NAC is the only error received so far, send out NOP
5569                  * command to confirm if link is still active or not.
5570                  *   - If we don't get any response then do error recovery.
5571                  *   - If we get response then clear the DL NAC error bit.
5572                  */
5573
5574                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5575                 err = ufshcd_verify_dev_init(hba);
5576                 spin_lock_irqsave(hba->host->host_lock, flags);
5577
5578                 if (err)
5579                         goto out;
5580
5581                 /* Link seems to be alive hence ignore the DL NAC errors */
5582                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5583                         hba->saved_err &= ~UIC_ERROR;
5584                 /* clear NAC error */
5585                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5586                 if (!hba->saved_uic_err) {
5587                         err_handling = false;
5588                         goto out;
5589                 }
5590         }
5591 out:
5592         spin_unlock_irqrestore(hba->host->host_lock, flags);
5593         return err_handling;
5594 }
5595
5596 /**
5597  * ufshcd_err_handler - handle UFS errors that require s/w attention
5598  * @work: pointer to work structure
5599  */
5600 static void ufshcd_err_handler(struct work_struct *work)
5601 {
5602         struct ufs_hba *hba;
5603         unsigned long flags;
5604         u32 err_xfer = 0;
5605         u32 err_tm = 0;
5606         int err = 0;
5607         int tag;
5608         bool needs_reset = false;
5609
5610         hba = container_of(work, struct ufs_hba, eh_work);
5611
5612         pm_runtime_get_sync(hba->dev);
5613         ufshcd_hold(hba, false);
5614
5615         spin_lock_irqsave(hba->host->host_lock, flags);
5616         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5617                 goto out;
5618
5619         hba->ufshcd_state = UFSHCD_STATE_RESET;
5620         ufshcd_set_eh_in_progress(hba);
5621
5622         /* Complete requests that have door-bell cleared by h/w */
5623         ufshcd_complete_requests(hba);
5624
5625         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5626                 bool ret;
5627
5628                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5629                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5630                 ret = ufshcd_quirk_dl_nac_errors(hba);
5631                 spin_lock_irqsave(hba->host->host_lock, flags);
5632                 if (!ret)
5633                         goto skip_err_handling;
5634         }
5635         if ((hba->saved_err & INT_FATAL_ERRORS) ||
5636             (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
5637             ((hba->saved_err & UIC_ERROR) &&
5638             (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5639                                    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5640                                    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5641                 needs_reset = true;
5642
5643         /*
5644          * if host reset is required then skip clearing the pending
5645          * transfers forcefully because they will get cleared during
5646          * host reset and restore
5647          */
5648         if (needs_reset)
5649                 goto skip_pending_xfer_clear;
5650
5651         /* release lock as clear command might sleep */
5652         spin_unlock_irqrestore(hba->host->host_lock, flags);
5653         /* Clear pending transfer requests */
5654         for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5655                 if (ufshcd_clear_cmd(hba, tag)) {
5656                         err_xfer = true;
5657                         goto lock_skip_pending_xfer_clear;
5658                 }
5659         }
5660
5661         /* Clear pending task management requests */
5662         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5663                 if (ufshcd_clear_tm_cmd(hba, tag)) {
5664                         err_tm = true;
5665                         goto lock_skip_pending_xfer_clear;
5666                 }
5667         }
5668
5669 lock_skip_pending_xfer_clear:
5670         spin_lock_irqsave(hba->host->host_lock, flags);
5671
5672         /* Complete the requests that are cleared by s/w */
5673         ufshcd_complete_requests(hba);
5674
5675         if (err_xfer || err_tm)
5676                 needs_reset = true;
5677
5678 skip_pending_xfer_clear:
5679         /* Fatal errors need reset */
5680         if (needs_reset) {
5681                 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5682
5683                 /*
5684                  * ufshcd_reset_and_restore() does the link reinitialization
5685                  * which will need atleast one empty doorbell slot to send the
5686                  * which will need at least one empty doorbell slot to send the
5687                  * If there is no slot empty at this moment then free up last
5688                  * slot forcefully.
5689                  */
5690                 if (hba->outstanding_reqs == max_doorbells)
5691                         __ufshcd_transfer_req_compl(hba,
5692                                                     (1UL << (hba->nutrs - 1)));
5693
5694                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5695                 err = ufshcd_reset_and_restore(hba);
5696                 spin_lock_irqsave(hba->host->host_lock, flags);
5697                 if (err) {
5698                         dev_err(hba->dev, "%s: reset and restore failed\n",
5699                                         __func__);
5700                         hba->ufshcd_state = UFSHCD_STATE_ERROR;
5701                 }
5702                 /*
5703                  * Inform scsi mid-layer that we did reset and allow it to handle
5704                  * Unit Attention properly.
5705                  */
5706                 scsi_report_bus_reset(hba->host, 0);
5707                 hba->saved_err = 0;
5708                 hba->saved_uic_err = 0;
5709         }
5710
5711 skip_err_handling:
5712         if (!needs_reset) {
5713                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5714                 if (hba->saved_err || hba->saved_uic_err)
5715                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5716                             __func__, hba->saved_err, hba->saved_uic_err);
5717         }
5718
5719         ufshcd_clear_eh_in_progress(hba);
5720
5721 out:
5722         spin_unlock_irqrestore(hba->host->host_lock, flags);
5723         ufshcd_scsi_unblock_requests(hba);
5724         ufshcd_release(hba);
5725         pm_runtime_put_sync(hba->dev);
5726 }
5727
5728 /**
5729  * ufshcd_update_uic_error - check and set fatal UIC error flags.
5730  * @hba: per-adapter instance
5731  *
5732  * Returns
5733  *  IRQ_HANDLED - If interrupt is valid
5734  *  IRQ_NONE    - If invalid interrupt
5735  */
5736 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
5737 {
5738         u32 reg;
5739         irqreturn_t retval = IRQ_NONE;
5740
5741         /* PHY layer lane error */
5742         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5743         /* Ignore LINERESET indication, as this is not an error */
5744         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5745             (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5746                 /*
5747                  * To know whether this error is fatal or not, DB timeout
5748                  * must be checked but this error is handled separately.
5749                  */
5750                 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5751                 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5752                 retval |= IRQ_HANDLED;
5753         }
5754
5755         /* PA_INIT_ERROR is fatal and needs UIC reset */
5756         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5757         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5758             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
5759                 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5760
5761                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5762                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5763                 else if (hba->dev_quirks &
5764                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5765                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5766                                 hba->uic_error |=
5767                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5768                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5769                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5770                 }
5771                 retval |= IRQ_HANDLED;
5772         }
5773
5774         /* UIC NL/TL/DME errors need software retry */
5775         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5776         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5777             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
5778                 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5779                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5780                 retval |= IRQ_HANDLED;
5781         }
5782
5783         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5784         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5785             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
5786                 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
5787                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5788                 retval |= IRQ_HANDLED;
5789         }
5790
5791         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5792         if ((reg & UIC_DME_ERROR) &&
5793             (reg & UIC_DME_ERROR_CODE_MASK)) {
5794                 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
5795                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5796                 retval |= IRQ_HANDLED;
5797         }
5798
5799         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5800                         __func__, hba->uic_error);
5801         return retval;
5802 }
5803
5804 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5805                                          u32 intr_mask)
5806 {
5807         if (!ufshcd_is_auto_hibern8_supported(hba) ||
5808             !ufshcd_is_auto_hibern8_enabled(hba))
5809                 return false;
5810
5811         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5812                 return false;
5813
5814         if (hba->active_uic_cmd &&
5815             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5816             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5817                 return false;
5818
5819         return true;
5820 }
5821
5822 /**
5823  * ufshcd_check_errors - Check for errors that need s/w attention
5824  * @hba: per-adapter instance
5825  *
5826  * Returns
5827  *  IRQ_HANDLED - If interrupt is valid
5828  *  IRQ_NONE    - If invalid interrupt
5829  */
5830 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
5831 {
5832         bool queue_eh_work = false;
5833         irqreturn_t retval = IRQ_NONE;
5834
5835         if (hba->errors & INT_FATAL_ERRORS) {
5836                 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
5837                 queue_eh_work = true;
5838         }
5839
5840         if (hba->errors & UIC_ERROR) {
5841                 hba->uic_error = 0;
5842                 retval = ufshcd_update_uic_error(hba);
5843                 if (hba->uic_error)
5844                         queue_eh_work = true;
5845         }
5846
5847         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5848                 dev_err(hba->dev,
5849                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5850                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5851                         "Enter" : "Exit",
5852                         hba->errors, ufshcd_get_upmcrs(hba));
5853                 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5854                                        hba->errors);
5855                 queue_eh_work = true;
5856         }
5857
5858         if (queue_eh_work) {
5859                 /*
5860                  * update the transfer error masks to sticky bits, let's do this
5861                  * irrespective of current ufshcd_state.
5862                  */
5863                 hba->saved_err |= hba->errors;
5864                 hba->saved_uic_err |= hba->uic_error;
5865
5866                 /* handle fatal errors only when link is functional */
5867                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5868                         /* block commands from scsi mid-layer */
5869                         ufshcd_scsi_block_requests(hba);
5870
5871                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5872
5873                         /* dump controller state before resetting */
5874                         if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5875                                 bool pr_prdt = !!(hba->saved_err &
5876                                                 SYSTEM_BUS_FATAL_ERROR);
5877
5878                                 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5879                                         __func__, hba->saved_err,
5880                                         hba->saved_uic_err);
5881
5882                                 ufshcd_print_host_regs(hba);
5883                                 ufshcd_print_pwr_info(hba);
5884                                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5885                                 ufshcd_print_trs(hba, hba->outstanding_reqs,
5886                                                         pr_prdt);
5887                         }
5888                         schedule_work(&hba->eh_work);
5889                 }
5890                 retval |= IRQ_HANDLED;
5891         }
5892         /*
5893          * if (!queue_eh_work) -
5894          * Other errors are either non-fatal where host recovers
5895          * itself without s/w intervention or errors that will be
5896          * handled by the SCSI core layer.
5897          */
5898         return retval;
5899 }
5900
5901 struct ctm_info {
5902         struct ufs_hba  *hba;
5903         unsigned long   pending;
5904         unsigned int    ncpl;
5905 };
5906
5907 static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5908 {
5909         struct ctm_info *const ci = priv;
5910         struct completion *c;
5911
5912         WARN_ON_ONCE(reserved);
5913         if (test_bit(req->tag, &ci->pending))
5914                 return true;
5915         ci->ncpl++;
5916         c = req->end_io_data;
5917         if (c)
5918                 complete(c);
5919         return true;
5920 }
5921
5922 /**
5923  * ufshcd_tmc_handler - handle task management function completion
5924  * @hba: per adapter instance
5925  *
5926  * Returns
5927  *  IRQ_HANDLED - If interrupt is valid
5928  *  IRQ_NONE    - If invalid interrupt
5929  */
5930 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
5931 {
5932         struct request_queue *q = hba->tmf_queue;
5933         struct ctm_info ci = {
5934                 .hba     = hba,
5935                 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5936         };
5937
5938         blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5939         return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
5940 }
5941
5942 /**
5943  * ufshcd_sl_intr - Interrupt service routine
5944  * @hba: per adapter instance
5945  * @intr_status: contains interrupts generated by the controller
5946  *
5947  * Returns
5948  *  IRQ_HANDLED - If interrupt is valid
5949  *  IRQ_NONE    - If invalid interrupt
5950  */
5951 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5952 {
5953         irqreturn_t retval = IRQ_NONE;
5954
5955         hba->errors = UFSHCD_ERROR_MASK & intr_status;
5956
5957         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5958                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5959
5960         if (hba->errors)
5961                 retval |= ufshcd_check_errors(hba);
5962
5963         if (intr_status & UFSHCD_UIC_MASK)
5964                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
5965
5966         if (intr_status & UTP_TASK_REQ_COMPL)
5967                 retval |= ufshcd_tmc_handler(hba);
5968
5969         if (intr_status & UTP_TRANSFER_REQ_COMPL)
5970                 retval |= ufshcd_transfer_req_compl(hba);
5971
5972         return retval;
5973 }
5974
5975 /**
5976  * ufshcd_intr - Main interrupt service routine
5977  * @irq: irq number
5978  * @__hba: pointer to adapter instance
5979  *
5980  * Returns
5981  *  IRQ_HANDLED - If interrupt is valid
5982  *  IRQ_NONE    - If invalid interrupt
5983  */
5984 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5985 {
5986         u32 intr_status, enabled_intr_status;
5987         irqreturn_t retval = IRQ_NONE;
5988         struct ufs_hba *hba = __hba;
5989         int retries = hba->nutrs;
5990
5991         spin_lock(hba->host->host_lock);
5992         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5993
5994         /*
5995          * There can be at most hba->nutrs requests in flight, and in the
5996          * worst case they complete one by one after the interrupt status is
5997          * read. Handle them by re-checking the interrupt status in a loop
5998          * until all requests have been processed before returning.
5999          */
6000         do {
6001                 enabled_intr_status =
6002                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6003                 if (intr_status)
6004                         ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6005                 if (enabled_intr_status)
6006                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6007
6008                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6009         } while (intr_status && --retries);
6010
6011         if (retval == IRQ_NONE) {
6012                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
6013                                         __func__, intr_status);
6014                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6015         }
6016
6017         spin_unlock(hba->host->host_lock);
6018         return retval;
6019 }
6020
6021 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6022 {
6023         int err = 0;
6024         u32 mask = 1 << tag;
6025         unsigned long flags;
6026
6027         if (!test_bit(tag, &hba->outstanding_tasks))
6028                 goto out;
6029
6030         spin_lock_irqsave(hba->host->host_lock, flags);
6031         ufshcd_utmrl_clear(hba, tag);
6032         spin_unlock_irqrestore(hba->host->host_lock, flags);
6033
6034         /* poll for max. 1 sec to clear door bell register by h/w */
6035         err = ufshcd_wait_for_register(hba,
6036                         REG_UTP_TASK_REQ_DOOR_BELL,
6037                         mask, 0, 1000, 1000);
6038 out:
6039         return err;
6040 }
6041
6042 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6043                 struct utp_task_req_desc *treq, u8 tm_function)
6044 {
6045         struct request_queue *q = hba->tmf_queue;
6046         struct Scsi_Host *host = hba->host;
6047         DECLARE_COMPLETION_ONSTACK(wait);
6048         struct request *req;
6049         unsigned long flags;
6050         int free_slot, task_tag, err;
6051
6052         /*
6053          * Get a free slot; blk_get_request() may sleep if no slot is
6054          * available, but since every outstanding TM command is bounded by
6055          * %TM_CMD_TIMEOUT the wait here is bounded as well.
6056          */
6057         req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
6058         req->end_io_data = &wait;
6059         free_slot = req->tag;
6060         WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
6061         ufshcd_hold(hba, false);
6062
6063         spin_lock_irqsave(host->host_lock, flags);
6064         task_tag = hba->nutrs + free_slot;
6065
6066         treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6067
6068         memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
6069         ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
6070
6071         /* send command to the controller */
6072         __set_bit(free_slot, &hba->outstanding_tasks);
6073
6074         /* Make sure descriptors are ready before ringing the task doorbell */
6075         wmb();
6076
6077         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
6078         /* Make sure that doorbell is committed immediately */
6079         wmb();
6080
6081         spin_unlock_irqrestore(host->host_lock, flags);
6082
6083         ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6084
6085         /* wait until the task management command is completed */
6086         err = wait_for_completion_io_timeout(&wait,
6087                         msecs_to_jiffies(TM_CMD_TIMEOUT));
6088         if (!err) {
6089                 /*
6090                  * Make sure that ufshcd_compl_tm() does not trigger a
6091                  * use-after-free.
6092                  */
6093                 req->end_io_data = NULL;
6094                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6095                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6096                                 __func__, tm_function);
6097                 if (ufshcd_clear_tm_cmd(hba, free_slot))
6098                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6099                                         __func__, free_slot);
6100                 err = -ETIMEDOUT;
6101         } else {
6102                 err = 0;
6103                 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
6104
6105                 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6106         }
6107
6108         spin_lock_irqsave(hba->host->host_lock, flags);
6109         __clear_bit(free_slot, &hba->outstanding_tasks);
6110         spin_unlock_irqrestore(hba->host->host_lock, flags);
6111
6112         blk_put_request(req);
6113
6114         ufshcd_release(hba);
6115         return err;
6116 }
6117
6118 /**
6119  * ufshcd_issue_tm_cmd - issues task management commands to controller
6120  * @hba: per adapter instance
6121  * @lun_id: LUN ID to which TM command is sent
6122  * @task_id: task ID to which the TM command is applicable
6123  * @tm_function: task management function opcode
6124  * @tm_response: task management service response return value
6125  *
6126  * Returns non-zero value on error, zero on success.
6127  */
6128 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
6129                 u8 tm_function, u8 *tm_response)
6130 {
6131         struct utp_task_req_desc treq = { { 0 }, };
6132         int ocs_value, err;
6133
6134         /* Configure task request descriptor */
6135         treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6136         treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6137
6138         /* Configure task request UPIU */
6139         treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6140                                   cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6141         treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6142
6143         /*
6144          * The host shall provide the same value for LUN field in the basic
6145          * header and for Input Parameter.
6146          */
6147         treq.input_param1 = cpu_to_be32(lun_id);
6148         treq.input_param2 = cpu_to_be32(task_id);
6149
6150         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6151         if (err == -ETIMEDOUT)
6152                 return err;
6153
6154         ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6155         if (ocs_value != OCS_SUCCESS)
6156                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6157                                 __func__, ocs_value);
6158         else if (tm_response)
6159                 *tm_response = be32_to_cpu(treq.output_param1) &
6160                                 MASK_TM_SERVICE_RESP;
6161         return err;
6162 }
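
/*
 * Example usage (a minimal sketch, mirroring ufshcd_eh_device_reset_handler()
 * further below): issue a logical unit reset and check the TM service
 * response returned by the device.
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		... treat the reset as failed; resp holds the service response
 */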
6163
6164 /**
6165  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6166  * @hba:        per-adapter instance
6167  * @req_upiu:   upiu request
6168  * @rsp_upiu:   upiu reply
6169  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6170  * @buff_len:   descriptor size, 0 if NA
6171  * @cmd_type:   specifies the type (NOP, Query...)
6172  * @desc_op:    descriptor operation
6173  *
6174  * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6175  * Therefore, they "ride" the device management infrastructure: they use its
6176  * tag and task work queues.
6177  *
6178  * Since there is only one available tag for device management commands,
6179  * the caller is expected to hold the hba->dev_cmd.lock mutex.
6180  */
6181 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6182                                         struct utp_upiu_req *req_upiu,
6183                                         struct utp_upiu_req *rsp_upiu,
6184                                         u8 *desc_buff, int *buff_len,
6185                                         enum dev_cmd_type cmd_type,
6186                                         enum query_opcode desc_op)
6187 {
6188         struct request_queue *q = hba->cmd_queue;
6189         struct request *req;
6190         struct ufshcd_lrb *lrbp;
6191         int err = 0;
6192         int tag;
6193         struct completion wait;
6194         unsigned long flags;
6195         u32 upiu_flags;
6196
6197         down_read(&hba->clk_scaling_lock);
6198
6199         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6200         if (IS_ERR(req)) {
6201                 err = PTR_ERR(req);
6202                 goto out_unlock;
6203         }
6204         tag = req->tag;
6205         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
6206
6207         init_completion(&wait);
6208         lrbp = &hba->lrb[tag];
6209         WARN_ON(lrbp->cmd);
6210
6211         lrbp->cmd = NULL;
6212         lrbp->sense_bufflen = 0;
6213         lrbp->sense_buffer = NULL;
6214         lrbp->task_tag = tag;
6215         lrbp->lun = 0;
6216         lrbp->intr_cmd = true;
6217         hba->dev_cmd.type = cmd_type;
6218
6219         switch (hba->ufs_version) {
6220         case UFSHCI_VERSION_10:
6221         case UFSHCI_VERSION_11:
6222                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6223                 break;
6224         default:
6225                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6226                 break;
6227         }
6228
6229         /* update the task tag in the request upiu */
6230         req_upiu->header.dword_0 |= cpu_to_be32(tag);
6231
6232         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6233
6234         /* just copy the upiu request as it is */
6235         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6236         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6237                 /* The Data Segment Area is optional depending upon the query
6238                  * function value. For WRITE DESCRIPTOR, the data segment
6239                  * follows right after the TSF (transaction specific fields).
6240                  */
6241                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6242                 *buff_len = 0;
6243         }
6244
6245         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
6246
6247         hba->dev_cmd.complete = &wait;
6248
6249         /* Make sure descriptors are ready before ringing the doorbell */
6250         wmb();
6251         spin_lock_irqsave(hba->host->host_lock, flags);
6252         ufshcd_send_command(hba, tag);
6253         spin_unlock_irqrestore(hba->host->host_lock, flags);
6254
6255         /*
6256          * ignore the returning value here - ufshcd_check_query_response is
6257          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6258          * read the response directly ignoring all errors.
6259          */
6260         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6261
6262         /* just copy the upiu response as it is */
6263         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6264         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6265                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6266                 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6267                                MASK_QUERY_DATA_SEG_LEN;
6268
6269                 if (*buff_len >= resp_len) {
6270                         memcpy(desc_buff, descp, resp_len);
6271                         *buff_len = resp_len;
6272                 } else {
6273                         dev_warn(hba->dev,
6274                                  "%s: rsp size %d is bigger than buffer size %d",
6275                                  __func__, resp_len, *buff_len);
6276                         *buff_len = 0;
6277                         err = -EINVAL;
6278                 }
6279         }
6280
6281         blk_put_request(req);
6282 out_unlock:
6283         up_read(&hba->clk_scaling_lock);
6284         return err;
6285 }
6286
6287 /**
6288  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6289  * @hba:        per-adapter instance
6290  * @req_upiu:   upiu request
6291  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
6292  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
6293  * @desc_buff:  pointer to descriptor buffer, NULL if NA
6294  * @buff_len:   descriptor size, 0 if NA
6295  * @desc_op:    descriptor operation
6296  *
6297  * Supports UTP Transfer requests (nop and query), and UTP Task
6298  * Management requests.
6299  * It is up to the caller to fill the upiu content properly, as it will
6300  * be copied without any further input validation.
6301  */
6302 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6303                              struct utp_upiu_req *req_upiu,
6304                              struct utp_upiu_req *rsp_upiu,
6305                              int msgcode,
6306                              u8 *desc_buff, int *buff_len,
6307                              enum query_opcode desc_op)
6308 {
6309         int err;
6310         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6311         struct utp_task_req_desc treq = { { 0 }, };
6312         int ocs_value;
6313         u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6314
6315         switch (msgcode) {
6316         case UPIU_TRANSACTION_NOP_OUT:
6317                 cmd_type = DEV_CMD_TYPE_NOP;
6318                 /* fall through */
6319         case UPIU_TRANSACTION_QUERY_REQ:
6320                 ufshcd_hold(hba, false);
6321                 mutex_lock(&hba->dev_cmd.lock);
6322                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6323                                                    desc_buff, buff_len,
6324                                                    cmd_type, desc_op);
6325                 mutex_unlock(&hba->dev_cmd.lock);
6326                 ufshcd_release(hba);
6327
6328                 break;
6329         case UPIU_TRANSACTION_TASK_REQ:
6330                 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6331                 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6332
6333                 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6334
6335                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6336                 if (err == -ETIMEDOUT)
6337                         break;
6338
6339                 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6340                 if (ocs_value != OCS_SUCCESS) {
6341                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6342                                 ocs_value);
6343                         break;
6344                 }
6345
6346                 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6347
6348                 break;
6349         default:
6350                 err = -EINVAL;
6351
6352                 break;
6353         }
6354
6355         return err;
6356 }
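
/*
 * Example usage (an illustrative sketch only; ufs_bsg is the in-tree caller):
 * forward a raw query request UPIU prepared by user space and collect the
 * response UPIU. The names req_upiu/rsp_upiu/desc_len below are hypothetical.
 *
 *	struct utp_upiu_req req_upiu, rsp_upiu;
 *	int desc_len = 0;
 *	int err;
 *
 *	err = ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
 *				       UPIU_TRANSACTION_QUERY_REQ,
 *				       NULL, &desc_len,
 *				       UPIU_QUERY_OPCODE_NOP);
 *
 * No validation is applied to the UPIU content; the caller owns correctness.
 */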
6357
6358 /**
6359  * ufshcd_eh_device_reset_handler - device reset handler registered to
6360  *                                    scsi layer.
6361  * @cmd: SCSI command pointer
6362  *
6363  * Returns SUCCESS/FAILED
6364  */
6365 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
6366 {
6367         struct Scsi_Host *host;
6368         struct ufs_hba *hba;
6369         unsigned int tag;
6370         u32 pos;
6371         int err;
6372         u8 resp = 0xF;
6373         struct ufshcd_lrb *lrbp;
6374         unsigned long flags;
6375
6376         host = cmd->device->host;
6377         hba = shost_priv(host);
6378         tag = cmd->request->tag;
6379
6380         lrbp = &hba->lrb[tag];
6381         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6382         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6383                 if (!err)
6384                         err = resp;
6385                 goto out;
6386         }
6387
6388         /* clear the commands that were pending for corresponding LUN */
6389         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6390                 if (hba->lrb[pos].lun == lrbp->lun) {
6391                         err = ufshcd_clear_cmd(hba, pos);
6392                         if (err)
6393                                 break;
6394                 }
6395         }
6396         spin_lock_irqsave(host->host_lock, flags);
6397         ufshcd_transfer_req_compl(hba);
6398         spin_unlock_irqrestore(host->host_lock, flags);
6399
6400 out:
6401         hba->req_abort_count = 0;
6402         ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
6403         if (!err) {
6404                 err = SUCCESS;
6405         } else {
6406                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6407                 err = FAILED;
6408         }
6409         return err;
6410 }
6411
6412 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6413 {
6414         struct ufshcd_lrb *lrbp;
6415         int tag;
6416
6417         for_each_set_bit(tag, &bitmap, hba->nutrs) {
6418                 lrbp = &hba->lrb[tag];
6419                 lrbp->req_abort_skip = true;
6420         }
6421 }
6422
6423 /**
6424  * ufshcd_abort - abort a specific command
6425  * @cmd: SCSI command pointer
6426  *
6427  * Abort the pending command in the device by sending the UFS_ABORT_TASK task
6428  * management command, and in the host controller by clearing the door-bell
6429  * register. There can be a race between the controller sending the command to
6430  * the device and the abort being issued. To avoid that, first issue
6431  * UFS_QUERY_TASK to check if the command was really issued, and only then abort it.
6432  *
6433  * Returns SUCCESS/FAILED
6434  */
6435 static int ufshcd_abort(struct scsi_cmnd *cmd)
6436 {
6437         struct Scsi_Host *host;
6438         struct ufs_hba *hba;
6439         unsigned long flags;
6440         unsigned int tag;
6441         int err = 0;
6442         int poll_cnt;
6443         u8 resp = 0xF;
6444         struct ufshcd_lrb *lrbp;
6445         u32 reg;
6446
6447         host = cmd->device->host;
6448         hba = shost_priv(host);
6449         tag = cmd->request->tag;
6450         lrbp = &hba->lrb[tag];
6451         if (!ufshcd_valid_tag(hba, tag)) {
6452                 dev_err(hba->dev,
6453                         "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6454                         __func__, tag, cmd, cmd->request);
6455                 BUG();
6456         }
6457
6458         /*
6459          * Task abort to the device W-LUN is illegal. When this command
6460          * fails, due to the spec violation, the SCSI error handler's next
6461          * step will be to send a LU reset which, again, is a spec violation.
6462          * To avoid these unnecessary/illegal steps we skip to the last error
6463          * handling stage: reset and restore.
6464          */
6465         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6466                 return ufshcd_eh_host_reset_handler(cmd);
6467
6468         ufshcd_hold(hba, false);
6469         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6470         /* If command is already aborted/completed, return SUCCESS */
6471         if (!(test_bit(tag, &hba->outstanding_reqs))) {
6472                 dev_err(hba->dev,
6473                         "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6474                         __func__, tag, hba->outstanding_reqs, reg);
6475                 goto out;
6476         }
6477
6478         if (!(reg & (1 << tag))) {
6479                 dev_err(hba->dev,
6480                 "%s: cmd was completed, but without a notifying intr, tag = %d",
6481                 __func__, tag);
6482         }
6483
6484         /* Print Transfer Request of aborted task */
6485         dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6486
6487         /*
6488          * Print detailed info about aborted request.
6489          * As more than one request might get aborted at the same time,
6490          * print full information only for the first aborted request in order
6491          * to reduce repeated printouts. For other aborted requests only print
6492          * basic details.
6493          */
6494         scsi_print_command(hba->lrb[tag].cmd);
6495         if (!hba->req_abort_count) {
6496                 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6497                 ufshcd_print_host_regs(hba);
6498                 ufshcd_print_host_state(hba);
6499                 ufshcd_print_pwr_info(hba);
6500                 ufshcd_print_trs(hba, 1 << tag, true);
6501         } else {
6502                 ufshcd_print_trs(hba, 1 << tag, false);
6503         }
6504         hba->req_abort_count++;
6505
6506         /* Skip task abort in case previous aborts failed and report failure */
6507         if (lrbp->req_abort_skip) {
6508                 err = -EIO;
6509                 goto out;
6510         }
6511
6512         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6513                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6514                                 UFS_QUERY_TASK, &resp);
6515                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6516                         /* cmd pending in the device */
6517                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6518                                 __func__, tag);
6519                         break;
6520                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6521                         /*
6522                          * cmd not pending in the device, check if it is
6523                          * in transition.
6524                          */
6525                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6526                                 __func__, tag);
6527                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6528                         if (reg & (1 << tag)) {
6529                                 /* sleep for max. 200us to stabilize */
6530                                 usleep_range(100, 200);
6531                                 continue;
6532                         }
6533                         /* command completed already */
6534                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6535                                 __func__, tag);
6536                         goto out;
6537                 } else {
6538                         dev_err(hba->dev,
6539                                 "%s: no response from device. tag = %d, err %d\n",
6540                                 __func__, tag, err);
6541                         if (!err)
6542                                 err = resp; /* service response error */
6543                         goto out;
6544                 }
6545         }
6546
6547         if (!poll_cnt) {
6548                 err = -EBUSY;
6549                 goto out;
6550         }
6551
6552         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6553                         UFS_ABORT_TASK, &resp);
6554         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6555                 if (!err) {
6556                         err = resp; /* service response error */
6557                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6558                                 __func__, tag, err);
6559                 }
6560                 goto out;
6561         }
6562
6563         err = ufshcd_clear_cmd(hba, tag);
6564         if (err) {
6565                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6566                         __func__, tag, err);
6567                 goto out;
6568         }
6569
6570         scsi_dma_unmap(cmd);
6571
6572         spin_lock_irqsave(host->host_lock, flags);
6573         ufshcd_outstanding_req_clear(hba, tag);
6574         hba->lrb[tag].cmd = NULL;
6575         spin_unlock_irqrestore(host->host_lock, flags);
6576
6577 out:
6578         if (!err) {
6579                 err = SUCCESS;
6580         } else {
6581                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6582                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6583                 err = FAILED;
6584         }
6585
6586         /*
6587          * This ufshcd_release() corresponds to the original scsi cmd that got
6588          * aborted here (as we won't get any IRQ for it).
6589          */
6590         ufshcd_release(hba);
6591         return err;
6592 }
6593
6594 /**
6595  * ufshcd_host_reset_and_restore - reset and restore host controller
6596  * @hba: per-adapter instance
6597  *
6598  * Note that host controller reset may issue DME_RESET to the local
6599  * and remote (device) UniPro stacks, in which case the attributes are
6600  * reset to their default state.
6601  *
6602  * Returns zero on success, non-zero on failure
6603  */
6604 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6605 {
6606         int err;
6607         unsigned long flags;
6608
6609         /*
6610          * Stop the host controller and complete the requests
6611          * cleared by h/w
6612          */
6613         ufshcd_hba_stop(hba);
6614
6615         spin_lock_irqsave(hba->host->host_lock, flags);
6616         hba->silence_err_logs = true;
6617         ufshcd_complete_requests(hba);
6618         hba->silence_err_logs = false;
6619         spin_unlock_irqrestore(hba->host->host_lock, flags);
6620
6621         /* scale up clocks to max frequency before full reinitialization */
6622         ufshcd_set_clk_freq(hba, true);
6623
6624         err = ufshcd_hba_enable(hba);
6625         if (err)
6626                 goto out;
6627
6628         /* Establish the link again and restore the device */
6629         err = ufshcd_probe_hba(hba, false);
6630
6631         if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6632                 err = -EIO;
6633 out:
6634         if (err)
6635                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6636         ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6637         return err;
6638 }
6639
6640 /**
6641  * ufshcd_reset_and_restore - reset and re-initialize host/device
6642  * @hba: per-adapter instance
6643  *
6644  * Reset and recover device, host and re-establish link. This
6645  * is helpful to recover the communication in fatal error conditions.
6646  *
6647  * Returns zero on success, non-zero on failure
6648  */
6649 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6650 {
6651         int err = 0;
6652         int retries = MAX_HOST_RESET_RETRIES;
6653
6654         do {
6655                 /* Reset the attached device */
6656                 ufshcd_vops_device_reset(hba);
6657
6658                 err = ufshcd_host_reset_and_restore(hba);
6659         } while (err && --retries);
6660
6661         return err;
6662 }
6663
6664 /**
6665  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6666  * @cmd: SCSI command pointer
6667  *
6668  * Returns SUCCESS/FAILED
6669  */
6670 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6671 {
6672         int err;
6673         unsigned long flags;
6674         struct ufs_hba *hba;
6675
6676         hba = shost_priv(cmd->device->host);
6677
6678         ufshcd_hold(hba, false);
6679         /*
6680          * Check if there is any race with fatal error handling.
6681          * If so, wait for it to complete. Even though fatal error
6682          * handling does reset and restore in some cases, don't assume
6683          * anything out of it. We are just avoiding race here.
6684          */
6685         do {
6686                 spin_lock_irqsave(hba->host->host_lock, flags);
6687                 if (!(work_pending(&hba->eh_work) ||
6688                             hba->ufshcd_state == UFSHCD_STATE_RESET ||
6689                             hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6690                         break;
6691                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6692                 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6693                 flush_work(&hba->eh_work);
6694         } while (1);
6695
6696         hba->ufshcd_state = UFSHCD_STATE_RESET;
6697         ufshcd_set_eh_in_progress(hba);
6698         spin_unlock_irqrestore(hba->host->host_lock, flags);
6699
6700         err = ufshcd_reset_and_restore(hba);
6701
6702         spin_lock_irqsave(hba->host->host_lock, flags);
6703         if (!err) {
6704                 err = SUCCESS;
6705                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6706         } else {
6707                 err = FAILED;
6708                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6709         }
6710         ufshcd_clear_eh_in_progress(hba);
6711         spin_unlock_irqrestore(hba->host->host_lock, flags);
6712
6713         ufshcd_release(hba);
6714         return err;
6715 }
6716
6717 /**
6718  * ufshcd_get_max_icc_level - calculate the ICC level
6719  * @sup_curr_uA: max. current supported by the regulator
6720  * @start_scan: row at the desc table to start scan from
6721  * @buff: power descriptor buffer
6722  *
6723  * Returns calculated max ICC level for specific regulator
6724  */
6725 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6726 {
6727         int i;
6728         int curr_uA;
6729         u16 data;
6730         u16 unit;
6731
6732         for (i = start_scan; i >= 0; i--) {
6733                 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6734                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6735                                                 ATTR_ICC_LVL_UNIT_OFFSET;
6736                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6737                 switch (unit) {
6738                 case UFSHCD_NANO_AMP:
6739                         curr_uA = curr_uA / 1000;
6740                         break;
6741                 case UFSHCD_MILI_AMP:
6742                         curr_uA = curr_uA * 1000;
6743                         break;
6744                 case UFSHCD_AMP:
6745                         curr_uA = curr_uA * 1000 * 1000;
6746                         break;
6747                 case UFSHCD_MICRO_AMP:
6748                 default:
6749                         break;
6750                 }
6751                 if (sup_curr_uA >= curr_uA)
6752                         break;
6753         }
6754         if (i < 0) {
6755                 i = 0;
6756                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6757         }
6758
6759         return (u32)i;
6760 }
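
/*
 * Worked example (illustrative): if a power descriptor entry decodes to
 * unit == UFSHCD_MILI_AMP and value == 100, curr_uA becomes 100 * 1000 uA.
 * Scanning from start_scan downwards, the first level whose consumption
 * fits within sup_curr_uA is returned as the ICC level.
 */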
6761
6762 /**
6763  * ufshcd_find_max_sup_active_icc_level - calculate the max supported
6764  *      active ICC level
6765  * @hba: per-adapter instance
6766  * @desc_buf: power descriptor buffer to extract ICC levels from.
6767  * @len: length of desc_buf
6768  *
6769  * Returns calculated ICC level; 0 in case the regulators are not initialized.
6770  */
6771 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6772                                                         u8 *desc_buf, int len)
6773 {
6774         u32 icc_level = 0;
6775
6776         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6777                                                 !hba->vreg_info.vccq2) {
6778                 dev_err(hba->dev,
6779                         "%s: Regulator capability was not set, actvIccLevel=%d",
6780                                                         __func__, icc_level);
6781                 goto out;
6782         }
6783
6784         if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6785                 icc_level = ufshcd_get_max_icc_level(
6786                                 hba->vreg_info.vcc->max_uA,
6787                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6788                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6789
6790         if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6791                 icc_level = ufshcd_get_max_icc_level(
6792                                 hba->vreg_info.vccq->max_uA,
6793                                 icc_level,
6794                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6795
6796         if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6797                 icc_level = ufshcd_get_max_icc_level(
6798                                 hba->vreg_info.vccq2->max_uA,
6799                                 icc_level,
6800                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6801 out:
6802         return icc_level;
6803 }
6804
6805 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
6806 {
6807         int ret;
6808         int buff_len = hba->desc_size.pwr_desc;
6809         u8 *desc_buf;
6810         u32 icc_level;
6811
6812         desc_buf = kmalloc(buff_len, GFP_KERNEL);
6813         if (!desc_buf)
6814                 return;
6815
6816         ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0,
6817                         desc_buf, buff_len);
6818         if (ret) {
6819                 dev_err(hba->dev,
6820                         "%s: Failed reading power descriptor.len = %d ret = %d",
6821                         __func__, buff_len, ret);
6822                 goto out;
6823         }
6824
6825         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
6826                                                          buff_len);
6827         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
6828
6829         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6830                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
6831
6832         if (ret)
6833                 dev_err(hba->dev,
6834                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6835                         __func__, icc_level, ret);
6836
6837 out:
6838         kfree(desc_buf);
6839 }
6840
6841 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
6842 {
6843         scsi_autopm_get_device(sdev);
6844         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
6845         if (sdev->rpm_autosuspend)
6846                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
6847                                                  RPM_AUTOSUSPEND_DELAY_MS);
6848         scsi_autopm_put_device(sdev);
6849 }
6850
6851 /**
6852  * ufshcd_scsi_add_wlus - Adds required W-LUs
6853  * @hba: per-adapter instance
6854  *
6855  * UFS device specification requires the UFS devices to support 4 well known
6856  * logical units:
6857  *      "REPORT_LUNS" (address: 01h)
6858  *      "UFS Device" (address: 50h)
6859  *      "RPMB" (address: 44h)
6860  *      "BOOT" (address: 30h)
6861  * UFS device's power management needs to be controlled by "POWER CONDITION"
6862  * field of SSU (START STOP UNIT) command. But this "power condition" field
6863  * will take effect only when it is sent to the "UFS device" well known logical
6864  * unit, hence we require the scsi_device instance to represent this logical
6865  * unit in order for the UFS host driver to send the SSU command for power management.
6866  *
6867  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6868  * Block) LU so user space process can control this LU. User space may also
6869  * want to have access to BOOT LU.
6870  *
6871  * This function adds scsi device instances for each of the well known LUs
6872  * (except "REPORT LUNS" LU).
6873  *
6874  * Returns zero on success (all required W-LUs are added successfully),
6875  * non-zero error value on failure (if failed to add any of the required W-LU).
6876  */
6877 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6878 {
6879         int ret = 0;
6880         struct scsi_device *sdev_rpmb;
6881         struct scsi_device *sdev_boot;
6882
6883         hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6884                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6885         if (IS_ERR(hba->sdev_ufs_device)) {
6886                 ret = PTR_ERR(hba->sdev_ufs_device);
6887                 hba->sdev_ufs_device = NULL;
6888                 goto out;
6889         }
6890         ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
6891         scsi_device_put(hba->sdev_ufs_device);
6892
6893         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6894                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6895         if (IS_ERR(sdev_rpmb)) {
6896                 ret = PTR_ERR(sdev_rpmb);
6897                 goto remove_sdev_ufs_device;
6898         }
6899         ufshcd_blk_pm_runtime_init(sdev_rpmb);
6900         scsi_device_put(sdev_rpmb);
6901
6902         sdev_boot = __scsi_add_device(hba->host, 0, 0,
6903                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6904         if (IS_ERR(sdev_boot)) {
6905                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6906         } else {
6907                 ufshcd_blk_pm_runtime_init(sdev_boot);
6908                 scsi_device_put(sdev_boot);
6909         }
6910         goto out;
6911
6912 remove_sdev_ufs_device:
6913         scsi_remove_device(hba->sdev_ufs_device);
6914 out:
6915         return ret;
6916 }
6917
6918 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
6919 {
6920         u8 lun;
6921         u32 d_lu_wb_buf_alloc;
6922
6923         if (!ufshcd_is_wb_allowed(hba))
6924                 return;
6925
6926         if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
6927                 goto wb_disabled;
6928
6929         hba->dev_info.d_ext_ufs_feature_sup =
6930                 get_unaligned_be32(desc_buf +
6931                                    DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
6932
6933         if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
6934                 goto wb_disabled;
6935
6936         /*
6937          * WB may be supported but not configured while provisioning.
6938          * The spec says, in dedicated wb buffer mode,
6939          * a max of 1 lun would have wb buffer configured.
6940          * Now only shared buffer mode is supported.
6941          */
6942         hba->dev_info.b_wb_buffer_type =
6943                 desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
6944
6945         hba->dev_info.b_presrv_uspc_en =
6946                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
6947
6948         if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
6949                 hba->dev_info.d_wb_alloc_units =
6950                 get_unaligned_be32(desc_buf +
6951                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
6952                 if (!hba->dev_info.d_wb_alloc_units)
6953                         goto wb_disabled;
6954         } else {
6955                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
6956                         d_lu_wb_buf_alloc = 0;
6957                         ufshcd_read_unit_desc_param(hba,
6958                                         lun,
6959                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
6960                                         (u8 *)&d_lu_wb_buf_alloc,
6961                                         sizeof(d_lu_wb_buf_alloc));
6962                         if (d_lu_wb_buf_alloc) {
6963                                 hba->dev_info.wb_dedicated_lu = lun;
6964                                 break;
6965                         }
6966                 }
6967
6968                 if (!d_lu_wb_buf_alloc)
6969                         goto wb_disabled;
6970         }
6971         return;
6972
6973 wb_disabled:
6974         hba->caps &= ~UFSHCD_CAP_WB_EN;
6975 }
6976
6977 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
6978 {
6979         struct ufs_dev_fix *f;
6980         struct ufs_dev_info *dev_info = &hba->dev_info;
6981
6982         if (!fixups)
6983                 return;
6984
6985         for (f = fixups; f->quirk; f++) {
6986                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
6987                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
6988                      ((dev_info->model &&
6989                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
6990                       !strcmp(f->model, UFS_ANY_MODEL)))
6991                         hba->dev_quirks |= f->quirk;
6992         }
6993 }
6994 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
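
/*
 * Example (illustrative): a vendor host driver may pass its own fixup table,
 * typically built with the UFS_FIX()/END_FIX macros from ufs_quirks.h. The
 * table name below is hypothetical.
 *
 *	static struct ufs_dev_fix my_vendor_fixups[] = {
 *		UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 *			UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 *		END_FIX
 *	};
 *
 *	ufshcd_fixup_dev_quirks(hba, my_vendor_fixups);
 */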
6995
6996 static void ufs_fixup_device_setup(struct ufs_hba *hba)
6997 {
6998         /* fix by general quirk table */
6999         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7000
7001         /* allow vendors to fix quirks */
7002         ufshcd_vops_fixup_dev_quirks(hba);
7003 }
7004
7005 static int ufs_get_device_desc(struct ufs_hba *hba)
7006 {
7007         int err;
7008         size_t buff_len;
7009         u8 model_index;
7010         u8 *desc_buf;
7011         struct ufs_dev_info *dev_info = &hba->dev_info;
7012
7013         buff_len = max_t(size_t, hba->desc_size.dev_desc,
7014                          QUERY_DESC_MAX_SIZE + 1);
7015         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7016         if (!desc_buf) {
7017                 err = -ENOMEM;
7018                 goto out;
7019         }
7020
7021         err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf,
7022                         hba->desc_size.dev_desc);
7023         if (err) {
7024                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
7025                         __func__, err);
7026                 goto out;
7027         }
7028
7029         /*
7030          * getting vendor (manufacturerID) and Bank Index in big endian
7031          * format
7032          */
7033         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7034                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7035
7036         /* getting Specification Version in big endian format */
7037         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7038                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7039
7040         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
7041
7042         err = ufshcd_read_string_desc(hba, model_index,
7043                                       &dev_info->model, SD_ASCII_STD);
7044         if (err < 0) {
7045                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
7046                         __func__, err);
7047                 goto out;
7048         }
7049
7050         ufs_fixup_device_setup(hba);
7051
7052         /*
7053          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices, or for
7054          * UFS devices with the UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES quirk enabled
7055          */
7056         if (dev_info->wspecversion >= 0x310 ||
7057             dev_info->wspecversion == 0x220 ||
7058             (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
7059                 ufshcd_wb_probe(hba, desc_buf);
7060
7061         /*
7062          * ufshcd_read_string_desc returns size of the string
7063          * reset the error value
7064          */
7065         err = 0;
7066
7067 out:
7068         kfree(desc_buf);
7069         return err;
7070 }
7071
7072 static void ufs_put_device_desc(struct ufs_hba *hba)
7073 {
7074         struct ufs_dev_info *dev_info = &hba->dev_info;
7075
7076         kfree(dev_info->model);
7077         dev_info->model = NULL;
7078 }
7079
7080 /**
7081  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
7082  * @hba: per-adapter instance
7083  *
7084  * PA_TActivate parameter can be tuned manually if UniPro version is less than
7085  * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
7086  * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
7087  * the hibern8 exit latency.
7088  *
7089  * Returns zero on success, non-zero error value on failure.
7090  */
7091 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
7092 {
7093         int ret = 0;
7094         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
7095
7096         ret = ufshcd_dme_peer_get(hba,
7097                                   UIC_ARG_MIB_SEL(
7098                                         RX_MIN_ACTIVATETIME_CAPABILITY,
7099                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7100                                   &peer_rx_min_activatetime);
7101         if (ret)
7102                 goto out;
7103
7104         /* make sure proper unit conversion is applied */
7105         tuned_pa_tactivate =
7106                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
7107                  / PA_TACTIVATE_TIME_UNIT_US);
7108         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7109                              tuned_pa_tactivate);
7110
7111 out:
7112         return ret;
7113 }
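
/*
 * Worked example (illustrative, assuming RX_MIN_ACTIVATETIME_UNIT_US == 100
 * and PA_TACTIVATE_TIME_UNIT_US == 10): a peer RX_MIN_ACTIVATETIME_CAPABILITY
 * of 1 corresponds to 100 us, so PA_TACTIVATE is programmed to
 * (1 * 100) / 10 = 10 units.
 */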
7114
7115 /**
7116  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
7117  * @hba: per-adapter instance
7118  *
7119  * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
7120  * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
7121  * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
7122  * This optimal value can help reduce the hibern8 exit latency.
7123  *
7124  * Returns zero on success, non-zero error value on failure.
7125  */
7126 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
7127 {
7128         int ret = 0;
7129         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
7130         u32 max_hibern8_time, tuned_pa_hibern8time;
7131
7132         ret = ufshcd_dme_get(hba,
7133                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
7134                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
7135                                   &local_tx_hibern8_time_cap);
7136         if (ret)
7137                 goto out;
7138
7139         ret = ufshcd_dme_peer_get(hba,
7140                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
7141                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
7142                                   &peer_rx_hibern8_time_cap);
7143         if (ret)
7144                 goto out;
7145
7146         max_hibern8_time = max(local_tx_hibern8_time_cap,
7147                                peer_rx_hibern8_time_cap);
7148         /* make sure proper unit conversion is applied */
7149         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
7150                                 / PA_HIBERN8_TIME_UNIT_US);
7151         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
7152                              tuned_pa_hibern8time);
7153 out:
7154         return ret;
7155 }
7156
7157 /**
7158  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
7159  * less than device PA_TACTIVATE time.
7160  * @hba: per-adapter instance
7161  *
7162  * Some UFS devices require host PA_TACTIVATE to be lower than device
7163  * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
7164  * for such devices.
7165  *
7166  * Returns zero on success, non-zero error value on failure.
7167  */
7168 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
7169 {
7170         int ret = 0;
7171         u32 granularity, peer_granularity;
7172         u32 pa_tactivate, peer_pa_tactivate;
7173         u32 pa_tactivate_us, peer_pa_tactivate_us;
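        /* PA_GRANULARITY step sizes in microseconds, indexed by (granularity - 1) */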
7174         u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
7175
7176         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7177                                   &granularity);
7178         if (ret)
7179                 goto out;
7180
7181         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
7182                                   &peer_granularity);
7183         if (ret)
7184                 goto out;
7185
7186         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
7187             (granularity > PA_GRANULARITY_MAX_VAL)) {
7188                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d\n",
7189                         __func__, granularity);
7190                 return -EINVAL;
7191         }
7192
7193         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
7194             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
7195                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d\n",
7196                         __func__, peer_granularity);
7197                 return -EINVAL;
7198         }
7199
7200         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
7201         if (ret)
7202                 goto out;
7203
7204         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
7205                                   &peer_pa_tactivate);
7206         if (ret)
7207                 goto out;
7208
7209         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
7210         peer_pa_tactivate_us = peer_pa_tactivate *
7211                              gran_to_us_table[peer_granularity - 1];
7212
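        /*
         * If the host's PA_TACTIVATE (in us) exceeds the device's, raise the
         * device's PA_TACTIVATE, rounded up to the next unit of the device's
         * own granularity, so it is never shorter than the host's value.
         */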
7213         if (pa_tactivate_us > peer_pa_tactivate_us) {
7214                 u32 new_peer_pa_tactivate;
7215
7216                 new_peer_pa_tactivate = pa_tactivate_us /
7217                                       gran_to_us_table[peer_granularity - 1];
7218                 new_peer_pa_tactivate++;
7219                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
7220                                           new_peer_pa_tactivate);
7221         }
7222
7223 out:
7224         return ret;
7225 }
7226
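/*
 * Tune UniPro PA-layer parameters where the host requires it and apply any
 * vendor and device specific quirks once the link is up.
 */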
7227 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
7228 {
7229         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
7230                 ufshcd_tune_pa_tactivate(hba);
7231                 ufshcd_tune_pa_hibern8time(hba);
7232         }
7233
7234         ufshcd_vops_apply_dev_quirks(hba);
7235
7236         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
7237                 /* set 1ms timeout for PA_TACTIVATE */
7238                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
7239
7240         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
7241                 ufshcd_quirk_tune_host_pa_tactivate(hba);
7242 }
7243
7244 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
7245 {
7246         hba->ufs_stats.hibern8_exit_cnt = 0;
7247         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7248         hba->req_abort_count = 0;
7249 }
7250
7251 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
7252 {
7253         int err;
7254
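        /*
         * Query the actual length of each descriptor from the device and fall
         * back to the spec-defined default size if the query fails.
         */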
7255         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
7256                 &hba->desc_size.dev_desc);
7257         if (err)
7258                 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7259
7260         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
7261                 &hba->desc_size.pwr_desc);
7262         if (err)
7263                 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7264
7265         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
7266                 &hba->desc_size.interc_desc);
7267         if (err)
7268                 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7269
7270         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
7271                 &hba->desc_size.conf_desc);
7272         if (err)
7273                 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7274
7275         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
7276                 &hba->desc_size.unit_desc);
7277         if (err)
7278                 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
7279
7280         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7281                 &hba->desc_size.geom_desc);
7282         if (err)
7283                 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
7284
7285         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
7286                 &hba->desc_size.hlth_desc);
7287         if (err)
7288                 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
7289 }
7290
7291 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
7292 {
7293         int err;
7294         size_t buff_len;
7295         u8 *desc_buf;
7296
7297         buff_len = hba->desc_size.geom_desc;
7298         desc_buf = kmalloc(buff_len, GFP_KERNEL);
7299         if (!desc_buf) {
7300                 err = -ENOMEM;
7301                 goto out;
7302         }
7303
7304         err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
7305                         desc_buf, buff_len);
7306         if (err) {
7307                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7308                                 __func__, err);
7309                 goto out;
7310         }
7311
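        /*
         * bMaxNumberLU in the Geometry descriptor encodes the number of
         * supported logical units: 0 means 8 LUs, 1 means 32 LUs (per the
         * UFS Geometry descriptor definition).
         */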
7312         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7313                 hba->dev_info.max_lu_supported = 32;
7314         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7315                 hba->dev_info.max_lu_supported = 8;
7316
7317 out:
7318         kfree(desc_buf);
7319         return err;
7320 }
7321
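/*
 * Map of supported device reference clock rates (in Hz) to the corresponding
 * bRefClkFreq attribute encoding; the zero-frequency entry terminates the table.
 */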
7322 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7323         {19200000, REF_CLK_FREQ_19_2_MHZ},
7324         {26000000, REF_CLK_FREQ_26_MHZ},
7325         {38400000, REF_CLK_FREQ_38_4_MHZ},
7326         {52000000, REF_CLK_FREQ_52_MHZ},
7327         {0, REF_CLK_FREQ_INVAL},
7328 };
7329
7330 static enum ufs_ref_clk_freq
7331 ufs_get_bref_clk_from_hz(unsigned long freq)
7332 {
7333         int i;
7334
7335         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7336                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
7337                         return ufs_ref_clk_freqs[i].val;
7338
7339         return REF_CLK_FREQ_INVAL;
7340 }
7341
7342 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7343 {
7344         unsigned long freq;
7345
7346         freq = clk_get_rate(refclk);
7347
7348         hba->dev_ref_clk_freq =
7349                 ufs_get_bref_clk_from_hz(freq);
7350
7351         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7352                 dev_err(hba->dev,
7353                 "invalid ref_clk setting = %ld\n", freq);
7354 }
7355
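/*
 * Read the device's bRefClkFreq attribute and, if it does not already match
 * the host reference clock rate parsed from "ref_clk", write the new value.
 */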
7356 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7357 {
7358         int err;
7359         u32 ref_clk;
7360         u32 freq = hba->dev_ref_clk_freq;
7361
7362         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7363                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7364
7365         if (err) {
7366                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7367                         err);
7368                 goto out;
7369         }
7370
7371         if (ref_clk == freq)
7372                 goto out; /* nothing to update */
7373
7374         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7375                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7376
7377         if (err) {
7378                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7379                         ufs_ref_clk_freqs[freq].freq_hz);
7380                 goto out;
7381         }
7382
7383         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7384                         ufs_ref_clk_freqs[freq].freq_hz);
7385
7386 out:
7387         return err;
7388 }
7389
7390 static int ufshcd_device_params_init(struct ufs_hba *hba)
7391 {
7392         bool flag;
7393         int ret;
7394
7395         /* Init check for device descriptor sizes */
7396         ufshcd_init_desc_sizes(hba);
7397
7398         /* Init UFS geometry descriptor related parameters */
7399         ret = ufshcd_device_geo_params_init(hba);
7400         if (ret)
7401                 goto out;
7402
7403         /* Check and apply UFS device quirks */
7404         ret = ufs_get_device_desc(hba);
7405         if (ret) {
7406                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7407                         __func__, ret);
7408                 goto out;
7409         }
7410
7411         ufshcd_get_ref_clk_gating_wait(hba);
7412
7413         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7414                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7415                 hba->dev_info.f_power_on_wp_en = flag;
7416
7417         /* Probe maximum power mode co-supported by both UFS host and device */
7418         if (ufshcd_get_max_pwr_mode(hba))
7419                 dev_err(hba->dev,
7420                         "%s: Failed getting max supported power mode\n",
7421                         __func__);
7422 out:
7423         return ret;
7424 }
7425
7426 /**
7427  * ufshcd_add_lus - probe and add UFS logical units
7428  * @hba: per-adapter instance
7429  */
7430 static int ufshcd_add_lus(struct ufs_hba *hba)
7431 {
7432         int ret;
7433
7434         /* Add required well known logical units to scsi mid layer */
7435         ret = ufshcd_scsi_add_wlus(hba);
7436         if (ret)
7437                 goto out;
7438
7439         /* Initialize devfreq after UFS device is detected */
7440         if (ufshcd_is_clkscaling_supported(hba)) {
7441                 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7442                         &hba->pwr_info,
7443                         sizeof(struct ufs_pa_layer_attr));
7444                 hba->clk_scaling.saved_pwr_info.is_valid = true;
7445                 if (!hba->devfreq) {
7446                         ret = ufshcd_devfreq_init(hba);
7447                         if (ret)
7448                                 goto out;
7449                 }
7450
7451                 hba->clk_scaling.is_allowed = true;
7452         }
7453
7454         ufs_bsg_probe(hba);
7455         scsi_scan_host(hba->host);
7456         pm_runtime_put_sync(hba->dev);
7457
7458 out:
7459         return ret;
7460 }
7461
7462 /**
7463  * ufshcd_probe_hba - probe hba to detect device and initialize
7464  * @hba: per-adapter instance
7465  * @async: asynchronous execution or not
7466  *
7467  * Execute link-startup and verify device initialization
7468  */
7469 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
7470 {
7471         int ret;
7472         ktime_t start = ktime_get();
7473
7474         ret = ufshcd_link_startup(hba);
7475         if (ret)
7476                 goto out;
7477
7478         /* Debug counters initialization */
7479         ufshcd_clear_dbg_ufs_stats(hba);
7480
7481         /* UniPro link is active now */
7482         ufshcd_set_link_active(hba);
7483
7484         /* Verify device initialization by sending NOP OUT UPIU */
7485         ret = ufshcd_verify_dev_init(hba);
7486         if (ret)
7487                 goto out;
7488
7489         /* Initiate UFS initialization and wait until it completes */
7490         ret = ufshcd_complete_dev_init(hba);
7491         if (ret)
7492                 goto out;
7493
7494         /*
7495          * Initialize the UFS device parameters used by the driver; these
7496          * parameters are associated with the UFS descriptors.
7497          */
7498         if (async) {
7499                 ret = ufshcd_device_params_init(hba);
7500                 if (ret)
7501                         goto out;
7502         }
7503
7504         ufshcd_tune_unipro_params(hba);
7505
7506         /* UFS device is also active now */
7507         ufshcd_set_ufs_dev_active(hba);
7508         ufshcd_force_reset_auto_bkops(hba);
7509         hba->wlun_dev_clr_ua = true;
7510
7511         /* Gear up to HS gear if supported */
7512         if (hba->max_pwr_info.is_valid) {
7513                 /*
7514                  * Set the right value to bRefClkFreq before attempting to
7515                  * switch to HS gears.
7516                  */
7517                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7518                         ufshcd_set_dev_ref_clk(hba);
7519                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
7520                 if (ret) {
7521                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
7522                                         __func__, ret);
7523                         goto out;
7524                 }
7525                 ufshcd_print_pwr_info(hba);
7526         }
7527
7528         /*
7529          * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7530          * and for removable UFS card as well, hence always set the parameter.
7531          * Note: Error handler may issue the device reset hence resetting
7532          * bActiveICCLevel as well so it is always safe to set this here.
7533          */
7534         ufshcd_set_active_icc_lvl(hba);
7535
7536         /* set the state as operational after switching to desired gear */
7537         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7538
7539         ufshcd_wb_config(hba);
7540         /* Enable Auto-Hibernate if configured */
7541         ufshcd_auto_hibern8_enable(hba);
7542
7543 out:
7544
7545         trace_ufshcd_init(dev_name(hba->dev), ret,
7546                 ktime_to_us(ktime_sub(ktime_get(), start)),
7547                 hba->curr_dev_pwr_mode, hba->uic_link_state);
7548         return ret;
7549 }
7550
7551 /**
7552  * ufshcd_async_scan - asynchronous execution for probing hba
7553  * @data: data pointer to pass to this function
7554  * @cookie: cookie data
7555  */
7556 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7557 {
7558         struct ufs_hba *hba = (struct ufs_hba *)data;
7559         int ret;
7560
7561         /* Initialize hba, detect and initialize UFS device */
7562         ret = ufshcd_probe_hba(hba, true);
7563         if (ret)
7564                 goto out;
7565
7566         /* Probe and add UFS logical units  */
7567         ret = ufshcd_add_lus(hba);
7568 out:
7569         /*
7570          * If we failed to initialize the device or the device is not
7571          * present, turn off the power/clocks etc.
7572          */
7573         if (ret) {
7574                 pm_runtime_put_sync(hba->dev);
7575                 ufshcd_exit_clk_scaling(hba);
7576                 ufshcd_hba_exit(hba);
7577         }
7578 }
7579
7580 static const struct attribute_group *ufshcd_driver_groups[] = {
7581         &ufs_sysfs_unit_descriptor_group,
7582         &ufs_sysfs_lun_attributes_group,
7583         NULL,
7584 };
7585
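/*
 * Default host variant parameters: controller enable delay, WriteBooster
 * flush threshold, and the devfreq/ondemand governor tuning used when clock
 * scaling is supported.
 */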
7586 static struct ufs_hba_variant_params ufs_hba_vps = {
7587         .hba_enable_delay_us            = 1000,
7588         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
7589         .devfreq_profile.polling_ms     = 100,
7590         .devfreq_profile.target         = ufshcd_devfreq_target,
7591         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
7592         .ondemand_data.upthreshold      = 70,
7593         .ondemand_data.downdifferential = 5,
7594 };
7595
7596 static struct scsi_host_template ufshcd_driver_template = {
7597         .module                 = THIS_MODULE,
7598         .name                   = UFSHCD,
7599         .proc_name              = UFSHCD,
7600         .queuecommand           = ufshcd_queuecommand,
7601         .slave_alloc            = ufshcd_slave_alloc,
7602         .slave_configure        = ufshcd_slave_configure,
7603         .slave_destroy          = ufshcd_slave_destroy,
7604         .change_queue_depth     = ufshcd_change_queue_depth,
7605         .eh_abort_handler       = ufshcd_abort,
7606         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7607         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
7608         .this_id                = -1,
7609         .sg_tablesize           = SG_ALL,
7610         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
7611         .can_queue              = UFSHCD_CAN_QUEUE,
7612         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
7613         .max_host_blocked       = 1,
7614         .track_queue_depth      = 1,
7615         .sdev_groups            = ufshcd_driver_groups,
7616         .dma_boundary           = PAGE_SIZE - 1,
7617         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
7618 };
7619
7620 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7621                                    int ua)
7622 {
7623         int ret;
7624
7625         if (!vreg)
7626                 return 0;
7627
7628         /*
7629          * The "set_load" operation is only required on regulators for which
7630          * a current limit has been specified. Otherwise a zero max_uA may
7631          * cause unexpected behavior when the regulator is enabled or set to
7632          * high power mode.
7633          */
7634         if (!vreg->max_uA)
7635                 return 0;
7636
7637         ret = regulator_set_load(vreg->reg, ua);
7638         if (ret < 0) {
7639                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7640                                 __func__, vreg->name, ua, ret);
7641         }
7642
7643         return ret;
7644 }
7645
7646 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7647                                          struct ufs_vreg *vreg)
7648 {
7649         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7650 }
7651
7652 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7653                                          struct ufs_vreg *vreg)
7654 {
7655         if (!vreg)
7656                 return 0;
7657
7658         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7659 }
7660
7661 static int ufshcd_config_vreg(struct device *dev,
7662                 struct ufs_vreg *vreg, bool on)
7663 {
7664         int ret = 0;
7665         struct regulator *reg;
7666         const char *name;
7667         int min_uV, uA_load;
7668
7669         BUG_ON(!vreg);
7670
7671         reg = vreg->reg;
7672         name = vreg->name;
7673
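        /*
         * Only configure load and voltage on regulators that expose voltage
         * information; others are left untouched.
         */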
7674         if (regulator_count_voltages(reg) > 0) {
7675                 uA_load = on ? vreg->max_uA : 0;
7676                 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7677                 if (ret)
7678                         goto out;
7679
7680                 if (vreg->min_uV && vreg->max_uV) {
7681                         min_uV = on ? vreg->min_uV : 0;
7682                         ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7683                         if (ret) {
7684                                 dev_err(dev,
7685                                         "%s: %s set voltage failed, err=%d\n",
7686                                         __func__, name, ret);
7687                                 goto out;
7688                         }
7689                 }
7690         }
7691 out:
7692         return ret;
7693 }
7694
7695 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7696 {
7697         int ret = 0;
7698
7699         if (!vreg || vreg->enabled)
7700                 goto out;
7701
7702         ret = ufshcd_config_vreg(dev, vreg, true);
7703         if (!ret)
7704                 ret = regulator_enable(vreg->reg);
7705
7706         if (!ret)
7707                 vreg->enabled = true;
7708         else
7709                 dev_err(dev, "%s: %s enable failed, err=%d\n",
7710                                 __func__, vreg->name, ret);
7711 out:
7712         return ret;
7713 }
7714
7715 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7716 {
7717         int ret = 0;
7718
7719         if (!vreg || !vreg->enabled)
7720                 goto out;
7721
7722         ret = regulator_disable(vreg->reg);
7723
7724         if (!ret) {
7725                 /* ignore errors on applying disable config */
7726                 ufshcd_config_vreg(dev, vreg, false);
7727                 vreg->enabled = false;
7728         } else {
7729                 dev_err(dev, "%s: %s disable failed, err=%d\n",
7730                                 __func__, vreg->name, ret);
7731         }
7732 out:
7733         return ret;
7734 }
7735
7736 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7737 {
7738         int ret = 0;
7739         struct device *dev = hba->dev;
7740         struct ufs_vreg_info *info = &hba->vreg_info;
7741
7742         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7743         if (ret)
7744                 goto out;
7745
7746         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7747         if (ret)
7748                 goto out;
7749
7750         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7751         if (ret)
7752                 goto out;
7753
7754 out:
7755         if (ret) {
7756                 ufshcd_toggle_vreg(dev, info->vccq2, false);
7757                 ufshcd_toggle_vreg(dev, info->vccq, false);
7758                 ufshcd_toggle_vreg(dev, info->vcc, false);
7759         }
7760         return ret;
7761 }
7762
7763 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7764 {
7765         struct ufs_vreg_info *info = &hba->vreg_info;
7766
7767         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7768 }
7769
7770 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7771 {
7772         int ret = 0;
7773
7774         if (!vreg)
7775                 goto out;
7776
7777         vreg->reg = devm_regulator_get(dev, vreg->name);
7778         if (IS_ERR(vreg->reg)) {
7779                 ret = PTR_ERR(vreg->reg);
7780                 dev_err(dev, "%s: %s get failed, err=%d\n",
7781                                 __func__, vreg->name, ret);
7782         }
7783 out:
7784         return ret;
7785 }
7786
7787 static int ufshcd_init_vreg(struct ufs_hba *hba)
7788 {
7789         int ret = 0;
7790         struct device *dev = hba->dev;
7791         struct ufs_vreg_info *info = &hba->vreg_info;
7792
7793         ret = ufshcd_get_vreg(dev, info->vcc);
7794         if (ret)
7795                 goto out;
7796
7797         ret = ufshcd_get_vreg(dev, info->vccq);
7798         if (ret)
7799                 goto out;
7800
7801         ret = ufshcd_get_vreg(dev, info->vccq2);
7802 out:
7803         return ret;
7804 }
7805
7806 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7807 {
7808         struct ufs_vreg_info *info = &hba->vreg_info;
7809
7810         if (info)
7811                 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7812
7813         return 0;
7814 }
7815
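/*
 * Enable or disable all clocks on hba->clk_list_head. When skip_ref_clk is
 * true the device reference clock ("ref_clk") is left untouched, since it has
 * to stay on while the link is still active.
 */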
7816 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7817                                         bool skip_ref_clk)
7818 {
7819         int ret = 0;
7820         struct ufs_clk_info *clki;
7821         struct list_head *head = &hba->clk_list_head;
7822         unsigned long flags;
7823         ktime_t start = ktime_get();
7824         bool clk_state_changed = false;
7825
7826         if (list_empty(head))
7827                 goto out;
7828
7829         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7830         if (ret)
7831                 return ret;
7832
7833         list_for_each_entry(clki, head, list) {
7834                 if (!IS_ERR_OR_NULL(clki->clk)) {
7835                         if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7836                                 continue;
7837
7838                         clk_state_changed = on ^ clki->enabled;
7839                         if (on && !clki->enabled) {
7840                                 ret = clk_prepare_enable(clki->clk);
7841                                 if (ret) {
7842                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7843                                                 __func__, clki->name, ret);
7844                                         goto out;
7845                                 }
7846                         } else if (!on && clki->enabled) {
7847                                 clk_disable_unprepare(clki->clk);
7848                         }
7849                         clki->enabled = on;
7850                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7851                                         clki->name, on ? "en" : "dis");
7852                 }
7853         }
7854
7855         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7856         if (ret)
7857                 return ret;
7858
7859 out:
7860         if (ret) {
7861                 list_for_each_entry(clki, head, list) {
7862                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7863                                 clk_disable_unprepare(clki->clk);
7864                 }
7865         } else if (on) {
7866                 spin_lock_irqsave(hba->host->host_lock, flags);
7867                 hba->clk_gating.state = CLKS_ON;
7868                 trace_ufshcd_clk_gating(dev_name(hba->dev),
7869                                         hba->clk_gating.state);
7870                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7871         }
7872
7873         if (clk_state_changed)
7874                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7875                         (on ? "on" : "off"),
7876                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7877         return ret;
7878 }
7879
7880 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7881 {
7882         return  __ufshcd_setup_clocks(hba, on, false);
7883 }
7884
7885 static int ufshcd_init_clocks(struct ufs_hba *hba)
7886 {
7887         int ret = 0;
7888         struct ufs_clk_info *clki;
7889         struct device *dev = hba->dev;
7890         struct list_head *head = &hba->clk_list_head;
7891
7892         if (list_empty(head))
7893                 goto out;
7894
7895         list_for_each_entry(clki, head, list) {
7896                 if (!clki->name)
7897                         continue;
7898
7899                 clki->clk = devm_clk_get(dev, clki->name);
7900                 if (IS_ERR(clki->clk)) {
7901                         ret = PTR_ERR(clki->clk);
7902                         dev_err(dev, "%s: %s clk get failed, %d\n",
7903                                         __func__, clki->name, ret);
7904                         goto out;
7905                 }
7906
7907                 /*
7908                  * Parse device ref clk freq as per device tree "ref_clk".
7909                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7910                  * in ufshcd_alloc_host().
7911                  */
7912                 if (!strcmp(clki->name, "ref_clk"))
7913                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7914
7915                 if (clki->max_freq) {
7916                         ret = clk_set_rate(clki->clk, clki->max_freq);
7917                         if (ret) {
7918                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7919                                         __func__, clki->name,
7920                                         clki->max_freq, ret);
7921                                 goto out;
7922                         }
7923                         clki->curr_freq = clki->max_freq;
7924                 }
7925                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7926                                 clki->name, clk_get_rate(clki->clk));
7927         }
7928 out:
7929         return ret;
7930 }
7931
7932 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7933 {
7934         int err = 0;
7935
7936         if (!hba->vops)
7937                 goto out;
7938
7939         err = ufshcd_vops_init(hba);
7940         if (err)
7941                 goto out;
7942
7943         err = ufshcd_vops_setup_regulators(hba, true);
7944         if (err)
7945                 goto out_exit;
7946
7947         goto out;
7948
7949 out_exit:
7950         ufshcd_vops_exit(hba);
7951 out:
7952         if (err)
7953                 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7954                         __func__, ufshcd_get_var_name(hba), err);
7955         return err;
7956 }
7957
7958 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7959 {
7960         if (!hba->vops)
7961                 return;
7962
7963         ufshcd_vops_setup_regulators(hba, false);
7964
7965         ufshcd_vops_exit(hba);
7966 }
7967
7968 static int ufshcd_hba_init(struct ufs_hba *hba)
7969 {
7970         int err;
7971
7972         /*
7973          * Handle host controller power separately from the UFS device power
7974          * rails, as this makes it easier to control host controller power
7975          * collapse, which is different from UFS device power collapse.
7976          * Also, enable the host controller power before we go ahead with the
7977          * rest of the initialization here.
7978          */
7979         err = ufshcd_init_hba_vreg(hba);
7980         if (err)
7981                 goto out;
7982
7983         err = ufshcd_setup_hba_vreg(hba, true);
7984         if (err)
7985                 goto out;
7986
7987         err = ufshcd_init_clocks(hba);
7988         if (err)
7989                 goto out_disable_hba_vreg;
7990
7991         err = ufshcd_setup_clocks(hba, true);
7992         if (err)
7993                 goto out_disable_hba_vreg;
7994
7995         err = ufshcd_init_vreg(hba);
7996         if (err)
7997                 goto out_disable_clks;
7998
7999         err = ufshcd_setup_vreg(hba, true);
8000         if (err)
8001                 goto out_disable_clks;
8002
8003         err = ufshcd_variant_hba_init(hba);
8004         if (err)
8005                 goto out_disable_vreg;
8006
8007         hba->is_powered = true;
8008         goto out;
8009
8010 out_disable_vreg:
8011         ufshcd_setup_vreg(hba, false);
8012 out_disable_clks:
8013         ufshcd_setup_clocks(hba, false);
8014 out_disable_hba_vreg:
8015         ufshcd_setup_hba_vreg(hba, false);
8016 out:
8017         return err;
8018 }
8019
8020 static void ufshcd_hba_exit(struct ufs_hba *hba)
8021 {
8022         if (hba->is_powered) {
8023                 ufshcd_variant_hba_exit(hba);
8024                 ufshcd_setup_vreg(hba, false);
8025                 ufshcd_suspend_clkscaling(hba);
8029                 ufshcd_setup_clocks(hba, false);
8030                 ufshcd_setup_hba_vreg(hba, false);
8031                 hba->is_powered = false;
8032                 ufs_put_device_desc(hba);
8033         }
8034 }
8035
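/*
 * Issue a REQUEST SENSE to the given device; this is used here to clear a
 * pending UNIT ATTENTION condition on the device W-LUN before changing the
 * device power mode.
 */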
8036 static int
8037 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
8038 {
8039         unsigned char cmd[6] = {REQUEST_SENSE,
8040                                 0,
8041                                 0,
8042                                 0,
8043                                 UFS_SENSE_SIZE,
8044                                 0};
8045         char *buffer;
8046         int ret;
8047
8048         buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
8049         if (!buffer) {
8050                 ret = -ENOMEM;
8051                 goto out;
8052         }
8053
8054         ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
8055                         UFS_SENSE_SIZE, NULL, NULL,
8056                         msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
8057         if (ret)
8058                 pr_err("%s: failed with err %d\n", __func__, ret);
8059
8060         kfree(buffer);
8061 out:
8062         return ret;
8063 }
8064
8065 /**
8066  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
8067  *                           power mode
8068  * @hba: per adapter instance
8069  * @pwr_mode: device power mode to set
8070  *
8071  * Returns 0 if requested power mode is set successfully
8072  * Returns non-zero if failed to set the requested power mode
8073  */
8074 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
8075                                      enum ufs_dev_pwr_mode pwr_mode)
8076 {
8077         unsigned char cmd[6] = { START_STOP };
8078         struct scsi_sense_hdr sshdr;
8079         struct scsi_device *sdp;
8080         unsigned long flags;
8081         int ret;
8082
8083         spin_lock_irqsave(hba->host->host_lock, flags);
8084         sdp = hba->sdev_ufs_device;
8085         if (sdp) {
8086                 ret = scsi_device_get(sdp);
8087                 if (!ret && !scsi_device_online(sdp)) {
8088                         ret = -ENODEV;
8089                         scsi_device_put(sdp);
8090                 }
8091         } else {
8092                 ret = -ENODEV;
8093         }
8094         spin_unlock_irqrestore(hba->host->host_lock, flags);
8095
8096         if (ret)
8097                 return ret;
8098
8099         /*
8100          * If scsi commands fail, the scsi mid-layer schedules scsi error-
8101          * handling, which would wait for host to be resumed. Since we know
8102          * we are functional while we are here, skip host resume in error
8103          * handling context.
8104          */
8105         hba->host->eh_noresume = 1;
8106         if (hba->wlun_dev_clr_ua) {
8107                 ret = ufshcd_send_request_sense(hba, sdp);
8108                 if (ret)
8109                         goto out;
8110                 /* Unit attention condition is cleared now */
8111                 hba->wlun_dev_clr_ua = false;
8112         }
8113
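        /* The POWER CONDITION field occupies bits 7:4 of byte 4 of the START STOP UNIT CDB */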
8114         cmd[4] = pwr_mode << 4;
8115
8116         /*
8117          * This function is generally called from the power management
8118          * callbacks, hence set the RQF_PM flag so that it doesn't resume the
8119          * already suspended children.
8120          */
8121         ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8122                         START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8123         if (ret) {
8124                 sdev_printk(KERN_WARNING, sdp,
8125                             "START_STOP failed for power mode: %d, result %x\n",
8126                             pwr_mode, ret);
8127                 if (driver_byte(ret) == DRIVER_SENSE)
8128                         scsi_print_sense_hdr(sdp, NULL, &sshdr);
8129         }
8130
8131         if (!ret)
8132                 hba->curr_dev_pwr_mode = pwr_mode;
8133 out:
8134         scsi_device_put(sdp);
8135         hba->host->eh_noresume = 0;
8136         return ret;
8137 }
8138
8139 static int ufshcd_link_state_transition(struct ufs_hba *hba,
8140                                         enum uic_link_state req_link_state,
8141                                         int check_for_bkops)
8142 {
8143         int ret = 0;
8144
8145         if (req_link_state == hba->uic_link_state)
8146                 return 0;
8147
8148         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
8149                 ret = ufshcd_uic_hibern8_enter(hba);
8150                 if (!ret)
8151                         ufshcd_set_link_hibern8(hba);
8152                 else
8153                         goto out;
8154         }
8155         /*
8156          * If autobkops is enabled, link can't be turned off because
8157          * turning off the link would also turn off the device.
8158          */
8159         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
8160                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
8161                 /*
8162                  * Let's make sure that the link is in low power mode; we
8163                  * currently do this by putting the link in Hibern8. Another way
8164                  * to put the link in low power mode is to send a DME end point
8165                  * reset to the device and then a DME reset to the local UniPro.
8166                  * But putting the link in Hibern8 is much faster.
8167                  */
8168                 ret = ufshcd_uic_hibern8_enter(hba);
8169                 if (ret)
8170                         goto out;
8171                 /*
8172                  * Change controller state to "reset state" which
8173                  * should also put the link in off/reset state
8174                  */
8175                 ufshcd_hba_stop(hba);
8176                 /*
8177                  * TODO: Check if we need any delay to make sure that
8178                  * controller is reset
8179                  */
8180                 ufshcd_set_link_off(hba);
8181         }
8182
8183 out:
8184         return ret;
8185 }
8186
8187 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
8188 {
8189         /*
8190          * It seems some UFS devices may keep drawing more than sleep current
8191          * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
8192          * To avoid this situation, add 2ms delay before putting these UFS
8193          * rails in LPM mode.
8194          */
8195         if (!ufshcd_is_link_active(hba) &&
8196             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
8197                 usleep_range(2000, 2100);
8198
8199         /*
8200          * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
8201          * save some power.
8202          *
8203          * If the UFS device and link are in OFF state, all power supplies (VCC,
8204          * VCCQ, VCCQ2) can be turned off if power on write protect is not
8205          * required. If UFS link is inactive (Hibern8 or OFF state) and device
8206          * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
8207          *
8208          * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
8209          * in low power state which would save some power.
8210          *
8211          * If Write Booster is enabled and the device needs to flush the WB
8212          * buffer OR if bkops status is urgent for WB, keep Vcc on.
8213          */
8214         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8215             !hba->dev_info.is_lu_power_on_wp) {
8216                 ufshcd_setup_vreg(hba, false);
8217         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8218                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8219                 if (!ufshcd_is_link_active(hba)) {
8220                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8221                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
8222                 }
8223         }
8224 }
8225
8226 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
8227 {
8228         int ret = 0;
8229
8230         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
8231             !hba->dev_info.is_lu_power_on_wp) {
8232                 ret = ufshcd_setup_vreg(hba, true);
8233         } else if (!ufshcd_is_ufs_dev_active(hba)) {
8234                 if (!ret && !ufshcd_is_link_active(hba)) {
8235                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
8236                         if (ret)
8237                                 goto vcc_disable;
8238                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
8239                         if (ret)
8240                                 goto vccq_lpm;
8241                 }
8242                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
8243         }
8244         goto out;
8245
8246 vccq_lpm:
8247         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
8248 vcc_disable:
8249         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
8250 out:
8251         return ret;
8252 }
8253
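/*
 * The host controller supply (vdd_hba) is only powered down or back up when
 * the UniPro link is completely off; otherwise it is left untouched across
 * suspend/resume.
 */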
8254 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
8255 {
8256         if (ufshcd_is_link_off(hba))
8257                 ufshcd_setup_hba_vreg(hba, false);
8258 }
8259
8260 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
8261 {
8262         if (ufshcd_is_link_off(hba))
8263                 ufshcd_setup_hba_vreg(hba, true);
8264 }
8265
8266 /**
8267  * ufshcd_suspend - helper function for suspend operations
8268  * @hba: per adapter instance
8269  * @pm_op: desired low power operation type
8270  *
8271  * This function will try to put the UFS device and link into low power
8272  * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
8273  * (System PM level).
8274  *
8275  * If this function is called during shutdown, it will make sure that
8276  * both UFS device and UFS link is powered off.
8277  *
8278  * NOTE: UFS device & link must be active before we enter in this function.
8279  *
8280  * Returns 0 for success and non-zero for failure
8281  */
8282 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8283 {
8284         int ret = 0;
8285         enum ufs_pm_level pm_lvl;
8286         enum ufs_dev_pwr_mode req_dev_pwr_mode;
8287         enum uic_link_state req_link_state;
8288
8289         hba->pm_op_in_progress = 1;
8290         if (!ufshcd_is_shutdown_pm(pm_op)) {
8291                 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
8292                          hba->rpm_lvl : hba->spm_lvl;
8293                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
8294                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
8295         } else {
8296                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
8297                 req_link_state = UIC_LINK_OFF_STATE;
8298         }
8299
8300         /*
8301          * If we can't transition into any of the low power modes
8302          * just gate the clocks.
8303          */
8304         ufshcd_hold(hba, false);
8305         hba->clk_gating.is_suspended = true;
8306
8307         if (hba->clk_scaling.is_allowed) {
8308                 cancel_work_sync(&hba->clk_scaling.suspend_work);
8309                 cancel_work_sync(&hba->clk_scaling.resume_work);
8310                 ufshcd_suspend_clkscaling(hba);
8311         }
8312
8313         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
8314                         req_link_state == UIC_LINK_ACTIVE_STATE) {
8315                 goto disable_clks;
8316         }
8317
8318         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
8319             (req_link_state == hba->uic_link_state))
8320                 goto enable_gating;
8321
8322         /* UFS device & link must be active before we enter in this function */
8323         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
8324                 ret = -EINVAL;
8325                 goto enable_gating;
8326         }
8327
8328         if (ufshcd_is_runtime_pm(pm_op)) {
8329                 if (ufshcd_can_autobkops_during_suspend(hba)) {
8330                         /*
8331                          * The device is idle with no requests in the queue,
8332                          * allow background operations if bkops status shows
8333                          * that performance might be impacted.
8334                          */
8335                         ret = ufshcd_urgent_bkops(hba);
8336                         if (ret)
8337                                 goto enable_gating;
8338                 } else {
8339                         /* make sure that auto bkops is disabled */
8340                         ufshcd_disable_auto_bkops(hba);
8341                 }
8342                 /*
8343                  * If device needs to do BKOP or WB buffer flush during
8344                  * Hibern8, keep device power mode as "active power mode"
8345                  * and VCC supply.
8346                  */
8347                 hba->dev_info.b_rpm_dev_flush_capable =
8348                         hba->auto_bkops_enabled ||
8349                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8350                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8351                         ufshcd_is_auto_hibern8_enabled(hba))) &&
8352                         ufshcd_wb_need_flush(hba));
8353         }
8354
8355         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8356                 if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8357                     !ufshcd_is_runtime_pm(pm_op)) {
8358                         /* ensure that bkops is disabled */
8359                         ufshcd_disable_auto_bkops(hba);
8360                 }
8361
8362                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
8363                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8364                         if (ret)
8365                                 goto enable_gating;
8366                 }
8367         }
8368
8369         flush_work(&hba->eeh_work);
8370         ret = ufshcd_link_state_transition(hba, req_link_state, 1);
8371         if (ret)
8372                 goto set_dev_active;
8373
8374         ufshcd_vreg_set_lpm(hba);
8375
8376 disable_clks:
8377         /*
8378          * Call vendor specific suspend callback. As these callbacks may access
8379          * vendor specific host controller register space call them before the
8380          * host clocks are ON.
8381          */
8382         ret = ufshcd_vops_suspend(hba, pm_op);
8383         if (ret)
8384                 goto set_link_active;
8385         /*
8386          * Disable the host irq as there won't be any
8387          * host controller transaction expected till resume.
8388          */
8389         ufshcd_disable_irq(hba);
8390
8391         if (!ufshcd_is_link_active(hba))
8392                 ufshcd_setup_clocks(hba, false);
8393         else
8394                 /* If link is active, device ref_clk can't be switched off */
8395                 __ufshcd_setup_clocks(hba, false, true);
8396
8397         hba->clk_gating.state = CLKS_OFF;
8398         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
8399
8400         /* Put the host controller in low power mode if possible */
8401         ufshcd_hba_vreg_set_lpm(hba);
8402         goto out;
8403
8404 set_link_active:
8405         if (hba->clk_scaling.is_allowed)
8406                 ufshcd_resume_clkscaling(hba);
8407         ufshcd_vreg_set_hpm(hba);
8408         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
8409                 ufshcd_set_link_active(hba);
8410         else if (ufshcd_is_link_off(hba))
8411                 ufshcd_host_reset_and_restore(hba);
8412 set_dev_active:
8413         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
8414                 ufshcd_disable_auto_bkops(hba);
8415 enable_gating:
8416         if (hba->clk_scaling.is_allowed)
8417                 ufshcd_resume_clkscaling(hba);
8418         hba->clk_gating.is_suspended = false;
8419         hba->dev_info.b_rpm_dev_flush_capable = false;
8420         ufshcd_release(hba);
8421 out:
8422         if (hba->dev_info.b_rpm_dev_flush_capable) {
8423                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8424                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8425         }
8426
8427         hba->pm_op_in_progress = 0;
8428
8429         if (ret)
8430                 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
8431         return ret;
8432 }
8433
8434 /**
8435  * ufshcd_resume - helper function for resume operations
8436  * @hba: per adapter instance
8437  * @pm_op: runtime PM or system PM
8438  *
8439  * This function basically brings the UFS device, UniPro link and controller
8440  * to active state.
8441  *
8442  * Returns 0 for success and non-zero for failure
8443  */
8444 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
8445 {
8446         int ret;
8447         enum uic_link_state old_link_state;
8448
8449         hba->pm_op_in_progress = 1;
8450         old_link_state = hba->uic_link_state;
8451
8452         ufshcd_hba_vreg_set_hpm(hba);
8453         /* Make sure clocks are enabled before accessing controller */
8454         ret = ufshcd_setup_clocks(hba, true);
8455         if (ret)
8456                 goto out;
8457
8458         /* enable the host irq as host controller would be active soon */
8459         ufshcd_enable_irq(hba);
8460
8461         ret = ufshcd_vreg_set_hpm(hba);
8462         if (ret)
8463                 goto disable_irq_and_vops_clks;
8464
8465         /*
8466          * Call vendor specific resume callback. As these callbacks may access
8467          * vendor specific host controller register space call them when the
8468          * host clocks are ON.
8469          */
8470         ret = ufshcd_vops_resume(hba, pm_op);
8471         if (ret)
8472                 goto disable_vreg;
8473
8474         if (ufshcd_is_link_hibern8(hba)) {
8475                 ret = ufshcd_uic_hibern8_exit(hba);
8476                 if (!ret)
8477                         ufshcd_set_link_active(hba);
8478                 else
8479                         goto vendor_suspend;
8480         } else if (ufshcd_is_link_off(hba)) {
8481                 /*
8482                  * A full initialization of the host and the device is
8483                  * required since the link was put to off during suspend.
8484                  */
8485                 ret = ufshcd_reset_and_restore(hba);
8486                 /*
8487                  * ufshcd_reset_and_restore() should have already
8488                  * set the link state as active
8489                  */
8490                 if (ret || !ufshcd_is_link_active(hba))
8491                         goto vendor_suspend;
8492         }
8493
8494         if (!ufshcd_is_ufs_dev_active(hba)) {
8495                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
8496                 if (ret)
8497                         goto set_old_link_state;
8498         }
8499
8500         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
8501                 ufshcd_enable_auto_bkops(hba);
8502         else
8503                 /*
8504                  * If BKOPs operations are urgently needed at this moment then
8505                  * keep auto-bkops enabled or else disable it.
8506                  */
8507                 ufshcd_urgent_bkops(hba);
8508
8509         hba->clk_gating.is_suspended = false;
8510
8511         if (hba->clk_scaling.is_allowed)
8512                 ufshcd_resume_clkscaling(hba);
8513
8514         /* Enable Auto-Hibernate if configured */
8515         ufshcd_auto_hibern8_enable(hba);
8516
8517         if (hba->dev_info.b_rpm_dev_flush_capable) {
8518                 hba->dev_info.b_rpm_dev_flush_capable = false;
8519                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8520         }
8521
8522         /* Schedule clock gating in case of no access to UFS device yet */
8523         ufshcd_release(hba);
8524
8525         goto out;
8526
8527 set_old_link_state:
8528         ufshcd_link_state_transition(hba, old_link_state, 0);
8529 vendor_suspend:
8530         ufshcd_vops_suspend(hba, pm_op);
8531 disable_vreg:
8532         ufshcd_vreg_set_lpm(hba);
8533 disable_irq_and_vops_clks:
8534         ufshcd_disable_irq(hba);
8535         if (hba->clk_scaling.is_allowed)
8536                 ufshcd_suspend_clkscaling(hba);
8537         ufshcd_setup_clocks(hba, false);
8538 out:
8539         hba->pm_op_in_progress = 0;
8540         if (ret)
8541                 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
8542         return ret;
8543 }
8544
8545 /**
8546  * ufshcd_system_suspend - system suspend routine
8547  * @hba: per adapter instance
8548  *
8549  * Check the description of ufshcd_suspend() function for more details.
8550  *
8551  * Returns 0 for success and non-zero for failure
8552  */
8553 int ufshcd_system_suspend(struct ufs_hba *hba)
8554 {
8555         int ret = 0;
8556         ktime_t start = ktime_get();
8557
8558         if (!hba || !hba->is_powered)
8559                 return 0;
8560
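        /*
         * Nothing to do if the device power mode and link state selected by
         * spm_lvl already match the current state.
         */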
8561         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8562              hba->curr_dev_pwr_mode) &&
8563             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8564              hba->uic_link_state))
8565                 goto out;
8566
8567         if (pm_runtime_suspended(hba->dev)) {
8568                 /*
8569                  * The UFS device and/or UFS link low power states during runtime
8570                  * suspend may be different from what is expected during
8571                  * system suspend. Hence runtime resume the device & link and
8572                  * let the system suspend low power states take effect.
8573                  * TODO: If resume takes a long time, we might optimize it in
8574                  * the future by not resuming everything if possible.
8575                  */
8576                 ret = ufshcd_runtime_resume(hba);
8577                 if (ret)
8578                         goto out;
8579         }
8580
8581         ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8582 out:
8583         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8584                 ktime_to_us(ktime_sub(ktime_get(), start)),
8585                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8586         if (!ret)
8587                 hba->is_sys_suspended = true;
8588         return ret;
8589 }
8590 EXPORT_SYMBOL(ufshcd_system_suspend);
8591
8592 /**
8593  * ufshcd_system_resume - system resume routine
8594  * @hba: per adapter instance
8595  *
8596  * Returns 0 for success and non-zero for failure
8597  */
8599 int ufshcd_system_resume(struct ufs_hba *hba)
8600 {
8601         int ret = 0;
8602         ktime_t start = ktime_get();
8603
8604         if (!hba)
8605                 return -EINVAL;
8606
8607         if (!hba->is_powered || pm_runtime_suspended(hba->dev))
8608                 /*
8609                  * Let the runtime resume take care of resuming
8610                  * if runtime suspended.
8611                  */
8612                 goto out;
8613         else
8614                 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8615 out:
8616         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8617                 ktime_to_us(ktime_sub(ktime_get(), start)),
8618                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8619         if (!ret)
8620                 hba->is_sys_suspended = false;
8621         return ret;
8622 }
8623 EXPORT_SYMBOL(ufshcd_system_resume);
8624
8625 /**
8626  * ufshcd_runtime_suspend - runtime suspend routine
8627  * @hba: per adapter instance
8628  *
8629  * Check the description of ufshcd_suspend() function for more details.
8630  *
8631  * Returns 0 for success and non-zero for failure
8632  */
8633 int ufshcd_runtime_suspend(struct ufs_hba *hba)
8634 {
8635         int ret = 0;
8636         ktime_t start = ktime_get();
8637
8638         if (!hba)
8639                 return -EINVAL;
8640
8641         if (!hba->is_powered)
8642                 goto out;
8643         else
8644                 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8645 out:
8646         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8647                 ktime_to_us(ktime_sub(ktime_get(), start)),
8648                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8649         return ret;
8650 }
8651 EXPORT_SYMBOL(ufshcd_runtime_suspend);
8652
8653 /**
8654  * ufshcd_runtime_resume - runtime resume routine
8655  * @hba: per adapter instance
8656  *
8657  * This function basically brings the UFS device, UniPro link and controller
8658  * to active state. The following operations are done in this function:
8659  *
8660  * 1. Turn on all the controller related clocks
8661  * 2. Bring the UniPro link out of Hibernate state
8662  * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8663  *    to active state.
8664  * 4. If auto-bkops is enabled on the device, disable it.
8665  *
8666  * So the following would be the possible power state after this function
8667  * returns successfully:
8668  *      S1: UFS device in Active state with VCC rail ON
8669  *          UniPro link in Active state
8670  *          All the UFS/UniPro controller clocks are ON
8671  *
8672  * Returns 0 for success and non-zero for failure
8673  */
8674 int ufshcd_runtime_resume(struct ufs_hba *hba)
8675 {
8676         int ret = 0;
8677         ktime_t start = ktime_get();
8678
8679         if (!hba)
8680                 return -EINVAL;
8681
8682         if (!hba->is_powered)
8683                 goto out;
8684         else
8685                 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8686 out:
8687         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8688                 ktime_to_us(ktime_sub(ktime_get(), start)),
8689                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8690         return ret;
8691 }
8692 EXPORT_SYMBOL(ufshcd_runtime_resume);
8693
8694 int ufshcd_runtime_idle(struct ufs_hba *hba)
8695 {
8696         return 0;
8697 }
8698 EXPORT_SYMBOL(ufshcd_runtime_idle);
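
/*
 * Illustrative sketch (not part of this driver): the runtime PM helpers above
 * are wired into a glue driver's dev_pm_ops alongside the system sleep hooks,
 * roughly as below. All my_ufs_* names are hypothetical; each runtime wrapper
 * simply forwards the hba stored in drvdata, e.g.:
 *
 *	static int my_ufs_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops my_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(my_ufs_suspend, my_ufs_resume)
 *		SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend,
 *				   my_ufs_runtime_resume, my_ufs_runtime_idle)
 *	};
 */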
8699
8700 /**
8701  * ufshcd_shutdown - shutdown routine
8702  * @hba: per adapter instance
8703  *
8704  * This function powers off both the UFS device and the UFS link.
8705  *
8706  * Returns 0 always to allow force shutdown even in case of errors.
8707  */
8708 int ufshcd_shutdown(struct ufs_hba *hba)
8709 {
8710         int ret = 0;
8711
8712         if (!hba->is_powered)
8713                 goto out;
8714
8715         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8716                 goto out;
8717
8718         if (pm_runtime_suspended(hba->dev)) {
8719                 ret = ufshcd_runtime_resume(hba);
8720                 if (ret)
8721                         goto out;
8722         }
8723
8724         ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8725 out:
8726         if (ret)
8727                 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8728         /* allow force shutdown even in case of errors */
8729         return 0;
8730 }
8731 EXPORT_SYMBOL(ufshcd_shutdown);
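
/*
 * Illustrative sketch (not part of this driver): a glue driver typically calls
 * ufshcd_shutdown() from its bus-level shutdown callback, for example from a
 * platform_driver .shutdown hook (hypothetical name):
 *
 *	static void my_ufs_shutdown(struct platform_device *pdev)
 *	{
 *		ufshcd_shutdown(platform_get_drvdata(pdev));
 *	}
 */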
8732
8733 /**
8734  * ufshcd_remove - de-allocate the SCSI host and host memory space
8735  *              data structures
8736  * @hba: per adapter instance
8737  */
8738 void ufshcd_remove(struct ufs_hba *hba)
8739 {
8740         ufs_bsg_remove(hba);
8741         ufs_sysfs_remove_nodes(hba->dev);
8742         blk_cleanup_queue(hba->tmf_queue);
8743         blk_mq_free_tag_set(&hba->tmf_tag_set);
8744         blk_cleanup_queue(hba->cmd_queue);
8745         scsi_remove_host(hba->host);
8746         /* disable interrupts */
8747         ufshcd_disable_intr(hba, hba->intr_mask);
8748         ufshcd_hba_stop(hba);
8749
8750         ufshcd_exit_clk_scaling(hba);
8751         ufshcd_exit_clk_gating(hba);
8752         if (ufshcd_is_clkscaling_supported(hba))
8753                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8754         ufshcd_hba_exit(hba);
8755 }
8756 EXPORT_SYMBOL_GPL(ufshcd_remove);
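
/*
 * Illustrative sketch (not part of this driver): on driver detach a glue
 * driver is expected to runtime resume the device, tear down the HBA and then
 * drop the Scsi_Host reference (hypothetical name, based on how existing glue
 * drivers pair ufshcd_remove() with ufshcd_dealloc_host()):
 *
 *	static int my_ufs_remove(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba = platform_get_drvdata(pdev);
 *
 *		pm_runtime_get_sync(&pdev->dev);
 *		ufshcd_remove(hba);
 *		ufshcd_dealloc_host(hba);
 *		return 0;
 *	}
 */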
8757
8758 /**
8759  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8760  * @hba: pointer to Host Bus Adapter (HBA)
8761  */
8762 void ufshcd_dealloc_host(struct ufs_hba *hba)
8763 {
8764         scsi_host_put(hba->host);
8765 }
8766 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8767
8768 /**
8769  * ufshcd_set_dma_mask - Set dma mask based on the controller
8770  *                       addressing capability
8771  * @hba: per adapter instance
8772  *
8773  * Returns 0 for success, non-zero for failure
8774  */
8775 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8776 {
8777         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8778                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8779                         return 0;
8780         }
8781         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8782 }
8783
8784 /**
8785  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8786  * @dev: pointer to device handle
8787  * @hba_handle: driver private handle
8788  * Returns 0 on success, non-zero value on failure
8789  */
8790 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8791 {
8792         struct Scsi_Host *host;
8793         struct ufs_hba *hba;
8794         int err = 0;
8795
8796         if (!dev) {
8797                 dev_err(dev,
8798                 "Invalid memory reference: dev is NULL\n");
8799                 err = -ENODEV;
8800                 goto out_error;
8801         }
8802
8803         host = scsi_host_alloc(&ufshcd_driver_template,
8804                                 sizeof(struct ufs_hba));
8805         if (!host) {
8806                 dev_err(dev, "scsi_host_alloc failed\n");
8807                 err = -ENOMEM;
8808                 goto out_error;
8809         }
8810         hba = shost_priv(host);
8811         hba->host = host;
8812         hba->dev = dev;
8813         *hba_handle = hba;
8814         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
8815
8816         INIT_LIST_HEAD(&hba->clk_list_head);
8817
8818 out_error:
8819         return err;
8820 }
8821 EXPORT_SYMBOL(ufshcd_alloc_host);
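
/*
 * Note for callers: the hba returned by ufshcd_alloc_host() is only allocated,
 * not started. It must subsequently be passed to ufshcd_init() and, on the
 * error or removal path, released again with ufshcd_dealloc_host().
 */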
8822
8823 /* This function exists because blk_mq_alloc_tag_set() requires a queue_rq callback. */
8824 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8825                                      const struct blk_mq_queue_data *qd)
8826 {
8827         WARN_ON_ONCE(true);
8828         return BLK_STS_NOTSUPP;
8829 }
8830
8831 static const struct blk_mq_ops ufshcd_tmf_ops = {
8832         .queue_rq = ufshcd_queue_tmf,
8833 };
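
/*
 * The tmf_queue built on top of this tag set is used only as a tag allocator
 * for outstanding task management requests (blk_get_request() in
 * __ufshcd_issue_tm_cmd()); requests are never dispatched through it, which
 * is why the queue_rq callback above is a stub that only warns.
 */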
8834
8835 /**
8836  * ufshcd_init - Driver initialization routine
8837  * @hba: per-adapter instance
8838  * @mmio_base: base register address
8839  * @irq: Interrupt line of device
8840  * Returns 0 on success, non-zero value on failure
8841  */
8842 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8843 {
8844         int err;
8845         struct Scsi_Host *host = hba->host;
8846         struct device *dev = hba->dev;
8847
8848         if (!mmio_base) {
8849                 dev_err(hba->dev,
8850                 "Invalid memory reference: mmio_base is NULL\n");
8851                 err = -ENODEV;
8852                 goto out_error;
8853         }
8854
8855         hba->mmio_base = mmio_base;
8856         hba->irq = irq;
8857         hba->vps = &ufs_hba_vps;
8858
8859         err = ufshcd_hba_init(hba);
8860         if (err)
8861                 goto out_error;
8862
8863         /* Read capabilities registers */
8864         ufshcd_hba_capabilities(hba);
8865
8866         /* Get UFS version supported by the controller */
8867         hba->ufs_version = ufshcd_get_ufs_version(hba);
8868
8869         if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8870             (hba->ufs_version != UFSHCI_VERSION_11) &&
8871             (hba->ufs_version != UFSHCI_VERSION_20) &&
8872             (hba->ufs_version != UFSHCI_VERSION_21))
8873                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8874                         hba->ufs_version);
8875
8876         /* Get Interrupt bit mask per version */
8877         hba->intr_mask = ufshcd_get_intr_mask(hba);
8878
8879         err = ufshcd_set_dma_mask(hba);
8880         if (err) {
8881                 dev_err(hba->dev, "set dma mask failed\n");
8882                 goto out_disable;
8883         }
8884
8885         /* Allocate memory for host memory space */
8886         err = ufshcd_memory_alloc(hba);
8887         if (err) {
8888                 dev_err(hba->dev, "Memory allocation failed\n");
8889                 goto out_disable;
8890         }
8891
8892         /* Configure LRB */
8893         ufshcd_host_memory_configure(hba);
8894
8895         host->can_queue = hba->nutrs;
8896         host->cmd_per_lun = hba->nutrs;
8897         host->max_id = UFSHCD_MAX_ID;
8898         host->max_lun = UFS_MAX_LUNS;
8899         host->max_channel = UFSHCD_MAX_CHANNEL;
8900         host->unique_id = host->host_no;
8901         host->max_cmd_len = UFS_CDB_SIZE;
8902
8903         hba->max_pwr_info.is_valid = false;
8904
8905         /* Initialize work queues */
8906         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8907         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8908
8909         /* Initialize UIC command mutex */
8910         mutex_init(&hba->uic_cmd_mutex);
8911
8912         /* Initialize mutex for device management commands */
8913         mutex_init(&hba->dev_cmd.lock);
8914
8915         init_rwsem(&hba->clk_scaling_lock);
8916
8917         ufshcd_init_clk_gating(hba);
8918
8919         ufshcd_init_clk_scaling(hba);
8920
8921         /*
8922          * In order to avoid any spurious interrupt immediately after
8923          * registering UFS controller interrupt handler, clear any pending UFS
8924          * interrupt status and disable all the UFS interrupts.
8925          */
8926         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8927                       REG_INTERRUPT_STATUS);
8928         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8929         /*
8930          * Make sure that UFS interrupts are disabled and any pending interrupt
8931          * status is cleared before registering UFS interrupt handler.
8932          */
8933         mb();
8934
8935         /* IRQ registration */
8936         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8937         if (err) {
8938                 dev_err(hba->dev, "request irq failed\n");
8939                 goto exit_gating;
8940         } else {
8941                 hba->is_irq_enabled = true;
8942         }
8943
8944         err = scsi_add_host(host, hba->dev);
8945         if (err) {
8946                 dev_err(hba->dev, "scsi_add_host failed\n");
8947                 goto exit_gating;
8948         }
8949
8950         hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8951         if (IS_ERR(hba->cmd_queue)) {
8952                 err = PTR_ERR(hba->cmd_queue);
8953                 goto out_remove_scsi_host;
8954         }
8955
8956         hba->tmf_tag_set = (struct blk_mq_tag_set) {
8957                 .nr_hw_queues   = 1,
8958                 .queue_depth    = hba->nutmrs,
8959                 .ops            = &ufshcd_tmf_ops,
8960                 .flags          = BLK_MQ_F_NO_SCHED,
8961         };
8962         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8963         if (err < 0)
8964                 goto free_cmd_queue;
8965         hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8966         if (IS_ERR(hba->tmf_queue)) {
8967                 err = PTR_ERR(hba->tmf_queue);
8968                 goto free_tmf_tag_set;
8969         }
8970
8971         /* Reset the attached device */
8972         ufshcd_vops_device_reset(hba);
8973
8974         /* Host controller enable */
8975         err = ufshcd_hba_enable(hba);
8976         if (err) {
8977                 dev_err(hba->dev, "Host controller enable failed\n");
8978                 ufshcd_print_host_regs(hba);
8979                 ufshcd_print_host_state(hba);
8980                 goto free_tmf_queue;
8981         }
8982
8983         /*
8984          * Set the default power management level for runtime and system PM.
8985          * Default power saving mode is to keep UFS link in Hibern8 state
8986          * and UFS device in sleep state.
8987          */
8988         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8989                                                 UFS_SLEEP_PWR_MODE,
8990                                                 UIC_LINK_HIBERN8_STATE);
8991         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8992                                                 UFS_SLEEP_PWR_MODE,
8993                                                 UIC_LINK_HIBERN8_STATE);
8994
8995         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
8996                           ufshcd_rpm_dev_flush_recheck_work);
8997
8998         /* Set the default auto-hibernate idle timer value to 150 ms */
8999         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
9000                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
9001                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
9002         }
9003
9004         /* Hold auto suspend until async scan completes */
9005         pm_runtime_get_sync(dev);
9006         atomic_set(&hba->scsi_block_reqs_cnt, 0);
9007         /*
9008          * We are assuming that the device wasn't put into a sleep/power-down
9009          * state during the boot stage, before the kernel started.
9010          * This assumption helps avoid doing link startup twice during
9011          * ufshcd_probe_hba().
9012          */
9013         ufshcd_set_ufs_dev_active(hba);
9014
9015         async_schedule(ufshcd_async_scan, hba);
9016         ufs_sysfs_add_nodes(hba->dev);
9017
9018         return 0;
9019
9020 free_tmf_queue:
9021         blk_cleanup_queue(hba->tmf_queue);
9022 free_tmf_tag_set:
9023         blk_mq_free_tag_set(&hba->tmf_tag_set);
9024 free_cmd_queue:
9025         blk_cleanup_queue(hba->cmd_queue);
9026 out_remove_scsi_host:
9027         scsi_remove_host(hba->host);
9028 exit_gating:
9029         ufshcd_exit_clk_scaling(hba);
9030         ufshcd_exit_clk_gating(hba);
9031 out_disable:
9032         hba->is_irq_enabled = false;
9033         ufshcd_hba_exit(hba);
9034 out_error:
9035         return err;
9036 }
9037 EXPORT_SYMBOL_GPL(ufshcd_init);
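
/*
 * Illustrative sketch (not part of this driver): a minimal glue driver probe
 * ties ufshcd_alloc_host() and ufshcd_init() together roughly as follows.
 * The my_ufs_probe() name is hypothetical and error handling is trimmed:
 *
 *	static int my_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba;
 *		void __iomem *mmio;
 *		int irq, err;
 *
 *		mmio = devm_platform_ioremap_resource(pdev, 0);
 *		irq = platform_get_irq(pdev, 0);
 *		err = ufshcd_alloc_host(&pdev->dev, &hba);
 *		if (err)
 *			return err;
 *		err = ufshcd_init(hba, mmio, irq);
 *		if (err)
 *			ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 */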
9038
9039 MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
9040 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
9041 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
9042 MODULE_LICENSE("GPL");
9043 MODULE_VERSION(UFSHCD_DRIVER_VERSION);