/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
/*
 * Query request timeout for the fDeviceInit flag.
 * The fDeviceInit query response time for some devices is so large that the
 * default QUERY_REQ_TIMEOUT may not be enough for them.
 */
#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

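/*
 * Illustrative sketch (not part of the driver): because the macro expands to
 * a GNU C statement expression, it can be used as an rvalue. A hypothetical
 * caller toggling the VCC supply could look like:
 *
 *	int ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 *
 *	if (ret)
 *		dev_err(hba->dev, "enabling vcc failed: %d\n", ret);
 */
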
static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAX_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

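/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * polls doorbell bit 0 until the controller clears it, sleeping between
 * reads and giving up after one second, could be written as:
 *
 *	if (ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				     0x1, 0x0, 1000, 1000, true))
 *		dev_err(hba->dev, "doorbell bit 0 did not clear\n");
 */
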
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with the
 * tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

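/*
 * Illustrative sketch (not part of the driver): the intended
 * acquire/use/release pattern for a task management slot, with a
 * hypothetical wait loop around the non-blocking getter:
 *
 *	int free_slot;
 *
 *	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 *	... build and issue the TM request in slot 'free_slot' ...
 *	ufshcd_put_tm_slot(hba, free_slot);
 */
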
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}

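/*
 * Worked example (not part of the driver): the shift drops the Device Present
 * bit and the XOR inverts the three ready bits, so reg = 0x0F (all four bits
 * set) yields (0x0F >> 1) ^ 0x07 = 0x07 ^ 0x07 = 0 (success), while
 * reg = 0x0B (UTMRLRDY still clear) yields 0x05 ^ 0x07 = 0x02 (failure).
 */
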
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument3
 * Returns the value read from UIC command argument3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

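/*
 * Illustrative sketch (not part of the driver): given the 40us granularity
 * noted above for INT_AGGR_DEF_TO, a hypothetical call such as
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 *
 * asks the controller to coalesce completion interrupts until either all
 * request slots complete or 2 * 40us = 80us elapse, whichever comes first.
 */
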
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, the host controller is informed
 * that it can start processing requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent us from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

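/*
 * Illustrative sketch (not part of the driver): ufshcd_hold() and
 * ufshcd_release() bracket any access that needs the controller clocks
 * ungated. A hypothetical synchronous caller would look like:
 *
 *	ufshcd_hold(hba, false);	// may block until clocks are on
 *	... access the controller ...
 *	ufshcd_release(hba);		// may arm the delayed gate_work
 */
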
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

816
817static void ufshcd_init_clk_gating(struct ufs_hba *hba)
818{
819 if (!ufshcd_is_clkgating_allowed(hba))
820 return;
821
822 hba->clk_gating.delay_ms = 150;
823 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
824 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
825
826 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
827 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
828 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
829 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
830 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
831 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
832 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
833}
834
835static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
836{
837 if (!ufshcd_is_clkgating_allowed(hba))
838 return;
839 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
97cd6805
AM
840 cancel_work_sync(&hba->clk_gating.ungate_work);
841 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1ab27c9c
ST
842}
843
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be
 * called with the uic_cmd_mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

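/*
 * Illustrative sketch (not part of the driver): a DME_GET issued through this
 * path only needs a filled-in struct uic_command; 'attr_sel' here is a
 * hypothetical attribute selector:
 *
 *	struct uic_command uic_cmd = {0};
 *	int ret;
 *
 *	uic_cmd.command = UIC_CMD_DME_GET;
 *	uic_cmd.argument1 = UIC_ARG_MIB(attr_sel);
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		... attribute value is in uic_cmd.argument3 ...
 */
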
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

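/*
 * Illustrative sketch (not part of the driver): controller bring-up would
 * typically arm the standard mask defined at the top of this file with
 *
 *	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 *
 * enabling transfer-request, task-management and error interrupts in a
 * single register write.
 */
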
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_SCSI;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

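/*
 * Worked example (not part of the driver): an ordinary LUN passes through
 * unchanged (SCSI LUN 2 -> UPIU LUN 2), while a well-known LUN gains the
 * UFS_UPIU_WLUN_ID bit: SCSI W-LUN 0xc150 maps to UPIU W-LUN
 * (0x50 | 0x80) = 0xd0, and ufshcd_upiu_wlun_to_scsi_wlun(0xd0) maps it back
 * to (0x50 | SCSI_W_LUN_BASE) = 0xc150.
 */
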
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}

	/* if error handling is in progress, don't issue commands */
	if (ufshcd_eh_in_progress(hba)) {
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_comp_devman_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000, true);

	return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
			__func__, lrbp->task_tag);
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
		 * in case of an error, after clearing the doorbell,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}

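/*
 * Illustrative sketch (not part of the driver): the NOP OUT/NOP IN handshake
 * used to verify device initialization reduces to the pattern below (the
 * real caller wraps it in a NOP_OUT_RETRIES loop):
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */
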
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

dc3c8d3a
YG
1721static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1722 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
1723{
1724 int ret;
1725 int retries;
1726
1727 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1728 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1729 if (ret)
1730 dev_dbg(hba->dev,
1731 "%s: failed with error %d, retries %d\n",
1732 __func__, ret, retries);
1733 else
1734 break;
1735 }
1736
1737 if (ret)
1738 dev_err(hba->dev,
1739 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1740 __func__, opcode, idn, ret, retries);
1741 return ret;
1742}
1743
1744/**
1745 * ufshcd_query_flag() - API function for sending flag query requests
1746 * @hba: per-adapter instance
1747 * @opcode: flag query to perform
1748 * @idn: flag idn to access
1749 * @flag_res: the flag value after the query request completes
1750 *
1751 * Returns 0 for success, non-zero in case of failure
1752 */
1753int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1754 enum flag_idn idn, bool *flag_res)
1755{
1756 struct ufs_query_req *request = NULL;
1757 struct ufs_query_res *response = NULL;
1758 int err, index = 0, selector = 0;
1759 int timeout = QUERY_REQ_TIMEOUT;
1760
1761 BUG_ON(!hba);
1762
1763 ufshcd_hold(hba, false);
1764 mutex_lock(&hba->dev_cmd.lock);
1765 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1766 selector);
1767
1768 switch (opcode) {
1769 case UPIU_QUERY_OPCODE_SET_FLAG:
1770 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1771 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1772 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1773 break;
1774 case UPIU_QUERY_OPCODE_READ_FLAG:
1775 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1776 if (!flag_res) {
1777 /* No dummy reads */
1778 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1779 __func__);
1780 err = -EINVAL;
1781 goto out_unlock;
1782 }
1783 break;
1784 default:
1785 dev_err(hba->dev,
1786 "%s: Expected query flag opcode but got = %d\n",
1787 __func__, opcode);
1788 err = -EINVAL;
1789 goto out_unlock;
1790 }
1791
1792 if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
1793 timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
1794
1795 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
1796
1797 if (err) {
1798 dev_err(hba->dev,
1799 "%s: Sending flag query for idn %d failed, err = %d\n",
1800 __func__, idn, err);
1801 goto out_unlock;
1802 }
1803
1804 if (flag_res)
1805 *flag_res = (be32_to_cpu(response->upiu_res.value) &
1806 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1807
1808out_unlock:
1809 mutex_unlock(&hba->dev_cmd.lock);
1810 ufshcd_release(hba);
1811 return err;
1812}
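/*
 * Illustrative sketch, not part of the driver: reading a boolean flag via
 * the retrying wrapper ufshcd_query_flag_retry() defined above. This
 * mirrors the fDeviceInit read that ufshcd_complete_dev_init() performs
 * later in this file.
 */
static int __maybe_unused ufshcd_example_read_fdeviceinit(struct ufs_hba *hba,
							  bool *flag_res)
{
	return ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				       QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}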
1813
1814/**
1815 * ufshcd_query_attr - API function for sending attribute requests
1816 * @hba: per-adapter instance
1817 * @opcode: attribute opcode
1818 * @idn: attribute idn to access
1819 * @index: index field
1820 * @selector: selector field
1821 * @attr_val: the attribute value after the query request completes
1822 *
1823 * Returns 0 for success, non-zero in case of failure
1824*/
1825static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1826 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1827{
1828 struct ufs_query_req *request = NULL;
1829 struct ufs_query_res *response = NULL;
1830 int err;
1831
1832 BUG_ON(!hba);
1833
1834 ufshcd_hold(hba, false);
1835 if (!attr_val) {
1836 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1837 __func__, opcode);
1838 err = -EINVAL;
1839 goto out;
1840 }
1841
1842 mutex_lock(&hba->dev_cmd.lock);
1843 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1844 selector);
1845
1846 switch (opcode) {
1847 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1848 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1849 request->upiu_req.value = cpu_to_be32(*attr_val);
1850 break;
1851 case UPIU_QUERY_OPCODE_READ_ATTR:
1852 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1853 break;
1854 default:
1855 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1856 __func__, opcode);
1857 err = -EINVAL;
1858 goto out_unlock;
1859 }
1860
1861 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1862
1863 if (err) {
1864 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1865 __func__, opcode, idn, err);
1866 goto out_unlock;
1867 }
1868
1869 *attr_val = be32_to_cpu(response->upiu_res.value);
1870
1871out_unlock:
1872 mutex_unlock(&hba->dev_cmd.lock);
1873out:
1874 ufshcd_release(hba);
1875 return err;
1876}
1877
1878/**
1879 * ufshcd_query_attr_retry() - API function for sending query
1880 * attribute with retries
1881 * @hba: per-adapter instance
1882 * @opcode: attribute opcode
1883 * @idn: attribute idn to access
1884 * @index: index field
1885 * @selector: selector field
1886 * @attr_val: the attribute value after the query request
1887 * completes
1888 *
1889 * Returns 0 for success, non-zero in case of failure
1890*/
1891static int ufshcd_query_attr_retry(struct ufs_hba *hba,
1892 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
1893 u32 *attr_val)
1894{
1895 int ret = 0;
1896 u32 retries;
1897
1898 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1899 ret = ufshcd_query_attr(hba, opcode, idn, index,
1900 selector, attr_val);
1901 if (ret)
1902 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
1903 __func__, ret, retries);
1904 else
1905 break;
1906 }
1907
1908 if (ret)
1909 dev_err(hba->dev,
1910 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
1911 __func__, idn, ret, QUERY_REQ_RETRIES);
1912 return ret;
1913}
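/*
 * Illustrative sketch, not part of the driver: reading a device attribute
 * with ufshcd_query_attr_retry() above. QUERY_ATTR_IDN_BKOPS_STATUS is
 * assumed to come from ufs.h; index and selector are zero for device-level
 * attributes.
 */
static int __maybe_unused ufshcd_example_read_bkops_status(struct ufs_hba *hba,
							   u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
				       status);
}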
1914
1915static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1916 enum query_opcode opcode, enum desc_idn idn, u8 index,
1917 u8 selector, u8 *desc_buf, int *buf_len)
1918{
1919 struct ufs_query_req *request = NULL;
1920 struct ufs_query_res *response = NULL;
1921 int err;
1922
1923 BUG_ON(!hba);
1924
1925 ufshcd_hold(hba, false);
1926 if (!desc_buf) {
1927 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1928 __func__, opcode);
1929 err = -EINVAL;
1930 goto out;
1931 }
1932
1933 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1934 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1935 __func__, *buf_len);
1936 err = -EINVAL;
1937 goto out;
1938 }
1939
1940 mutex_lock(&hba->dev_cmd.lock);
1941 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1942 selector);
1943 hba->dev_cmd.query.descriptor = desc_buf;
1944 request->upiu_req.length = cpu_to_be16(*buf_len);
1945
1946 switch (opcode) {
1947 case UPIU_QUERY_OPCODE_WRITE_DESC:
1948 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1949 break;
1950 case UPIU_QUERY_OPCODE_READ_DESC:
1951 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1952 break;
1953 default:
1954 dev_err(hba->dev,
1955 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1956 __func__, opcode);
1957 err = -EINVAL;
1958 goto out_unlock;
1959 }
1960
1961 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1962
1963 if (err) {
1964 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1965 __func__, opcode, idn, err);
1966 goto out_unlock;
1967 }
1968
1969 hba->dev_cmd.query.descriptor = NULL;
1970 *buf_len = be16_to_cpu(response->upiu_res.length);
1971
1972out_unlock:
1973 mutex_unlock(&hba->dev_cmd.lock);
1974out:
1975 ufshcd_release(hba);
1976 return err;
1977}
1978
1979/**
1980 * ufshcd_query_descriptor_retry - API function for sending descriptor
1981 * requests
1982 * @hba: per-adapter instance
1983 * @opcode: descriptor query opcode
1984 * @idn: descriptor idn to access
1985 * @index: index field
1986 * @selector: selector field
1987 * @desc_buf: the buffer that contains the descriptor
1988 * @buf_len: length parameter passed to the device
1989 *
1990 * Returns 0 for success, non-zero in case of failure.
1991 * The buf_len parameter will contain, on return, the length parameter
1992 * received on the response.
1993 */
1994int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
1995 enum query_opcode opcode, enum desc_idn idn, u8 index,
1996 u8 selector, u8 *desc_buf, int *buf_len)
1997{
1998 int err;
1999 int retries;
2000
2001 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2002 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2003 selector, desc_buf, buf_len);
2004 if (!err || err == -EINVAL)
2005 break;
2006 }
2007
2008 return err;
2009}
2010EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
2011
2012/**
2013 * ufshcd_read_desc_param - read the specified descriptor parameter
2014 * @hba: Pointer to adapter instance
2015 * @desc_id: descriptor idn value
2016 * @desc_index: descriptor index
2017 * @param_offset: offset of the parameter to read
2018 * @param_read_buf: pointer to buffer where parameter would be read
2019 * @param_size: sizeof(param_read_buf)
2020 *
2021 * Return 0 in case of success, non-zero otherwise
2022 */
2023static int ufshcd_read_desc_param(struct ufs_hba *hba,
2024 enum desc_idn desc_id,
2025 int desc_index,
2026 u32 param_offset,
2027 u8 *param_read_buf,
2028 u32 param_size)
2029{
2030 int ret;
2031 u8 *desc_buf;
2032 u32 buff_len;
2033 bool is_kmalloc = true;
2034
2035 /* safety checks */
2036 if (desc_id >= QUERY_DESC_IDN_MAX)
2037 return -EINVAL;
2038
2039 buff_len = ufs_query_desc_max_size[desc_id];
2040 if ((param_offset + param_size) > buff_len)
2041 return -EINVAL;
2042
2043 if (!param_offset && (param_size == buff_len)) {
2044 /* memory space already available to hold full descriptor */
2045 desc_buf = param_read_buf;
2046 is_kmalloc = false;
2047 } else {
2048 /* allocate memory to hold full descriptor */
2049 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2050 if (!desc_buf)
2051 return -ENOMEM;
2052 }
2053
2054 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2055 desc_id, desc_index, 0, desc_buf,
2056 &buff_len);
2057
2058 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
2059 (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
2060 ufs_query_desc_max_size[desc_id])
2061 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
2062 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d\n",
2063 __func__, desc_id, param_offset, buff_len, ret);
2064 if (!ret)
2065 ret = -EINVAL;
2066
2067 goto out;
2068 }
2069
2070 if (is_kmalloc)
2071 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
2072out:
2073 if (is_kmalloc)
2074 kfree(desc_buf);
2075 return ret;
2076}
2077
2078static inline int ufshcd_read_desc(struct ufs_hba *hba,
2079 enum desc_idn desc_id,
2080 int desc_index,
2081 u8 *buf,
2082 u32 size)
2083{
2084 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
2085}
2086
2087static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2088 u8 *buf,
2089 u32 size)
2090{
2091 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
2092}
2093
2094int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2095{
2096 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2097}
2098EXPORT_SYMBOL(ufshcd_read_device_desc);
2099
2100/**
2101 * ufshcd_read_string_desc - read string descriptor
2102 * @hba: pointer to adapter instance
2103 * @desc_index: descriptor index
2104 * @buf: pointer to buffer where descriptor would be read
2105 * @size: size of buf
2106 * @ascii: if true convert from unicode to ascii characters
2107 *
2108 * Return 0 in case of success, non-zero otherwise
2109 */
2110int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2111 u32 size, bool ascii)
2112{
2113 int err = 0;
2114
2115 err = ufshcd_read_desc(hba,
2116 QUERY_DESC_IDN_STRING, desc_index, buf, size);
2117
2118 if (err) {
2119 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
2120 __func__, QUERY_REQ_RETRIES, err);
2121 goto out;
2122 }
2123
2124 if (ascii) {
2125 int desc_len;
2126 int ascii_len;
2127 int i;
2128 char *buff_ascii;
2129
2130 desc_len = buf[0];
2131 /* remove header and divide by 2 to move from UTF16 to UTF8 */
2132 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
2133 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
2134 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
2135 __func__);
2136 err = -ENOMEM;
2137 goto out;
2138 }
2139
2140 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
2141 if (!buff_ascii) {
2142 err = -ENOMEM;
2143 goto out;
2144 }
2145
2146 /*
2147 * the descriptor contains string in UTF16 format
2148 * we need to convert to utf-8 so it can be displayed
2149 */
2150 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
2151 desc_len - QUERY_DESC_HDR_SIZE,
2152 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
2153
2154 /* replace non-printable or non-ASCII characters with spaces */
2155 for (i = 0; i < ascii_len; i++)
2156 ufshcd_remove_non_printable(&buff_ascii[i]);
2157
2158 memset(buf + QUERY_DESC_HDR_SIZE, 0,
2159 size - QUERY_DESC_HDR_SIZE);
2160 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
2161 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
2162 kfree(buff_ascii);
2163 }
2164out:
2165 return err;
2166}
2167EXPORT_SYMBOL(ufshcd_read_string_desc);
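/*
 * Illustrative sketch, not part of the driver: reading a string descriptor
 * and converting it to ASCII in place. The descriptor index (here the
 * hypothetical 'name_index') would normally be read out of the device
 * descriptor first; QUERY_DESC_STRING_MAX_SIZE is assumed from ufs.h.
 */
static int __maybe_unused ufshcd_example_read_name(struct ufs_hba *hba,
						   int name_index, u8 *buf)
{
	return ufshcd_read_string_desc(hba, name_index, buf,
				       QUERY_DESC_STRING_MAX_SIZE, true);
}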
2168
2169/**
2170 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
2171 * @hba: Pointer to adapter instance
2172 * @lun: lun id
2173 * @param_offset: offset of the parameter to read
2174 * @param_read_buf: pointer to buffer where parameter would be read
2175 * @param_size: sizeof(param_read_buf)
2176 *
2177 * Return 0 in case of success, non-zero otherwise
2178 */
2179static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
2180 int lun,
2181 enum unit_desc_param param_offset,
2182 u8 *param_read_buf,
2183 u32 param_size)
2184{
2185 /*
2186 * Unit descriptors are only available for general purpose LUs (LUN id
2187 * from 0 to 7) and RPMB Well known LU.
2188 */
2189 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
2190 return -EOPNOTSUPP;
2191
2192 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
2193 param_offset, param_read_buf, param_size);
2194}
2195
2196/**
2197 * ufshcd_memory_alloc - allocate memory for host memory space data structures
2198 * @hba: per adapter instance
2199 *
2200 * 1. Allocate DMA memory for Command Descriptor array
2201 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
2202 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
2203 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
2204 * (UTMRDL)
2205 * 4. Allocate memory for local reference block(lrb).
2206 *
2207 * Returns 0 for success, non-zero in case of failure
2208 */
2209static int ufshcd_memory_alloc(struct ufs_hba *hba)
2210{
2211 size_t utmrdl_size, utrdl_size, ucdl_size;
2212
2213 /* Allocate memory for UTP command descriptors */
2214 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2215 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
2216 ucdl_size,
2217 &hba->ucdl_dma_addr,
2218 GFP_KERNEL);
2219
2220 /*
2221 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
2222 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
2223 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
2224 * be aligned to 128 bytes as well.
2225 */
2226 if (!hba->ucdl_base_addr ||
2227 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
2228 dev_err(hba->dev,
2229 "Command Descriptor Memory allocation failed\n");
2230 goto out;
2231 }
2232
2233 /*
2234 * Allocate memory for UTP Transfer descriptors
2235 * UFSHCI requires 1024 byte alignment of UTRD
2236 */
2237 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2238 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
2239 utrdl_size,
2240 &hba->utrdl_dma_addr,
2241 GFP_KERNEL);
2242 if (!hba->utrdl_base_addr ||
2243 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
2244 dev_err(hba->dev,
2245 "Transfer Descriptor Memory allocation failed\n");
2246 goto out;
2247 }
2248
2249 /*
2250 * Allocate memory for UTP Task Management descriptors
2251 * UFSHCI requires 1024 byte alignment of UTMRD
2252 */
2253 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2254 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
2255 utmrdl_size,
2256 &hba->utmrdl_dma_addr,
2257 GFP_KERNEL);
2258 if (!hba->utmrdl_base_addr ||
2259 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
2260 dev_err(hba->dev,
2261 "Task Management Descriptor Memory allocation failed\n");
2262 goto out;
2263 }
2264
2265 /* Allocate memory for local reference block */
2266 hba->lrb = devm_kzalloc(hba->dev,
2267 hba->nutrs * sizeof(struct ufshcd_lrb),
2268 GFP_KERNEL);
2269 if (!hba->lrb) {
2270 dev_err(hba->dev, "LRB Memory allocation failed\n");
2271 goto out;
2272 }
2273 return 0;
2274out:
2275 return -ENOMEM;
2276}
2277
2278/**
2279 * ufshcd_host_memory_configure - configure local reference block with
2280 * memory offsets
2281 * @hba: per adapter instance
2282 *
2283 * Configure Host memory space
2284 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
2285 * address.
2286 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
2287 * and PRDT offset.
2288 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
2289 * into local reference block.
2290 */
2291static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2292{
2293 struct utp_transfer_cmd_desc *cmd_descp;
2294 struct utp_transfer_req_desc *utrdlp;
2295 dma_addr_t cmd_desc_dma_addr;
2296 dma_addr_t cmd_desc_element_addr;
2297 u16 response_offset;
2298 u16 prdt_offset;
2299 int cmd_desc_size;
2300 int i;
2301
2302 utrdlp = hba->utrdl_base_addr;
2303 cmd_descp = hba->ucdl_base_addr;
2304
2305 response_offset =
2306 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2307 prdt_offset =
2308 offsetof(struct utp_transfer_cmd_desc, prd_table);
2309
2310 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2311 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2312
2313 for (i = 0; i < hba->nutrs; i++) {
2314 /* Configure UTRD with command descriptor base address */
2315 cmd_desc_element_addr =
2316 (cmd_desc_dma_addr + (cmd_desc_size * i));
2317 utrdlp[i].command_desc_base_addr_lo =
2318 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2319 utrdlp[i].command_desc_base_addr_hi =
2320 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2321
2322 /* Response upiu and prdt offset should be in double words */
2323 utrdlp[i].response_upiu_offset =
2324 cpu_to_le16((response_offset >> 2));
2325 utrdlp[i].prd_table_offset =
2326 cpu_to_le16((prdt_offset >> 2));
2327 utrdlp[i].response_upiu_length =
2328 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
2329
2330 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
2331 hba->lrb[i].ucd_req_ptr =
2332 (struct utp_upiu_req *)(cmd_descp + i);
2333 hba->lrb[i].ucd_rsp_ptr =
2334 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2335 hba->lrb[i].ucd_prdt_ptr =
2336 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2337 }
2338}
2339
2340/**
2341 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2342 * @hba: per adapter instance
2343 *
2344 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2345 * in order to initialize the Unipro link startup procedure.
2346 * Once the Unipro links are up, the device connected to the controller
2347 * is detected.
2348 *
2349 * Returns 0 on success, non-zero value on failure
2350 */
2351static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2352{
2353 struct uic_command uic_cmd = {0};
2354 int ret;
2355
2356 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
2357
2358 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2359 if (ret)
2360 dev_err(hba->dev,
2361 "dme-link-startup: error code %d\n", ret);
2362 return ret;
2363}
2364
2365static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2366{
2367 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2368 unsigned long min_sleep_time_us;
2369
2370 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2371 return;
2372
2373 /*
2374 * last_dme_cmd_tstamp will be 0 only for 1st call to
2375 * this function
2376 */
2377 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2378 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2379 } else {
2380 unsigned long delta =
2381 (unsigned long) ktime_to_us(
2382 ktime_sub(ktime_get(),
2383 hba->last_dme_cmd_tstamp));
2384
2385 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2386 min_sleep_time_us =
2387 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2388 else
2389 return; /* no more delay required */
2390 }
2391
2392 /* allow sleep for extra 50us if needed */
2393 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2394}
2395
2396/**
2397 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2398 * @hba: per adapter instance
2399 * @attr_sel: uic command argument1
2400 * @attr_set: attribute set type as uic command argument2
2401 * @mib_val: setting value as uic command argument3
2402 * @peer: indicate whether peer or local
2403 *
2404 * Returns 0 on success, non-zero value on failure
2405 */
2406int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2407 u8 attr_set, u32 mib_val, u8 peer)
2408{
2409 struct uic_command uic_cmd = {0};
2410 static const char *const action[] = {
2411 "dme-set",
2412 "dme-peer-set"
2413 };
2414 const char *set = action[!!peer];
2415 int ret;
2416 int retries = UFS_UIC_COMMAND_RETRIES;
2417
2418 uic_cmd.command = peer ?
2419 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2420 uic_cmd.argument1 = attr_sel;
2421 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2422 uic_cmd.argument3 = mib_val;
2423
2424 do {
2425 /* for peer attributes we retry upon failure */
2426 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2427 if (ret)
2428 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2429 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2430 } while (ret && peer && --retries);
2431
2432 if (!retries)
2433 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
2434 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
2435 retries);
2436
2437 return ret;
2438}
2439EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
2440
2441/**
2442 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2443 * @hba: per adapter instance
2444 * @attr_sel: uic command argument1
2445 * @mib_val: the value of the attribute as returned by the UIC command
2446 * @peer: indicate whether peer or local
2447 *
2448 * Returns 0 on success, non-zero value on failure
2449 */
2450int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2451 u32 *mib_val, u8 peer)
2452{
2453 struct uic_command uic_cmd = {0};
2454 static const char *const action[] = {
2455 "dme-get",
2456 "dme-peer-get"
2457 };
2458 const char *get = action[!!peer];
2459 int ret;
2460 int retries = UFS_UIC_COMMAND_RETRIES;
2461 struct ufs_pa_layer_attr orig_pwr_info;
2462 struct ufs_pa_layer_attr temp_pwr_info;
2463 bool pwr_mode_change = false;
2464
2465 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
2466 orig_pwr_info = hba->pwr_info;
2467 temp_pwr_info = orig_pwr_info;
2468
2469 if (orig_pwr_info.pwr_tx == FAST_MODE ||
2470 orig_pwr_info.pwr_rx == FAST_MODE) {
2471 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
2472 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
2473 pwr_mode_change = true;
2474 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
2475 orig_pwr_info.pwr_rx == SLOW_MODE) {
2476 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
2477 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
2478 pwr_mode_change = true;
2479 }
2480 if (pwr_mode_change) {
2481 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
2482 if (ret)
2483 goto out;
2484 }
2485 }
2486
2487 uic_cmd.command = peer ?
2488 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2489 uic_cmd.argument1 = attr_sel;
2490
2491 do {
2492 /* for peer attributes we retry upon failure */
2493 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2494 if (ret)
2495 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
2496 get, UIC_GET_ATTR_ID(attr_sel), ret);
2497 } while (ret && peer && --retries);
2498
2499 if (!retries)
2500 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
2501 get, UIC_GET_ATTR_ID(attr_sel), retries);
2502
2503 if (mib_val && !ret)
2504 *mib_val = uic_cmd.argument3;
2505
2506 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
2507 && pwr_mode_change)
2508 ufshcd_change_power_mode(hba, &orig_pwr_info);
2509out:
2510 return ret;
2511}
2512EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
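/*
 * Illustrative sketch, not part of the driver: the ufshcd_dme_get()/
 * ufshcd_dme_peer_get() wrappers from ufshcd.h build on the two helpers
 * above. This mirrors how ufshcd_get_max_pwr_mode() below reads the
 * connected lane counts from the local and peer attributes.
 */
static void __maybe_unused ufshcd_example_read_lanes(struct ufs_hba *hba)
{
	u32 rx_lanes = 0, tx_lanes = 0;

	/* local (host side) attribute */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &rx_lanes);
	/* peer (device side) attribute */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			    &tx_lanes);
	dev_dbg(hba->dev, "connected lanes: rx %u, tx %u\n",
		rx_lanes, tx_lanes);
}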
2513
2514/**
2515 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
2516 * state) and waits for it to take effect.
2517 *
2518 * @hba: per adapter instance
2519 * @cmd: UIC command to execute
2520 *
2521 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2522 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2523 * and device UniPro links, and hence their final completion would be indicated by
2524 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2525 * addition to normal UIC command completion Status (UCCS). This function only
2526 * returns after the relevant status bits indicate the completion.
2527 *
2528 * Returns 0 on success, non-zero value on failure
2529 */
2530static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2531{
2532 struct completion uic_async_done;
2533 unsigned long flags;
2534 u8 status;
2535 int ret;
2536 bool reenable_intr = false;
2537
2538 mutex_lock(&hba->uic_cmd_mutex);
2539 init_completion(&uic_async_done);
2540 ufshcd_add_delay_before_dme_cmd(hba);
2541
2542 spin_lock_irqsave(hba->host->host_lock, flags);
2543 hba->uic_async_done = &uic_async_done;
2544 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
2545 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
2546 /*
2547 * Make sure UIC command completion interrupt is disabled before
2548 * issuing UIC command.
2549 */
2550 wmb();
2551 reenable_intr = true;
2552 }
2553 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
2554 spin_unlock_irqrestore(hba->host->host_lock, flags);
2555 if (ret) {
2556 dev_err(hba->dev,
2557 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2558 cmd->command, cmd->argument3, ret);
2559 goto out;
2560 }
2561
2562 if (!wait_for_completion_timeout(hba->uic_async_done,
2563 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2564 dev_err(hba->dev,
2565 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2566 cmd->command, cmd->argument3);
2567 ret = -ETIMEDOUT;
2568 goto out;
2569 }
2570
2571 status = ufshcd_get_upmcrs(hba);
2572 if (status != PWR_LOCAL) {
2573 dev_err(hba->dev,
2574 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
2575 cmd->command, status);
2576 ret = (status != PWR_OK) ? status : -1;
2577 }
2578out:
2579 spin_lock_irqsave(hba->host->host_lock, flags);
2580 hba->active_uic_cmd = NULL;
2581 hba->uic_async_done = NULL;
2582 if (reenable_intr)
2583 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
2584 spin_unlock_irqrestore(hba->host->host_lock, flags);
2585 mutex_unlock(&hba->uic_cmd_mutex);
2586
2587 return ret;
2588}
2589
2590/**
2591 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2592 * using DME_SET primitives.
2593 * @hba: per adapter instance
2594 * @mode: power mode value
2595 *
2596 * Returns 0 on success, non-zero value on failure
2597 */
2598static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2599{
2600 struct uic_command uic_cmd = {0};
2601 int ret;
2602
2603 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
2604 ret = ufshcd_dme_set(hba,
2605 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
2606 if (ret) {
2607 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
2608 __func__, ret);
2609 goto out;
2610 }
2611 }
2612
2613 uic_cmd.command = UIC_CMD_DME_SET;
2614 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2615 uic_cmd.argument3 = mode;
2616 ufshcd_hold(hba, false);
2617 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2618 ufshcd_release(hba);
2619
2620out:
2621 return ret;
2622}
2623
2624static int ufshcd_link_recovery(struct ufs_hba *hba)
2625{
2626 int ret;
2627 unsigned long flags;
2628
2629 spin_lock_irqsave(hba->host->host_lock, flags);
2630 hba->ufshcd_state = UFSHCD_STATE_RESET;
2631 ufshcd_set_eh_in_progress(hba);
2632 spin_unlock_irqrestore(hba->host->host_lock, flags);
2633
2634 ret = ufshcd_host_reset_and_restore(hba);
2635
2636 spin_lock_irqsave(hba->host->host_lock, flags);
2637 if (ret)
2638 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2639 ufshcd_clear_eh_in_progress(hba);
2640 spin_unlock_irqrestore(hba->host->host_lock, flags);
2641
2642 if (ret)
2643 dev_err(hba->dev, "%s: link recovery failed, err %d",
2644 __func__, ret);
2645
2646 return ret;
2647}
2648
2649static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2650{
2651 int ret;
2652 struct uic_command uic_cmd = {0};
2653
2654 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2655 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2656
2657 if (ret) {
2658 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
2659 __func__, ret);
2660
2661 /*
2662 * If link recovery fails then return an error so that the caller
2663 * doesn't retry the hibern8 enter again.
2664 */
2665 if (ufshcd_link_recovery(hba))
2666 ret = -ENOLINK;
2667 }
2668
2669 return ret;
2670}
2671
2672static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2673{
2674 int ret = 0, retries;
2675
2676 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
2677 ret = __ufshcd_uic_hibern8_enter(hba);
2678 if (!ret || ret == -ENOLINK)
2679 goto out;
2680 }
2681out:
2682 return ret;
2683}
2684
2685static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2686{
2687 struct uic_command uic_cmd = {0};
2688 int ret;
2689
2690 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2691 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2692 if (ret) {
2693 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2694 __func__, ret);
2695 ret = ufshcd_link_recovery(hba);
2696 }
2697
2698 return ret;
2699}
2700
2701/**
2702 * ufshcd_init_pwr_info - setting the POR (power on reset)
2703 * values in hba power info
2704 * @hba: per-adapter instance
2705 */
2706static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2707{
2708 hba->pwr_info.gear_rx = UFS_PWM_G1;
2709 hba->pwr_info.gear_tx = UFS_PWM_G1;
2710 hba->pwr_info.lane_rx = 1;
2711 hba->pwr_info.lane_tx = 1;
2712 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2713 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2714 hba->pwr_info.hs_rate = 0;
2715}
2716
2717/**
2718 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2719 * @hba: per-adapter instance
2720 */
2721static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2722{
2723 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2724
2725 if (hba->max_pwr_info.is_valid)
2726 return 0;
2727
2728 pwr_info->pwr_tx = FASTAUTO_MODE;
2729 pwr_info->pwr_rx = FASTAUTO_MODE;
2730 pwr_info->hs_rate = PA_HS_MODE_B;
2731
2732 /* Get the connected lane count */
2733 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2734 &pwr_info->lane_rx);
2735 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2736 &pwr_info->lane_tx);
2737
2738 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2739 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2740 __func__,
2741 pwr_info->lane_rx,
2742 pwr_info->lane_tx);
2743 return -EINVAL;
2744 }
2745
2746 /*
2747 * First, get the maximum gears of HS speed.
2748 * If a zero value, it means there is no HSGEAR capability.
2749 * Then, get the maximum gears of PWM speed.
2750 */
2751 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2752 if (!pwr_info->gear_rx) {
2753 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2754 &pwr_info->gear_rx);
2755 if (!pwr_info->gear_rx) {
2756 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2757 __func__, pwr_info->gear_rx);
2758 return -EINVAL;
2759 }
2760 pwr_info->pwr_rx = SLOWAUTO_MODE;
2761 }
2762
2763 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2764 &pwr_info->gear_tx);
2765 if (!pwr_info->gear_tx) {
2766 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2767 &pwr_info->gear_tx);
2768 if (!pwr_info->gear_tx) {
2769 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2770 __func__, pwr_info->gear_tx);
2771 return -EINVAL;
2772 }
2773 pwr_info->pwr_tx = SLOWAUTO_MODE;
2774 }
2775
2776 hba->max_pwr_info.is_valid = true;
2777 return 0;
2778}
2779
2780static int ufshcd_change_power_mode(struct ufs_hba *hba,
2781 struct ufs_pa_layer_attr *pwr_mode)
2782{
2783 int ret;
2784
2785 /* if already configured to the requested pwr_mode */
2786 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2787 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2788 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2789 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2790 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2791 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2792 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2793 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2794 return 0;
2795 }
2796
2797 /*
2798 * Configure attributes for power mode change with below.
2799 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2800 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2801 * - PA_HSSERIES
2802 */
2803 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2804 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2805 pwr_mode->lane_rx);
2806 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2807 pwr_mode->pwr_rx == FAST_MODE)
2808 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2809 else
2810 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2811
2812 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2813 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2814 pwr_mode->lane_tx);
2815 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2816 pwr_mode->pwr_tx == FAST_MODE)
2817 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2818 else
2819 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2820
2821 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2822 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2823 pwr_mode->pwr_rx == FAST_MODE ||
2824 pwr_mode->pwr_tx == FAST_MODE)
2825 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2826 pwr_mode->hs_rate);
2827
2828 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2829 | pwr_mode->pwr_tx);
2830
2831 if (ret) {
2832 dev_err(hba->dev,
2833 "%s: power mode change failed %d\n", __func__, ret);
2834 } else {
2835 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
2836 pwr_mode);
2837
2838 memcpy(&hba->pwr_info, pwr_mode,
2839 sizeof(struct ufs_pa_layer_attr));
2840 }
2841
2842 return ret;
2843}
2844
2845/**
2846 * ufshcd_config_pwr_mode - configure a new power mode
2847 * @hba: per-adapter instance
2848 * @desired_pwr_mode: desired power configuration
2849 */
2850static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2851 struct ufs_pa_layer_attr *desired_pwr_mode)
2852{
2853 struct ufs_pa_layer_attr final_params = { 0 };
2854 int ret;
2855
2856 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
2857 desired_pwr_mode, &final_params);
2858
2859 if (ret)
2860 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2861
2862 ret = ufshcd_change_power_mode(hba, &final_params);
2863
2864 return ret;
2865}
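/*
 * Illustrative sketch, not part of the driver: negotiating the fastest
 * power mode the link supports, as the probe path does. Read the maximum
 * capabilities into hba->max_pwr_info first, then program them.
 */
static int __maybe_unused ufshcd_example_scale_to_max(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_get_max_pwr_mode(hba);
	if (ret)
		return ret;

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}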
2866
2867/**
2868 * ufshcd_complete_dev_init() - checks device readiness
2869 * @hba: per-adapter instance
2870 *
2871 * Set fDeviceInit flag and poll until device toggles it.
2872 */
2873static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2874{
2875 int i;
2876 int err;
2877 bool flag_res = 1;
2878
2879 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2880 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
2881 if (err) {
2882 dev_err(hba->dev,
2883 "%s setting fDeviceInit flag failed with error %d\n",
2884 __func__, err);
2885 goto out;
2886 }
2887
dc3c8d3a
YG
2888 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
2889 for (i = 0; i < 1000 && !err && flag_res; i++)
2890 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2891 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
2892
2893 if (err)
2894 dev_err(hba->dev,
2895 "%s reading fDeviceInit flag failed with error %d\n",
2896 __func__, err);
2897 else if (flag_res)
2898 dev_err(hba->dev,
2899 "%s fDeviceInit was not cleared by the device\n",
2900 __func__);
2901
2902out:
2903 return err;
2904}
2905
2906/**
2907 * ufshcd_make_hba_operational - Make UFS controller operational
2908 * @hba: per adapter instance
2909 *
2910 * To bring UFS host controller to operational state,
2911 * 1. Enable required interrupts
2912 * 2. Configure interrupt aggregation
2913 * 3. Program UTRL and UTMRL base address
2914 * 4. Configure run-stop-registers
2915 *
2916 * Returns 0 on success, non-zero value on failure
2917 */
2918static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2919{
2920 int err = 0;
2921 u32 reg;
2922
2923 /* Enable required interrupts */
2924 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2925
2926 /* Configure interrupt aggregation */
2927 if (ufshcd_is_intr_aggr_allowed(hba))
2928 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2929 else
2930 ufshcd_disable_intr_aggr(hba);
2931
2932 /* Configure UTRL and UTMRL base address registers */
2933 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2934 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
2935 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2936 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
2937 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2938 REG_UTP_TASK_REQ_LIST_BASE_L);
2939 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2940 REG_UTP_TASK_REQ_LIST_BASE_H);
2941
2942 /*
2943 * Make sure base address and interrupt setup are updated before
2944 * enabling the run/stop registers below.
2945 */
2946 wmb();
2947
2948 /*
2949 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
2950 */
2951 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
2952 if (!(ufshcd_get_lists_status(reg))) {
2953 ufshcd_enable_run_stop_reg(hba);
2954 } else {
2955 dev_err(hba->dev,
2956 "Host controller not ready to process requests");
2957 err = -EIO;
2958 goto out;
2959 }
2960
7a3e97b0
SY
2961out:
2962 return err;
2963}
2964
2965/**
2966 * ufshcd_hba_stop - Send controller to reset state
2967 * @hba: per adapter instance
2968 * @can_sleep: perform sleep or just spin
2969 */
2970static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
2971{
2972 int err;
2973
2974 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
2975 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
2976 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
2977 10, 1, can_sleep);
2978 if (err)
2979 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
2980}
2981
2982/**
2983 * ufshcd_hba_enable - initialize the controller
2984 * @hba: per adapter instance
2985 *
2986 * The controller resets itself and controller firmware initialization
2987 * sequence kicks off. When controller is ready it will set
2988 * the Host Controller Enable bit to 1.
2989 *
2990 * Returns 0 on success, non-zero value on failure
2991 */
2992static int ufshcd_hba_enable(struct ufs_hba *hba)
2993{
2994 int retry;
2995
2996 /*
2997 * msleep of 1 and 5 used in this function might result in msleep(20),
2998 * but it was necessary to send the UFS FPGA to reset mode during
2999 * development and testing of this driver. msleep can be changed to
3000 * mdelay and retry count can be reduced based on the controller.
3001 */
3002 if (!ufshcd_is_hba_active(hba))
3003 /* change controller state to "reset state" */
3004 ufshcd_hba_stop(hba, true);
3005
3006 /* UniPro link is disabled at this point */
3007 ufshcd_set_link_off(hba);
3008
3009 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
3010
7a3e97b0
SY
3011 /* start controller initialization sequence */
3012 ufshcd_hba_start(hba);
3013
3014 /*
3015 * To initialize a UFS host controller HCE bit must be set to 1.
3016 * During initialization the HCE bit value changes from 1->0->1.
3017 * When the host controller completes initialization sequence
3018 * it sets the value of HCE bit to 1. The same HCE bit is read back
3019 * to check if the controller has completed initialization sequence.
3021 * So without this delay, the HCE = 1 value set by the previous
3022 * instruction might be read back.
3022 * This delay can be changed based on the controller.
3023 */
3024 msleep(1);
3025
3026 /* wait for the host controller to complete initialization */
3027 retry = 10;
3028 while (ufshcd_is_hba_active(hba)) {
3029 if (retry) {
3030 retry--;
3031 } else {
3032 dev_err(hba->dev,
3033 "Controller enable failed\n");
3034 return -EIO;
3035 }
3036 msleep(5);
3037 }
3038
3039 /* enable UIC related interrupts */
3040 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
3041
3042 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
3043
3044 return 0;
3045}
3046
3047static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
3048{
3049 int tx_lanes, i, err = 0;
3050
3051 if (!peer)
3052 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3053 &tx_lanes);
3054 else
3055 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3056 &tx_lanes);
3057 for (i = 0; i < tx_lanes; i++) {
3058 if (!peer)
3059 err = ufshcd_dme_set(hba,
3060 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3061 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3062 0);
3063 else
3064 err = ufshcd_dme_peer_set(hba,
3065 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
3066 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
3067 0);
3068 if (err) {
3069 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
3070 __func__, peer, i, err);
3071 break;
3072 }
3073 }
3074
3075 return err;
3076}
3077
3078static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
3079{
3080 return ufshcd_disable_tx_lcc(hba, true);
3081}
3082
3083/**
3084 * ufshcd_link_startup - Initialize unipro link startup
3085 * @hba: per adapter instance
3086 *
3087 * Returns 0 for success, non-zero in case of failure
3088 */
3089static int ufshcd_link_startup(struct ufs_hba *hba)
3090{
3091 int ret;
3092 int retries = DME_LINKSTARTUP_RETRIES;
3093
3094 do {
3095 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
3096
3097 ret = ufshcd_dme_link_startup(hba);
3098
3099 /* check if device is detected by inter-connect layer */
3100 if (!ret && !ufshcd_is_device_present(hba)) {
3101 dev_err(hba->dev, "%s: Device not present\n", __func__);
3102 ret = -ENXIO;
3103 goto out;
3104 }
3105
3106 /*
3107 * DME link lost indication is only received when link is up,
3108 * but we can't be sure if the link is up until link startup
3109 * succeeds. So reset the local Uni-Pro and try again.
3110 */
3111 if (ret && ufshcd_hba_enable(hba))
3112 goto out;
3113 } while (ret && retries--);
3114
3115 if (ret)
3116 /* failed to get the link up... retire */
3117 goto out;
3118
3119 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3120 ret = ufshcd_disable_device_tx_lcc(hba);
3121 if (ret)
3122 goto out;
3123 }
3124
3125 /* Include any host controller configuration via UIC commands */
3126 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
3127 if (ret)
3128 goto out;
3129
3130 ret = ufshcd_make_hba_operational(hba);
3131out:
3132 if (ret)
3133 dev_err(hba->dev, "link startup failed %d\n", ret);
3134 return ret;
3135}
3136
3137/**
3138 * ufshcd_verify_dev_init() - Verify device initialization
3139 * @hba: per-adapter instance
3140 *
3141 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
3142 * device Transport Protocol (UTP) layer is ready after a reset.
3143 * If the UTP layer at the device side is not initialized, it may
3144 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
3145 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
3146 */
3147static int ufshcd_verify_dev_init(struct ufs_hba *hba)
3148{
3149 int err = 0;
3150 int retries;
3151
3152 ufshcd_hold(hba, false);
3153 mutex_lock(&hba->dev_cmd.lock);
3154 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
3155 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
3156 NOP_OUT_TIMEOUT);
3157
3158 if (!err || err == -ETIMEDOUT)
3159 break;
3160
3161 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
3162 }
3163 mutex_unlock(&hba->dev_cmd.lock);
3164 ufshcd_release(hba);
3165
3166 if (err)
3167 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
3168 return err;
3169}
3170
3171/**
3172 * ufshcd_set_queue_depth - set lun queue depth
3173 * @sdev: pointer to SCSI device
3174 *
3175 * Read bLUQueueDepth value and activate scsi tagged command
3176 * queueing. For WLUN, queue depth is set to 1. For best-effort
3177 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
3178 * value that the host can queue.
3179 */
3180static void ufshcd_set_queue_depth(struct scsi_device *sdev)
3181{
3182 int ret = 0;
3183 u8 lun_qdepth;
3184 struct ufs_hba *hba;
3185
3186 hba = shost_priv(sdev->host);
3187
3188 lun_qdepth = hba->nutrs;
3189 ret = ufshcd_read_unit_desc_param(hba,
3190 ufshcd_scsi_to_upiu_lun(sdev->lun),
3191 UNIT_DESC_PARAM_LU_Q_DEPTH,
3192 &lun_qdepth,
3193 sizeof(lun_qdepth));
3194
3195 /* Some WLUN doesn't support unit descriptor */
3196 if (ret == -EOPNOTSUPP)
3197 lun_qdepth = 1;
3198 else if (!lun_qdepth)
3199 /* eventually, we can figure out the real queue depth */
3200 lun_qdepth = hba->nutrs;
3201 else
3202 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
3203
3204 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
3205 __func__, lun_qdepth);
3206 scsi_change_queue_depth(sdev, lun_qdepth);
3207}
3208
3209/*
3210 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
3211 * @hba: per-adapter instance
3212 * @lun: UFS device lun id
3213 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
3214 *
3215 * Returns 0 in case of success, with the write protect status returned in the
3216 * @b_lu_write_protect parameter.
3217 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
3218 * Returns -EINVAL in case of invalid parameters passed to this function.
3219 */
3220static int ufshcd_get_lu_wp(struct ufs_hba *hba,
3221 u8 lun,
3222 u8 *b_lu_write_protect)
3223{
3224 int ret;
3225
3226 if (!b_lu_write_protect)
3227 ret = -EINVAL;
3228 /*
3229 * According to UFS device spec, RPMB LU can't be write
3230 * protected so skip reading bLUWriteProtect parameter for
3231 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
3232 */
3233 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
3234 ret = -ENOTSUPP;
3235 else
3236 ret = ufshcd_read_unit_desc_param(hba,
3237 lun,
3238 UNIT_DESC_PARAM_LU_WR_PROTECT,
3239 b_lu_write_protect,
3240 sizeof(*b_lu_write_protect));
3241 return ret;
3242}
3243
3244/**
3245 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
3246 * status
3247 * @hba: per-adapter instance
3248 * @sdev: pointer to SCSI device
3249 *
3250 */
3251static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
3252 struct scsi_device *sdev)
3253{
3254 if (hba->dev_info.f_power_on_wp_en &&
3255 !hba->dev_info.is_lu_power_on_wp) {
3256 u8 b_lu_write_protect;
3257
3258 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
3259 &b_lu_write_protect) &&
3260 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
3261 hba->dev_info.is_lu_power_on_wp = true;
3262 }
3263}
3264
3265/**
3266 * ufshcd_slave_alloc - handle initial SCSI device configurations
3267 * @sdev: pointer to SCSI device
3268 *
3269 * Returns success
3270 */
3271static int ufshcd_slave_alloc(struct scsi_device *sdev)
3272{
3273 struct ufs_hba *hba;
3274
3275 hba = shost_priv(sdev->host);
3276
3277 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
3278 sdev->use_10_for_ms = 1;
3279
3280 /* allow SCSI layer to restart the device in case of errors */
3281 sdev->allow_restart = 1;
3282
3283 /* REPORT SUPPORTED OPERATION CODES is not supported */
3284 sdev->no_report_opcodes = 1;
3285
3286
3287 ufshcd_set_queue_depth(sdev);
3288
3289 ufshcd_get_lu_power_on_wp_status(hba, sdev);
3290
3291 return 0;
3292}
3293
3294/**
3295 * ufshcd_change_queue_depth - change queue depth
3296 * @sdev: pointer to SCSI device
3297 * @depth: required depth to set
3298 *
3299 * Change queue depth and make sure the max. limits are not crossed.
3300 */
3301static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
3302{
3303 struct ufs_hba *hba = shost_priv(sdev->host);
3304
3305 if (depth > hba->nutrs)
3306 depth = hba->nutrs;
3307 return scsi_change_queue_depth(sdev, depth);
3308}
3309
3310/**
3311 * ufshcd_slave_configure - adjust SCSI device configurations
3312 * @sdev: pointer to SCSI device
3313 */
3314static int ufshcd_slave_configure(struct scsi_device *sdev)
3315{
3316 struct request_queue *q = sdev->request_queue;
3317
3318 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
3319 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
3320
3321 return 0;
3322}
3323
3324/**
3325 * ufshcd_slave_destroy - remove SCSI device configurations
3326 * @sdev: pointer to SCSI device
3327 */
3328static void ufshcd_slave_destroy(struct scsi_device *sdev)
3329{
3330 struct ufs_hba *hba;
3331
3332 hba = shost_priv(sdev->host);
3333 /* Drop the reference as it won't be needed anymore */
3334 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
3335 unsigned long flags;
3336
3337 spin_lock_irqsave(hba->host->host_lock, flags);
3338 hba->sdev_ufs_device = NULL;
3339 spin_unlock_irqrestore(hba->host->host_lock, flags);
3340 }
3341}
3342
3343/**
3344 * ufshcd_task_req_compl - handle task management request completion
3345 * @hba: per adapter instance
3346 * @index: index of the completed request
3347 * @resp: task management service response
3348 *
3349 * Returns non-zero value on error, zero on success
3350 */
3351static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
3352{
3353 struct utp_task_req_desc *task_req_descp;
3354 struct utp_upiu_task_rsp *task_rsp_upiup;
3355 unsigned long flags;
3356 int ocs_value;
3357 int task_result;
3358
3359 spin_lock_irqsave(hba->host->host_lock, flags);
3360
3361 /* Clear completed tasks from outstanding_tasks */
3362 __clear_bit(index, &hba->outstanding_tasks);
3363
3364 task_req_descp = hba->utmrdl_base_addr;
3365 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
3366
3367 if (ocs_value == OCS_SUCCESS) {
3368 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3369 task_req_descp[index].task_rsp_upiu;
3370 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
3371 task_result = task_result & MASK_TM_SERVICE_RESP;
3372 if (resp)
3373 *resp = (u8)task_result;
7a3e97b0 3374 } else {
3375 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
3376 __func__, ocs_value);
7a3e97b0
SY
3377 }
3378 spin_unlock_irqrestore(hba->host->host_lock, flags);
3379
3380 return ocs_value;
3381}
3382
3383/**
3384 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
3385 * @lrbp: pointer to local reference block of completed command
3386 * @scsi_status: SCSI command status
3387 *
3388 * Returns value base on SCSI command status
3389 */
3390static inline int
3391ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
3392{
3393 int result = 0;
3394
3395 switch (scsi_status) {
3396 case SAM_STAT_CHECK_CONDITION:
3397 ufshcd_copy_sense_data(lrbp);
3398 case SAM_STAT_GOOD:
3399 result |= DID_OK << 16 |
3400 COMMAND_COMPLETE << 8 |
3401 scsi_status;
7a3e97b0
SY
3402 break;
3403 case SAM_STAT_TASK_SET_FULL:
3404 case SAM_STAT_BUSY:
3405 case SAM_STAT_TASK_ABORTED:
3406 ufshcd_copy_sense_data(lrbp);
3407 result |= scsi_status;
3408 break;
3409 default:
3410 result |= DID_ERROR << 16;
3411 break;
3412 } /* end of switch */
3413
3414 return result;
3415}
3416
3417/**
3418 * ufshcd_transfer_rsp_status - Get overall status of the response
3419 * @hba: per adapter instance
3420 * @lrbp: pointer to local reference block of completed command
3421 *
3422 * Returns result of the command to notify SCSI midlayer
3423 */
3424static inline int
3425ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3426{
3427 int result = 0;
3428 int scsi_status;
3429 int ocs;
3430
3431 /* overall command status of utrd */
3432 ocs = ufshcd_get_tr_ocs(lrbp);
3433
3434 switch (ocs) {
3435 case OCS_SUCCESS:
3436 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3437
3438 switch (result) {
3439 case UPIU_TRANSACTION_RESPONSE:
3440 /*
3441 * get the response UPIU result to extract
3442 * the SCSI command status
3443 */
3444 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3445
3446 /*
3447 * get the result based on SCSI status response
3448 * to notify the SCSI midlayer of the command status
3449 */
3450 scsi_status = result & MASK_SCSI_STATUS;
3451 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
3452
3453 /*
3454 * Currently we are only supporting BKOPs exception
3455 * events hence we can ignore BKOPs exception event
3456 * during power management callbacks. BKOPs exception
3457 * event is not expected to be raised in runtime suspend
3458 * callback as it allows the urgent bkops.
3459 * During system suspend, we are anyway forcefully
3460 * disabling the bkops and if urgent bkops is needed
3461 * it will be enabled on system resume. Long term
3462 * solution could be to abort the system suspend if
3463 * UFS device needs urgent BKOPs.
3464 */
3465 if (!hba->pm_op_in_progress &&
3466 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
3467 schedule_work(&hba->eeh_work);
3468 break;
3469 case UPIU_TRANSACTION_REJECT_UPIU:
3470 /* TODO: handle Reject UPIU Response */
3471 result = DID_ERROR << 16;
3472 dev_err(hba->dev,
3473 "Reject UPIU not fully implemented\n");
3474 break;
3475 default:
3476 result = DID_ERROR << 16;
3477 dev_err(hba->dev,
3478 "Unexpected request response code = %x\n",
3479 result);
7a3e97b0
SY
3480 break;
3481 }
7a3e97b0
SY
3482 break;
3483 case OCS_ABORTED:
3484 result |= DID_ABORT << 16;
3485 break;
e8e7f271
SRT
3486 case OCS_INVALID_COMMAND_STATUS:
3487 result |= DID_REQUEUE << 16;
3488 break;
7a3e97b0
SY
3489 case OCS_INVALID_CMD_TABLE_ATTR:
3490 case OCS_INVALID_PRDT_ATTR:
3491 case OCS_MISMATCH_DATA_BUF_SIZE:
3492 case OCS_MISMATCH_RESP_UPIU_SIZE:
3493 case OCS_PEER_COMM_FAILURE:
3494 case OCS_FATAL_ERROR:
3495 default:
3496 result |= DID_ERROR << 16;
3b1d0580 3497 dev_err(hba->dev,
7a3e97b0
SY
3498 "OCS error from controller = %x\n", ocs);
3499 break;
3500 } /* end of switch */
3501
3502 return result;
3503}
3504
6ccf44fe
SJ
3505/**
3506 * ufshcd_uic_cmd_compl - handle completion of uic command
3507 * @hba: per adapter instance
53b3d9c3 3508 * @intr_status: interrupt status generated by the controller
6ccf44fe 3509 */
53b3d9c3 3510static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 3511{
53b3d9c3 3512 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
3513 hba->active_uic_cmd->argument2 |=
3514 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
3515 hba->active_uic_cmd->argument3 =
3516 ufshcd_get_dme_attr_val(hba);
6ccf44fe
SJ
3517 complete(&hba->active_uic_cmd->done);
3518 }
53b3d9c3 3519
57d104c1
SJ
3520 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3521 complete(hba->uic_async_done);
6ccf44fe
SJ
3522}
3523
7a3e97b0 3524/**
9a47ec7c 3525 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
7a3e97b0 3526 * @hba: per adapter instance
9a47ec7c 3527 * @completed_reqs: requests to complete
7a3e97b0 3528 */
9a47ec7c
YG
3529static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3530 unsigned long completed_reqs)
7a3e97b0 3531{
5a0b0cb9
SRT
3532 struct ufshcd_lrb *lrbp;
3533 struct scsi_cmnd *cmd;
7a3e97b0
SY
3534 int result;
3535 int index;
e9d501b1 3536
e9d501b1
DR
3537 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3538 lrbp = &hba->lrb[index];
3539 cmd = lrbp->cmd;
3540 if (cmd) {
3541 result = ufshcd_transfer_rsp_status(hba, lrbp);
3542 scsi_dma_unmap(cmd);
3543 cmd->result = result;
3544 /* Mark completed command as NULL in LRB */
3545 lrbp->cmd = NULL;
3546 clear_bit_unlock(index, &hba->lrb_in_use);
3547 /* Do not touch lrbp after scsi done */
3548 cmd->scsi_done(cmd);
1ab27c9c 3549 __ufshcd_release(hba);
300bb13f
JP
3550 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
3551 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
e9d501b1
DR
3552 if (hba->dev_cmd.complete)
3553 complete(hba->dev_cmd.complete);
3554 }
3555 }
7a3e97b0
SY
3556
3557 /* clear corresponding bits of completed commands */
3558 hba->outstanding_reqs ^= completed_reqs;
3559
856b3483
ST
3560 ufshcd_clk_scaling_update_busy(hba);
3561
5a0b0cb9
SRT
3562 /* we might have free'd some tags above */
3563 wake_up(&hba->dev_cmd.tag_wq);
7a3e97b0
SY
3564}
3565
9a47ec7c
YG
3566/**
3567 * ufshcd_transfer_req_compl - handle SCSI and query command completion
3568 * @hba: per adapter instance
3569 */
3570static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3571{
3572 unsigned long completed_reqs;
3573 u32 tr_doorbell;
3574
3575 /* Resetting interrupt aggregation counters first and reading the
3576 * DOOR_BELL afterward allows us to handle all the completed requests.
3577 * In order to prevent other interrupts starvation the DB is read once
3578 * after reset. The down side of this solution is the possibility of
3579 * false interrupt if device completes another request after resetting
3580 * aggregation and before reading the DB.
3581 */
3582 if (ufshcd_is_intr_aggr_allowed(hba))
3583 ufshcd_reset_intr_aggr(hba);
3584
3585 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3586 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3587
3588 __ufshcd_transfer_req_compl(hba, completed_reqs);
3589}
3590
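/*
 * Illustrative sketch, not part of the original driver: how the XOR in
 * ufshcd_transfer_req_compl() isolates completed requests. A doorbell
 * bit is 1 while its request is still outstanding in the controller
 * and drops to 0 on completion, so XOR-ing the doorbell against the
 * software copy of outstanding requests leaves exactly the cleared
 * bits:
 *
 *	outstanding_reqs = 0b1011  (tags 0, 1 and 3 were issued)
 *	tr_doorbell      = 0b0010  (only tag 1 is still pending)
 *	completed_reqs   = 0b1001  (tags 0 and 3 have completed)
 *
 * __ufshcd_transfer_req_compl() then clears those same bits with
 * "hba->outstanding_reqs ^= completed_reqs".
 */
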
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}

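/*
 * Illustrative sketch, not part of the original driver: the exception
 * event control word is a 16-bit mask, so enabling or disabling an
 * event is plain bit manipulation on the cached hba->ee_ctrl_mask
 * before it is written back via QUERY_ATTR_IDN_EE_CONTROL. E.g. to
 * track urgent BKOPS exceptions and later stop tracking them:
 *
 *	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
 *	...
 *	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
 *
 * Both helpers are no-ops when the cached mask already matches the
 * request, which avoids redundant query traffic to the device.
 */
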
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow the device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device from doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of the device moving into a critical state where it is
 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPS is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to its default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	hba->auto_bkops_enabled = false;
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and enable the
 * fBackgroundOpsEn flag in the device to permit background operations if
 * the device bkops_status is greater than or equal to the "status"
 * argument passed to this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Callers of this function can check the "hba->auto_bkops_enabled"
 * flag to know whether auto bkops is enabled or disabled after this
 * function returns control to them.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}

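/*
 * Illustrative sketch, not part of the original driver: a caller that
 * wants background operations enabled only once they start to matter
 * for performance would pass that threshold explicitly and then check
 * the tracking flag, per the NOTE above:
 *
 *	if (!ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT) &&
 *	    hba->auto_bkops_enabled)
 *		dev_dbg(hba->dev, "device manages bkops on its own\n");
 */
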
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * Returns 0 if BKOPS is enabled, 1 if BKOPS is not enabled, and a
 * negative error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices raise the urgent bkops exception
	 * event even when the BKOPS status doesn't indicate performance
	 * impacted or critical. Handle such devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;
	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;

	if (status & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

out:
	pm_runtime_put_sync(hba->dev);
	return;
}

/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
}

/**
 * ufshcd_quirk_dl_nac_errors - Checks whether error handling is required
 * to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
	 * device fatal errors and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * Wait 50ms to see if any other errors show up.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * Now check if we have received any other severe errors
		 * besides the DL NAC error.
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out a NOP
		 * command to confirm if the link is still active or not.
		 * - If we don't get any response then do error recovery.
		 * - If we get a response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err) {
			err_handling = false;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}

/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;
	bool needs_reset = false;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		goto out;

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret)
			goto skip_err_handling;
	}
	if ((hba->saved_err & INT_FATAL_ERRORS) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
				    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
		needs_reset = true;

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared automatically
	 * after link startup.
	 */
	if (needs_reset)
		goto skip_pending_xfer_clear;

	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, tag)) {
			err_xfer = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			err_tm = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

lock_skip_pending_xfer_clear:
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	if (err_xfer || err_tm)
		needs_reset = true;

skip_pending_xfer_clear:
	/* Fatal errors need reset */
	if (needs_reset) {
		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;

		/*
		 * ufshcd_reset_and_restore() does the link reinitialization
		 * which will need at least one empty doorbell slot to send the
		 * device management commands (NOP and query commands).
		 * If no slot is empty at this moment then forcefully free up
		 * the last slot.
		 */
		if (hba->outstanding_reqs == max_doorbells)
			__ufshcd_transfer_req_compl(hba,
						    (1UL << (hba->nutrs - 1)));

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}

skip_err_handling:
	if (!needs_reset) {
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}

	ufshcd_clear_eh_in_progress(hba);

out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	scsi_unblock_requests(hba->host);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
	else if (hba->dev_quirks &
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
			hba->uic_error |=
				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
	}

	/* UIC NL/TL/DME errors need software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}

/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *	   IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	enabled_intr_status =
		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (intr_status)
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

	if (enabled_intr_status) {
		ufshcd_sl_intr(hba, enabled_intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}

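/*
 * Illustrative sketch, not part of the original driver: why the ISR
 * above acts on "enabled_intr_status" rather than the raw register.
 * Bits may be set in REG_INTERRUPT_STATUS even for sources that are
 * masked in REG_INTERRUPT_ENABLE, so only enabled sources are handled:
 *
 *	intr_status          = 0b10101  (raised by the controller)
 *	REG_INTERRUPT_ENABLE = 0b00111  (what the driver asked for)
 *	enabled_intr_status  = 0b00101  (what ufshcd_sl_intr() sees)
 *
 * The raw status is still written back in full so that stale bits do
 * not linger and fire spuriously once they are unmasked.
 */
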
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000, true);
out:
	return err;
}

/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get a free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
					      lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
	/*
	 * The host shall provide the same value for the LUN field in the
	 * basic header and for Input Parameter.
	 */
	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);

	/* Make sure descriptors are ready before ringing the task doorbell */
	wmb();

	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	ufshcd_release(hba);
	return err;
}

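/*
 * Illustrative sketch, not part of the original driver: task management
 * requests use a tag space that sits directly above the transfer
 * request tags, hence "task_tag = hba->nutrs + free_slot" above. For
 * example, with 32 transfer slots (the UFSHCI maximum), TM slot 2 gets:
 *
 *	task_tag = 32 + 2 = 34
 *
 * which keeps the device-visible tags unique across both queues.
 */
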
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                  scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for the corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}

/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in the device by sending the UFS_ABORT_TASK
 * task management command, and in the host controller by clearing the
 * door-bell register. There can be a race between the controller sending
 * the command to the device and the abort being issued. To avoid that,
 * first issue UFS_QUERY_TASK to check if the command was really issued
 * and only then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	ufshcd_hold(hba, false);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto out;
	}

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	lrbp = &hba->lrb[tag];
	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			goto out;
		} else {
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp; /* service response error */
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_outstanding_req_clear(hba, tag);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}

	/*
	 * This ufshcd_release() corresponds to the original scsi cmd that got
	 * aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	err = ufshcd_probe_hba(hba);

	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
		err = -EIO;
out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	do {
		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}

/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	ufshcd_hold(hba, false);
	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	return err;
}

/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpu(*((u16 *)(buff + 2*i)));
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}

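/*
 * Illustrative sketch, not part of the original driver: each 16-bit
 * entry scanned above carries a unit field (ATTR_ICC_LVL_UNIT_MASK)
 * and a value field (ATTR_ICC_LVL_VALUE_MASK), and the switch
 * normalizes everything to micro-amps before comparing against the
 * regulator budget. For instance, an entry whose unit decodes to
 * UFSHCD_MILI_AMP with a value of 100 represents:
 *
 *	curr_uA = 100 * 1000 = 100000 uA (i.e. 100 mA)
 *
 * The scan walks from the highest ICC level downwards and stops at
 * the first level whose demand fits within sup_curr_uA.
 */
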
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level. In case the regulators are not
 * initialized, 0 is returned.
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
							u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
						!hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
							__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}

static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor. len = %d ret = %d",
			__func__, buff_len, ret);
		return;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
		&hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);

}

/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * The UFS device specification requires UFS devices to support 4 well
 * known logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * A UFS device's power management needs to be controlled by the
 * "POWER CONDITION" field of the SSU (START STOP UNIT) command. But this
 * "power condition" field will take effect only when it's sent to the
 * "UFS device" well known logical unit, hence we require the scsi_device
 * instance to represent this logical unit in order for the UFS host
 * driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for the "RPMB" (Replay
 * Protected Memory Block) LU so a user space process can control this
 * LU. User space may also want to have access to the BOOT LU.
 *
 * This function adds scsi device instances for all the well known LUs
 * (except the "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if it failed to add any of the
 * required W-LUs).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_rpmb;
	struct scsi_device *sdev_boot;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}
	scsi_device_put(hba->sdev_ufs_device);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		ret = PTR_ERR(sdev_boot);
		goto remove_sdev_ufs_device;
	}
	scsi_device_put(sdev_boot);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_sdev_boot;
	}
	scsi_device_put(sdev_rpmb);
	goto out;

remove_sdev_boot:
	scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}

static int ufs_get_device_info(struct ufs_hba *hba,
				struct ufs_device_info *card_data)
{
	int err;
	u8 model_index;
	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];

	err = ufshcd_read_device_desc(hba, desc_buf,
					QUERY_DESC_DEVICE_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
				QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
	strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	card_data->model[MAX_MODEL_LEN] = '\0';

out:
	return err;
}

void ufs_advertise_fixup_device(struct ufs_hba *hba)
{
	int err;
	struct ufs_dev_fix *f;
	struct ufs_device_info card_data;

	card_data.wmanufacturerid = 0;

	err = ufs_get_device_info(hba, &card_data);
	if (err) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, err);
		return;
	}

	for (f = ufs_fixups; f->quirk; f++) {
		if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
		    (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
		    (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}

/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * The PA_TActivate parameter can be tuned manually if the UniPro version
 * is less than 1.61. PA_TActivate needs to be greater than or equal to
 * the peer M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This
 * optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}

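/*
 * Illustrative sketch, not part of the original driver: the conversion
 * above rescales the peer's RX_MIN_ACTIVATETIME_CAPABILITY, expressed
 * in units of RX_MIN_ACTIVATETIME_UNIT_US, into PA_TACTIVATE units of
 * PA_TACTIVATE_TIME_UNIT_US. With made-up numbers, if the peer reports
 * 5 units of 100 us each while PA_TACTIVATE counts in 10 us steps:
 *
 *	tuned_pa_tactivate = (5 * 100) / 10 = 50
 *
 * so both sides end up agreeing on the same absolute activation time.
 */
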
4926/**
4927 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
4928 * @hba: per-adapter instance
4929 *
4930 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
4931 * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
4932 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
4933 * This optimal value can help reduce the hibern8 exit latency.
4934 *
4935 * Returns zero on success, non-zero error value on failure.
4936 */
4937static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
4938{
4939 int ret = 0;
4940 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
4941 u32 max_hibern8_time, tuned_pa_hibern8time;
4942
4943 ret = ufshcd_dme_get(hba,
4944 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
4945 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
4946 &local_tx_hibern8_time_cap);
4947 if (ret)
4948 goto out;
4949
4950 ret = ufshcd_dme_peer_get(hba,
4951 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
4952 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4953 &peer_rx_hibern8_time_cap);
4954 if (ret)
4955 goto out;
4956
4957 max_hibern8_time = max(local_tx_hibern8_time_cap,
4958 peer_rx_hibern8_time_cap);
4959 /* make sure proper unit conversion is applied */
4960 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
4961 / PA_HIBERN8_TIME_UNIT_US);
4962 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
4963 tuned_pa_hibern8time);
4964out:
4965 return ret;
4966}
4967
4968static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
4969{
4970 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
4971 ufshcd_tune_pa_tactivate(hba);
4972 ufshcd_tune_pa_hibern8time(hba);
4973 }
4974
4975 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
4976 /* set 1ms timeout for PA_TACTIVATE */
4977 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
4978}
4979
6ccf44fe 4980/**
1d337ec2
SRT
4981 * ufshcd_probe_hba - probe hba to detect device and initialize
4982 * @hba: per-adapter instance
4983 *
4984 * Execute link-startup and verify device initialization
6ccf44fe 4985 */
1d337ec2 4986static int ufshcd_probe_hba(struct ufs_hba *hba)
6ccf44fe 4987{
6ccf44fe
SJ
4988 int ret;
4989
4990 ret = ufshcd_link_startup(hba);
5a0b0cb9
SRT
4991 if (ret)
4992 goto out;
4993
5064636c
YG
4994 ufshcd_init_pwr_info(hba);
4995
afdfff59
YG
4996 /* set the default level for urgent bkops */
4997 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
4998 hba->is_urgent_bkops_lvl_checked = false;
4999
57d104c1
SJ
5000 /* UniPro link is active now */
5001 ufshcd_set_link_active(hba);
d3e89bac 5002
5a0b0cb9
SRT
5003 ret = ufshcd_verify_dev_init(hba);
5004 if (ret)
5005 goto out;
68078d5c
DR
5006
5007 ret = ufshcd_complete_dev_init(hba);
5008 if (ret)
5009 goto out;
5a0b0cb9 5010
c58ab7aa 5011 ufs_advertise_fixup_device(hba);
37113106 5012 ufshcd_tune_unipro_params(hba);
60f01870
YG
5013
5014 ret = ufshcd_set_vccq_rail_unused(hba,
5015 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5016 if (ret)
5017 goto out;
5018
57d104c1
SJ
5019 /* UFS device is also active now */
5020 ufshcd_set_ufs_dev_active(hba);
66ec6d59 5021 ufshcd_force_reset_auto_bkops(hba);
57d104c1
SJ
5022 hba->wlun_dev_clr_ua = true;
5023
7eb584db
DR
5024 if (ufshcd_get_max_pwr_mode(hba)) {
5025 dev_err(hba->dev,
5026 "%s: Failed getting max supported power mode\n",
5027 __func__);
5028 } else {
5029 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
5030 if (ret)
5031 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
5032 __func__, ret);
5033 }
57d104c1 5034
53c12d0e
YG
5035 /* set the state as operational after switching to desired gear */
5036 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
57d104c1
SJ
5037 /*
5038 * If we are in error handling context or in power management callbacks
5039 * context, no need to scan the host
5040 */
5041 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5042 bool flag;
5043
5044 /* clear any previous UFS device information */
5045 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
dc3c8d3a
YG
5046 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
5047 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
57d104c1 5048 hba->dev_info.f_power_on_wp_en = flag;
3441da7d 5049
3a4bf06d
YG
5050 if (!hba->is_init_prefetch)
5051 ufshcd_init_icc_levels(hba);
5052
2a8fa600
SJ
5053 /* Add required well known logical units to scsi mid layer */
5054 if (ufshcd_scsi_add_wlus(hba))
5055 goto out;
5056
3441da7d
SRT
5057 scsi_scan_host(hba->host);
5058 pm_runtime_put_sync(hba->dev);
5059 }
3a4bf06d
YG
5060
5061 if (!hba->is_init_prefetch)
5062 hba->is_init_prefetch = true;
5063
856b3483
ST
5064 /* Resume devfreq after UFS device is detected */
5065 if (ufshcd_is_clkscaling_enabled(hba))
5066 devfreq_resume_device(hba->devfreq);
5067
5a0b0cb9 5068out:
1d337ec2
SRT
5069 /*
5070 * If we failed to initialize the device or the device is not
5071 * present, turn off the power/clocks etc.
5072 */
57d104c1
SJ
5073 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
5074 pm_runtime_put_sync(hba->dev);
1d337ec2 5075 ufshcd_hba_exit(hba);
57d104c1 5076 }
1d337ec2
SRT
5077
5078 return ret;
5079}
5080
5081/**
5082 * ufshcd_async_scan - asynchronous execution for probing hba
5083 * @data: data pointer to pass to this function
5084 * @cookie: cookie data
5085 */
5086static void ufshcd_async_scan(void *data, async_cookie_t cookie)
5087{
5088 struct ufs_hba *hba = (struct ufs_hba *)data;
5089
5090 ufshcd_probe_hba(hba);
6ccf44fe
SJ
5091}
5092
f550c65b
YG
5093static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5094{
5095 unsigned long flags;
5096 struct Scsi_Host *host;
5097 struct ufs_hba *hba;
5098 int index;
5099 bool found = false;
5100
5101 if (!scmd || !scmd->device || !scmd->device->host)
5102 return BLK_EH_NOT_HANDLED;
5103
5104 host = scmd->device->host;
5105 hba = shost_priv(host);
5106 if (!hba)
5107 return BLK_EH_NOT_HANDLED;
5108
5109 spin_lock_irqsave(host->host_lock, flags);
5110
5111 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5112 if (hba->lrb[index].cmd == scmd) {
5113 found = true;
5114 break;
5115 }
5116 }
5117
5118 spin_unlock_irqrestore(host->host_lock, flags);
5119
5120 /*
5121 * Bypass SCSI error handling and reset the block layer timer if this
5122 * SCSI command was not actually dispatched to UFS driver, otherwise
5123 * let SCSI layer handle the error as usual.
5124 */
5125 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5126}
5127
7a3e97b0
SY
5128static struct scsi_host_template ufshcd_driver_template = {
5129 .module = THIS_MODULE,
5130 .name = UFSHCD,
5131 .proc_name = UFSHCD,
5132 .queuecommand = ufshcd_queuecommand,
5133 .slave_alloc = ufshcd_slave_alloc,
eeda4749 5134 .slave_configure = ufshcd_slave_configure,
7a3e97b0 5135 .slave_destroy = ufshcd_slave_destroy,
4264fd61 5136 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 5137 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
5138 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
5139 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
f550c65b 5140 .eh_timed_out = ufshcd_eh_timed_out,
7a3e97b0
SY
5141 .this_id = -1,
5142 .sg_tablesize = SG_ALL,
5143 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
5144 .can_queue = UFSHCD_CAN_QUEUE,
1ab27c9c 5145 .max_host_blocked = 1,
c40ecc12 5146 .track_queue_depth = 1,
7a3e97b0
SY
5147};
5148
57d104c1
SJ
5149static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
5150 int ua)
5151{
7b16a07c 5152 int ret;
57d104c1 5153
7b16a07c
BA
5154 if (!vreg)
5155 return 0;
57d104c1 5156
7b16a07c
BA
5157 ret = regulator_set_load(vreg->reg, ua);
5158 if (ret < 0) {
5159 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
5160 __func__, vreg->name, ua, ret);
57d104c1
SJ
5161 }
5162
5163 return ret;
5164}
5165
5166static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
5167 struct ufs_vreg *vreg)
5168{
60f01870
YG
5169 if (!vreg)
5170 return 0;
5171 else if (vreg->unused)
5172 return 0;
5173 else
5174 return ufshcd_config_vreg_load(hba->dev, vreg,
5175 UFS_VREG_LPM_LOAD_UA);
57d104c1
SJ
5176}
5177
5178static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
5179 struct ufs_vreg *vreg)
5180{
60f01870
YG
5181 if (!vreg)
5182 return 0;
5183 else if (vreg->unused)
5184 return 0;
5185 else
5186 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
57d104c1
SJ
5187}
5188
aa497613
SRT
5189static int ufshcd_config_vreg(struct device *dev,
5190 struct ufs_vreg *vreg, bool on)
5191{
5192 int ret = 0;
5193 struct regulator *reg = vreg->reg;
5194 const char *name = vreg->name;
5195 int min_uV, uA_load;
5196
5197 BUG_ON(!vreg);
5198
5199 if (regulator_count_voltages(reg) > 0) {
5200 min_uV = on ? vreg->min_uV : 0;
5201 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
5202 if (ret) {
5203 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
5204 __func__, name, ret);
5205 goto out;
5206 }
5207
5208 uA_load = on ? vreg->max_uA : 0;
57d104c1
SJ
5209 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
5210 if (ret)
aa497613 5211 goto out;
aa497613
SRT
5212 }
5213out:
5214 return ret;
5215}
5216
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (vreg->enabled || vreg->unused)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}

static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (!vreg->enabled || vreg->unused)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}

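/**
 * ufshcd_setup_vreg - turn the UFS device power rails on or off
 * @hba: per adapter instance
 * @on: true to enable VCC, VCCQ and VCCQ2, false to disable them
 *
 * If toggling any of the rails fails, all three are switched back off
 * so that the device is not left partially powered.
 */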
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
	if (ret)
		goto out;

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}

static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);

	return 0;
}

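/**
 * ufshcd_get_vreg - look up a regulator by name via devm_regulator_get()
 * @dev: pointer to device handle
 * @vreg: UFS regulator descriptor to fill in, may be NULL
 *
 * Returns 0 on success (or when @vreg is NULL), negative errno otherwise.
 */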
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}

static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}

static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_get_vreg(hba->dev, info->vdd_hba);

	return 0;
}

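/**
 * ufshcd_set_vccq_rail_unused - mark the VCCQ rail as used or unused
 * @hba: per adapter instance
 * @unused: true to power the rail off and flag it as unused
 *
 * Flagging the rail as unused keeps the LPM/HPM load helpers and the
 * regulator enable/disable paths from touching it later.
 */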
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
{
	int ret = 0;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;
	else if (!info->vccq)
		goto out;

	if (unused) {
		/* shut off the rail here */
		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
		/*
		 * Mark this rail as no longer used, so it doesn't get enabled
		 * later by mistake.
		 */
		if (!ret)
			info->vccq->unused = true;
	} else {
		/*
		 * The rail should already be enabled here, so just make sure
		 * that the unused flag is cleared.
		 */
		info->vccq->unused = false;
	}
out:
	return ret;
}

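/**
 * __ufshcd_setup_clocks - enable or disable all UFS host clocks
 * @hba: per adapter instance
 * @on: true to prepare and enable the clocks, false to disable them
 * @skip_ref_clk: when disabling, leave "ref_clk" running; the device
 *		  reference clock can't be switched off while the link
 *		  is still active
 *
 * On failure, every clock that was enabled is disabled again; on a
 * successful enable, the clock gating state is moved to CLKS_ON.
 */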
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
				continue;

			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on);
out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
	return ret;
}

static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}

static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}

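/**
 * ufshcd_variant_hba_init - run the vendor-specific init callbacks
 * @hba: per adapter instance
 *
 * Invokes the variant's init hook followed by its regulator setup hook.
 * If the latter fails, the variant is torn down again via its exit hook.
 */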
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		goto out_exit;

	goto out;

out_exit:
	ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_setup_clocks(hba, false);

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}

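/**
 * ufshcd_hba_init - power up the host controller and its resources
 * @hba: per adapter instance
 *
 * Brings up, in this order: the host controller rail, the clocks, the
 * UFS device rails and the vendor-specific variant. Each step is unwound
 * in reverse on failure; hba->is_powered is set on success.
 */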
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails, as this makes it easy to control host controller power
	 * collapse, which is different from UFS device power collapse.
	 * Also, enable the host controller power before going ahead with
	 * the rest of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}

static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}

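/**
 * ufshcd_send_request_sense - issue a REQUEST SENSE command
 * @hba: per adapter instance
 * @sdp: SCSI device to send the command to
 *
 * Used before a power mode change to clear a pending unit attention
 * condition on the device well-known LUN.
 */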
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				SCSI_SENSE_BUFFERSIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
				SCSI_SENSE_BUFFERSIZE, NULL,
				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}

/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if the requested power mode is set successfully
 * Returns non-zero if it failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				     enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If SCSI commands fail, the SCSI mid-layer schedules SCSI error
	 * handling, which would wait for the host to be resumed. Since we
	 * know we are functional while we are here, skip host resume in
	 * error handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * This function would generally be called from the power management
	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}

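/**
 * ufshcd_link_state_transition - move the UniPro link to a new power state
 * @hba: per adapter instance
 * @req_link_state: requested link state (active, Hibern8 or off)
 * @check_for_bkops: when set, refuse to turn the link off while auto-bkops
 *		     is enabled, as that would also power off the device
 */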
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, the link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		   (!check_for_bkops || (check_for_bkops &&
		   !hba->auto_bkops_enabled))) {
		/*
		 * Let's make sure that the link is in low power mode. We are
		 * doing this currently by putting it in Hibern8. The other
		 * way to put the link in low power mode is to send the DME
		 * end point reset to the device and then send the DME reset
		 * command to the local UniPro, but putting the link in
		 * Hibern8 is much faster.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret)
			goto out;
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba, true);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}

static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add a 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
	 * save some power.
	 *
	 * If UFS device and link are in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
	 * anyway in a low power state, which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}

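/**
 * ufshcd_vreg_set_hpm - bring the UFS device rails back to high power mode
 * @hba: per adapter instance
 *
 * Mirror of ufshcd_vreg_set_lpm(): re-enables or reconfigures the rails
 * based on the current device and link state, rolling VCCQ back to LPM
 * and disabling VCC again if a later step fails.
 */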
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}

static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}

/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link are powered off.
 *
 * NOTE: UFS device & link must be active before we enter this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto out;

	/* UFS device & link must be active before we enter this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto out;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	       !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * The clock scaling needs access to controller registers. Hence, wait
	 * for pending clock scaling work to be done before clocks are
	 * turned off.
	 */
	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}
	/*
	 * Call vendor specific suspend callback. As these callbacks may
	 * access vendor specific host controller register space, call them
	 * while the host clocks are still ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op);
	if (ret)
		goto set_link_active;

	ret = ufshcd_vops_setup_clocks(hba, false);
	if (ret)
		goto vops_resume;

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	/*
	 * Disable the host IRQ as no host controller transaction is
	 * expected till resume.
	 */
	ufshcd_disable_irq(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

vops_resume:
	ufshcd_vops_resume(hba, pm_op);
set_link_active:
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space, call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	/*
	 * If BKOPs operations are urgently needed at this moment then
	 * keep auto-bkops enabled or else disable it.
	 */
	ufshcd_urgent_bkops(hba);
	hba->clk_gating.is_suspended = false;

	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba || !hba->is_powered)
		return 0;

	if (pm_runtime_suspended(hba->dev)) {
		if (hba->rpm_lvl == hba->spm_lvl)
			/*
			 * There is a possibility that the device may still be
			 * in active state during the runtime suspend.
			 */
			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
				goto out;

		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different from what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes a long time, we might optimize it in
		 * the future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		return 0;

	return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
EXPORT_SYMBOL(ufshcd_system_resume);

/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state.
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So the following would be the possible power state after this function
 * returns successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;
	else
		return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);

/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		   data structures
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba, true);

	scsi_host_put(hba->host);

	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_remove_device(hba->devfreq);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev, "Invalid memory reference: dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

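/**
 * ufshcd_scale_clks - scale all UFS clocks up or down
 * @hba: per adapter instance
 * @scale_up: true to move every clock to its max_freq, false to min_freq
 *
 * The vendor clk_scale_notify hook is called before (PRE_CHANGE) and
 * after (POST_CHANGE) the rate change.
 */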
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	return ret;
}

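/*
 * devfreq callbacks: the governor requests UINT_MAX to scale the clocks
 * up and 0 to scale them down, and polls the busy/total time accounted
 * against hba->outstanding_reqs to make that decision.
 */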
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int err = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	if (*freq == UINT_MAX)
		err = ufshcd_scale_clks(hba, true);
	else if (*freq == 0)
		err = ufshcd_scale_clks(hba, false);

	return err;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference: mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						   "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
					PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * The device-initialize-sequence hasn't been invoked yet.
	 * Set the device to power-off state
	 */
	ufshcd_set_ufs_dev_poweroff(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	scsi_host_put(host);
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);