Commit | Line | Data |
---|---|---|
7a3e97b0 | 1 | /* |
e0eca63e | 2 | * Universal Flash Storage Host controller driver Core |
7a3e97b0 SY | 3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshcd.c | |
3b1d0580 | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
52ac95fe | 6 | * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. |
7a3e97b0 | 7 | * |
3b1d0580 VH | 8 | * Authors: |
9 | * Santosh Yaraganavi <santosh.sy@samsung.com> | |
10 | * Vinayak Holikatti <h.vinayak@samsung.com> | |
7a3e97b0 SY | 11 | * |
12 | * This program is free software; you can redistribute it and/or | |
13 | * modify it under the terms of the GNU General Public License | |
14 | * as published by the Free Software Foundation; either version 2 | |
15 | * of the License, or (at your option) any later version. | |
3b1d0580 VH | 16 | * See the COPYING file in the top-level directory or visit |
17 | * <http://www.gnu.org/licenses/gpl-2.0.html> | |
7a3e97b0 SY | 18 | * |
19 | * This program is distributed in the hope that it will be useful, | |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
22 | * GNU General Public License for more details. | |
23 | * | |
3b1d0580 VH | 24 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
25 | * without warranty of any kind. You are solely responsible for | |
26 | * determining the appropriateness of using and distributing | |
27 | * the program and assume all risks associated with your exercise | |
28 | * of rights with respect to the program, including but not limited | |
29 | * to infringement of third party rights, the risks and costs of | |
30 | * program errors, damage to or loss of data, programs or equipment, | |
31 | * and unavailability or interruption of operations. Under no | |
32 | * circumstances will the contributor of this Program be liable for | |
33 | * any damages of any kind arising from your use or distribution of | |
34 | * this program. | |
5c0c28a8 SRT | 35 | * |
36 | * The Linux Foundation chooses to take subject only to the GPLv2 | |
37 | * license terms, and distributes only under these terms. | |
7a3e97b0 SY | 38 | */ |
39 | ||
6ccf44fe | 40 | #include <linux/async.h> |
856b3483 | 41 | #include <linux/devfreq.h> |
6ccf44fe | 42 | |
54b879b7 | 43 | #include <linux/of.h> |
e0eca63e | 44 | #include "ufshcd.h" |
53b3d9c3 | 45 | #include "unipro.h" |
7a3e97b0 | 46 | |
2fbd009b SJ | 47 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
48 | UTP_TASK_REQ_COMPL |\ | |
49 | UFSHCD_ERROR_MASK) | |
6ccf44fe SJ | 50 | /* UIC command timeout, unit: ms */ |
51 | #define UIC_CMD_TIMEOUT 500 | |
2fbd009b | 52 | |
5a0b0cb9 SRT | 53 | /* NOP OUT retries waiting for NOP IN response */ |
54 | #define NOP_OUT_RETRIES 10 | |
55 | /* Timeout after 30 msecs if NOP OUT hangs without response */ | |
56 | #define NOP_OUT_TIMEOUT 30 /* msecs */ | |
57 | ||
68078d5c DR | 58 | /* Query request retries */ |
59 | #define QUERY_REQ_RETRIES 10 | |
60 | /* Query request timeout */ | |
61 | #define QUERY_REQ_TIMEOUT 30 /* msec */ | |
e5ad406c YG | 62 | /* |
63 | * Query request timeout for fDeviceInit flag | |
64 | * fDeviceInit query response time for some devices can be so long that the |
65 | * default QUERY_REQ_TIMEOUT may not be enough for such devices. |
66 | */ | |
67 | #define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */ | |
68078d5c | 68 | |
e2933132 SRT | 69 | /* Task management command timeout */ |
70 | #define TM_CMD_TIMEOUT 100 /* msecs */ | |
71 | ||
64238fbd YG | 72 | /* maximum number of retries for a general UIC command */ |
73 | #define UFS_UIC_COMMAND_RETRIES 3 | |
74 | ||
1d337ec2 SRT | 75 | /* maximum number of link-startup retries */ |
76 | #define DME_LINKSTARTUP_RETRIES 3 | |
77 | ||
87d0b4a6 YG | 78 | /* Maximum retries for Hibern8 enter */ |
79 | #define UIC_HIBERN8_ENTER_RETRIES 3 | |
80 | ||
1d337ec2 SRT | 81 | /* maximum number of reset retries before giving up */ |
82 | #define MAX_HOST_RESET_RETRIES 5 | |
83 | ||
68078d5c DR | 84 | /* Expose the flag value from utp_upiu_query.value */ |
85 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF | |
86 | ||
7d568652 SJ | 87 | /* Interrupt aggregation default timeout, unit: 40us */ |
88 | #define INT_AGGR_DEF_TO 0x02 | |
89 | ||
aa497613 SRT | 90 | #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
91 | ({ \ | |
92 | int _ret; \ | |
93 | if (_on) \ | |
94 | _ret = ufshcd_enable_vreg(_dev, _vreg); \ | |
95 | else \ | |
96 | _ret = ufshcd_disable_vreg(_dev, _vreg); \ | |
97 | _ret; \ | |
98 | }) | |
99 | ||
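Since ufshcd_toggle_vreg() is a GCC statement expression, it can be used like an ordinary function call whose value is the enable/disable helper's return code. Below is a minimal illustrative sketch; the ufs_vreg_info fields and the ufshcd_enable_vreg()/ufshcd_disable_vreg() helpers are assumed from elsewhere in this driver and are not shown in this excerpt.

```c
/* Illustrative sketch only, not part of the driver source. */
static int ufshcd_toggle_vcc_rails(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;	/* assumed field */
	int ret;

	/* the macro expands to the enable or disable helper depending on @on */
	ret = ufshcd_toggle_vreg(hba->dev, info->vcc, on);
	if (ret)
		return ret;

	return ufshcd_toggle_vreg(hba->dev, info->vccq, on);
}
```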
da461cec SJ | 100 | static u32 ufs_query_desc_max_size[] = { |
101 | QUERY_DESC_DEVICE_MAX_SIZE, | |
102 | QUERY_DESC_CONFIGURAION_MAX_SIZE, | |
103 | QUERY_DESC_UNIT_MAX_SIZE, | |
104 | QUERY_DESC_RFU_MAX_SIZE, | |
105 | QUERY_DESC_INTERCONNECT_MAX_SIZE, | |
106 | QUERY_DESC_STRING_MAX_SIZE, | |
107 | QUERY_DESC_RFU_MAX_SIZE, | |
1ce21794 | 108 | QUERY_DESC_GEOMETRY_MAX_SIZE, |
da461cec SJ | 109 | QUERY_DESC_POWER_MAX_SIZE, |
110 | QUERY_DESC_RFU_MAX_SIZE, | |
111 | }; | |
112 | ||
7a3e97b0 SY | 113 | enum { |
114 | UFSHCD_MAX_CHANNEL = 0, | |
115 | UFSHCD_MAX_ID = 1, | |
7a3e97b0 SY | 116 | UFSHCD_CMD_PER_LUN = 32, |
117 | UFSHCD_CAN_QUEUE = 32, | |
118 | }; | |
119 | ||
120 | /* UFSHCD states */ | |
121 | enum { | |
7a3e97b0 SY | 122 | UFSHCD_STATE_RESET, |
123 | UFSHCD_STATE_ERROR, | |
3441da7d SRT | 124 | UFSHCD_STATE_OPERATIONAL, |
125 | }; | |
126 | ||
127 | /* UFSHCD error handling flags */ | |
128 | enum { | |
129 | UFSHCD_EH_IN_PROGRESS = (1 << 0), | |
7a3e97b0 SY | 130 | }; |
131 | ||
e8e7f271 SRT | 132 | /* UFSHCD UIC layer error flags */ |
133 | enum { | |
134 | UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ | |
135 | UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ | |
136 | UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ | |
137 | UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ | |
138 | }; | |
139 | ||
7a3e97b0 SY | 140 | /* Interrupt configuration options */ |
141 | enum { | |
142 | UFSHCD_INT_DISABLE, | |
143 | UFSHCD_INT_ENABLE, | |
144 | UFSHCD_INT_CLEAR, | |
145 | }; | |
146 | ||
3441da7d SRT | 147 | #define ufshcd_set_eh_in_progress(h) \
148 | (h->eh_flags |= UFSHCD_EH_IN_PROGRESS) | |
149 | #define ufshcd_eh_in_progress(h) \ | |
150 | (h->eh_flags & UFSHCD_EH_IN_PROGRESS) | |
151 | #define ufshcd_clear_eh_in_progress(h) \ | |
152 | (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) | |
153 | ||
57d104c1 SJ | 154 | #define ufshcd_set_ufs_dev_active(h) \
155 | ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) | |
156 | #define ufshcd_set_ufs_dev_sleep(h) \ | |
157 | ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) | |
158 | #define ufshcd_set_ufs_dev_poweroff(h) \ | |
159 | ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) | |
160 | #define ufshcd_is_ufs_dev_active(h) \ | |
161 | ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) | |
162 | #define ufshcd_is_ufs_dev_sleep(h) \ | |
163 | ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) | |
164 | #define ufshcd_is_ufs_dev_poweroff(h) \ | |
165 | ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) | |
166 | ||
167 | static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { | |
168 | {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, | |
169 | {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, | |
170 | {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, | |
171 | {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, | |
172 | {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, | |
173 | {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, | |
174 | }; | |
175 | ||
176 | static inline enum ufs_dev_pwr_mode | |
177 | ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) | |
178 | { | |
179 | return ufs_pm_lvl_states[lvl].dev_state; | |
180 | } | |
181 | ||
182 | static inline enum uic_link_state | |
183 | ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) | |
184 | { | |
185 | return ufs_pm_lvl_states[lvl].link_state; | |
186 | } | |
187 | ||
3441da7d SRT | 188 | static void ufshcd_tmc_handler(struct ufs_hba *hba); |
189 | static void ufshcd_async_scan(void *data, async_cookie_t cookie); | |
e8e7f271 SRT | 190 | static int ufshcd_reset_and_restore(struct ufs_hba *hba); |
191 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); | |
1d337ec2 SRT | 192 | static void ufshcd_hba_exit(struct ufs_hba *hba); |
193 | static int ufshcd_probe_hba(struct ufs_hba *hba); | |
1ab27c9c ST | 194 | static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, |
195 | bool skip_ref_clk); | |
196 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); | |
197 | static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); | |
198 | static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); | |
cad2e03d | 199 | static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); |
57d104c1 SJ | 200 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); |
201 | static irqreturn_t ufshcd_intr(int irq, void *__hba); | |
7eb584db DR | 202 | static int ufshcd_config_pwr_mode(struct ufs_hba *hba, |
203 | struct ufs_pa_layer_attr *desired_pwr_mode); | |
874237f7 YG | 204 | static int ufshcd_change_power_mode(struct ufs_hba *hba, |
205 | struct ufs_pa_layer_attr *pwr_mode); | |
14497328 YG | 206 | static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) |
207 | { | |
208 | return tag >= 0 && tag < hba->nutrs; | |
209 | } | |
57d104c1 SJ | 210 | |
211 | static inline int ufshcd_enable_irq(struct ufs_hba *hba) | |
212 | { | |
213 | int ret = 0; | |
214 | ||
215 | if (!hba->is_irq_enabled) { | |
216 | ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, | |
217 | hba); | |
218 | if (ret) | |
219 | dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", | |
220 | __func__, ret); | |
221 | hba->is_irq_enabled = true; | |
222 | } | |
223 | ||
224 | return ret; | |
225 | } | |
226 | ||
227 | static inline void ufshcd_disable_irq(struct ufs_hba *hba) | |
228 | { | |
229 | if (hba->is_irq_enabled) { | |
230 | free_irq(hba->irq, hba); | |
231 | hba->is_irq_enabled = false; | |
232 | } | |
233 | } | |
3441da7d | 234 | |
5a0b0cb9 SRT |
235 | /* |
236 | * ufshcd_wait_for_register - wait for register value to change | |
237 | * @hba - per-adapter interface | |
238 | * @reg - mmio register offset | |
239 | * @mask - mask to apply to read register value | |
240 | * @val - wait condition | |
241 | * @interval_us - polling interval in microsecs | |
242 | * @timeout_ms - timeout in millisecs | |
243 | * | |
244 | * Returns -ETIMEDOUT on error, zero on success | |
245 | */ | |
246 | static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, | |
247 | u32 val, unsigned long interval_us, unsigned long timeout_ms) | |
248 | { | |
249 | int err = 0; | |
250 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | |
251 | ||
252 | /* ignore bits that we don't intend to wait on */ | |
253 | val = val & mask; | |
254 | ||
255 | while ((ufshcd_readl(hba, reg) & mask) != val) { | |
256 | /* wakeup within 50us of expiry */ | |
257 | usleep_range(interval_us, interval_us + 50); | |
258 | ||
259 | if (time_after(jiffies, timeout)) { | |
260 | if ((ufshcd_readl(hba, reg) & mask) != val) | |
261 | err = -ETIMEDOUT; | |
262 | break; | |
263 | } | |
264 | } | |
265 | ||
266 | return err; | |
267 | } | |
268 | ||
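A typical use of this polling helper is waiting for a door-bell bit to drop after it has been cleared, as ufshcd_clear_cmd() does further down in this file. A minimal sketch of that pattern (hba and tag are assumed from the surrounding context):

```c
/* Sketch: wait up to 1000 ms, polling every 1000 us, for the tag's
 * door-bell bit to read back as 0 (val = ~mask selects "bit clear"). */
u32 mask = 1 << tag;
int err;

err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
			       mask, ~mask, 1000, 1000);
if (err == -ETIMEDOUT)
	dev_err(hba->dev, "tag %d: door-bell bit did not clear\n", tag);
```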
2fbd009b SJ |
269 | /** |
270 | * ufshcd_get_intr_mask - Get the interrupt bit mask | |
271 | * @hba - Pointer to adapter instance | |
272 | * | |
273 | * Returns interrupt bit mask per version | |
274 | */ | |
275 | static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) | |
276 | { | |
277 | if (hba->ufs_version == UFSHCI_VERSION_10) | |
278 | return INTERRUPT_MASK_ALL_VER_10; | |
279 | else | |
280 | return INTERRUPT_MASK_ALL_VER_11; | |
281 | } | |
282 | ||
7a3e97b0 SY |
283 | /** |
284 | * ufshcd_get_ufs_version - Get the UFS version supported by the HBA | |
285 | * @hba - Pointer to adapter instance | |
286 | * | |
287 | * Returns UFSHCI version supported by the controller | |
288 | */ | |
289 | static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) | |
290 | { | |
0263bcd0 YG |
291 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) |
292 | return ufshcd_vops_get_ufs_hci_version(hba); | |
9949e702 | 293 | |
b873a275 | 294 | return ufshcd_readl(hba, REG_UFS_VERSION); |
7a3e97b0 SY |
295 | } |
296 | ||
297 | /** | |
297 | * ufshcd_is_device_present - Check if any device is connected to |
299 | * the host controller | |
5c0c28a8 | 300 | * @hba: pointer to adapter instance |
7a3e97b0 | 301 | * |
73ec513a | 302 | * Returns 1 if device present, 0 if no device detected |
7a3e97b0 | 303 | */ |
5c0c28a8 | 304 | static inline int ufshcd_is_device_present(struct ufs_hba *hba) |
7a3e97b0 | 305 | { |
5c0c28a8 SRT |
306 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & |
307 | DEVICE_PRESENT) ? 1 : 0; | |
7a3e97b0 SY |
308 | } |
309 | ||
310 | /** | |
311 | * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status | |
312 | * @lrb: pointer to local command reference block | |
313 | * | |
314 | * This function is used to get the OCS field from UTRD | |
315 | * Returns the OCS field in the UTRD | |
316 | */ | |
317 | static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) | |
318 | { | |
e8c8e82a | 319 | return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; |
7a3e97b0 SY |
320 | } |
321 | ||
322 | /** | |
323 | * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status | |
324 | * @task_req_descp: pointer to utp_task_req_desc structure | |
325 | * | |
326 | * This function is used to get the OCS field from UTMRD | |
327 | * Returns the OCS field in the UTMRD | |
328 | */ | |
329 | static inline int | |
330 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) | |
331 | { | |
e8c8e82a | 332 | return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS; |
7a3e97b0 SY |
333 | } |
334 | ||
335 | /** | |
336 | * ufshcd_get_tm_free_slot - get a free slot for task management request | |
337 | * @hba: per adapter instance | |
e2933132 | 338 | * @free_slot: pointer to variable with available slot value |
7a3e97b0 | 339 | * |
e2933132 SRT |
340 | * Get a free tag and lock it until ufshcd_put_tm_slot() is called. |
341 | * Returns 0 if a free slot is not available, else returns 1 with the tag value | |
342 | * in @free_slot. | |
7a3e97b0 | 343 | */ |
e2933132 | 344 | static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) |
7a3e97b0 | 345 | { |
e2933132 SRT |
346 | int tag; |
347 | bool ret = false; | |
348 | ||
349 | if (!free_slot) | |
350 | goto out; | |
351 | ||
352 | do { | |
353 | tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); | |
354 | if (tag >= hba->nutmrs) | |
355 | goto out; | |
356 | } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); | |
357 | ||
358 | *free_slot = tag; | |
359 | ret = true; | |
360 | out: | |
361 | return ret; | |
362 | } | |
363 | ||
364 | static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) | |
365 | { | |
366 | clear_bit_unlock(slot, &hba->tm_slots_in_use); | |
7a3e97b0 SY |
367 | } |
368 | ||
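The two helpers above are meant to be used as a claim/release pair around issuing a task management request. A sketch of that pairing, modelled on ufshcd_issue_tm_cmd() (which lies outside this excerpt) and assuming the tm_tag_wq wait queue in struct ufs_hba:

```c
/* Sketch only: claim a task-management slot, use it, then release it. */
int free_slot;

/* sleep until one of the hba->nutmrs slots can be locked */
wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));

/* ... fill the task request descriptor for @free_slot and ring the TM door-bell ... */

ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);	/* let other waiters retry the claim */
```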
369 | /** | |
370 | * ufshcd_utrl_clear - Clear a bit in UTRLCLR register | |
371 | * @hba: per adapter instance | |
372 | * @pos: position of the bit to be cleared | |
373 | */ | |
374 | static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) | |
375 | { | |
b873a275 | 376 | ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); |
7a3e97b0 SY |
377 | } |
378 | ||
a48353f6 YG |
379 | /** |
380 | * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field | |
381 | * @hba: per adapter instance | |
382 | * @tag: position of the bit to be cleared | |
383 | */ | |
384 | static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag) | |
385 | { | |
386 | __clear_bit(tag, &hba->outstanding_reqs); | |
387 | } | |
388 | ||
7a3e97b0 SY |
389 | /** |
390 | * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY | |
391 | * @reg: Register value of host controller status | |
392 | * | |
393 | * Returns 0 on success and a positive value on failure |
394 | */ | |
395 | static inline int ufshcd_get_lists_status(u32 reg) | |
396 | { | |
397 | /* | |
398 | * The mask 0xFF is for the following HCS register bits | |
399 | * Bit Description | |
400 | * 0 Device Present | |
401 | * 1 UTRLRDY | |
402 | * 2 UTMRLRDY | |
403 | * 3 UCRDY | |
897efe62 | 404 | * 4-7 reserved |
7a3e97b0 | 405 | */ |
897efe62 | 406 | return ((reg & 0xFF) >> 1) ^ 0x07; |
7a3e97b0 SY |
407 | } |
408 | ||
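A worked example of the arithmetic above: the shift drops the Device Present bit, and the XOR with 0x07 leaves zero only when UTRLRDY, UTMRLRDY and UCRDY are all set. The register values below are illustrative.

```c
/*
 * Worked example of the readiness check:
 *   HCS = 0x0F  ->  DP=1, UTRLRDY=1, UTMRLRDY=1, UCRDY=1
 *                   ((0x0F & 0xFF) >> 1) ^ 0x07 = 0x07 ^ 0x07 = 0   ready
 *   HCS = 0x0B  ->  UTMRLRDY (bit 2) still low
 *                   ((0x0B & 0xFF) >> 1) ^ 0x07 = 0x05 ^ 0x07 = 2   not ready
 */
```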
409 | /** | |
410 | * ufshcd_get_uic_cmd_result - Get the UIC command result | |
411 | * @hba: Pointer to adapter instance | |
412 | * | |
413 | * This function gets the result of UIC command completion | |
414 | * Returns 0 on success, non zero value on error | |
415 | */ | |
416 | static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) | |
417 | { | |
b873a275 | 418 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & |
7a3e97b0 SY |
419 | MASK_UIC_COMMAND_RESULT; |
420 | } | |
421 | ||
12b4fdb4 SJ |
422 | /** |
423 | * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command | |
424 | * @hba: Pointer to adapter instance | |
425 | * | |
426 | * This function gets UIC command argument3 | |
427 | * Returns 0 on success, non zero value on error | |
428 | */ | |
429 | static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) | |
430 | { | |
431 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); | |
432 | } | |
433 | ||
7a3e97b0 | 434 | /** |
5a0b0cb9 | 435 | * ufshcd_get_req_rsp - returns the TR response transaction type |
7a3e97b0 | 436 | * @ucd_rsp_ptr: pointer to response UPIU |
7a3e97b0 SY |
437 | */ |
438 | static inline int | |
5a0b0cb9 | 439 | ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) |
7a3e97b0 | 440 | { |
5a0b0cb9 | 441 | return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; |
7a3e97b0 SY |
442 | } |
443 | ||
444 | /** | |
445 | * ufshcd_get_rsp_upiu_result - Get the result from response UPIU | |
446 | * @ucd_rsp_ptr: pointer to response UPIU | |
447 | * | |
448 | * This function gets the response status and scsi_status from response UPIU | |
449 | * Returns the response result code. | |
450 | */ | |
451 | static inline int | |
452 | ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) | |
453 | { | |
454 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; | |
455 | } | |
456 | ||
1c2623c5 SJ |
457 | /* |
458 | * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length | |
459 | * from response UPIU | |
460 | * @ucd_rsp_ptr: pointer to response UPIU | |
461 | * | |
462 | * Return the data segment length. | |
463 | */ | |
464 | static inline unsigned int | |
465 | ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) | |
466 | { | |
467 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & | |
468 | MASK_RSP_UPIU_DATA_SEG_LEN; | |
469 | } | |
470 | ||
66ec6d59 SRT |
471 | /** |
472 | * ufshcd_is_exception_event - Check if the device raised an exception event | |
473 | * @ucd_rsp_ptr: pointer to response UPIU | |
474 | * | |
475 | * The function checks if the device raised an exception event indicated in | |
476 | * the Device Information field of response UPIU. | |
477 | * | |
478 | * Returns true if exception is raised, false otherwise. | |
479 | */ | |
480 | static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) | |
481 | { | |
482 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & | |
483 | MASK_RSP_EXCEPTION_EVENT ? true : false; | |
484 | } | |
485 | ||
7a3e97b0 | 486 | /** |
7d568652 | 487 | * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. |
7a3e97b0 | 488 | * @hba: per adapter instance |
7a3e97b0 SY |
489 | */ |
490 | static inline void | |
7d568652 | 491 | ufshcd_reset_intr_aggr(struct ufs_hba *hba) |
7a3e97b0 | 492 | { |
7d568652 SJ |
493 | ufshcd_writel(hba, INT_AGGR_ENABLE | |
494 | INT_AGGR_COUNTER_AND_TIMER_RESET, | |
495 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | |
496 | } | |
497 | ||
498 | /** | |
499 | * ufshcd_config_intr_aggr - Configure interrupt aggregation values. | |
500 | * @hba: per adapter instance | |
501 | * @cnt: Interrupt aggregation counter threshold | |
502 | * @tmout: Interrupt aggregation timeout value | |
503 | */ | |
504 | static inline void | |
505 | ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) | |
506 | { | |
507 | ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | | |
508 | INT_AGGR_COUNTER_THLD_VAL(cnt) | | |
509 | INT_AGGR_TIMEOUT_VAL(tmout), | |
510 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | |
7a3e97b0 SY |
511 | } |
512 | ||
b852190e YG |
513 | /** |
514 | * ufshcd_disable_intr_aggr - Disables interrupt aggregation. | |
515 | * @hba: per adapter instance | |
516 | */ | |
517 | static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) | |
518 | { | |
519 | ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | |
520 | } | |
521 | ||
7a3e97b0 SY |
522 | /** |
523 | * ufshcd_enable_run_stop_reg - Enable run-stop registers. |
524 | * When the run-stop registers are set to 1, it indicates to the |
525 | * host controller that it can process requests |
526 | * @hba: per adapter instance | |
527 | */ | |
528 | static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) | |
529 | { | |
b873a275 SJ |
530 | ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, |
531 | REG_UTP_TASK_REQ_LIST_RUN_STOP); | |
532 | ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, | |
533 | REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); | |
7a3e97b0 SY |
534 | } |
535 | ||
7a3e97b0 SY |
536 | /** |
537 | * ufshcd_hba_start - Start controller initialization sequence | |
538 | * @hba: per adapter instance | |
539 | */ | |
540 | static inline void ufshcd_hba_start(struct ufs_hba *hba) | |
541 | { | |
b873a275 | 542 | ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); |
7a3e97b0 SY |
543 | } |
544 | ||
545 | /** | |
546 | * ufshcd_is_hba_active - Get controller state | |
547 | * @hba: per adapter instance | |
548 | * | |
549 | * Returns zero if controller is active, 1 otherwise | |
550 | */ | |
551 | static inline int ufshcd_is_hba_active(struct ufs_hba *hba) | |
552 | { | |
b873a275 | 553 | return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; |
7a3e97b0 SY |
554 | } |
555 | ||
1ab27c9c ST |
556 | static void ufshcd_ungate_work(struct work_struct *work) |
557 | { | |
558 | int ret; | |
559 | unsigned long flags; | |
560 | struct ufs_hba *hba = container_of(work, struct ufs_hba, | |
561 | clk_gating.ungate_work); | |
562 | ||
563 | cancel_delayed_work_sync(&hba->clk_gating.gate_work); | |
564 | ||
565 | spin_lock_irqsave(hba->host->host_lock, flags); | |
566 | if (hba->clk_gating.state == CLKS_ON) { | |
567 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
568 | goto unblock_reqs; | |
569 | } | |
570 | ||
571 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
572 | ufshcd_setup_clocks(hba, true); | |
573 | ||
574 | /* Exit from hibern8 */ | |
575 | if (ufshcd_can_hibern8_during_gating(hba)) { | |
576 | /* Prevent gating in this path */ | |
577 | hba->clk_gating.is_suspended = true; | |
578 | if (ufshcd_is_link_hibern8(hba)) { | |
579 | ret = ufshcd_uic_hibern8_exit(hba); | |
580 | if (ret) | |
581 | dev_err(hba->dev, "%s: hibern8 exit failed %d\n", | |
582 | __func__, ret); | |
583 | else | |
584 | ufshcd_set_link_active(hba); | |
585 | } | |
586 | hba->clk_gating.is_suspended = false; | |
587 | } | |
588 | unblock_reqs: | |
856b3483 ST |
589 | if (ufshcd_is_clkscaling_enabled(hba)) |
590 | devfreq_resume_device(hba->devfreq); | |
1ab27c9c ST |
591 | scsi_unblock_requests(hba->host); |
592 | } | |
593 | ||
594 | /** | |
595 | * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. | |
596 | * Also, exit from hibern8 mode and set the link as active. | |
597 | * @hba: per adapter instance | |
598 | * @async: This indicates whether caller should ungate clocks asynchronously. | |
599 | */ | |
600 | int ufshcd_hold(struct ufs_hba *hba, bool async) | |
601 | { | |
602 | int rc = 0; | |
603 | unsigned long flags; | |
604 | ||
605 | if (!ufshcd_is_clkgating_allowed(hba)) | |
606 | goto out; | |
1ab27c9c ST |
607 | spin_lock_irqsave(hba->host->host_lock, flags); |
608 | hba->clk_gating.active_reqs++; | |
609 | ||
53c12d0e YG |
610 | if (ufshcd_eh_in_progress(hba)) { |
611 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
612 | return 0; | |
613 | } | |
614 | ||
856b3483 | 615 | start: |
1ab27c9c ST |
616 | switch (hba->clk_gating.state) { |
617 | case CLKS_ON: | |
618 | break; | |
619 | case REQ_CLKS_OFF: | |
620 | if (cancel_delayed_work(&hba->clk_gating.gate_work)) { | |
621 | hba->clk_gating.state = CLKS_ON; | |
622 | break; | |
623 | } | |
624 | /* | |
625 | * If we are here, it means gating work is either done or | |
626 | * currently running. Hence, fall through to cancel gating | |
627 | * work and to enable clocks. | |
628 | */ | |
629 | case CLKS_OFF: | |
630 | scsi_block_requests(hba->host); | |
631 | hba->clk_gating.state = REQ_CLKS_ON; | |
632 | schedule_work(&hba->clk_gating.ungate_work); | |
633 | /* | |
634 | * fall through to check if we should wait for this | |
635 | * work to be done or not. | |
636 | */ | |
637 | case REQ_CLKS_ON: | |
638 | if (async) { | |
639 | rc = -EAGAIN; | |
640 | hba->clk_gating.active_reqs--; | |
641 | break; | |
642 | } | |
643 | ||
644 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
645 | flush_work(&hba->clk_gating.ungate_work); | |
646 | /* Make sure state is CLKS_ON before returning */ | |
856b3483 | 647 | spin_lock_irqsave(hba->host->host_lock, flags); |
1ab27c9c ST |
648 | goto start; |
649 | default: | |
650 | dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", | |
651 | __func__, hba->clk_gating.state); | |
652 | break; | |
653 | } | |
654 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
655 | out: | |
656 | return rc; | |
657 | } | |
6e3fd44d | 658 | EXPORT_SYMBOL_GPL(ufshcd_hold); |
1ab27c9c ST |
659 | |
660 | static void ufshcd_gate_work(struct work_struct *work) | |
661 | { | |
662 | struct ufs_hba *hba = container_of(work, struct ufs_hba, | |
663 | clk_gating.gate_work.work); | |
664 | unsigned long flags; | |
665 | ||
666 | spin_lock_irqsave(hba->host->host_lock, flags); | |
667 | if (hba->clk_gating.is_suspended) { | |
668 | hba->clk_gating.state = CLKS_ON; | |
669 | goto rel_lock; | |
670 | } | |
671 | ||
672 | if (hba->clk_gating.active_reqs | |
673 | || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL | |
674 | || hba->lrb_in_use || hba->outstanding_tasks | |
675 | || hba->active_uic_cmd || hba->uic_async_done) | |
676 | goto rel_lock; | |
677 | ||
678 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
679 | ||
680 | /* put the link into hibern8 mode before turning off clocks */ | |
681 | if (ufshcd_can_hibern8_during_gating(hba)) { | |
682 | if (ufshcd_uic_hibern8_enter(hba)) { | |
683 | hba->clk_gating.state = CLKS_ON; | |
684 | goto out; | |
685 | } | |
686 | ufshcd_set_link_hibern8(hba); | |
687 | } | |
688 | ||
856b3483 ST |
689 | if (ufshcd_is_clkscaling_enabled(hba)) { |
690 | devfreq_suspend_device(hba->devfreq); | |
691 | hba->clk_scaling.window_start_t = 0; | |
692 | } | |
693 | ||
1ab27c9c ST |
694 | if (!ufshcd_is_link_active(hba)) |
695 | ufshcd_setup_clocks(hba, false); | |
696 | else | |
697 | /* If link is active, device ref_clk can't be switched off */ | |
698 | __ufshcd_setup_clocks(hba, false, true); | |
699 | ||
700 | /* | |
701 | * In case you are here to cancel this work the gating state | |
702 | * would be marked as REQ_CLKS_ON. In this case keep the state | |
703 | * as REQ_CLKS_ON which would anyway imply that clocks are off | |
704 | * and a request to turn them on is pending. This way, | |
705 | * we keep the state machine intact and this would ultimately | |
706 | * prevent from doing cancel work multiple times when there are | |
707 | * new requests arriving before the current cancel work is done. | |
708 | */ | |
709 | spin_lock_irqsave(hba->host->host_lock, flags); | |
710 | if (hba->clk_gating.state == REQ_CLKS_OFF) | |
711 | hba->clk_gating.state = CLKS_OFF; | |
712 | ||
713 | rel_lock: | |
714 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
715 | out: | |
716 | return; | |
717 | } | |
718 | ||
719 | /* host lock must be held before calling this variant */ | |
720 | static void __ufshcd_release(struct ufs_hba *hba) | |
721 | { | |
722 | if (!ufshcd_is_clkgating_allowed(hba)) | |
723 | return; | |
724 | ||
725 | hba->clk_gating.active_reqs--; | |
726 | ||
727 | if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended | |
728 | || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL | |
729 | || hba->lrb_in_use || hba->outstanding_tasks | |
53c12d0e YG |
730 | || hba->active_uic_cmd || hba->uic_async_done |
731 | || ufshcd_eh_in_progress(hba)) | |
1ab27c9c ST |
732 | return; |
733 | ||
734 | hba->clk_gating.state = REQ_CLKS_OFF; | |
735 | schedule_delayed_work(&hba->clk_gating.gate_work, | |
736 | msecs_to_jiffies(hba->clk_gating.delay_ms)); | |
737 | } | |
738 | ||
739 | void ufshcd_release(struct ufs_hba *hba) | |
740 | { | |
741 | unsigned long flags; | |
742 | ||
743 | spin_lock_irqsave(hba->host->host_lock, flags); | |
744 | __ufshcd_release(hba); | |
745 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
746 | } | |
6e3fd44d | 747 | EXPORT_SYMBOL_GPL(ufshcd_release); |
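Taken together, ufshcd_hold() and ufshcd_release() bracket any code that touches the controller while clock gating is enabled. A sketch of the usual pairing (compare ufshcd_send_uic_cmd() above and ufshcd_queuecommand() below); hba is assumed from context:

```c
/* Sketch only: keep clocks on (and the link out of hibern8) while
 * talking to the controller, then drop the reference again. */
ufshcd_hold(hba, false);	/* false: may block until clocks are ungated */

/* ... program registers / issue a command ... */

ufshcd_release(hba);		/* re-arms the delayed gate_work */
```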
1ab27c9c ST |
748 | |
749 | static ssize_t ufshcd_clkgate_delay_show(struct device *dev, | |
750 | struct device_attribute *attr, char *buf) | |
751 | { | |
752 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
753 | ||
754 | return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); | |
755 | } | |
756 | ||
757 | static ssize_t ufshcd_clkgate_delay_store(struct device *dev, | |
758 | struct device_attribute *attr, const char *buf, size_t count) | |
759 | { | |
760 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
761 | unsigned long flags, value; | |
762 | ||
763 | if (kstrtoul(buf, 0, &value)) | |
764 | return -EINVAL; | |
765 | ||
766 | spin_lock_irqsave(hba->host->host_lock, flags); | |
767 | hba->clk_gating.delay_ms = value; | |
768 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
769 | return count; | |
770 | } | |
771 | ||
772 | static void ufshcd_init_clk_gating(struct ufs_hba *hba) | |
773 | { | |
774 | if (!ufshcd_is_clkgating_allowed(hba)) | |
775 | return; | |
776 | ||
777 | hba->clk_gating.delay_ms = 150; | |
778 | INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); | |
779 | INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); | |
780 | ||
781 | hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; | |
782 | hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; | |
783 | sysfs_attr_init(&hba->clk_gating.delay_attr.attr); | |
784 | hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; | |
785 | hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; | |
786 | if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) | |
787 | dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); | |
788 | } | |
789 | ||
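The attribute registered above is the user-visible tuning knob for the idle delay. An illustrative note on how it is typically used; the exact sysfs path depends on how the host controller was probed:

```c
/*
 * Illustrative only: clkgate_delay_ms appears under the UFS host
 * controller's device directory in sysfs and accepts a decimal
 * millisecond value, e.g.
 *
 *	echo 200 > /sys/devices/.../clkgate_delay_ms
 *
 * which ufshcd_clkgate_delay_store() parses with kstrtoul() and
 * applies under the host lock.
 */
```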
790 | static void ufshcd_exit_clk_gating(struct ufs_hba *hba) | |
791 | { | |
792 | if (!ufshcd_is_clkgating_allowed(hba)) | |
793 | return; | |
794 | device_remove_file(hba->dev, &hba->clk_gating.delay_attr); | |
97cd6805 AM |
795 | cancel_work_sync(&hba->clk_gating.ungate_work); |
796 | cancel_delayed_work_sync(&hba->clk_gating.gate_work); | |
1ab27c9c ST |
797 | } |
798 | ||
856b3483 ST |
799 | /* Must be called with host lock acquired */ |
800 | static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) | |
801 | { | |
802 | if (!ufshcd_is_clkscaling_enabled(hba)) | |
803 | return; | |
804 | ||
805 | if (!hba->clk_scaling.is_busy_started) { | |
806 | hba->clk_scaling.busy_start_t = ktime_get(); | |
807 | hba->clk_scaling.is_busy_started = true; | |
808 | } | |
809 | } | |
810 | ||
811 | static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) | |
812 | { | |
813 | struct ufs_clk_scaling *scaling = &hba->clk_scaling; | |
814 | ||
815 | if (!ufshcd_is_clkscaling_enabled(hba)) | |
816 | return; | |
817 | ||
818 | if (!hba->outstanding_reqs && scaling->is_busy_started) { | |
819 | scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), | |
820 | scaling->busy_start_t)); | |
821 | scaling->busy_start_t = ktime_set(0, 0); | |
822 | scaling->is_busy_started = false; | |
823 | } | |
824 | } | |
7a3e97b0 SY |
825 | /** |
826 | * ufshcd_send_command - Send SCSI or device management commands | |
827 | * @hba: per adapter instance | |
828 | * @task_tag: Task tag of the command | |
829 | */ | |
830 | static inline | |
831 | void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) | |
832 | { | |
856b3483 | 833 | ufshcd_clk_scaling_start_busy(hba); |
7a3e97b0 | 834 | __set_bit(task_tag, &hba->outstanding_reqs); |
b873a275 | 835 | ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
7a3e97b0 SY |
836 | } |
837 | ||
838 | /** | |
839 | * ufshcd_copy_sense_data - Copy sense data in case of check condition | |
840 | * @lrb - pointer to local reference block | |
841 | */ | |
842 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | |
843 | { | |
844 | int len; | |
1c2623c5 SJ |
845 | if (lrbp->sense_buffer && |
846 | ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { | |
5a0b0cb9 | 847 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); |
7a3e97b0 | 848 | memcpy(lrbp->sense_buffer, |
5a0b0cb9 | 849 | lrbp->ucd_rsp_ptr->sr.sense_data, |
7a3e97b0 SY |
850 | min_t(int, len, SCSI_SENSE_BUFFERSIZE)); |
851 | } | |
852 | } | |
853 | ||
68078d5c DR |
854 | /** |
855 | * ufshcd_copy_query_response() - Copy the Query Response and the data | |
856 | * descriptor | |
857 | * @hba: per adapter instance | |
858 | * @lrb - pointer to local reference block | |
859 | */ | |
860 | static | |
c6d4a831 | 861 | int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
68078d5c DR |
862 | { |
863 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | |
864 | ||
68078d5c | 865 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); |
68078d5c | 866 | |
68078d5c DR |
867 | /* Get the descriptor */ |
868 | if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { | |
d44a5f98 | 869 | u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + |
68078d5c | 870 | GENERAL_UPIU_REQUEST_SIZE; |
c6d4a831 DR |
871 | u16 resp_len; |
872 | u16 buf_len; | |
68078d5c DR |
873 | |
874 | /* data segment length */ | |
c6d4a831 | 875 | resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & |
68078d5c | 876 | MASK_QUERY_DATA_SEG_LEN; |
ea2aab24 SRT |
877 | buf_len = be16_to_cpu( |
878 | hba->dev_cmd.query.request.upiu_req.length); | |
c6d4a831 DR |
879 | if (likely(buf_len >= resp_len)) { |
880 | memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); | |
881 | } else { | |
882 | dev_warn(hba->dev, | |
883 | "%s: Response size is bigger than buffer", | |
884 | __func__); | |
885 | return -EINVAL; | |
886 | } | |
68078d5c | 887 | } |
c6d4a831 DR |
888 | |
889 | return 0; | |
68078d5c DR |
890 | } |
891 | ||
7a3e97b0 SY |
892 | /** |
893 | * ufshcd_hba_capabilities - Read controller capabilities | |
894 | * @hba: per adapter instance | |
895 | */ | |
896 | static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) | |
897 | { | |
b873a275 | 898 | hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); |
7a3e97b0 SY |
899 | |
900 | /* nutrs and nutmrs are 0 based values */ | |
901 | hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; | |
902 | hba->nutmrs = | |
903 | ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; | |
904 | } | |
905 | ||
906 | /** | |
6ccf44fe SJ |
907 | * ufshcd_ready_for_uic_cmd - Check if controller is ready |
908 | * to accept UIC commands | |
7a3e97b0 | 909 | * @hba: per adapter instance |
6ccf44fe SJ |
910 | * Return true on success, else false |
911 | */ | |
912 | static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) | |
913 | { | |
914 | if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) | |
915 | return true; | |
916 | else | |
917 | return false; | |
918 | } | |
919 | ||
53b3d9c3 SJ |
920 | /** |
921 | * ufshcd_get_upmcrs - Get the power mode change request status | |
922 | * @hba: Pointer to adapter instance | |
923 | * | |
924 | * This function gets the UPMCRS field of HCS register | |
925 | * Returns value of UPMCRS field | |
926 | */ | |
927 | static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) | |
928 | { | |
929 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; | |
930 | } | |
931 | ||
6ccf44fe SJ |
932 | /** |
933 | * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers | |
934 | * @hba: per adapter instance | |
935 | * @uic_cmd: UIC command | |
936 | * | |
937 | * Mutex must be held. | |
7a3e97b0 SY |
938 | */ |
939 | static inline void | |
6ccf44fe | 940 | ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
7a3e97b0 | 941 | { |
6ccf44fe SJ |
942 | WARN_ON(hba->active_uic_cmd); |
943 | ||
944 | hba->active_uic_cmd = uic_cmd; | |
945 | ||
7a3e97b0 | 946 | /* Write Args */ |
6ccf44fe SJ |
947 | ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); |
948 | ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); | |
949 | ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); | |
7a3e97b0 SY |
950 | |
951 | /* Write UIC Cmd */ | |
6ccf44fe | 952 | ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, |
b873a275 | 953 | REG_UIC_COMMAND); |
7a3e97b0 SY |
954 | } |
955 | ||
6ccf44fe SJ |
956 | /** |
957 | * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command |
958 | * @hba: per adapter instance | |
959 | * @uic_command: UIC command | |
960 | * | |
961 | * Must be called with mutex held. | |
962 | * Returns 0 only if success. | |
963 | */ | |
964 | static int | |
965 | ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | |
966 | { | |
967 | int ret; | |
968 | unsigned long flags; | |
969 | ||
970 | if (wait_for_completion_timeout(&uic_cmd->done, | |
971 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) | |
972 | ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; | |
973 | else | |
974 | ret = -ETIMEDOUT; | |
975 | ||
976 | spin_lock_irqsave(hba->host->host_lock, flags); | |
977 | hba->active_uic_cmd = NULL; | |
978 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
979 | ||
980 | return ret; | |
981 | } | |
982 | ||
983 | /** | |
984 | * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result | |
985 | * @hba: per adapter instance | |
986 | * @uic_cmd: UIC command | |
d75f7fe4 | 987 | * @completion: initialize the completion only if this is set to true |
6ccf44fe SJ |
988 | * |
989 | * Identical to ufshcd_send_uic_cmd() except that it must be called |
57d104c1 | 990 | * with mutex held and host_lock locked. |
6ccf44fe SJ |
991 | * Returns 0 only if success. |
992 | */ | |
993 | static int | |
d75f7fe4 YG |
994 | __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, |
995 | bool completion) | |
6ccf44fe | 996 | { |
6ccf44fe SJ |
997 | if (!ufshcd_ready_for_uic_cmd(hba)) { |
998 | dev_err(hba->dev, | |
999 | "Controller not ready to accept UIC commands\n"); | |
1000 | return -EIO; | |
1001 | } | |
1002 | ||
d75f7fe4 YG |
1003 | if (completion) |
1004 | init_completion(&uic_cmd->done); | |
6ccf44fe | 1005 | |
6ccf44fe | 1006 | ufshcd_dispatch_uic_cmd(hba, uic_cmd); |
6ccf44fe | 1007 | |
57d104c1 | 1008 | return 0; |
6ccf44fe SJ |
1009 | } |
1010 | ||
1011 | /** | |
1012 | * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result | |
1013 | * @hba: per adapter instance | |
1014 | * @uic_cmd: UIC command | |
1015 | * | |
1016 | * Returns 0 only if success. | |
1017 | */ | |
1018 | static int | |
1019 | ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | |
1020 | { | |
1021 | int ret; | |
57d104c1 | 1022 | unsigned long flags; |
6ccf44fe | 1023 | |
1ab27c9c | 1024 | ufshcd_hold(hba, false); |
6ccf44fe | 1025 | mutex_lock(&hba->uic_cmd_mutex); |
cad2e03d YG |
1026 | ufshcd_add_delay_before_dme_cmd(hba); |
1027 | ||
57d104c1 | 1028 | spin_lock_irqsave(hba->host->host_lock, flags); |
d75f7fe4 | 1029 | ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); |
57d104c1 SJ |
1030 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
1031 | if (!ret) | |
1032 | ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); | |
1033 | ||
6ccf44fe SJ |
1034 | mutex_unlock(&hba->uic_cmd_mutex); |
1035 | ||
1ab27c9c | 1036 | ufshcd_release(hba); |
6ccf44fe SJ |
1037 | return ret; |
1038 | } | |
1039 | ||
7a3e97b0 SY |
1040 | /** |
1041 | * ufshcd_map_sg - Map scatter-gather list to prdt | |
1042 | * @lrbp - pointer to local reference block | |
1043 | * | |
1044 | * Returns 0 in case of success, non-zero value in case of failure | |
1045 | */ | |
1046 | static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) | |
1047 | { | |
1048 | struct ufshcd_sg_entry *prd_table; | |
1049 | struct scatterlist *sg; | |
1050 | struct scsi_cmnd *cmd; | |
1051 | int sg_segments; | |
1052 | int i; | |
1053 | ||
1054 | cmd = lrbp->cmd; | |
1055 | sg_segments = scsi_dma_map(cmd); | |
1056 | if (sg_segments < 0) | |
1057 | return sg_segments; | |
1058 | ||
1059 | if (sg_segments) { | |
1060 | lrbp->utr_descriptor_ptr->prd_table_length = | |
1061 | cpu_to_le16((u16) (sg_segments)); | |
1062 | ||
1063 | prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; | |
1064 | ||
1065 | scsi_for_each_sg(cmd, sg, sg_segments, i) { | |
1066 | prd_table[i].size = | |
1067 | cpu_to_le32(((u32) sg_dma_len(sg))-1); | |
1068 | prd_table[i].base_addr = | |
1069 | cpu_to_le32(lower_32_bits(sg->dma_address)); | |
1070 | prd_table[i].upper_addr = | |
1071 | cpu_to_le32(upper_32_bits(sg->dma_address)); | |
52ac95fe | 1072 | prd_table[i].reserved = 0; |
7a3e97b0 SY |
1073 | } |
1074 | } else { | |
1075 | lrbp->utr_descriptor_ptr->prd_table_length = 0; | |
1076 | } | |
1077 | ||
1078 | return 0; | |
1079 | } | |
1080 | ||
1081 | /** | |
2fbd009b | 1082 | * ufshcd_enable_intr - enable interrupts |
7a3e97b0 | 1083 | * @hba: per adapter instance |
2fbd009b | 1084 | * @intrs: interrupt bits |
7a3e97b0 | 1085 | */ |
2fbd009b | 1086 | static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) |
7a3e97b0 | 1087 | { |
2fbd009b SJ |
1088 | u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); |
1089 | ||
1090 | if (hba->ufs_version == UFSHCI_VERSION_10) { | |
1091 | u32 rw; | |
1092 | rw = set & INTERRUPT_MASK_RW_VER_10; | |
1093 | set = rw | ((set ^ intrs) & intrs); | |
1094 | } else { | |
1095 | set |= intrs; | |
1096 | } | |
1097 | ||
1098 | ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); | |
1099 | } | |
1100 | ||
1101 | /** | |
1102 | * ufshcd_disable_intr - disable interrupts | |
1103 | * @hba: per adapter instance | |
1104 | * @intrs: interrupt bits | |
1105 | */ | |
1106 | static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) | |
1107 | { | |
1108 | u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); | |
1109 | ||
1110 | if (hba->ufs_version == UFSHCI_VERSION_10) { | |
1111 | u32 rw; | |
1112 | rw = (set & INTERRUPT_MASK_RW_VER_10) & | |
1113 | ~(intrs & INTERRUPT_MASK_RW_VER_10); | |
1114 | set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10); | |
1115 | ||
1116 | } else { | |
1117 | set &= ~intrs; | |
7a3e97b0 | 1118 | } |
2fbd009b SJ |
1119 | |
1120 | ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); | |
7a3e97b0 SY |
1121 | } |
1122 | ||
5a0b0cb9 SRT |
1123 | /** |
1124 | * ufshcd_prepare_req_desc_hdr() - Fills the request's header |
1125 | * descriptor according to the request |
1126 | * @lrbp: pointer to local reference block | |
1127 | * @upiu_flags: flags required in the header | |
1128 | * @cmd_dir: request's data direction |
1129 | */ | |
1130 | static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, | |
1131 | u32 *upiu_flags, enum dma_data_direction cmd_dir) | |
1132 | { | |
1133 | struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; | |
1134 | u32 data_direction; | |
1135 | u32 dword_0; | |
1136 | ||
1137 | if (cmd_dir == DMA_FROM_DEVICE) { | |
1138 | data_direction = UTP_DEVICE_TO_HOST; | |
1139 | *upiu_flags = UPIU_CMD_FLAGS_READ; | |
1140 | } else if (cmd_dir == DMA_TO_DEVICE) { | |
1141 | data_direction = UTP_HOST_TO_DEVICE; | |
1142 | *upiu_flags = UPIU_CMD_FLAGS_WRITE; | |
1143 | } else { | |
1144 | data_direction = UTP_NO_DATA_TRANSFER; | |
1145 | *upiu_flags = UPIU_CMD_FLAGS_NONE; | |
1146 | } | |
1147 | ||
1148 | dword_0 = data_direction | (lrbp->command_type | |
1149 | << UPIU_COMMAND_TYPE_OFFSET); | |
1150 | if (lrbp->intr_cmd) | |
1151 | dword_0 |= UTP_REQ_DESC_INT_CMD; | |
1152 | ||
1153 | /* Transfer request descriptor header fields */ | |
1154 | req_desc->header.dword_0 = cpu_to_le32(dword_0); | |
52ac95fe YG |
1155 | /* dword_1 is reserved, hence it is set to 0 */ |
1156 | req_desc->header.dword_1 = 0; | |
5a0b0cb9 SRT |
1157 | /* |
1158 | * assigning invalid value for command status. Controller | |
1159 | * updates OCS on command completion, with the command | |
1160 | * status | |
1161 | */ | |
1162 | req_desc->header.dword_2 = | |
1163 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
52ac95fe YG |
1164 | /* dword_3 is reserved, hence it is set to 0 */ |
1165 | req_desc->header.dword_3 = 0; | |
51047266 YG |
1166 | |
1167 | req_desc->prd_table_length = 0; | |
5a0b0cb9 SRT |
1168 | } |
1169 | ||
1170 | /** | |
1171 | * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, | |
1172 | * for scsi commands | |
1173 | * @lrbp - local reference block pointer | |
1174 | * @upiu_flags - flags | |
1175 | */ | |
1176 | static | |
1177 | void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) | |
1178 | { | |
1179 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | |
52ac95fe | 1180 | unsigned short cdb_len; |
5a0b0cb9 SRT |
1181 | |
1182 | /* command descriptor fields */ | |
1183 | ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( | |
1184 | UPIU_TRANSACTION_COMMAND, upiu_flags, | |
1185 | lrbp->lun, lrbp->task_tag); | |
1186 | ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( | |
1187 | UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); | |
1188 | ||
1189 | /* Total EHS length and Data segment length will be zero */ | |
1190 | ucd_req_ptr->header.dword_2 = 0; | |
1191 | ||
1192 | ucd_req_ptr->sc.exp_data_transfer_len = | |
1193 | cpu_to_be32(lrbp->cmd->sdb.length); | |
1194 | ||
52ac95fe YG |
1195 | cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE); |
1196 | memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE); | |
1197 | memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); | |
1198 | ||
1199 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); | |
5a0b0cb9 SRT |
1200 | } |
1201 | ||
68078d5c DR |
1202 | /** |
1203 | * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc, | |
1204 | * for query requests |
1205 | * @hba: UFS hba | |
1206 | * @lrbp: local reference block pointer | |
1207 | * @upiu_flags: flags | |
1208 | */ | |
1209 | static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, | |
1210 | struct ufshcd_lrb *lrbp, u32 upiu_flags) | |
1211 | { | |
1212 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | |
1213 | struct ufs_query *query = &hba->dev_cmd.query; | |
e8c8e82a | 1214 | u16 len = be16_to_cpu(query->request.upiu_req.length); |
68078d5c DR |
1215 | u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; |
1216 | ||
1217 | /* Query request header */ | |
1218 | ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( | |
1219 | UPIU_TRANSACTION_QUERY_REQ, upiu_flags, | |
1220 | lrbp->lun, lrbp->task_tag); | |
1221 | ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( | |
1222 | 0, query->request.query_func, 0, 0); | |
1223 | ||
1224 | /* Data segment length */ | |
1225 | ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD( | |
1226 | 0, 0, len >> 8, (u8)len); | |
1227 | ||
1228 | /* Copy the Query Request buffer as is */ | |
1229 | memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, | |
1230 | QUERY_OSF_SIZE); | |
68078d5c DR |
1231 | |
1232 | /* Copy the Descriptor */ | |
c6d4a831 DR |
1233 | if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) |
1234 | memcpy(descp, query->descriptor, len); | |
1235 | ||
51047266 | 1236 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); |
68078d5c DR |
1237 | } |
1238 | ||
5a0b0cb9 SRT |
1239 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) |
1240 | { | |
1241 | struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; | |
1242 | ||
1243 | memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); | |
1244 | ||
1245 | /* command descriptor fields */ | |
1246 | ucd_req_ptr->header.dword_0 = | |
1247 | UPIU_HEADER_DWORD( | |
1248 | UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); | |
51047266 YG |
1249 | /* clear rest of the fields of basic header */ |
1250 | ucd_req_ptr->header.dword_1 = 0; | |
1251 | ucd_req_ptr->header.dword_2 = 0; | |
1252 | ||
1253 | memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); | |
5a0b0cb9 SRT |
1254 | } |
1255 | ||
7a3e97b0 SY |
1256 | /** |
1257 | * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) | |
5a0b0cb9 | 1258 | * @hba - per adapter instance |
7a3e97b0 SY |
1259 | * @lrb - pointer to local reference block |
1260 | */ | |
5a0b0cb9 | 1261 | static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
7a3e97b0 | 1262 | { |
7a3e97b0 | 1263 | u32 upiu_flags; |
5a0b0cb9 | 1264 | int ret = 0; |
7a3e97b0 SY |
1265 | |
1266 | switch (lrbp->command_type) { | |
1267 | case UTP_CMD_TYPE_SCSI: | |
5a0b0cb9 SRT |
1268 | if (likely(lrbp->cmd)) { |
1269 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, | |
1270 | lrbp->cmd->sc_data_direction); | |
1271 | ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); | |
7a3e97b0 | 1272 | } else { |
5a0b0cb9 | 1273 | ret = -EINVAL; |
7a3e97b0 | 1274 | } |
7a3e97b0 SY |
1275 | break; |
1276 | case UTP_CMD_TYPE_DEV_MANAGE: | |
5a0b0cb9 | 1277 | ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); |
68078d5c DR |
1278 | if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) |
1279 | ufshcd_prepare_utp_query_req_upiu( | |
1280 | hba, lrbp, upiu_flags); | |
1281 | else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) | |
5a0b0cb9 SRT |
1282 | ufshcd_prepare_utp_nop_upiu(lrbp); |
1283 | else | |
1284 | ret = -EINVAL; | |
7a3e97b0 SY |
1285 | break; |
1286 | case UTP_CMD_TYPE_UFS: | |
1287 | /* For UFS native command implementation */ | |
5a0b0cb9 SRT |
1288 | ret = -ENOTSUPP; |
1289 | dev_err(hba->dev, "%s: UFS native command are not supported\n", | |
1290 | __func__); | |
1291 | break; | |
1292 | default: | |
1293 | ret = -ENOTSUPP; | |
1294 | dev_err(hba->dev, "%s: unknown command type: 0x%x\n", | |
1295 | __func__, lrbp->command_type); | |
7a3e97b0 SY |
1296 | break; |
1297 | } /* end of switch */ | |
5a0b0cb9 SRT |
1298 | |
1299 | return ret; | |
7a3e97b0 SY |
1300 | } |
1301 | ||
0ce147d4 SJ |
1302 | /* |
1303 | * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN | |
1304 | * @scsi_lun: scsi LUN id | |
1305 | * | |
1306 | * Returns UPIU LUN id | |
1307 | */ | |
1308 | static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) | |
1309 | { | |
1310 | if (scsi_is_wlun(scsi_lun)) | |
1311 | return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) | |
1312 | | UFS_UPIU_WLUN_ID; | |
1313 | else | |
1314 | return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; | |
1315 | } | |
1316 | ||
2a8fa600 SJ |
1317 | /** |
1318 | * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID | |
1319 | * @scsi_lun: UPIU W-LUN id | |
1320 | * | |
1321 | * Returns SCSI W-LUN id | |
1322 | */ | |
1323 | static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) | |
1324 | { | |
1325 | return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; | |
1326 | } | |
1327 | ||
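A worked example of the two mappings above, assuming the usual definitions UFS_UPIU_WLUN_ID = 0x80, UFS_UPIU_MAX_UNIT_NUM_ID = 0x7F and SCSI_W_LUN_BASE = 0xc100; the UFS device W-LUN 0xD0 is used purely as an illustration.

```c
/*
 * UPIU W-LUN 0xD0 exposed to the SCSI layer:
 *   ufshcd_upiu_wlun_to_scsi_wlun(0xD0)
 *     = (0xD0 & ~0x80) | 0xc100 = 0x50 | 0xc100 = 0xc150
 *
 * and mapped back when a command arrives for that SCSI LUN:
 *   ufshcd_scsi_to_upiu_lun(0xc150)
 *     = (0xc150 & 0x7F) | 0x80 = 0x50 | 0x80 = 0xD0
 */
```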
7a3e97b0 SY |
1328 | /** |
1329 | * ufshcd_queuecommand - main entry point for SCSI requests | |
1330 | * @cmd: command from SCSI Midlayer | |
1331 | * @done: call back function | |
1332 | * | |
1333 | * Returns 0 for success, non-zero in case of failure | |
1334 | */ | |
1335 | static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) | |
1336 | { | |
1337 | struct ufshcd_lrb *lrbp; | |
1338 | struct ufs_hba *hba; | |
1339 | unsigned long flags; | |
1340 | int tag; | |
1341 | int err = 0; | |
1342 | ||
1343 | hba = shost_priv(host); | |
1344 | ||
1345 | tag = cmd->request->tag; | |
14497328 YG |
1346 | if (!ufshcd_valid_tag(hba, tag)) { |
1347 | dev_err(hba->dev, | |
1348 | "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", | |
1349 | __func__, tag, cmd, cmd->request); | |
1350 | BUG(); | |
1351 | } | |
7a3e97b0 | 1352 | |
3441da7d SRT |
1353 | spin_lock_irqsave(hba->host->host_lock, flags); |
1354 | switch (hba->ufshcd_state) { | |
1355 | case UFSHCD_STATE_OPERATIONAL: | |
1356 | break; | |
1357 | case UFSHCD_STATE_RESET: | |
7a3e97b0 | 1358 | err = SCSI_MLQUEUE_HOST_BUSY; |
3441da7d SRT |
1359 | goto out_unlock; |
1360 | case UFSHCD_STATE_ERROR: | |
1361 | set_host_byte(cmd, DID_ERROR); | |
1362 | cmd->scsi_done(cmd); | |
1363 | goto out_unlock; | |
1364 | default: | |
1365 | dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", | |
1366 | __func__, hba->ufshcd_state); | |
1367 | set_host_byte(cmd, DID_BAD_TARGET); | |
1368 | cmd->scsi_done(cmd); | |
1369 | goto out_unlock; | |
7a3e97b0 | 1370 | } |
53c12d0e YG |
1371 | |
1372 | /* if error handling is in progress, don't issue commands */ | |
1373 | if (ufshcd_eh_in_progress(hba)) { | |
1374 | set_host_byte(cmd, DID_ERROR); | |
1375 | cmd->scsi_done(cmd); | |
1376 | goto out_unlock; | |
1377 | } | |
3441da7d | 1378 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
7a3e97b0 | 1379 | |
5a0b0cb9 SRT |
1380 | /* acquire the tag to make sure device cmds don't use it */ |
1381 | if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { | |
1382 | /* | |
1383 | * Dev manage command in progress, requeue the command. | |
1384 | * Requeuing the command helps in cases where the request *may* | |
1385 | * find a different tag instead of waiting for dev manage command | |
1386 | * completion. | |
1387 | */ | |
1388 | err = SCSI_MLQUEUE_HOST_BUSY; | |
1389 | goto out; | |
1390 | } | |
1391 | ||
1ab27c9c ST |
1392 | err = ufshcd_hold(hba, true); |
1393 | if (err) { | |
1394 | err = SCSI_MLQUEUE_HOST_BUSY; | |
1395 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
1396 | goto out; | |
1397 | } | |
1398 | WARN_ON(hba->clk_gating.state != CLKS_ON); | |
1399 | ||
7a3e97b0 SY |
1400 | lrbp = &hba->lrb[tag]; |
1401 | ||
5a0b0cb9 | 1402 | WARN_ON(lrbp->cmd); |
7a3e97b0 SY |
1403 | lrbp->cmd = cmd; |
1404 | lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; | |
1405 | lrbp->sense_buffer = cmd->sense_buffer; | |
1406 | lrbp->task_tag = tag; | |
0ce147d4 | 1407 | lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); |
b852190e | 1408 | lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; |
7a3e97b0 SY |
1409 | lrbp->command_type = UTP_CMD_TYPE_SCSI; |
1410 | ||
1411 | /* form UPIU before issuing the command */ | |
5a0b0cb9 | 1412 | ufshcd_compose_upiu(hba, lrbp); |
7a3e97b0 | 1413 | err = ufshcd_map_sg(lrbp); |
5a0b0cb9 SRT |
1414 | if (err) { |
1415 | lrbp->cmd = NULL; | |
1416 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
7a3e97b0 | 1417 | goto out; |
5a0b0cb9 | 1418 | } |
7a3e97b0 SY |
1419 | |
1420 | /* issue command to the controller */ | |
1421 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1422 | ufshcd_send_command(hba, tag); | |
3441da7d | 1423 | out_unlock: |
7a3e97b0 SY |
1424 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
1425 | out: | |
1426 | return err; | |
1427 | } | |
1428 | ||
5a0b0cb9 SRT |
1429 | static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, |
1430 | struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) | |
1431 | { | |
1432 | lrbp->cmd = NULL; | |
1433 | lrbp->sense_bufflen = 0; | |
1434 | lrbp->sense_buffer = NULL; | |
1435 | lrbp->task_tag = tag; | |
1436 | lrbp->lun = 0; /* device management cmd is not specific to any LUN */ | |
1437 | lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; | |
1438 | lrbp->intr_cmd = true; /* No interrupt aggregation */ | |
1439 | hba->dev_cmd.type = cmd_type; | |
1440 | ||
1441 | return ufshcd_compose_upiu(hba, lrbp); | |
1442 | } | |
1443 | ||
1444 | static int | |
1445 | ufshcd_clear_cmd(struct ufs_hba *hba, int tag) | |
1446 | { | |
1447 | int err = 0; | |
1448 | unsigned long flags; | |
1449 | u32 mask = 1 << tag; | |
1450 | ||
1451 | /* clear outstanding transaction before retry */ | |
1452 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1453 | ufshcd_utrl_clear(hba, tag); | |
1454 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1455 | ||
1456 | /* | |
1457 | * wait for h/w to clear the corresponding bit in door-bell. |
1458 | * max. wait is 1 sec. | |
1459 | */ | |
1460 | err = ufshcd_wait_for_register(hba, | |
1461 | REG_UTP_TRANSFER_REQ_DOOR_BELL, | |
1462 | mask, ~mask, 1000, 1000); | |
1463 | ||
1464 | return err; | |
1465 | } | |
1466 | ||
c6d4a831 DR |
1467 | static int |
1468 | ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
1469 | { | |
1470 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | |
1471 | ||
1472 | /* Get the UPIU response */ | |
1473 | query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> | |
1474 | UPIU_RSP_CODE_OFFSET; | |
1475 | return query_res->response; | |
1476 | } | |
1477 | ||
5a0b0cb9 SRT |
1478 | /** |
1479 | * ufshcd_dev_cmd_completion() - handles device management command responses | |
1480 | * @hba: per adapter instance | |
1481 | * @lrbp: pointer to local reference block | |
1482 | */ | |
1483 | static int | |
1484 | ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
1485 | { | |
1486 | int resp; | |
1487 | int err = 0; | |
1488 | ||
1489 | resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); | |
1490 | ||
1491 | switch (resp) { | |
1492 | case UPIU_TRANSACTION_NOP_IN: | |
1493 | if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { | |
1494 | err = -EINVAL; | |
1495 | dev_err(hba->dev, "%s: unexpected response %x\n", | |
1496 | __func__, resp); | |
1497 | } | |
1498 | break; | |
68078d5c | 1499 | case UPIU_TRANSACTION_QUERY_RSP: |
c6d4a831 DR |
1500 | err = ufshcd_check_query_response(hba, lrbp); |
1501 | if (!err) | |
1502 | err = ufshcd_copy_query_response(hba, lrbp); | |
68078d5c | 1503 | break; |
5a0b0cb9 SRT |
1504 | case UPIU_TRANSACTION_REJECT_UPIU: |
1505 | /* TODO: handle Reject UPIU Response */ | |
1506 | err = -EPERM; | |
1507 | dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", | |
1508 | __func__); | |
1509 | break; | |
1510 | default: | |
1511 | err = -EINVAL; | |
1512 | dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", | |
1513 | __func__, resp); | |
1514 | break; | |
1515 | } | |
1516 | ||
1517 | return err; | |
1518 | } | |
1519 | ||
1520 | static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, | |
1521 | struct ufshcd_lrb *lrbp, int max_timeout) | |
1522 | { | |
1523 | int err = 0; | |
1524 | unsigned long time_left; | |
1525 | unsigned long flags; | |
1526 | ||
1527 | time_left = wait_for_completion_timeout(hba->dev_cmd.complete, | |
1528 | msecs_to_jiffies(max_timeout)); | |
1529 | ||
1530 | spin_lock_irqsave(hba->host->host_lock, flags); | |
1531 | hba->dev_cmd.complete = NULL; | |
1532 | if (likely(time_left)) { | |
1533 | err = ufshcd_get_tr_ocs(lrbp); | |
1534 | if (!err) | |
1535 | err = ufshcd_dev_cmd_completion(hba, lrbp); | |
1536 | } | |
1537 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1538 | ||
1539 | if (!time_left) { | |
1540 | err = -ETIMEDOUT; | |
a48353f6 YG |
1541 | dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n", |
1542 | __func__, lrbp->task_tag); | |
5a0b0cb9 | 1543 | if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) |
a48353f6 | 1544 | /* successfully cleared the command, retry if needed */ |
5a0b0cb9 | 1545 | err = -EAGAIN; |
a48353f6 YG |
1546 | /* |
1547 | * in case of an error, after clearing the doorbell, | |
1548 | * we also need to clear the outstanding_request | |
1549 | * field in hba | |
1550 | */ | |
1551 | ufshcd_outstanding_req_clear(hba, lrbp->task_tag); | |
5a0b0cb9 SRT |
1552 | } |
1553 | ||
1554 | return err; | |
1555 | } | |
1556 | ||
1557 | /** | |
1558 | * ufshcd_get_dev_cmd_tag - Get device management command tag | |
1559 | * @hba: per-adapter instance | |
1560 | * @tag_out: pointer to variable with available slot value |
1561 | * | |
1562 | * Get a free slot and lock it until device management command | |
1563 | * completes. | |
1564 | * | |
1565 | * Returns false if a free slot is unavailable for locking, else |
1566 | * returns true with the tag value in @tag_out. |
1567 | */ | |
1568 | static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) | |
1569 | { | |
1570 | int tag; | |
1571 | bool ret = false; | |
1572 | unsigned long tmp; | |
1573 | ||
1574 | if (!tag_out) | |
1575 | goto out; | |
1576 | ||
1577 | do { | |
1578 | tmp = ~hba->lrb_in_use; | |
1579 | tag = find_last_bit(&tmp, hba->nutrs); | |
1580 | if (tag >= hba->nutrs) | |
1581 | goto out; | |
1582 | } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); | |
1583 | ||
1584 | *tag_out = tag; | |
1585 | ret = true; | |
1586 | out: | |
1587 | return ret; | |
1588 | } | |
1589 | ||
1590 | static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) | |
1591 | { | |
1592 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
1593 | } | |
1594 | ||
1595 | /** | |
1596 | * ufshcd_exec_dev_cmd - API for sending device management requests | |
1597 | * @hba: UFS hba |
1598 | * @cmd_type: specifies the type (NOP, Query...) |
1599 | * @timeout: timeout in milliseconds |
1600 | * | |
68078d5c DR |
1601 | * NOTE: Since there is only one available tag for device management commands, |
1602 | * it is expected that the caller holds the hba->dev_cmd.lock mutex. |
5a0b0cb9 SRT |
1603 | */ |
1604 | static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, | |
1605 | enum dev_cmd_type cmd_type, int timeout) | |
1606 | { | |
1607 | struct ufshcd_lrb *lrbp; | |
1608 | int err; | |
1609 | int tag; | |
1610 | struct completion wait; | |
1611 | unsigned long flags; | |
1612 | ||
1613 | /* | |
1614 | * Get free slot, sleep if slots are unavailable. | |
1615 | * Even though we use wait_event() which sleeps indefinitely, | |
1616 | * the maximum wait time is bounded by SCSI request timeout. | |
1617 | */ | |
1618 | wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); | |
1619 | ||
1620 | init_completion(&wait); | |
1621 | lrbp = &hba->lrb[tag]; | |
1622 | WARN_ON(lrbp->cmd); | |
1623 | err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); | |
1624 | if (unlikely(err)) | |
1625 | goto out_put_tag; | |
1626 | ||
1627 | hba->dev_cmd.complete = &wait; | |
1628 | ||
e3dfdc53 YG |
1629 | /* Make sure descriptors are ready before ringing the doorbell */ |
1630 | wmb(); | |
5a0b0cb9 SRT |
1631 | spin_lock_irqsave(hba->host->host_lock, flags); |
1632 | ufshcd_send_command(hba, tag); | |
1633 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
1634 | ||
1635 | err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); | |
1636 | ||
1637 | out_put_tag: | |
1638 | ufshcd_put_dev_cmd_tag(hba, tag); | |
1639 | wake_up(&hba->dev_cmd.tag_wq); | |
1640 | return err; | |
1641 | } | |
1642 | ||
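/*
 * Illustrative sketch only (not part of the driver): how a caller issues a
 * device management request through ufshcd_exec_dev_cmd(). The function
 * name ufshcd_example_send_nop() is hypothetical; the real in-tree user is
 * ufshcd_verify_dev_init() further below. The dev_cmd.lock mutex must be
 * held because only one device management tag exists at a time.
 */
static int ufshcd_example_send_nop(struct ufs_hba *hba)
{
	int err;

	ufshcd_hold(hba, false);	/* keep controller clocks running */
	mutex_lock(&hba->dev_cmd.lock);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	return err;
}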
d44a5f98 DR |
1643 | /** |
1644 | * ufshcd_init_query() - init the query response and request parameters | |
1645 | * @hba: per-adapter instance | |
1646 | * @request: address of the request pointer to be initialized | |
1647 | * @response: address of the response pointer to be initialized | |
1648 | * @opcode: operation to perform | |
1649 | * @idn: query idn to access |
1650 | * @index: LU number to access | |
1651 | * @selector: query/flag/descriptor further identification | |
1652 | */ | |
1653 | static inline void ufshcd_init_query(struct ufs_hba *hba, | |
1654 | struct ufs_query_req **request, struct ufs_query_res **response, | |
1655 | enum query_opcode opcode, u8 idn, u8 index, u8 selector) | |
1656 | { | |
1657 | *request = &hba->dev_cmd.query.request; | |
1658 | *response = &hba->dev_cmd.query.response; | |
1659 | memset(*request, 0, sizeof(struct ufs_query_req)); | |
1660 | memset(*response, 0, sizeof(struct ufs_query_res)); | |
1661 | (*request)->upiu_req.opcode = opcode; | |
1662 | (*request)->upiu_req.idn = idn; | |
1663 | (*request)->upiu_req.index = index; | |
1664 | (*request)->upiu_req.selector = selector; | |
1665 | } | |
1666 | ||
dc3c8d3a YG |
1667 | static int ufshcd_query_flag_retry(struct ufs_hba *hba, |
1668 | enum query_opcode opcode, enum flag_idn idn, bool *flag_res) | |
1669 | { | |
1670 | int ret; | |
1671 | int retries; | |
1672 | ||
1673 | for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { | |
1674 | ret = ufshcd_query_flag(hba, opcode, idn, flag_res); | |
1675 | if (ret) | |
1676 | dev_dbg(hba->dev, | |
1677 | "%s: failed with error %d, retries %d\n", | |
1678 | __func__, ret, retries); | |
1679 | else | |
1680 | break; | |
1681 | } | |
1682 | ||
1683 | if (ret) | |
1684 | dev_err(hba->dev, | |
1685 | "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", | |
1686 | __func__, opcode, idn, ret, retries); | |
1687 | return ret; | |
1688 | } | |
1689 | ||
68078d5c DR |
1690 | /** |
1691 | * ufshcd_query_flag() - API function for sending flag query requests | |
1692 | * @hba: per-adapter instance |
1693 | * @opcode: flag query to perform |
1694 | * @idn: flag idn to access |
1695 | * @flag_res: the flag value after the query request completes |
1696 | * | |
1697 | * Returns 0 for success, non-zero in case of failure | |
1698 | */ | |
dc3c8d3a | 1699 | int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, |
68078d5c DR |
1700 | enum flag_idn idn, bool *flag_res) |
1701 | { | |
d44a5f98 DR |
1702 | struct ufs_query_req *request = NULL; |
1703 | struct ufs_query_res *response = NULL; | |
1704 | int err, index = 0, selector = 0; | |
e5ad406c | 1705 | int timeout = QUERY_REQ_TIMEOUT; |
68078d5c DR |
1706 | |
1707 | BUG_ON(!hba); | |
1708 | ||
1ab27c9c | 1709 | ufshcd_hold(hba, false); |
68078d5c | 1710 | mutex_lock(&hba->dev_cmd.lock); |
d44a5f98 DR |
1711 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
1712 | selector); | |
68078d5c DR |
1713 | |
1714 | switch (opcode) { | |
1715 | case UPIU_QUERY_OPCODE_SET_FLAG: | |
1716 | case UPIU_QUERY_OPCODE_CLEAR_FLAG: | |
1717 | case UPIU_QUERY_OPCODE_TOGGLE_FLAG: | |
1718 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
1719 | break; | |
1720 | case UPIU_QUERY_OPCODE_READ_FLAG: | |
1721 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
1722 | if (!flag_res) { | |
1723 | /* No dummy reads */ | |
1724 | dev_err(hba->dev, "%s: Invalid argument for read request\n", | |
1725 | __func__); | |
1726 | err = -EINVAL; | |
1727 | goto out_unlock; | |
1728 | } | |
1729 | break; | |
1730 | default: | |
1731 | dev_err(hba->dev, | |
1732 | "%s: Expected query flag opcode but got = %d\n", | |
1733 | __func__, opcode); | |
1734 | err = -EINVAL; | |
1735 | goto out_unlock; | |
1736 | } | |
68078d5c | 1737 | |
e5ad406c YG |
1738 | if (idn == QUERY_FLAG_IDN_FDEVICEINIT) |
1739 | timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT; | |
1740 | ||
1741 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); | |
68078d5c DR |
1742 | |
1743 | if (err) { | |
1744 | dev_err(hba->dev, | |
1745 | "%s: Sending flag query for idn %d failed, err = %d\n", | |
1746 | __func__, idn, err); | |
1747 | goto out_unlock; | |
1748 | } | |
1749 | ||
1750 | if (flag_res) | |
e8c8e82a | 1751 | *flag_res = (be32_to_cpu(response->upiu_res.value) & |
68078d5c DR |
1752 | MASK_QUERY_UPIU_FLAG_LOC) & 0x1; |
1753 | ||
1754 | out_unlock: | |
1755 | mutex_unlock(&hba->dev_cmd.lock); | |
1ab27c9c | 1756 | ufshcd_release(hba); |
68078d5c DR |
1757 | return err; |
1758 | } | |
1759 | ||
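/*
 * Illustrative sketch only (not part of the driver): reading the
 * fDeviceInit flag through ufshcd_query_flag(). The wrapper name
 * ufshcd_example_read_fdeviceinit() is hypothetical; the retrying variant
 * ufshcd_query_flag_retry() above is what ufshcd_complete_dev_init() uses.
 */
static int ufshcd_example_read_fdeviceinit(struct ufs_hba *hba, bool *flag_res)
{
	return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				 QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}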
66ec6d59 SRT |
1760 | /** |
1761 | * ufshcd_query_attr - API function for sending attribute requests | |
1762 | * @hba: per-adapter instance |
1763 | * @opcode: attribute opcode |
1764 | * @idn: attribute idn to access |
1765 | * @index: index field |
1766 | * @selector: selector field |
1767 | * @attr_val: the attribute value after the query request completes |
1768 | * | |
1769 | * Returns 0 for success, non-zero in case of failure | |
1770 | */ | |
bdbe5d2f | 1771 | static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, |
66ec6d59 SRT |
1772 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) |
1773 | { | |
d44a5f98 DR |
1774 | struct ufs_query_req *request = NULL; |
1775 | struct ufs_query_res *response = NULL; | |
66ec6d59 SRT |
1776 | int err; |
1777 | ||
1778 | BUG_ON(!hba); | |
1779 | ||
1ab27c9c | 1780 | ufshcd_hold(hba, false); |
66ec6d59 SRT |
1781 | if (!attr_val) { |
1782 | dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", | |
1783 | __func__, opcode); | |
1784 | err = -EINVAL; | |
1785 | goto out; | |
1786 | } | |
1787 | ||
1788 | mutex_lock(&hba->dev_cmd.lock); | |
d44a5f98 DR |
1789 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
1790 | selector); | |
66ec6d59 SRT |
1791 | |
1792 | switch (opcode) { | |
1793 | case UPIU_QUERY_OPCODE_WRITE_ATTR: | |
1794 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
e8c8e82a | 1795 | request->upiu_req.value = cpu_to_be32(*attr_val); |
66ec6d59 SRT |
1796 | break; |
1797 | case UPIU_QUERY_OPCODE_READ_ATTR: | |
1798 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
1799 | break; | |
1800 | default: | |
1801 | dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", | |
1802 | __func__, opcode); | |
1803 | err = -EINVAL; | |
1804 | goto out_unlock; | |
1805 | } | |
1806 | ||
d44a5f98 | 1807 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
66ec6d59 SRT |
1808 | |
1809 | if (err) { | |
1810 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", | |
1811 | __func__, opcode, idn, err); | |
1812 | goto out_unlock; | |
1813 | } | |
1814 | ||
e8c8e82a | 1815 | *attr_val = be32_to_cpu(response->upiu_res.value); |
66ec6d59 SRT |
1816 | |
1817 | out_unlock: | |
1818 | mutex_unlock(&hba->dev_cmd.lock); | |
1819 | out: | |
1ab27c9c | 1820 | ufshcd_release(hba); |
66ec6d59 SRT |
1821 | return err; |
1822 | } | |
1823 | ||
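/*
 * Illustrative sketch only (not part of the driver): reading a device
 * attribute with ufshcd_query_attr(). QUERY_ATTR_IDN_ACTIVE_ICC_LVL is
 * assumed to be defined in ufs.h; the wrapper name is hypothetical. Index
 * and selector are 0 for attributes that are not per-LU.
 */
static int ufshcd_example_read_icc_level(struct ufs_hba *hba, u32 *icc_level)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				 icc_level);
}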
5e86ae44 YG |
1824 | /** |
1825 | * ufshcd_query_attr_retry() - API function for sending query | |
1826 | * attribute with retries | |
1827 | * @hba: per-adapter instance | |
1828 | * @opcode: attribute opcode | |
1829 | * @idn: attribute idn to access | |
1830 | * @index: index field | |
1831 | * @selector: selector field | |
1832 | * @attr_val: the attribute value after the query request | |
1833 | * completes | |
1834 | * | |
1835 | * Returns 0 for success, non-zero in case of failure | |
1836 | */ | |
1837 | static int ufshcd_query_attr_retry(struct ufs_hba *hba, | |
1838 | enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, | |
1839 | u32 *attr_val) | |
1840 | { | |
1841 | int ret = 0; | |
1842 | u32 retries; | |
1843 | ||
1844 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | |
1845 | ret = ufshcd_query_attr(hba, opcode, idn, index, | |
1846 | selector, attr_val); | |
1847 | if (ret) | |
1848 | dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", | |
1849 | __func__, ret, retries); | |
1850 | else | |
1851 | break; | |
1852 | } | |
1853 | ||
1854 | if (ret) | |
1855 | dev_err(hba->dev, | |
1856 | "%s: query attribute, idn %d, failed with error %d after %d retires\n", | |
1857 | __func__, idn, ret, QUERY_REQ_RETRIES); | |
1858 | return ret; | |
1859 | } | |
1860 | ||
d44a5f98 DR |
1861 | /** |
1862 | * ufshcd_query_descriptor - API function for sending descriptor requests | |
1863 | * @hba: per-adapter instance |
1864 | * @opcode: descriptor query opcode |
1865 | * @idn: descriptor idn to access |
1866 | * @index: index field |
1867 | * @selector: selector field |
1868 | * @desc_buf: the buffer that contains the descriptor |
1869 | * @buf_len: length parameter passed to the device |
1870 | * | |
1871 | * Returns 0 for success, non-zero in case of failure. | |
1872 | * The buf_len parameter will contain, on return, the length parameter | |
1873 | * received on the response. | |
1874 | */ | |
7289f983 | 1875 | static int ufshcd_query_descriptor(struct ufs_hba *hba, |
d44a5f98 DR |
1876 | enum query_opcode opcode, enum desc_idn idn, u8 index, |
1877 | u8 selector, u8 *desc_buf, int *buf_len) | |
1878 | { | |
1879 | struct ufs_query_req *request = NULL; | |
1880 | struct ufs_query_res *response = NULL; | |
1881 | int err; | |
1882 | ||
1883 | BUG_ON(!hba); | |
1884 | ||
1ab27c9c | 1885 | ufshcd_hold(hba, false); |
d44a5f98 DR |
1886 | if (!desc_buf) { |
1887 | dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", | |
1888 | __func__, opcode); | |
1889 | err = -EINVAL; | |
1890 | goto out; | |
1891 | } | |
1892 | ||
1893 | if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { | |
1894 | dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", | |
1895 | __func__, *buf_len); | |
1896 | err = -EINVAL; | |
1897 | goto out; | |
1898 | } | |
1899 | ||
1900 | mutex_lock(&hba->dev_cmd.lock); | |
1901 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, | |
1902 | selector); | |
1903 | hba->dev_cmd.query.descriptor = desc_buf; | |
ea2aab24 | 1904 | request->upiu_req.length = cpu_to_be16(*buf_len); |
d44a5f98 DR |
1905 | |
1906 | switch (opcode) { | |
1907 | case UPIU_QUERY_OPCODE_WRITE_DESC: | |
1908 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | |
1909 | break; | |
1910 | case UPIU_QUERY_OPCODE_READ_DESC: | |
1911 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | |
1912 | break; | |
1913 | default: | |
1914 | dev_err(hba->dev, | |
1915 | "%s: Expected query descriptor opcode but got = 0x%.2x\n", | |
1916 | __func__, opcode); | |
1917 | err = -EINVAL; | |
1918 | goto out_unlock; | |
1919 | } | |
1920 | ||
1921 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); | |
1922 | ||
1923 | if (err) { | |
1924 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", | |
1925 | __func__, opcode, idn, err); | |
1926 | goto out_unlock; | |
1927 | } | |
1928 | ||
1929 | hba->dev_cmd.query.descriptor = NULL; | |
ea2aab24 | 1930 | *buf_len = be16_to_cpu(response->upiu_res.length); |
d44a5f98 DR |
1931 | |
1932 | out_unlock: | |
1933 | mutex_unlock(&hba->dev_cmd.lock); | |
1934 | out: | |
1ab27c9c | 1935 | ufshcd_release(hba); |
d44a5f98 DR |
1936 | return err; |
1937 | } | |
1938 | ||
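/*
 * Illustrative sketch only (not part of the driver): reading the whole
 * device descriptor with ufshcd_query_descriptor(). QUERY_DESC_IDN_DEVICE
 * is assumed to be defined in ufs.h and indexed in ufs_query_desc_max_size;
 * the wrapper name is hypothetical. On return, buf_len holds the length
 * reported by the device.
 */
static int ufshcd_example_read_device_desc(struct ufs_hba *hba, u8 *buf)
{
	int buf_len = ufs_query_desc_max_size[QUERY_DESC_IDN_DEVICE];

	return ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
				       QUERY_DESC_IDN_DEVICE, 0, 0, buf,
				       &buf_len);
}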
da461cec SJ |
1939 | /** |
1940 | * ufshcd_read_desc_param - read the specified descriptor parameter | |
1941 | * @hba: Pointer to adapter instance | |
1942 | * @desc_id: descriptor idn value | |
1943 | * @desc_index: descriptor index | |
1944 | * @param_offset: offset of the parameter to read | |
1945 | * @param_read_buf: pointer to buffer where parameter would be read | |
1946 | * @param_size: sizeof(param_read_buf) | |
1947 | * | |
1948 | * Return 0 in case of success, non-zero otherwise | |
1949 | */ | |
1950 | static int ufshcd_read_desc_param(struct ufs_hba *hba, | |
1951 | enum desc_idn desc_id, | |
1952 | int desc_index, | |
1953 | u32 param_offset, | |
1954 | u8 *param_read_buf, | |
1955 | u32 param_size) | |
1956 | { | |
1957 | int ret; | |
1958 | u8 *desc_buf; | |
1959 | u32 buff_len; | |
1960 | bool is_kmalloc = true; | |
1961 | ||
1962 | /* safety checks */ | |
1963 | if (desc_id >= QUERY_DESC_IDN_MAX) | |
1964 | return -EINVAL; | |
1965 | ||
1966 | buff_len = ufs_query_desc_max_size[desc_id]; | |
1967 | if ((param_offset + param_size) > buff_len) | |
1968 | return -EINVAL; | |
1969 | ||
1970 | if (!param_offset && (param_size == buff_len)) { | |
1971 | /* memory space already available to hold full descriptor */ | |
1972 | desc_buf = param_read_buf; | |
1973 | is_kmalloc = false; | |
1974 | } else { | |
1975 | /* allocate memory to hold full descriptor */ | |
1976 | desc_buf = kmalloc(buff_len, GFP_KERNEL); | |
1977 | if (!desc_buf) | |
1978 | return -ENOMEM; | |
1979 | } | |
1980 | ||
1981 | ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, | |
1982 | desc_id, desc_index, 0, desc_buf, | |
1983 | &buff_len); | |
1984 | ||
1985 | if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || | |
1986 | (desc_buf[QUERY_DESC_LENGTH_OFFSET] != | |
1987 | ufs_query_desc_max_size[desc_id]) | |
1988 | || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) { | |
1989 | dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d", | |
1990 | __func__, desc_id, param_offset, buff_len, ret); | |
1991 | if (!ret) | |
1992 | ret = -EINVAL; | |
1993 | ||
1994 | goto out; | |
1995 | } | |
1996 | ||
1997 | if (is_kmalloc) | |
1998 | memcpy(param_read_buf, &desc_buf[param_offset], param_size); | |
1999 | out: | |
2000 | if (is_kmalloc) | |
2001 | kfree(desc_buf); | |
2002 | return ret; | |
2003 | } | |
2004 | ||
2005 | static inline int ufshcd_read_desc(struct ufs_hba *hba, | |
2006 | enum desc_idn desc_id, | |
2007 | int desc_index, | |
2008 | u8 *buf, | |
2009 | u32 size) | |
2010 | { | |
2011 | return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); | |
2012 | } | |
2013 | ||
2014 | static inline int ufshcd_read_power_desc(struct ufs_hba *hba, | |
2015 | u8 *buf, | |
2016 | u32 size) | |
2017 | { | |
2018 | return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); | |
2019 | } | |
2020 | ||
2021 | /** | |
2022 | * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter | |
2023 | * @hba: Pointer to adapter instance | |
2024 | * @lun: lun id | |
2025 | * @param_offset: offset of the parameter to read | |
2026 | * @param_read_buf: pointer to buffer where parameter would be read | |
2027 | * @param_size: sizeof(param_read_buf) | |
2028 | * | |
2029 | * Return 0 in case of success, non-zero otherwise | |
2030 | */ | |
2031 | static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, | |
2032 | int lun, | |
2033 | enum unit_desc_param param_offset, | |
2034 | u8 *param_read_buf, | |
2035 | u32 param_size) | |
2036 | { | |
2037 | /* | |
2038 | * Unit descriptors are only available for general purpose LUs (LUN id | |
2039 | * from 0 to 7) and RPMB Well known LU. | |
2040 | */ | |
0ce147d4 | 2041 | if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN)) |
da461cec SJ |
2042 | return -EOPNOTSUPP; |
2043 | ||
2044 | return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, | |
2045 | param_offset, param_read_buf, param_size); | |
2046 | } | |
2047 | ||
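/*
 * Illustrative sketch only (not part of the driver): fetching one unit
 * descriptor field for a LUN. UNIT_DESC_PARAM_LU_Q_DEPTH is assumed to be
 * defined in ufs.h; the wrapper name is hypothetical. The same read is
 * done by ufshcd_set_queue_depth() further below.
 */
static int ufshcd_example_read_lu_qdepth(struct ufs_hba *hba, int lun,
					 u8 *qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
					   UNIT_DESC_PARAM_LU_Q_DEPTH,
					   qdepth, sizeof(*qdepth));
}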
7a3e97b0 SY |
2048 | /** |
2049 | * ufshcd_memory_alloc - allocate memory for host memory space data structures | |
2050 | * @hba: per adapter instance | |
2051 | * | |
2052 | * 1. Allocate DMA memory for Command Descriptor array | |
2053 | * Each command descriptor consists of Command UPIU, Response UPIU and PRDT |
2054 | * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL). | |
2055 | * 3. Allocate DMA memory for UTP Task Management Request Descriptor List | |
2056 | * (UTMRDL) | |
2057 | * 4. Allocate memory for local reference block(lrb). | |
2058 | * | |
2059 | * Returns 0 for success, non-zero in case of failure | |
2060 | */ | |
2061 | static int ufshcd_memory_alloc(struct ufs_hba *hba) | |
2062 | { | |
2063 | size_t utmrdl_size, utrdl_size, ucdl_size; | |
2064 | ||
2065 | /* Allocate memory for UTP command descriptors */ | |
2066 | ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | |
2953f850 SJ |
2067 | hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, |
2068 | ucdl_size, | |
2069 | &hba->ucdl_dma_addr, | |
2070 | GFP_KERNEL); | |
7a3e97b0 SY |
2071 | |
2072 | /* | |
2073 | * UFSHCI requires UTP command descriptor to be 128 byte aligned. | |
2074 | * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE | |
2075 | * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will | |
2076 | * be aligned to 128 bytes as well | |
2077 | */ | |
2078 | if (!hba->ucdl_base_addr || | |
2079 | WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 2080 | dev_err(hba->dev, |
7a3e97b0 SY |
2081 | "Command Descriptor Memory allocation failed\n"); |
2082 | goto out; | |
2083 | } | |
2084 | ||
2085 | /* | |
2086 | * Allocate memory for UTP Transfer descriptors | |
2087 | * UFSHCI requires 1024 byte alignment of UTRD | |
2088 | */ | |
2089 | utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | |
2953f850 SJ |
2090 | hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, |
2091 | utrdl_size, | |
2092 | &hba->utrdl_dma_addr, | |
2093 | GFP_KERNEL); | |
7a3e97b0 SY |
2094 | if (!hba->utrdl_base_addr || |
2095 | WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 2096 | dev_err(hba->dev, |
7a3e97b0 SY |
2097 | "Transfer Descriptor Memory allocation failed\n"); |
2098 | goto out; | |
2099 | } | |
2100 | ||
2101 | /* | |
2102 | * Allocate memory for UTP Task Management descriptors | |
2103 | * UFSHCI requires 1024 byte alignment of UTMRD | |
2104 | */ | |
2105 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | |
2953f850 SJ |
2106 | hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, |
2107 | utmrdl_size, | |
2108 | &hba->utmrdl_dma_addr, | |
2109 | GFP_KERNEL); | |
7a3e97b0 SY |
2110 | if (!hba->utmrdl_base_addr || |
2111 | WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { | |
3b1d0580 | 2112 | dev_err(hba->dev, |
7a3e97b0 SY |
2113 | "Task Management Descriptor Memory allocation failed\n"); |
2114 | goto out; | |
2115 | } | |
2116 | ||
2117 | /* Allocate memory for local reference block */ | |
2953f850 SJ |
2118 | hba->lrb = devm_kzalloc(hba->dev, |
2119 | hba->nutrs * sizeof(struct ufshcd_lrb), | |
2120 | GFP_KERNEL); | |
7a3e97b0 | 2121 | if (!hba->lrb) { |
3b1d0580 | 2122 | dev_err(hba->dev, "LRB Memory allocation failed\n"); |
7a3e97b0 SY |
2123 | goto out; |
2124 | } | |
2125 | return 0; | |
2126 | out: | |
7a3e97b0 SY |
2127 | return -ENOMEM; |
2128 | } | |
2129 | ||
2130 | /** | |
2131 | * ufshcd_host_memory_configure - configure local reference block with | |
2132 | * memory offsets | |
2133 | * @hba: per adapter instance | |
2134 | * | |
2135 | * Configure Host memory space | |
2136 | * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA | |
2137 | * address. | |
2138 | * 2. Update each UTRD with Response UPIU offset, Response UPIU length | |
2139 | * and PRDT offset. | |
2140 | * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT | |
2141 | * into local reference block. | |
2142 | */ | |
2143 | static void ufshcd_host_memory_configure(struct ufs_hba *hba) | |
2144 | { | |
2145 | struct utp_transfer_cmd_desc *cmd_descp; | |
2146 | struct utp_transfer_req_desc *utrdlp; | |
2147 | dma_addr_t cmd_desc_dma_addr; | |
2148 | dma_addr_t cmd_desc_element_addr; | |
2149 | u16 response_offset; | |
2150 | u16 prdt_offset; | |
2151 | int cmd_desc_size; | |
2152 | int i; | |
2153 | ||
2154 | utrdlp = hba->utrdl_base_addr; | |
2155 | cmd_descp = hba->ucdl_base_addr; | |
2156 | ||
2157 | response_offset = | |
2158 | offsetof(struct utp_transfer_cmd_desc, response_upiu); | |
2159 | prdt_offset = | |
2160 | offsetof(struct utp_transfer_cmd_desc, prd_table); | |
2161 | ||
2162 | cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); | |
2163 | cmd_desc_dma_addr = hba->ucdl_dma_addr; | |
2164 | ||
2165 | for (i = 0; i < hba->nutrs; i++) { | |
2166 | /* Configure UTRD with command descriptor base address */ | |
2167 | cmd_desc_element_addr = | |
2168 | (cmd_desc_dma_addr + (cmd_desc_size * i)); | |
2169 | utrdlp[i].command_desc_base_addr_lo = | |
2170 | cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); | |
2171 | utrdlp[i].command_desc_base_addr_hi = | |
2172 | cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); | |
2173 | ||
2174 | /* Response upiu and prdt offset should be in double words */ | |
2175 | utrdlp[i].response_upiu_offset = | |
2176 | cpu_to_le16((response_offset >> 2)); | |
2177 | utrdlp[i].prd_table_offset = | |
2178 | cpu_to_le16((prdt_offset >> 2)); | |
2179 | utrdlp[i].response_upiu_length = | |
3ca316c5 | 2180 | cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); |
7a3e97b0 SY |
2181 | |
2182 | hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); | |
5a0b0cb9 SRT |
2183 | hba->lrb[i].ucd_req_ptr = |
2184 | (struct utp_upiu_req *)(cmd_descp + i); | |
7a3e97b0 SY |
2185 | hba->lrb[i].ucd_rsp_ptr = |
2186 | (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; | |
2187 | hba->lrb[i].ucd_prdt_ptr = | |
2188 | (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; | |
2189 | } | |
2190 | } | |
2191 | ||
2192 | /** | |
2193 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup | |
2194 | * @hba: per adapter instance | |
2195 | * | |
2196 | * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer, | |
2197 | * in order to initialize the Unipro link startup procedure. | |
2198 | * Once the Unipro links are up, the device connected to the controller | |
2199 | * is detected. | |
2200 | * | |
2201 | * Returns 0 on success, non-zero value on failure | |
2202 | */ | |
2203 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) | |
2204 | { | |
6ccf44fe SJ |
2205 | struct uic_command uic_cmd = {0}; |
2206 | int ret; | |
7a3e97b0 | 2207 | |
6ccf44fe | 2208 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; |
7a3e97b0 | 2209 | |
6ccf44fe SJ |
2210 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); |
2211 | if (ret) | |
2212 | dev_err(hba->dev, | |
2213 | "dme-link-startup: error code %d\n", ret); | |
2214 | return ret; | |
7a3e97b0 SY |
2215 | } |
2216 | ||
cad2e03d YG |
2217 | static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) |
2218 | { | |
2219 | #define MIN_DELAY_BEFORE_DME_CMDS_US 1000 | |
2220 | unsigned long min_sleep_time_us; | |
2221 | ||
2222 | if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) | |
2223 | return; | |
2224 | ||
2225 | /* | |
2226 | * last_dme_cmd_tstamp will be 0 only for 1st call to | |
2227 | * this function | |
2228 | */ | |
2229 | if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { | |
2230 | min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US; | |
2231 | } else { | |
2232 | unsigned long delta = | |
2233 | (unsigned long) ktime_to_us( | |
2234 | ktime_sub(ktime_get(), | |
2235 | hba->last_dme_cmd_tstamp)); | |
2236 | ||
2237 | if (delta < MIN_DELAY_BEFORE_DME_CMDS_US) | |
2238 | min_sleep_time_us = | |
2239 | MIN_DELAY_BEFORE_DME_CMDS_US - delta; | |
2240 | else | |
2241 | return; /* no more delay required */ | |
2242 | } | |
2243 | ||
2244 | /* allow sleep for extra 50us if needed */ | |
2245 | usleep_range(min_sleep_time_us, min_sleep_time_us + 50); | |
2246 | } | |
2247 | ||
12b4fdb4 SJ |
2248 | /** |
2249 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET | |
2250 | * @hba: per adapter instance | |
2251 | * @attr_sel: uic command argument1 | |
2252 | * @attr_set: attribute set type as uic command argument2 | |
2253 | * @mib_val: setting value as uic command argument3 | |
2254 | * @peer: indicate whether peer or local | |
2255 | * | |
2256 | * Returns 0 on success, non-zero value on failure | |
2257 | */ | |
2258 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | |
2259 | u8 attr_set, u32 mib_val, u8 peer) | |
2260 | { | |
2261 | struct uic_command uic_cmd = {0}; | |
2262 | static const char *const action[] = { | |
2263 | "dme-set", | |
2264 | "dme-peer-set" | |
2265 | }; | |
2266 | const char *set = action[!!peer]; | |
2267 | int ret; | |
64238fbd | 2268 | int retries = UFS_UIC_COMMAND_RETRIES; |
12b4fdb4 SJ |
2269 | |
2270 | uic_cmd.command = peer ? | |
2271 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; | |
2272 | uic_cmd.argument1 = attr_sel; | |
2273 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); | |
2274 | uic_cmd.argument3 = mib_val; | |
2275 | ||
64238fbd YG |
2276 | do { |
2277 | /* for peer attributes we retry upon failure */ | |
2278 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
2279 | if (ret) | |
2280 | dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", | |
2281 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); | |
2282 | } while (ret && peer && --retries); | |
2283 | ||
2284 | if (!retries) | |
2285 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", | |
2286 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, | |
2287 | retries); | |
12b4fdb4 SJ |
2288 | |
2289 | return ret; | |
2290 | } | |
2291 | EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); | |
2292 | ||
2293 | /** | |
2294 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET | |
2295 | * @hba: per adapter instance | |
2296 | * @attr_sel: uic command argument1 | |
2297 | * @mib_val: the value of the attribute as returned by the UIC command | |
2298 | * @peer: indicate whether peer or local | |
2299 | * | |
2300 | * Returns 0 on success, non-zero value on failure | |
2301 | */ | |
2302 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | |
2303 | u32 *mib_val, u8 peer) | |
2304 | { | |
2305 | struct uic_command uic_cmd = {0}; | |
2306 | static const char *const action[] = { | |
2307 | "dme-get", | |
2308 | "dme-peer-get" | |
2309 | }; | |
2310 | const char *get = action[!!peer]; | |
2311 | int ret; | |
64238fbd | 2312 | int retries = UFS_UIC_COMMAND_RETRIES; |
874237f7 YG |
2313 | struct ufs_pa_layer_attr orig_pwr_info; |
2314 | struct ufs_pa_layer_attr temp_pwr_info; | |
2315 | bool pwr_mode_change = false; | |
2316 | ||
2317 | if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { | |
2318 | orig_pwr_info = hba->pwr_info; | |
2319 | temp_pwr_info = orig_pwr_info; | |
2320 | ||
2321 | if (orig_pwr_info.pwr_tx == FAST_MODE || | |
2322 | orig_pwr_info.pwr_rx == FAST_MODE) { | |
2323 | temp_pwr_info.pwr_tx = FASTAUTO_MODE; | |
2324 | temp_pwr_info.pwr_rx = FASTAUTO_MODE; | |
2325 | pwr_mode_change = true; | |
2326 | } else if (orig_pwr_info.pwr_tx == SLOW_MODE || | |
2327 | orig_pwr_info.pwr_rx == SLOW_MODE) { | |
2328 | temp_pwr_info.pwr_tx = SLOWAUTO_MODE; | |
2329 | temp_pwr_info.pwr_rx = SLOWAUTO_MODE; | |
2330 | pwr_mode_change = true; | |
2331 | } | |
2332 | if (pwr_mode_change) { | |
2333 | ret = ufshcd_change_power_mode(hba, &temp_pwr_info); | |
2334 | if (ret) | |
2335 | goto out; | |
2336 | } | |
2337 | } | |
12b4fdb4 SJ |
2338 | |
2339 | uic_cmd.command = peer ? | |
2340 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; | |
2341 | uic_cmd.argument1 = attr_sel; | |
2342 | ||
64238fbd YG |
2343 | do { |
2344 | /* for peer attributes we retry upon failure */ | |
2345 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
2346 | if (ret) | |
2347 | dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", | |
2348 | get, UIC_GET_ATTR_ID(attr_sel), ret); | |
2349 | } while (ret && peer && --retries); | |
2350 | ||
2351 | if (!retries) | |
2352 | dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", | |
2353 | get, UIC_GET_ATTR_ID(attr_sel), retries); | |
12b4fdb4 | 2354 | |
64238fbd | 2355 | if (mib_val && !ret) |
12b4fdb4 | 2356 | *mib_val = uic_cmd.argument3; |
874237f7 YG |
2357 | |
2358 | if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) | |
2359 | && pwr_mode_change) | |
2360 | ufshcd_change_power_mode(hba, &orig_pwr_info); | |
12b4fdb4 SJ |
2361 | out: |
2362 | return ret; | |
2363 | } | |
2364 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | |
2365 | ||
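/*
 * Illustrative sketch only (not part of the driver): the ufshcd_dme_set()/
 * ufshcd_dme_get() convenience wrappers (assumed to be defined in ufshcd.h
 * on top of the two exported functions above) accessing local UniPro PA
 * attributes. The function name is hypothetical; real gear changes go
 * through ufshcd_change_power_mode() further below.
 */
static int ufshcd_example_pa_attr_access(struct ufs_hba *hba)
{
	u32 max_pwm_gear = 0;
	int ret;

	/* DME_GET of a local PA-layer MIB attribute */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &max_pwm_gear);
	if (ret)
		return ret;

	/* DME_SET of a local PA-layer MIB attribute */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), max_pwm_gear);
}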
53b3d9c3 | 2366 | /** |
57d104c1 SJ |
2367 | * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power |
2368 | * state) and waits for them to take effect. |
2369 | * | |
53b3d9c3 | 2370 | * @hba: per adapter instance |
57d104c1 SJ |
2371 | * @cmd: UIC command to execute |
2372 | * | |
2373 | * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & | |
2374 | * DME_HIBERNATE_EXIT commands take some time to take effect on both the host |
2375 | * and device UniPro links, and hence their final completion is indicated by |
2376 | * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in | |
2377 | * addition to normal UIC command completion Status (UCCS). This function only | |
2378 | * returns after the relevant status bits indicate the completion. | |
53b3d9c3 SJ |
2379 | * |
2380 | * Returns 0 on success, non-zero value on failure | |
2381 | */ | |
57d104c1 | 2382 | static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) |
53b3d9c3 | 2383 | { |
57d104c1 | 2384 | struct completion uic_async_done; |
53b3d9c3 SJ |
2385 | unsigned long flags; |
2386 | u8 status; | |
2387 | int ret; | |
d75f7fe4 | 2388 | bool reenable_intr = false; |
53b3d9c3 | 2389 | |
53b3d9c3 | 2390 | mutex_lock(&hba->uic_cmd_mutex); |
57d104c1 | 2391 | init_completion(&uic_async_done); |
cad2e03d | 2392 | ufshcd_add_delay_before_dme_cmd(hba); |
53b3d9c3 SJ |
2393 | |
2394 | spin_lock_irqsave(hba->host->host_lock, flags); | |
57d104c1 | 2395 | hba->uic_async_done = &uic_async_done; |
d75f7fe4 YG |
2396 | if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { |
2397 | ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); | |
2398 | /* | |
2399 | * Make sure UIC command completion interrupt is disabled before | |
2400 | * issuing UIC command. | |
2401 | */ | |
2402 | wmb(); | |
2403 | reenable_intr = true; | |
57d104c1 | 2404 | } |
d75f7fe4 YG |
2405 | ret = __ufshcd_send_uic_cmd(hba, cmd, false); |
2406 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
57d104c1 SJ |
2407 | if (ret) { |
2408 | dev_err(hba->dev, | |
2409 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", | |
2410 | cmd->command, cmd->argument3, ret); | |
53b3d9c3 SJ |
2411 | goto out; |
2412 | } | |
2413 | ||
57d104c1 | 2414 | if (!wait_for_completion_timeout(hba->uic_async_done, |
53b3d9c3 SJ |
2415 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) { |
2416 | dev_err(hba->dev, | |
57d104c1 SJ |
2417 | "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", |
2418 | cmd->command, cmd->argument3); | |
53b3d9c3 SJ |
2419 | ret = -ETIMEDOUT; |
2420 | goto out; | |
2421 | } | |
2422 | ||
2423 | status = ufshcd_get_upmcrs(hba); | |
2424 | if (status != PWR_LOCAL) { | |
2425 | dev_err(hba->dev, | |
57d104c1 SJ |
2426 | "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n", |
2427 | cmd->command, status); | |
53b3d9c3 SJ |
2428 | ret = (status != PWR_OK) ? status : -1; |
2429 | } | |
2430 | out: | |
2431 | spin_lock_irqsave(hba->host->host_lock, flags); | |
d75f7fe4 | 2432 | hba->active_uic_cmd = NULL; |
57d104c1 | 2433 | hba->uic_async_done = NULL; |
d75f7fe4 YG |
2434 | if (reenable_intr) |
2435 | ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); | |
53b3d9c3 SJ |
2436 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
2437 | mutex_unlock(&hba->uic_cmd_mutex); | |
1ab27c9c | 2438 | |
53b3d9c3 SJ |
2439 | return ret; |
2440 | } | |
2441 | ||
57d104c1 SJ |
2442 | /** |
2443 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change |
2444 | * using DME_SET primitives. |
2445 | * @hba: per adapter instance |
2446 | * @mode: power mode value |
2447 | * | |
2448 | * Returns 0 on success, non-zero value on failure | |
2449 | */ | |
2450 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | |
2451 | { | |
2452 | struct uic_command uic_cmd = {0}; | |
1ab27c9c | 2453 | int ret; |
57d104c1 | 2454 | |
c3a2f9ee YG |
2455 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { |
2456 | ret = ufshcd_dme_set(hba, | |
2457 | UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1); | |
2458 | if (ret) { | |
2459 | dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", | |
2460 | __func__, ret); | |
2461 | goto out; | |
2462 | } | |
2463 | } | |
2464 | ||
57d104c1 SJ |
2465 | uic_cmd.command = UIC_CMD_DME_SET; |
2466 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | |
2467 | uic_cmd.argument3 = mode; | |
1ab27c9c ST |
2468 | ufshcd_hold(hba, false); |
2469 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
2470 | ufshcd_release(hba); | |
57d104c1 | 2471 | |
c3a2f9ee | 2472 | out: |
1ab27c9c | 2473 | return ret; |
57d104c1 SJ |
2474 | } |
2475 | ||
53c12d0e YG |
2476 | static int ufshcd_link_recovery(struct ufs_hba *hba) |
2477 | { | |
2478 | int ret; | |
2479 | unsigned long flags; | |
2480 | ||
2481 | spin_lock_irqsave(hba->host->host_lock, flags); | |
2482 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
2483 | ufshcd_set_eh_in_progress(hba); | |
2484 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
2485 | ||
2486 | ret = ufshcd_host_reset_and_restore(hba); | |
2487 | ||
2488 | spin_lock_irqsave(hba->host->host_lock, flags); | |
2489 | if (ret) | |
2490 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
2491 | ufshcd_clear_eh_in_progress(hba); | |
2492 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
2493 | ||
2494 | if (ret) | |
2495 | dev_err(hba->dev, "%s: link recovery failed, err %d", | |
2496 | __func__, ret); | |
2497 | ||
2498 | return ret; | |
2499 | } | |
2500 | ||
87d0b4a6 | 2501 | static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) |
57d104c1 | 2502 | { |
87d0b4a6 | 2503 | int ret; |
57d104c1 SJ |
2504 | struct uic_command uic_cmd = {0}; |
2505 | ||
2506 | uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; | |
87d0b4a6 YG |
2507 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); |
2508 | ||
53c12d0e | 2509 | if (ret) { |
87d0b4a6 YG |
2510 | dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", |
2511 | __func__, ret); | |
2512 | ||
53c12d0e YG |
2513 | /* |
2514 | * If link recovery fails then return error so that the caller |
2515 | * doesn't retry the hibern8 enter again. |
2516 | */ | |
2517 | if (ufshcd_link_recovery(hba)) | |
2518 | ret = -ENOLINK; | |
2519 | } | |
2520 | ||
87d0b4a6 YG |
2521 | return ret; |
2522 | } | |
2523 | ||
2524 | static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) | |
2525 | { | |
2526 | int ret = 0, retries; | |
57d104c1 | 2527 | |
87d0b4a6 YG |
2528 | for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { |
2529 | ret = __ufshcd_uic_hibern8_enter(hba); | |
2530 | if (!ret || ret == -ENOLINK) | |
2531 | goto out; | |
2532 | } | |
2533 | out: | |
2534 | return ret; | |
57d104c1 SJ |
2535 | } |
2536 | ||
2537 | static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) | |
2538 | { | |
2539 | struct uic_command uic_cmd = {0}; | |
2540 | int ret; | |
2541 | ||
2542 | uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; | |
2543 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
2544 | if (ret) { | |
53c12d0e YG |
2545 | dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", |
2546 | __func__, ret); | |
2547 | ret = ufshcd_link_recovery(hba); | |
57d104c1 SJ |
2548 | } |
2549 | ||
2550 | return ret; | |
2551 | } | |
2552 | ||
5064636c YG |
2553 | /** |
2554 | * ufshcd_init_pwr_info - setting the POR (power on reset) | |
2555 | * values in hba power info | |
2556 | * @hba: per-adapter instance | |
2557 | */ | |
2558 | static void ufshcd_init_pwr_info(struct ufs_hba *hba) | |
2559 | { | |
2560 | hba->pwr_info.gear_rx = UFS_PWM_G1; | |
2561 | hba->pwr_info.gear_tx = UFS_PWM_G1; | |
2562 | hba->pwr_info.lane_rx = 1; | |
2563 | hba->pwr_info.lane_tx = 1; | |
2564 | hba->pwr_info.pwr_rx = SLOWAUTO_MODE; | |
2565 | hba->pwr_info.pwr_tx = SLOWAUTO_MODE; | |
2566 | hba->pwr_info.hs_rate = 0; | |
2567 | } | |
2568 | ||
d3e89bac | 2569 | /** |
7eb584db DR |
2570 | * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device |
2571 | * @hba: per-adapter instance | |
d3e89bac | 2572 | */ |
7eb584db | 2573 | static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) |
d3e89bac | 2574 | { |
7eb584db DR |
2575 | struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; |
2576 | ||
2577 | if (hba->max_pwr_info.is_valid) | |
2578 | return 0; | |
2579 | ||
2580 | pwr_info->pwr_tx = FASTAUTO_MODE; | |
2581 | pwr_info->pwr_rx = FASTAUTO_MODE; | |
2582 | pwr_info->hs_rate = PA_HS_MODE_B; | |
d3e89bac SJ |
2583 | |
2584 | /* Get the connected lane count */ | |
7eb584db DR |
2585 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), |
2586 | &pwr_info->lane_rx); | |
2587 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
2588 | &pwr_info->lane_tx); | |
2589 | ||
2590 | if (!pwr_info->lane_rx || !pwr_info->lane_tx) { | |
2591 | dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", | |
2592 | __func__, | |
2593 | pwr_info->lane_rx, | |
2594 | pwr_info->lane_tx); | |
2595 | return -EINVAL; | |
2596 | } | |
d3e89bac SJ |
2597 | |
2598 | /* | |
2599 | * First, get the maximum gears of HS speed. | |
2600 | * If a zero value, it means there is no HSGEAR capability. | |
2601 | * Then, get the maximum gears of PWM speed. | |
2602 | */ | |
7eb584db DR |
2603 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); |
2604 | if (!pwr_info->gear_rx) { | |
2605 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), | |
2606 | &pwr_info->gear_rx); | |
2607 | if (!pwr_info->gear_rx) { | |
2608 | dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", | |
2609 | __func__, pwr_info->gear_rx); | |
2610 | return -EINVAL; | |
2611 | } | |
2612 | pwr_info->pwr_rx = SLOWAUTO_MODE; | |
d3e89bac SJ |
2613 | } |
2614 | ||
7eb584db DR |
2615 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), |
2616 | &pwr_info->gear_tx); | |
2617 | if (!pwr_info->gear_tx) { | |
d3e89bac | 2618 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), |
7eb584db DR |
2619 | &pwr_info->gear_tx); |
2620 | if (!pwr_info->gear_tx) { | |
2621 | dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", | |
2622 | __func__, pwr_info->gear_tx); | |
2623 | return -EINVAL; | |
2624 | } | |
2625 | pwr_info->pwr_tx = SLOWAUTO_MODE; | |
2626 | } | |
2627 | ||
2628 | hba->max_pwr_info.is_valid = true; | |
2629 | return 0; | |
2630 | } | |
2631 | ||
2632 | static int ufshcd_change_power_mode(struct ufs_hba *hba, | |
2633 | struct ufs_pa_layer_attr *pwr_mode) | |
2634 | { | |
2635 | int ret; | |
2636 | ||
2637 | /* if already configured to the requested pwr_mode */ | |
2638 | if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && | |
2639 | pwr_mode->gear_tx == hba->pwr_info.gear_tx && | |
2640 | pwr_mode->lane_rx == hba->pwr_info.lane_rx && | |
2641 | pwr_mode->lane_tx == hba->pwr_info.lane_tx && | |
2642 | pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && | |
2643 | pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && | |
2644 | pwr_mode->hs_rate == hba->pwr_info.hs_rate) { | |
2645 | dev_dbg(hba->dev, "%s: power already configured\n", __func__); | |
2646 | return 0; | |
d3e89bac SJ |
2647 | } |
2648 | ||
2649 | /* | |
2650 | * Configure attributes for power mode change with below. | |
2651 | * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, | |
2652 | * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, | |
2653 | * - PA_HSSERIES | |
2654 | */ | |
7eb584db DR |
2655 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); |
2656 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), | |
2657 | pwr_mode->lane_rx); | |
2658 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || | |
2659 | pwr_mode->pwr_rx == FAST_MODE) | |
d3e89bac | 2660 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); |
7eb584db DR |
2661 | else |
2662 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); | |
d3e89bac | 2663 | |
7eb584db DR |
2664 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); |
2665 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), | |
2666 | pwr_mode->lane_tx); | |
2667 | if (pwr_mode->pwr_tx == FASTAUTO_MODE || | |
2668 | pwr_mode->pwr_tx == FAST_MODE) | |
d3e89bac | 2669 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); |
7eb584db DR |
2670 | else |
2671 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); | |
d3e89bac | 2672 | |
7eb584db DR |
2673 | if (pwr_mode->pwr_rx == FASTAUTO_MODE || |
2674 | pwr_mode->pwr_tx == FASTAUTO_MODE || | |
2675 | pwr_mode->pwr_rx == FAST_MODE || | |
2676 | pwr_mode->pwr_tx == FAST_MODE) | |
2677 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), | |
2678 | pwr_mode->hs_rate); | |
d3e89bac | 2679 | |
7eb584db DR |
2680 | ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
2681 | | pwr_mode->pwr_tx); | |
2682 | ||
2683 | if (ret) { | |
d3e89bac | 2684 | dev_err(hba->dev, |
7eb584db DR |
2685 | "%s: power mode change failed %d\n", __func__, ret); |
2686 | } else { | |
0263bcd0 YG |
2687 | ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, |
2688 | pwr_mode); | |
7eb584db DR |
2689 | |
2690 | memcpy(&hba->pwr_info, pwr_mode, | |
2691 | sizeof(struct ufs_pa_layer_attr)); | |
2692 | } | |
2693 | ||
2694 | return ret; | |
2695 | } | |
2696 | ||
2697 | /** | |
2698 | * ufshcd_config_pwr_mode - configure a new power mode | |
2699 | * @hba: per-adapter instance | |
2700 | * @desired_pwr_mode: desired power configuration | |
2701 | */ | |
2702 | static int ufshcd_config_pwr_mode(struct ufs_hba *hba, | |
2703 | struct ufs_pa_layer_attr *desired_pwr_mode) | |
2704 | { | |
2705 | struct ufs_pa_layer_attr final_params = { 0 }; | |
2706 | int ret; | |
2707 | ||
0263bcd0 YG |
2708 | ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, |
2709 | desired_pwr_mode, &final_params); | |
2710 | ||
2711 | if (ret) | |
7eb584db DR |
2712 | memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); |
2713 | ||
2714 | ret = ufshcd_change_power_mode(hba, &final_params); | |
d3e89bac SJ |
2715 | |
2716 | return ret; | |
2717 | } | |
2718 | ||
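/*
 * Illustrative sketch only (not part of the driver): how a caller (e.g.
 * the probe path) could switch to the fastest mode negotiated with the
 * device by combining ufshcd_get_max_pwr_mode() and
 * ufshcd_config_pwr_mode() above. The function name is hypothetical.
 */
static int ufshcd_example_scale_to_max_gear(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_get_max_pwr_mode(hba);
	if (ret) {
		dev_err(hba->dev, "%s: failed to get max pwr mode %d\n",
			__func__, ret);
		return ret;
	}

	/* max_pwr_info was filled in by ufshcd_get_max_pwr_mode() */
	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}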
68078d5c DR |
2719 | /** |
2720 | * ufshcd_complete_dev_init() - checks device readiness | |
2721 | * @hba: per-adapter instance |
2722 | * | |
2723 | * Set fDeviceInit flag and poll until device toggles it. | |
2724 | */ | |
2725 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) | |
2726 | { | |
dc3c8d3a YG |
2727 | int i; |
2728 | int err; | |
68078d5c DR |
2729 | bool flag_res = 1; |
2730 | ||
dc3c8d3a YG |
2731 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, |
2732 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); | |
68078d5c DR |
2733 | if (err) { |
2734 | dev_err(hba->dev, | |
2735 | "%s setting fDeviceInit flag failed with error %d\n", | |
2736 | __func__, err); | |
2737 | goto out; | |
2738 | } | |
2739 | ||
dc3c8d3a YG |
2740 | /* poll for max. 1000 iterations for fDeviceInit flag to clear */ |
2741 | for (i = 0; i < 1000 && !err && flag_res; i++) | |
2742 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, | |
2743 | QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); | |
2744 | ||
68078d5c DR |
2745 | if (err) |
2746 | dev_err(hba->dev, | |
2747 | "%s reading fDeviceInit flag failed with error %d\n", | |
2748 | __func__, err); | |
2749 | else if (flag_res) | |
2750 | dev_err(hba->dev, | |
2751 | "%s fDeviceInit was not cleared by the device\n", | |
2752 | __func__); | |
2753 | ||
2754 | out: | |
2755 | return err; | |
2756 | } | |
2757 | ||
7a3e97b0 SY |
2758 | /** |
2759 | * ufshcd_make_hba_operational - Make UFS controller operational | |
2760 | * @hba: per adapter instance | |
2761 | * | |
2762 | * To bring UFS host controller to operational state, | |
5c0c28a8 SRT |
2763 | * 1. Enable required interrupts |
2764 | * 2. Configure interrupt aggregation | |
897efe62 | 2765 | * 3. Program UTRL and UTMRL base address |
5c0c28a8 | 2766 | * 4. Configure run-stop-registers |
7a3e97b0 SY |
2767 | * |
2768 | * Returns 0 on success, non-zero value on failure | |
2769 | */ | |
2770 | static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |
2771 | { | |
2772 | int err = 0; | |
2773 | u32 reg; | |
2774 | ||
6ccf44fe SJ |
2775 | /* Enable required interrupts */ |
2776 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); | |
2777 | ||
2778 | /* Configure interrupt aggregation */ | |
b852190e YG |
2779 | if (ufshcd_is_intr_aggr_allowed(hba)) |
2780 | ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); | |
2781 | else | |
2782 | ufshcd_disable_intr_aggr(hba); | |
6ccf44fe SJ |
2783 | |
2784 | /* Configure UTRL and UTMRL base address registers */ | |
2785 | ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), | |
2786 | REG_UTP_TRANSFER_REQ_LIST_BASE_L); | |
2787 | ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), | |
2788 | REG_UTP_TRANSFER_REQ_LIST_BASE_H); | |
2789 | ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), | |
2790 | REG_UTP_TASK_REQ_LIST_BASE_L); | |
2791 | ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), | |
2792 | REG_UTP_TASK_REQ_LIST_BASE_H); | |
2793 | ||
897efe62 YG |
2794 | /* |
2795 | * Make sure base address and interrupt setup are updated before | |
2796 | * enabling the run/stop registers below. | |
2797 | */ | |
2798 | wmb(); | |
2799 | ||
7a3e97b0 SY |
2800 | /* |
2801 | * UCRDY, UTMRLDY and UTRLRDY bits must be 1 | |
7a3e97b0 | 2802 | */ |
5c0c28a8 | 2803 | reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); |
7a3e97b0 SY |
2804 | if (!(ufshcd_get_lists_status(reg))) { |
2805 | ufshcd_enable_run_stop_reg(hba); | |
2806 | } else { | |
3b1d0580 | 2807 | dev_err(hba->dev, |
7a3e97b0 SY |
2808 | "Host controller not ready to process requests"); |
2809 | err = -EIO; | |
2810 | goto out; | |
2811 | } | |
2812 | ||
7a3e97b0 SY |
2813 | out: |
2814 | return err; | |
2815 | } | |
2816 | ||
2817 | /** | |
2818 | * ufshcd_hba_enable - initialize the controller | |
2819 | * @hba: per adapter instance | |
2820 | * | |
2821 | * The controller resets itself and controller firmware initialization | |
2822 | * sequence kicks off. When controller is ready it will set | |
2823 | * the Host Controller Enable bit to 1. | |
2824 | * | |
2825 | * Returns 0 on success, non-zero value on failure | |
2826 | */ | |
2827 | static int ufshcd_hba_enable(struct ufs_hba *hba) | |
2828 | { | |
2829 | int retry; | |
2830 | ||
2831 | /* | |
2832 | * msleep of 1 and 5 used in this function might result in msleep(20), | |
2833 | * but it was necessary to send the UFS FPGA to reset mode during | |
2834 | * development and testing of this driver. msleep can be changed to | |
2835 | * mdelay and retry count can be reduced based on the controller. | |
2836 | */ | |
2837 | if (!ufshcd_is_hba_active(hba)) { | |
2838 | ||
2839 | /* change controller state to "reset state" */ | |
2840 | ufshcd_hba_stop(hba); | |
2841 | ||
2842 | /* | |
2843 | * This delay is based on the testing done with UFS host | |
2844 | * controller FPGA. The delay can be changed based on the | |
2845 | * host controller used. | |
2846 | */ | |
2847 | msleep(5); | |
2848 | } | |
2849 | ||
57d104c1 SJ |
2850 | /* UniPro link is disabled at this point */ |
2851 | ufshcd_set_link_off(hba); | |
2852 | ||
0263bcd0 | 2853 | ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); |
5c0c28a8 | 2854 | |
7a3e97b0 SY |
2855 | /* start controller initialization sequence */ |
2856 | ufshcd_hba_start(hba); | |
2857 | ||
2858 | /* | |
2859 | * To initialize a UFS host controller HCE bit must be set to 1. | |
2860 | * During initialization the HCE bit value changes from 1->0->1. | |
2861 | * When the host controller completes initialization sequence | |
2862 | * it sets the value of HCE bit to 1. The same HCE bit is read back | |
2863 | * to check if the controller has completed initialization sequence. | |
2864 | * So without this delay the value HCE = 1, set in the previous | |
2865 | * instruction might be read back. | |
2866 | * This delay can be changed based on the controller. | |
2867 | */ | |
2868 | msleep(1); | |
2869 | ||
2870 | /* wait for the host controller to complete initialization */ | |
2871 | retry = 10; | |
2872 | while (ufshcd_is_hba_active(hba)) { | |
2873 | if (retry) { | |
2874 | retry--; | |
2875 | } else { | |
3b1d0580 | 2876 | dev_err(hba->dev, |
7a3e97b0 SY |
2877 | "Controller enable failed\n"); |
2878 | return -EIO; | |
2879 | } | |
2880 | msleep(5); | |
2881 | } | |
5c0c28a8 | 2882 | |
1d337ec2 | 2883 | /* enable UIC related interrupts */ |
57d104c1 | 2884 | ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); |
1d337ec2 | 2885 | |
0263bcd0 | 2886 | ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); |
5c0c28a8 | 2887 | |
7a3e97b0 SY |
2888 | return 0; |
2889 | } | |
2890 | ||
7ca38cf3 YG |
2891 | static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) |
2892 | { | |
2893 | int tx_lanes, i, err = 0; | |
2894 | ||
2895 | if (!peer) | |
2896 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
2897 | &tx_lanes); | |
2898 | else | |
2899 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
2900 | &tx_lanes); | |
2901 | for (i = 0; i < tx_lanes; i++) { | |
2902 | if (!peer) | |
2903 | err = ufshcd_dme_set(hba, | |
2904 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
2905 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
2906 | 0); | |
2907 | else | |
2908 | err = ufshcd_dme_peer_set(hba, | |
2909 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
2910 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
2911 | 0); | |
2912 | if (err) { | |
2913 | dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", | |
2914 | __func__, peer, i, err); | |
2915 | break; | |
2916 | } | |
2917 | } | |
2918 | ||
2919 | return err; | |
2920 | } | |
2921 | ||
2922 | static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) | |
2923 | { | |
2924 | return ufshcd_disable_tx_lcc(hba, true); | |
2925 | } | |
2926 | ||
7a3e97b0 | 2927 | /** |
6ccf44fe | 2928 | * ufshcd_link_startup - Initialize unipro link startup |
7a3e97b0 SY |
2929 | * @hba: per adapter instance |
2930 | * | |
6ccf44fe | 2931 | * Returns 0 for success, non-zero in case of failure |
7a3e97b0 | 2932 | */ |
6ccf44fe | 2933 | static int ufshcd_link_startup(struct ufs_hba *hba) |
7a3e97b0 | 2934 | { |
6ccf44fe | 2935 | int ret; |
1d337ec2 | 2936 | int retries = DME_LINKSTARTUP_RETRIES; |
7a3e97b0 | 2937 | |
1d337ec2 | 2938 | do { |
0263bcd0 | 2939 | ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); |
6ccf44fe | 2940 | |
1d337ec2 | 2941 | ret = ufshcd_dme_link_startup(hba); |
5c0c28a8 | 2942 | |
1d337ec2 SRT |
2943 | /* check if device is detected by inter-connect layer */ |
2944 | if (!ret && !ufshcd_is_device_present(hba)) { | |
2945 | dev_err(hba->dev, "%s: Device not present\n", __func__); | |
2946 | ret = -ENXIO; | |
2947 | goto out; | |
2948 | } | |
6ccf44fe | 2949 | |
1d337ec2 SRT |
2950 | /* |
2951 | * DME link lost indication is only received when link is up, | |
2952 | * but we can't be sure if the link is up until link startup | |
2953 | * succeeds. So reset the local Uni-Pro and try again. | |
2954 | */ | |
2955 | if (ret && ufshcd_hba_enable(hba)) | |
2956 | goto out; | |
2957 | } while (ret && retries--); | |
2958 | ||
2959 | if (ret) | |
2960 | /* failed to get the link up... give up */ | |
5c0c28a8 | 2961 | goto out; |
5c0c28a8 | 2962 | |
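	/*
	 * Hosts or devices flagged with UFSHCD_QUIRK_BROKEN_LCC can misbehave
	 * when TX LCC is left enabled, so turn it off on the device side
	 * right after a successful link startup.
	 */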
7ca38cf3 YG |
2963 | if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { |
2964 | ret = ufshcd_disable_device_tx_lcc(hba); | |
2965 | if (ret) | |
2966 | goto out; | |
2967 | } | |
2968 | ||
5c0c28a8 | 2969 | /* Include any host controller configuration via UIC commands */ |
0263bcd0 YG |
2970 | ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); |
2971 | if (ret) | |
2972 | goto out; | |
7a3e97b0 | 2973 | |
5c0c28a8 | 2974 | ret = ufshcd_make_hba_operational(hba); |
6ccf44fe SJ |
2975 | out: |
2976 | if (ret) | |
2977 | dev_err(hba->dev, "link startup failed %d\n", ret); | |
2978 | return ret; | |
7a3e97b0 SY |
2979 | } |
2980 | ||
5a0b0cb9 SRT |
2981 | /** |
2982 | * ufshcd_verify_dev_init() - Verify device initialization | |
2983 | * @hba: per-adapter instance | |
2984 | * | |
2985 | * Send NOP OUT UPIU and wait for NOP IN response to check whether the | |
2986 | * device's UFS Transport Protocol (UTP) layer is ready after a reset. | |
2987 | * If the UTP layer at the device side is not initialized, it may | |
2988 | * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT | |
2989 | * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. | |
2990 | */ | |
2991 | static int ufshcd_verify_dev_init(struct ufs_hba *hba) | |
2992 | { | |
2993 | int err = 0; | |
2994 | int retries; | |
2995 | ||
1ab27c9c | 2996 | ufshcd_hold(hba, false); |
5a0b0cb9 SRT |
2997 | mutex_lock(&hba->dev_cmd.lock); |
2998 | for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { | |
2999 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, | |
3000 | NOP_OUT_TIMEOUT); | |
3001 | ||
3002 | if (!err || err == -ETIMEDOUT) | |
3003 | break; | |
3004 | ||
3005 | dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); | |
3006 | } | |
3007 | mutex_unlock(&hba->dev_cmd.lock); | |
1ab27c9c | 3008 | ufshcd_release(hba); |
5a0b0cb9 SRT |
3009 | |
3010 | if (err) | |
3011 | dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); | |
3012 | return err; | |
3013 | } | |
3014 | ||
0ce147d4 SJ |
3015 | /** |
3016 | * ufshcd_set_queue_depth - set lun queue depth | |
3017 | * @sdev: pointer to SCSI device | |
3018 | * | |
3019 | * Read bLUQueueDepth value and activate scsi tagged command | |
3020 | * queueing. For WLUN, queue depth is set to 1. For best-effort | |
3021 | * cases (bLUQueueDepth = 0) the queue depth is set to the maximum | |
3022 | * value that the host can queue. | |
3023 | */ | |
3024 | static void ufshcd_set_queue_depth(struct scsi_device *sdev) | |
3025 | { | |
3026 | int ret = 0; | |
3027 | u8 lun_qdepth; | |
3028 | struct ufs_hba *hba; | |
3029 | ||
3030 | hba = shost_priv(sdev->host); | |
3031 | ||
3032 | lun_qdepth = hba->nutrs; | |
3033 | ret = ufshcd_read_unit_desc_param(hba, | |
3034 | ufshcd_scsi_to_upiu_lun(sdev->lun), | |
3035 | UNIT_DESC_PARAM_LU_Q_DEPTH, | |
3036 | &lun_qdepth, | |
3037 | sizeof(lun_qdepth)); | |
3038 | ||
3039 | /* Some WLUNs don't support the unit descriptor */ | |
3040 | if (ret == -EOPNOTSUPP) | |
3041 | lun_qdepth = 1; | |
3042 | else if (!lun_qdepth) | |
3043 | /* eventually, we can figure out the real queue depth */ | |
3044 | lun_qdepth = hba->nutrs; | |
3045 | else | |
3046 | lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); | |
3047 | ||
3048 | dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", | |
3049 | __func__, lun_qdepth); | |
db5ed4df | 3050 | scsi_change_queue_depth(sdev, lun_qdepth); |
0ce147d4 SJ |
3051 | } |
3052 | ||
57d104c1 SJ |
3053 | /* |
3054 | * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR | |
3055 | * @hba: per-adapter instance | |
3056 | * @lun: UFS device lun id | |
3057 | * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info | |
3058 | * | |
3059 | * Returns 0 in case of success and the b_lu_write_protect status is returned | |
3060 | * in the @b_lu_write_protect parameter. | |
3061 | * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. | |
3062 | * Returns -EINVAL in case of invalid parameters passed to this function. | |
3063 | */ | |
3064 | static int ufshcd_get_lu_wp(struct ufs_hba *hba, | |
3065 | u8 lun, | |
3066 | u8 *b_lu_write_protect) | |
3067 | { | |
3068 | int ret; | |
3069 | ||
3070 | if (!b_lu_write_protect) | |
3071 | ret = -EINVAL; | |
3072 | /* | |
3073 | * According to UFS device spec, RPMB LU can't be write | |
3074 | * protected so skip reading bLUWriteProtect parameter for | |
3075 | * it. For other W-LUs, UNIT DESCRIPTOR is not available. | |
3076 | */ | |
3077 | else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) | |
3078 | ret = -ENOTSUPP; | |
3079 | else | |
3080 | ret = ufshcd_read_unit_desc_param(hba, | |
3081 | lun, | |
3082 | UNIT_DESC_PARAM_LU_WR_PROTECT, | |
3083 | b_lu_write_protect, | |
3084 | sizeof(*b_lu_write_protect)); | |
3085 | return ret; | |
3086 | } | |
3087 | ||
3088 | /** | |
3089 | * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect | |
3090 | * status | |
3091 | * @hba: per-adapter instance | |
3092 | * @sdev: pointer to SCSI device | |
3093 | * | |
3094 | */ | |
3095 | static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, | |
3096 | struct scsi_device *sdev) | |
3097 | { | |
3098 | if (hba->dev_info.f_power_on_wp_en && | |
3099 | !hba->dev_info.is_lu_power_on_wp) { | |
3100 | u8 b_lu_write_protect; | |
3101 | ||
3102 | if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), | |
3103 | &b_lu_write_protect) && | |
3104 | (b_lu_write_protect == UFS_LU_POWER_ON_WP)) | |
3105 | hba->dev_info.is_lu_power_on_wp = true; | |
3106 | } | |
3107 | } | |
3108 | ||
7a3e97b0 SY |
3109 | /** |
3110 | * ufshcd_slave_alloc - handle initial SCSI device configurations | |
3111 | * @sdev: pointer to SCSI device | |
3112 | * | |
3113 | * Returns success | |
3114 | */ | |
3115 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | |
3116 | { | |
3117 | struct ufs_hba *hba; | |
3118 | ||
3119 | hba = shost_priv(sdev->host); | |
7a3e97b0 SY |
3120 | |
3121 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ | |
3122 | sdev->use_10_for_ms = 1; | |
7a3e97b0 | 3123 | |
e8e7f271 SRT |
3124 | /* allow SCSI layer to restart the device in case of errors */ |
3125 | sdev->allow_restart = 1; | |
4264fd61 | 3126 | |
b2a6c522 SRT |
3127 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
3128 | sdev->no_report_opcodes = 1; | |
3129 | ||
e8e7f271 | 3130 | |
0ce147d4 | 3131 | ufshcd_set_queue_depth(sdev); |
4264fd61 | 3132 | |
57d104c1 SJ |
3133 | ufshcd_get_lu_power_on_wp_status(hba, sdev); |
3134 | ||
7a3e97b0 SY |
3135 | return 0; |
3136 | } | |
3137 | ||
4264fd61 SRT |
3138 | /** |
3139 | * ufshcd_change_queue_depth - change queue depth | |
3140 | * @sdev: pointer to SCSI device | |
3141 | * @depth: required depth to set | |
4264fd61 | 3142 | * |
db5ed4df | 3143 | * Change queue depth and make sure the max. limits are not crossed. |
4264fd61 | 3144 | */ |
db5ed4df | 3145 | static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) |
4264fd61 SRT |
3146 | { |
3147 | struct ufs_hba *hba = shost_priv(sdev->host); | |
3148 | ||
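	/* Never allow a queue depth beyond the controller's number of transfer request slots (nutrs). */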
3149 | if (depth > hba->nutrs) | |
3150 | depth = hba->nutrs; | |
db5ed4df | 3151 | return scsi_change_queue_depth(sdev, depth); |
4264fd61 SRT |
3152 | } |
3153 | ||
eeda4749 AM |
3154 | /** |
3155 | * ufshcd_slave_configure - adjust SCSI device configurations | |
3156 | * @sdev: pointer to SCSI device | |
3157 | */ | |
3158 | static int ufshcd_slave_configure(struct scsi_device *sdev) | |
3159 | { | |
3160 | struct request_queue *q = sdev->request_queue; | |
3161 | ||
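	/* Keep each data segment aligned and sized to what a single PRDT entry can describe. */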
3162 | blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); | |
3163 | blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX); | |
3164 | ||
3165 | return 0; | |
3166 | } | |
3167 | ||
7a3e97b0 SY |
3168 | /** |
3169 | * ufshcd_slave_destroy - remove SCSI device configurations | |
3170 | * @sdev: pointer to SCSI device | |
3171 | */ | |
3172 | static void ufshcd_slave_destroy(struct scsi_device *sdev) | |
3173 | { | |
3174 | struct ufs_hba *hba; | |
3175 | ||
3176 | hba = shost_priv(sdev->host); | |
0ce147d4 | 3177 | /* Drop the reference as it won't be needed anymore */ |
7c48bfd0 AM |
3178 | if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { |
3179 | unsigned long flags; | |
3180 | ||
3181 | spin_lock_irqsave(hba->host->host_lock, flags); | |
0ce147d4 | 3182 | hba->sdev_ufs_device = NULL; |
7c48bfd0 AM |
3183 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
3184 | } | |
7a3e97b0 SY |
3185 | } |
3186 | ||
3187 | /** | |
3188 | * ufshcd_task_req_compl - handle task management request completion | |
3189 | * @hba: per adapter instance | |
3190 | * @index: index of the completed request | |
e2933132 | 3191 | * @resp: task management service response |
7a3e97b0 | 3192 | * |
e2933132 | 3193 | * Returns non-zero value on error, zero on success |
7a3e97b0 | 3194 | */ |
e2933132 | 3195 | static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) |
7a3e97b0 SY |
3196 | { |
3197 | struct utp_task_req_desc *task_req_descp; | |
3198 | struct utp_upiu_task_rsp *task_rsp_upiup; | |
3199 | unsigned long flags; | |
3200 | int ocs_value; | |
3201 | int task_result; | |
3202 | ||
3203 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3204 | ||
3205 | /* Clear completed tasks from outstanding_tasks */ | |
3206 | __clear_bit(index, &hba->outstanding_tasks); | |
3207 | ||
3208 | task_req_descp = hba->utmrdl_base_addr; | |
3209 | ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]); | |
3210 | ||
3211 | if (ocs_value == OCS_SUCCESS) { | |
3212 | task_rsp_upiup = (struct utp_upiu_task_rsp *) | |
3213 | task_req_descp[index].task_rsp_upiu; | |
3214 | task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); | |
3215 | task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); | |
e2933132 SRT |
3216 | if (resp) |
3217 | *resp = (u8)task_result; | |
7a3e97b0 | 3218 | } else { |
e2933132 SRT |
3219 | dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", |
3220 | __func__, ocs_value); | |
7a3e97b0 SY |
3221 | } |
3222 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
e2933132 SRT |
3223 | |
3224 | return ocs_value; | |
7a3e97b0 SY |
3225 | } |
3226 | ||
7a3e97b0 SY |
3227 | /** |
3228 | * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status | |
3229 | * @lrb: pointer to local reference block of completed command | |
3230 | * @scsi_status: SCSI command status | |
3231 | * | |
3232 | * Returns value based on SCSI command status | |
3233 | */ | |
3234 | static inline int | |
3235 | ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) | |
3236 | { | |
3237 | int result = 0; | |
3238 | ||
3239 | switch (scsi_status) { | |
7a3e97b0 | 3240 | case SAM_STAT_CHECK_CONDITION: |
1c2623c5 SJ |
3241 | ufshcd_copy_sense_data(lrbp); |
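		/* fall through: CHECK CONDITION still completes with DID_OK plus the SCSI status */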
3242 | case SAM_STAT_GOOD: | |
7a3e97b0 SY |
3243 | result |= DID_OK << 16 | |
3244 | COMMAND_COMPLETE << 8 | | |
1c2623c5 | 3245 | scsi_status; |
7a3e97b0 SY |
3246 | break; |
3247 | case SAM_STAT_TASK_SET_FULL: | |
1c2623c5 | 3248 | case SAM_STAT_BUSY: |
7a3e97b0 | 3249 | case SAM_STAT_TASK_ABORTED: |
1c2623c5 SJ |
3250 | ufshcd_copy_sense_data(lrbp); |
3251 | result |= scsi_status; | |
7a3e97b0 SY |
3252 | break; |
3253 | default: | |
3254 | result |= DID_ERROR << 16; | |
3255 | break; | |
3256 | } /* end of switch */ | |
3257 | ||
3258 | return result; | |
3259 | } | |
3260 | ||
3261 | /** | |
3262 | * ufshcd_transfer_rsp_status - Get overall status of the response | |
3263 | * @hba: per adapter instance | |
3264 | * @lrb: pointer to local reference block of completed command | |
3265 | * | |
3266 | * Returns result of the command to notify SCSI midlayer | |
3267 | */ | |
3268 | static inline int | |
3269 | ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |
3270 | { | |
3271 | int result = 0; | |
3272 | int scsi_status; | |
3273 | int ocs; | |
3274 | ||
3275 | /* overall command status of utrd */ | |
3276 | ocs = ufshcd_get_tr_ocs(lrbp); | |
3277 | ||
3278 | switch (ocs) { | |
3279 | case OCS_SUCCESS: | |
5a0b0cb9 | 3280 | result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); |
7a3e97b0 | 3281 | |
5a0b0cb9 SRT |
3282 | switch (result) { |
3283 | case UPIU_TRANSACTION_RESPONSE: | |
3284 | /* | |
3285 | * get the response UPIU result to extract | |
3286 | * the SCSI command status | |
3287 | */ | |
3288 | result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); | |
3289 | ||
3290 | /* | |
3291 | * get the result based on SCSI status response | |
3292 | * to notify the SCSI midlayer of the command status | |
3293 | */ | |
3294 | scsi_status = result & MASK_SCSI_STATUS; | |
3295 | result = ufshcd_scsi_cmd_status(lrbp, scsi_status); | |
66ec6d59 | 3296 | |
f05ac2e5 YG |
3297 | /* |
3298 | * Currently we are only supporting BKOPs exception | |
3299 | * events hence we can ignore BKOPs exception event | |
3300 | * during power management callbacks. BKOPs exception | |
3301 | * event is not expected to be raised in runtime suspend | |
3302 | * callback as it allows the urgent bkops. | |
3303 | * During system suspend, we are anyway forcefully | |
3304 | * disabling the bkops and if urgent bkops is needed | |
3305 | * it will be enabled on system resume. Long term | |
3306 | * solution could be to abort the system suspend if | |
3307 | * UFS device needs urgent BKOPs. | |
3308 | */ | |
3309 | if (!hba->pm_op_in_progress && | |
3310 | ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) | |
66ec6d59 | 3311 | schedule_work(&hba->eeh_work); |
5a0b0cb9 SRT |
3312 | break; |
3313 | case UPIU_TRANSACTION_REJECT_UPIU: | |
3314 | /* TODO: handle Reject UPIU Response */ | |
3315 | result = DID_ERROR << 16; | |
3b1d0580 | 3316 | dev_err(hba->dev, |
5a0b0cb9 SRT |
3317 | "Reject UPIU not fully implemented\n"); |
3318 | break; | |
3319 | default: | |
3320 | dev_err(hba->dev, | |
3321 | "Unexpected request response code = %x\n", | |
3322 | result); | |
3323 | result = DID_ERROR << 16; | |
7a3e97b0 SY |
3324 | break; |
3325 | } | |
7a3e97b0 SY |
3326 | break; |
3327 | case OCS_ABORTED: | |
3328 | result |= DID_ABORT << 16; | |
3329 | break; | |
e8e7f271 SRT |
3330 | case OCS_INVALID_COMMAND_STATUS: |
3331 | result |= DID_REQUEUE << 16; | |
3332 | break; | |
7a3e97b0 SY |
3333 | case OCS_INVALID_CMD_TABLE_ATTR: |
3334 | case OCS_INVALID_PRDT_ATTR: | |
3335 | case OCS_MISMATCH_DATA_BUF_SIZE: | |
3336 | case OCS_MISMATCH_RESP_UPIU_SIZE: | |
3337 | case OCS_PEER_COMM_FAILURE: | |
3338 | case OCS_FATAL_ERROR: | |
3339 | default: | |
3340 | result |= DID_ERROR << 16; | |
3b1d0580 | 3341 | dev_err(hba->dev, |
7a3e97b0 SY |
3342 | "OCS error from controller = %x\n", ocs); |
3343 | break; | |
3344 | } /* end of switch */ | |
3345 | ||
3346 | return result; | |
3347 | } | |
3348 | ||
6ccf44fe SJ |
3349 | /** |
3350 | * ufshcd_uic_cmd_compl - handle completion of uic command | |
3351 | * @hba: per adapter instance | |
53b3d9c3 | 3352 | * @intr_status: interrupt status generated by the controller |
6ccf44fe | 3353 | */ |
53b3d9c3 | 3354 | static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) |
6ccf44fe | 3355 | { |
53b3d9c3 | 3356 | if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { |
6ccf44fe SJ |
3357 | hba->active_uic_cmd->argument2 |= |
3358 | ufshcd_get_uic_cmd_result(hba); | |
12b4fdb4 SJ |
3359 | hba->active_uic_cmd->argument3 = |
3360 | ufshcd_get_dme_attr_val(hba); | |
6ccf44fe SJ |
3361 | complete(&hba->active_uic_cmd->done); |
3362 | } | |
53b3d9c3 | 3363 | |
57d104c1 SJ |
3364 | if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) |
3365 | complete(hba->uic_async_done); | |
6ccf44fe SJ |
3366 | } |
3367 | ||
7a3e97b0 SY |
3368 | /** |
3369 | * ufshcd_transfer_req_compl - handle SCSI and query command completion | |
3370 | * @hba: per adapter instance | |
3371 | */ | |
3372 | static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |
3373 | { | |
5a0b0cb9 SRT |
3374 | struct ufshcd_lrb *lrbp; |
3375 | struct scsi_cmnd *cmd; | |
7a3e97b0 SY |
3376 | unsigned long completed_reqs; |
3377 | u32 tr_doorbell; | |
3378 | int result; | |
3379 | int index; | |
e9d501b1 DR |
3380 | |
3381 | /* Resetting the interrupt aggregation counters first and reading the | |
3382 | * DOOR_BELL afterward allows us to handle all the completed requests. | |
3383 | * To prevent starvation of other interrupts, the DB is read only once | |
3384 | * after the reset. The downside of this approach is the possibility of | |
3385 | * a spurious interrupt if the device completes another request after | |
3386 | * the aggregation reset and before the DB is read. | |
3387 | */ | |
b852190e YG |
3388 | if (ufshcd_is_intr_aggr_allowed(hba)) |
3389 | ufshcd_reset_intr_aggr(hba); | |
7a3e97b0 | 3390 | |
b873a275 | 3391 | tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
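	/* Bits set in ->outstanding_reqs but already cleared in the doorbell belong to requests the controller has completed. */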
7a3e97b0 SY |
3392 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; |
3393 | ||
e9d501b1 DR |
3394 | for_each_set_bit(index, &completed_reqs, hba->nutrs) { |
3395 | lrbp = &hba->lrb[index]; | |
3396 | cmd = lrbp->cmd; | |
3397 | if (cmd) { | |
3398 | result = ufshcd_transfer_rsp_status(hba, lrbp); | |
3399 | scsi_dma_unmap(cmd); | |
3400 | cmd->result = result; | |
3401 | /* Mark completed command as NULL in LRB */ | |
3402 | lrbp->cmd = NULL; | |
3403 | clear_bit_unlock(index, &hba->lrb_in_use); | |
3404 | /* Do not touch lrbp after scsi done */ | |
3405 | cmd->scsi_done(cmd); | |
1ab27c9c | 3406 | __ufshcd_release(hba); |
e9d501b1 DR |
3407 | } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { |
3408 | if (hba->dev_cmd.complete) | |
3409 | complete(hba->dev_cmd.complete); | |
3410 | } | |
3411 | } | |
7a3e97b0 SY |
3412 | |
3413 | /* clear corresponding bits of completed commands */ | |
3414 | hba->outstanding_reqs ^= completed_reqs; | |
3415 | ||
856b3483 ST |
3416 | ufshcd_clk_scaling_update_busy(hba); |
3417 | ||
5a0b0cb9 SRT |
3418 | /* we might have free'd some tags above */ |
3419 | wake_up(&hba->dev_cmd.tag_wq); | |
7a3e97b0 SY |
3420 | } |
3421 | ||
66ec6d59 SRT |
3422 | /** |
3423 | * ufshcd_disable_ee - disable exception event | |
3424 | * @hba: per-adapter instance | |
3425 | * @mask: exception event to disable | |
3426 | * | |
3427 | * Disables exception event in the device so that the EVENT_ALERT | |
3428 | * bit is not set. | |
3429 | * | |
3430 | * Returns zero on success, non-zero error value on failure. | |
3431 | */ | |
3432 | static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) | |
3433 | { | |
3434 | int err = 0; | |
3435 | u32 val; | |
3436 | ||
3437 | if (!(hba->ee_ctrl_mask & mask)) | |
3438 | goto out; | |
3439 | ||
3440 | val = hba->ee_ctrl_mask & ~mask; | |
3441 | val &= 0xFFFF; /* 2 bytes */ | |
5e86ae44 | 3442 | err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
66ec6d59 SRT |
3443 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); |
3444 | if (!err) | |
3445 | hba->ee_ctrl_mask &= ~mask; | |
3446 | out: | |
3447 | return err; | |
3448 | } | |
3449 | ||
3450 | /** | |
3451 | * ufshcd_enable_ee - enable exception event | |
3452 | * @hba: per-adapter instance | |
3453 | * @mask: exception event to enable | |
3454 | * | |
3455 | * Enable corresponding exception event in the device to allow | |
3456 | * device to alert host in critical scenarios. | |
3457 | * | |
3458 | * Returns zero on success, non-zero error value on failure. | |
3459 | */ | |
3460 | static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) | |
3461 | { | |
3462 | int err = 0; | |
3463 | u32 val; | |
3464 | ||
3465 | if (hba->ee_ctrl_mask & mask) | |
3466 | goto out; | |
3467 | ||
3468 | val = hba->ee_ctrl_mask | mask; | |
3469 | val &= 0xFFFF; /* 2 bytes */ | |
5e86ae44 | 3470 | err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
66ec6d59 SRT |
3471 | QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); |
3472 | if (!err) | |
3473 | hba->ee_ctrl_mask |= mask; | |
3474 | out: | |
3475 | return err; | |
3476 | } | |
3477 | ||
3478 | /** | |
3479 | * ufshcd_enable_auto_bkops - Allow device managed BKOPS | |
3480 | * @hba: per-adapter instance | |
3481 | * | |
3482 | * Allow device to manage background operations on its own. Enabling | |
3483 | * this might lead to inconsistent latencies during normal data transfers | |
3484 | * as the device is allowed to manage its own way of handling background | |
3485 | * operations. | |
3486 | * | |
3487 | * Returns zero on success, non-zero on failure. | |
3488 | */ | |
3489 | static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) | |
3490 | { | |
3491 | int err = 0; | |
3492 | ||
3493 | if (hba->auto_bkops_enabled) | |
3494 | goto out; | |
3495 | ||
dc3c8d3a | 3496 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, |
66ec6d59 SRT |
3497 | QUERY_FLAG_IDN_BKOPS_EN, NULL); |
3498 | if (err) { | |
3499 | dev_err(hba->dev, "%s: failed to enable bkops %d\n", | |
3500 | __func__, err); | |
3501 | goto out; | |
3502 | } | |
3503 | ||
3504 | hba->auto_bkops_enabled = true; | |
3505 | ||
3506 | /* No need of URGENT_BKOPS exception from the device */ | |
3507 | err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); | |
3508 | if (err) | |
3509 | dev_err(hba->dev, "%s: failed to disable exception event %d\n", | |
3510 | __func__, err); | |
3511 | out: | |
3512 | return err; | |
3513 | } | |
3514 | ||
3515 | /** | |
3516 | * ufshcd_disable_auto_bkops - block device from doing background operations | |
3517 | * @hba: per-adapter instance | |
3518 | * | |
3519 | * Disabling background operations improves command response latency but | |
3520 | * has the drawback that the device may move into a critical state where | |
3521 | * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever | |
3522 | * the host is idle so that BKOPS are managed effectively without any | |
3523 | * negative impact. | |
3524 | * | |
3525 | * Returns zero on success, non-zero on failure. | |
3526 | */ | |
3527 | static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) | |
3528 | { | |
3529 | int err = 0; | |
3530 | ||
3531 | if (!hba->auto_bkops_enabled) | |
3532 | goto out; | |
3533 | ||
3534 | /* | |
3535 | * If host assisted BKOPs is to be enabled, make sure | |
3536 | * urgent bkops exception is allowed. | |
3537 | */ | |
3538 | err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); | |
3539 | if (err) { | |
3540 | dev_err(hba->dev, "%s: failed to enable exception event %d\n", | |
3541 | __func__, err); | |
3542 | goto out; | |
3543 | } | |
3544 | ||
dc3c8d3a | 3545 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, |
66ec6d59 SRT |
3546 | QUERY_FLAG_IDN_BKOPS_EN, NULL); |
3547 | if (err) { | |
3548 | dev_err(hba->dev, "%s: failed to disable bkops %d\n", | |
3549 | __func__, err); | |
3550 | ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); | |
3551 | goto out; | |
3552 | } | |
3553 | ||
3554 | hba->auto_bkops_enabled = false; | |
3555 | out: | |
3556 | return err; | |
3557 | } | |
3558 | ||
3559 | /** | |
3560 | * ufshcd_force_reset_auto_bkops - force enable of auto bkops | |
3561 | * @hba: per adapter instance | |
3562 | * | |
3563 | * After a device reset the device may toggle the BKOPS_EN flag | |
3564 | * to default value. The s/w tracking variables should be updated | |
3565 | * as well. Do this by forcing enable of auto bkops. | |
3566 | */ | |
3567 | static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) | |
3568 | { | |
3569 | hba->auto_bkops_enabled = false; | |
3570 | hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; | |
3571 | ufshcd_enable_auto_bkops(hba); | |
3572 | } | |
3573 | ||
3574 | static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) | |
3575 | { | |
5e86ae44 | 3576 | return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, |
66ec6d59 SRT |
3577 | QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); |
3578 | } | |
3579 | ||
3580 | /** | |
57d104c1 | 3581 | * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status |
66ec6d59 | 3582 | * @hba: per-adapter instance |
57d104c1 | 3583 | * @status: bkops_status value |
66ec6d59 | 3584 | * |
57d104c1 SJ |
3585 | * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn |
3586 | * flag in the device to permit background operations if the device's |
3587 | * bkops_status is greater than or equal to the "status" argument passed to |
3588 | * this function; disable it otherwise. |
3589 | * | |
3590 | * Returns 0 for success, non-zero in case of failure. | |
3591 | * | |
3592 | * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag | |
3593 | * to know whether auto bkops is enabled or disabled after this function | |
3594 | * returns control to it. | |
66ec6d59 | 3595 | */ |
57d104c1 SJ |
3596 | static int ufshcd_bkops_ctrl(struct ufs_hba *hba, |
3597 | enum bkops_status status) | |
66ec6d59 SRT |
3598 | { |
3599 | int err; | |
57d104c1 | 3600 | u32 curr_status = 0; |
66ec6d59 | 3601 | |
57d104c1 | 3602 | err = ufshcd_get_bkops_status(hba, &curr_status); |
66ec6d59 SRT |
3603 | if (err) { |
3604 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | |
3605 | __func__, err); | |
3606 | goto out; | |
57d104c1 SJ |
3607 | } else if (curr_status > BKOPS_STATUS_MAX) { |
3608 | dev_err(hba->dev, "%s: invalid BKOPS status %d\n", | |
3609 | __func__, curr_status); | |
3610 | err = -EINVAL; | |
3611 | goto out; | |
66ec6d59 SRT |
3612 | } |
3613 | ||
57d104c1 | 3614 | if (curr_status >= status) |
66ec6d59 | 3615 | err = ufshcd_enable_auto_bkops(hba); |
57d104c1 SJ |
3616 | else |
3617 | err = ufshcd_disable_auto_bkops(hba); | |
66ec6d59 SRT |
3618 | out: |
3619 | return err; | |
3620 | } | |
3621 | ||
57d104c1 SJ |
3622 | /** |
3623 | * ufshcd_urgent_bkops - handle urgent bkops exception event | |
3624 | * @hba: per-adapter instance | |
3625 | * | |
3626 | * Enable fBackgroundOpsEn flag in the device to permit background | |
3627 | * operations. | |
3628 | * | |
3629 | * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is | |
3630 | * not enabled, and a negative error value for any other failure. | |
3631 | */ | |
3632 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | |
3633 | { | |
3634 | return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); | |
3635 | } | |
3636 | ||
66ec6d59 SRT |
3637 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) |
3638 | { | |
5e86ae44 | 3639 | return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, |
66ec6d59 SRT |
3640 | QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); |
3641 | } | |
3642 | ||
3643 | /** | |
3644 | * ufshcd_exception_event_handler - handle exceptions raised by device | |
3645 | * @work: pointer to work data | |
3646 | * | |
3647 | * Read bExceptionEventStatus attribute from the device and handle the | |
3648 | * exception event accordingly. | |
3649 | */ | |
3650 | static void ufshcd_exception_event_handler(struct work_struct *work) | |
3651 | { | |
3652 | struct ufs_hba *hba; | |
3653 | int err; | |
3654 | u32 status = 0; | |
3655 | hba = container_of(work, struct ufs_hba, eeh_work); | |
3656 | ||
62694735 | 3657 | pm_runtime_get_sync(hba->dev); |
66ec6d59 SRT |
3658 | err = ufshcd_get_ee_status(hba, &status); |
3659 | if (err) { | |
3660 | dev_err(hba->dev, "%s: failed to get exception status %d\n", | |
3661 | __func__, err); | |
3662 | goto out; | |
3663 | } | |
3664 | ||
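	/* Act only on exception events that were enabled via ee_ctrl_mask. */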
3665 | status &= hba->ee_ctrl_mask; | |
3666 | if (status & MASK_EE_URGENT_BKOPS) { | |
3667 | err = ufshcd_urgent_bkops(hba); | |
57d104c1 | 3668 | if (err < 0) |
66ec6d59 SRT |
3669 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", |
3670 | __func__, err); | |
3671 | } | |
3672 | out: | |
62694735 | 3673 | pm_runtime_put_sync(hba->dev); |
66ec6d59 SRT |
3674 | return; |
3675 | } | |
3676 | ||
7a3e97b0 | 3677 | /** |
e8e7f271 SRT |
3678 | * ufshcd_err_handler - handle UFS errors that require s/w attention |
3679 | * @work: pointer to work structure | |
7a3e97b0 | 3680 | */ |
e8e7f271 | 3681 | static void ufshcd_err_handler(struct work_struct *work) |
7a3e97b0 SY |
3682 | { |
3683 | struct ufs_hba *hba; | |
e8e7f271 SRT |
3684 | unsigned long flags; |
3685 | u32 err_xfer = 0; | |
3686 | u32 err_tm = 0; | |
3687 | int err = 0; | |
3688 | int tag; | |
3689 | ||
3690 | hba = container_of(work, struct ufs_hba, eh_work); | |
7a3e97b0 | 3691 | |
62694735 | 3692 | pm_runtime_get_sync(hba->dev); |
1ab27c9c | 3693 | ufshcd_hold(hba, false); |
e8e7f271 SRT |
3694 | |
3695 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3696 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { | |
3697 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3698 | goto out; | |
3699 | } | |
3700 | ||
3701 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
3702 | ufshcd_set_eh_in_progress(hba); | |
3703 | ||
3704 | /* Complete requests that have door-bell cleared by h/w */ | |
3705 | ufshcd_transfer_req_compl(hba); | |
3706 | ufshcd_tmc_handler(hba); | |
3707 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3708 | ||
3709 | /* Clear pending transfer requests */ | |
3710 | for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) | |
3711 | if (ufshcd_clear_cmd(hba, tag)) | |
3712 | err_xfer |= 1 << tag; | |
3713 | ||
3714 | /* Clear pending task management requests */ | |
3715 | for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) | |
3716 | if (ufshcd_clear_tm_cmd(hba, tag)) | |
3717 | err_tm |= 1 << tag; | |
3718 | ||
3719 | /* Complete the requests that are cleared by s/w */ | |
3720 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3721 | ufshcd_transfer_req_compl(hba); | |
3722 | ufshcd_tmc_handler(hba); | |
3723 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3724 | ||
3725 | /* Fatal errors need reset */ | |
3726 | if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || | |
3727 | ((hba->saved_err & UIC_ERROR) && | |
3728 | (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { | |
3729 | err = ufshcd_reset_and_restore(hba); | |
3730 | if (err) { | |
3731 | dev_err(hba->dev, "%s: reset and restore failed\n", | |
3732 | __func__); | |
3733 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
3734 | } | |
3735 | /* | |
3736 | * Inform scsi mid-layer that we did reset and allow to handle | |
3737 | * Unit Attention properly. | |
3738 | */ | |
3739 | scsi_report_bus_reset(hba->host, 0); | |
3740 | hba->saved_err = 0; | |
3741 | hba->saved_uic_err = 0; | |
3742 | } | |
3743 | ufshcd_clear_eh_in_progress(hba); | |
3744 | ||
3745 | out: | |
3746 | scsi_unblock_requests(hba->host); | |
1ab27c9c | 3747 | ufshcd_release(hba); |
62694735 | 3748 | pm_runtime_put_sync(hba->dev); |
7a3e97b0 SY |
3749 | } |
3750 | ||
3751 | /** | |
e8e7f271 SRT |
3752 | * ufshcd_update_uic_error - check and set fatal UIC error flags. |
3753 | * @hba: per-adapter instance | |
7a3e97b0 | 3754 | */ |
e8e7f271 | 3755 | static void ufshcd_update_uic_error(struct ufs_hba *hba) |
7a3e97b0 SY |
3756 | { |
3757 | u32 reg; | |
3758 | ||
e8e7f271 SRT |
3759 | /* PA_INIT_ERROR is fatal and needs UIC reset */ |
3760 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); | |
3761 | if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) | |
3762 | hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; | |
3763 | ||
3764 | /* UIC NL/TL/DME errors needs software retry */ | |
3765 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); | |
3766 | if (reg) | |
3767 | hba->uic_error |= UFSHCD_UIC_NL_ERROR; | |
3768 | ||
3769 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); | |
3770 | if (reg) | |
3771 | hba->uic_error |= UFSHCD_UIC_TL_ERROR; | |
3772 | ||
3773 | reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); | |
3774 | if (reg) | |
3775 | hba->uic_error |= UFSHCD_UIC_DME_ERROR; | |
3776 | ||
3777 | dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", | |
3778 | __func__, hba->uic_error); | |
3779 | } | |
3780 | ||
3781 | /** | |
3782 | * ufshcd_check_errors - Check for errors that need s/w attention | |
3783 | * @hba: per-adapter instance | |
3784 | */ | |
3785 | static void ufshcd_check_errors(struct ufs_hba *hba) | |
3786 | { | |
3787 | bool queue_eh_work = false; | |
3788 | ||
7a3e97b0 | 3789 | if (hba->errors & INT_FATAL_ERRORS) |
e8e7f271 | 3790 | queue_eh_work = true; |
7a3e97b0 SY |
3791 | |
3792 | if (hba->errors & UIC_ERROR) { | |
e8e7f271 SRT |
3793 | hba->uic_error = 0; |
3794 | ufshcd_update_uic_error(hba); | |
3795 | if (hba->uic_error) | |
3796 | queue_eh_work = true; | |
7a3e97b0 | 3797 | } |
e8e7f271 SRT |
3798 | |
3799 | if (queue_eh_work) { | |
3800 | /* handle fatal errors only when link is functional */ | |
3801 | if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { | |
3802 | /* block commands from scsi mid-layer */ | |
3803 | scsi_block_requests(hba->host); | |
3804 | ||
3805 | /* transfer error masks to sticky bits */ | |
3806 | hba->saved_err |= hba->errors; | |
3807 | hba->saved_uic_err |= hba->uic_error; | |
3808 | ||
3809 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
3810 | schedule_work(&hba->eh_work); | |
3811 | } | |
3441da7d | 3812 | } |
e8e7f271 SRT |
3813 | /* |
3814 | * if (!queue_eh_work) - | |
3815 | * Other errors are either non-fatal where host recovers | |
3816 | * itself without s/w intervention or errors that will be | |
3817 | * handled by the SCSI core layer. | |
3818 | */ | |
7a3e97b0 SY |
3819 | } |
3820 | ||
3821 | /** | |
3822 | * ufshcd_tmc_handler - handle task management function completion | |
3823 | * @hba: per adapter instance | |
3824 | */ | |
3825 | static void ufshcd_tmc_handler(struct ufs_hba *hba) | |
3826 | { | |
3827 | u32 tm_doorbell; | |
3828 | ||
b873a275 | 3829 | tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); |
7a3e97b0 | 3830 | hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; |
e2933132 | 3831 | wake_up(&hba->tm_wq); |
7a3e97b0 SY |
3832 | } |
3833 | ||
3834 | /** | |
3835 | * ufshcd_sl_intr - Interrupt service routine | |
3836 | * @hba: per adapter instance | |
3837 | * @intr_status: contains interrupts generated by the controller | |
3838 | */ | |
3839 | static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) | |
3840 | { | |
3841 | hba->errors = UFSHCD_ERROR_MASK & intr_status; | |
3842 | if (hba->errors) | |
e8e7f271 | 3843 | ufshcd_check_errors(hba); |
7a3e97b0 | 3844 | |
53b3d9c3 SJ |
3845 | if (intr_status & UFSHCD_UIC_MASK) |
3846 | ufshcd_uic_cmd_compl(hba, intr_status); | |
7a3e97b0 SY |
3847 | |
3848 | if (intr_status & UTP_TASK_REQ_COMPL) | |
3849 | ufshcd_tmc_handler(hba); | |
3850 | ||
3851 | if (intr_status & UTP_TRANSFER_REQ_COMPL) | |
3852 | ufshcd_transfer_req_compl(hba); | |
3853 | } | |
3854 | ||
3855 | /** | |
3856 | * ufshcd_intr - Main interrupt service routine | |
3857 | * @irq: irq number | |
3858 | * @__hba: pointer to adapter instance | |
3859 | * | |
3860 | * Returns IRQ_HANDLED - If interrupt is valid | |
3861 | * IRQ_NONE - If invalid interrupt | |
3862 | */ | |
3863 | static irqreturn_t ufshcd_intr(int irq, void *__hba) | |
3864 | { | |
d75f7fe4 | 3865 | u32 intr_status, enabled_intr_status; |
7a3e97b0 SY |
3866 | irqreturn_t retval = IRQ_NONE; |
3867 | struct ufs_hba *hba = __hba; | |
3868 | ||
3869 | spin_lock(hba->host->host_lock); | |
b873a275 | 3870 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); |
d75f7fe4 YG |
3871 | enabled_intr_status = |
3872 | intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); | |
7a3e97b0 | 3873 | |
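	/* Acknowledge the raised bits before servicing them so that events arriving meanwhile are not lost. */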
d75f7fe4 | 3874 | if (intr_status) |
261ea452 | 3875 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); |
d75f7fe4 YG |
3876 | |
3877 | if (enabled_intr_status) { | |
3878 | ufshcd_sl_intr(hba, enabled_intr_status); | |
7a3e97b0 SY |
3879 | retval = IRQ_HANDLED; |
3880 | } | |
3881 | spin_unlock(hba->host->host_lock); | |
3882 | return retval; | |
3883 | } | |
3884 | ||
e2933132 SRT |
3885 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) |
3886 | { | |
3887 | int err = 0; | |
3888 | u32 mask = 1 << tag; | |
3889 | unsigned long flags; | |
3890 | ||
3891 | if (!test_bit(tag, &hba->outstanding_tasks)) | |
3892 | goto out; | |
3893 | ||
3894 | spin_lock_irqsave(hba->host->host_lock, flags); | |
3895 | ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); | |
3896 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
3897 | ||
3898 | /* poll for max. 1 sec to clear door bell register by h/w */ | |
3899 | err = ufshcd_wait_for_register(hba, | |
3900 | REG_UTP_TASK_REQ_DOOR_BELL, | |
3901 | mask, 0, 1000, 1000); | |
3902 | out: | |
3903 | return err; | |
3904 | } | |
3905 | ||
7a3e97b0 SY |
3906 | /** |
3907 | * ufshcd_issue_tm_cmd - issues task management commands to controller | |
3908 | * @hba: per adapter instance | |
e2933132 SRT |
3909 | * @lun_id: LUN ID to which TM command is sent |
3910 | * @task_id: task ID to which the TM command is applicable | |
3911 | * @tm_function: task management function opcode | |
3912 | * @tm_response: task management service response return value | |
7a3e97b0 | 3913 | * |
e2933132 | 3914 | * Returns non-zero value on error, zero on success. |
7a3e97b0 | 3915 | */ |
e2933132 SRT |
3916 | static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, |
3917 | u8 tm_function, u8 *tm_response) | |
7a3e97b0 SY |
3918 | { |
3919 | struct utp_task_req_desc *task_req_descp; | |
3920 | struct utp_upiu_task_req *task_req_upiup; | |
3921 | struct Scsi_Host *host; | |
3922 | unsigned long flags; | |
e2933132 | 3923 | int free_slot; |
7a3e97b0 | 3924 | int err; |
e2933132 | 3925 | int task_tag; |
7a3e97b0 SY |
3926 | |
3927 | host = hba->host; | |
3928 | ||
e2933132 SRT |
3929 | /* |
3930 | * Get free slot, sleep if slots are unavailable. | |
3931 | * Even though we use wait_event() which sleeps indefinitely, | |
3932 | * the maximum wait time is bounded by %TM_CMD_TIMEOUT. | |
3933 | */ | |
3934 | wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); | |
1ab27c9c | 3935 | ufshcd_hold(hba, false); |
7a3e97b0 | 3936 | |
e2933132 | 3937 | spin_lock_irqsave(host->host_lock, flags); |
7a3e97b0 SY |
3938 | task_req_descp = hba->utmrdl_base_addr; |
3939 | task_req_descp += free_slot; | |
3940 | ||
3941 | /* Configure task request descriptor */ | |
3942 | task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | |
3943 | task_req_descp->header.dword_2 = | |
3944 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
3945 | ||
3946 | /* Configure task request UPIU */ | |
3947 | task_req_upiup = | |
3948 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; | |
e2933132 | 3949 | task_tag = hba->nutrs + free_slot; |
7a3e97b0 | 3950 | task_req_upiup->header.dword_0 = |
5a0b0cb9 | 3951 | UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, |
e2933132 | 3952 | lun_id, task_tag); |
7a3e97b0 | 3953 | task_req_upiup->header.dword_1 = |
5a0b0cb9 | 3954 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); |
0ce147d4 SJ |
3955 | /* |
3956 | * The host shall provide the same value for LUN field in the basic | |
3957 | * header and for Input Parameter. | |
3958 | */ | |
e2933132 SRT |
3959 | task_req_upiup->input_param1 = cpu_to_be32(lun_id); |
3960 | task_req_upiup->input_param2 = cpu_to_be32(task_id); | |
7a3e97b0 SY |
3961 | |
3962 | /* send command to the controller */ | |
3963 | __set_bit(free_slot, &hba->outstanding_tasks); | |
897efe62 YG |
3964 | |
3965 | /* Make sure descriptors are ready before ringing the task doorbell */ | |
3966 | wmb(); | |
3967 | ||
b873a275 | 3968 | ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); |
7a3e97b0 SY |
3969 | |
3970 | spin_unlock_irqrestore(host->host_lock, flags); | |
3971 | ||
3972 | /* wait until the task management command is completed */ | |
e2933132 SRT |
3973 | err = wait_event_timeout(hba->tm_wq, |
3974 | test_bit(free_slot, &hba->tm_condition), | |
3975 | msecs_to_jiffies(TM_CMD_TIMEOUT)); | |
7a3e97b0 | 3976 | if (!err) { |
e2933132 SRT |
3977 | dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", |
3978 | __func__, tm_function); | |
3979 | if (ufshcd_clear_tm_cmd(hba, free_slot)) | |
3980 | dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", | |
3981 | __func__, free_slot); | |
3982 | err = -ETIMEDOUT; | |
3983 | } else { | |
3984 | err = ufshcd_task_req_compl(hba, free_slot, tm_response); | |
7a3e97b0 | 3985 | } |
e2933132 | 3986 | |
7a3e97b0 | 3987 | clear_bit(free_slot, &hba->tm_condition); |
e2933132 SRT |
3988 | ufshcd_put_tm_slot(hba, free_slot); |
3989 | wake_up(&hba->tm_tag_wq); | |
3990 | ||
1ab27c9c | 3991 | ufshcd_release(hba); |
7a3e97b0 SY |
3992 | return err; |
3993 | } | |
3994 | ||
3995 | /** | |
3441da7d SRT |
3996 | * ufshcd_eh_device_reset_handler - device reset handler registered to |
3997 | * scsi layer. | |
7a3e97b0 SY |
3998 | * @cmd: SCSI command pointer |
3999 | * | |
4000 | * Returns SUCCESS/FAILED | |
4001 | */ | |
3441da7d | 4002 | static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) |
7a3e97b0 SY |
4003 | { |
4004 | struct Scsi_Host *host; | |
4005 | struct ufs_hba *hba; | |
4006 | unsigned int tag; | |
4007 | u32 pos; | |
4008 | int err; | |
e2933132 SRT |
4009 | u8 resp = 0xF; |
4010 | struct ufshcd_lrb *lrbp; | |
3441da7d | 4011 | unsigned long flags; |
7a3e97b0 SY |
4012 | |
4013 | host = cmd->device->host; | |
4014 | hba = shost_priv(host); | |
4015 | tag = cmd->request->tag; | |
4016 | ||
e2933132 SRT |
4017 | lrbp = &hba->lrb[tag]; |
4018 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); | |
4019 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
3441da7d SRT |
4020 | if (!err) |
4021 | err = resp; | |
7a3e97b0 | 4022 | goto out; |
e2933132 | 4023 | } |
7a3e97b0 | 4024 | |
3441da7d SRT |
4025 | /* clear the commands that were pending for corresponding LUN */ |
4026 | for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { | |
4027 | if (hba->lrb[pos].lun == lrbp->lun) { | |
4028 | err = ufshcd_clear_cmd(hba, pos); | |
4029 | if (err) | |
4030 | break; | |
7a3e97b0 | 4031 | } |
3441da7d SRT |
4032 | } |
4033 | spin_lock_irqsave(host->host_lock, flags); | |
4034 | ufshcd_transfer_req_compl(hba); | |
4035 | spin_unlock_irqrestore(host->host_lock, flags); | |
7a3e97b0 | 4036 | out: |
3441da7d SRT |
4037 | if (!err) { |
4038 | err = SUCCESS; | |
4039 | } else { | |
4040 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | |
4041 | err = FAILED; | |
4042 | } | |
7a3e97b0 SY |
4043 | return err; |
4044 | } | |
4045 | ||
7a3e97b0 SY |
4046 | /** |
4047 | * ufshcd_abort - abort a specific command | |
4048 | * @cmd: SCSI command pointer | |
4049 | * | |
f20810d8 SRT |
4050 | * Abort the pending command in the device by sending a UFS_ABORT_TASK task | |
4051 | * management command, and in the host controller by clearing the door-bell | |
4052 | * register. There can be a race where the controller sends the command to | |
4053 | * the device while the abort is being issued. To avoid that, first issue | |
4054 | * UFS_QUERY_TASK to check if the command was really issued, and only then abort it. | |
4055 | * | |
7a3e97b0 SY |
4056 | * Returns SUCCESS/FAILED |
4057 | */ | |
4058 | static int ufshcd_abort(struct scsi_cmnd *cmd) | |
4059 | { | |
4060 | struct Scsi_Host *host; | |
4061 | struct ufs_hba *hba; | |
4062 | unsigned long flags; | |
4063 | unsigned int tag; | |
f20810d8 SRT |
4064 | int err = 0; |
4065 | int poll_cnt; | |
e2933132 SRT |
4066 | u8 resp = 0xF; |
4067 | struct ufshcd_lrb *lrbp; | |
e9d501b1 | 4068 | u32 reg; |
7a3e97b0 SY |
4069 | |
4070 | host = cmd->device->host; | |
4071 | hba = shost_priv(host); | |
4072 | tag = cmd->request->tag; | |
14497328 YG |
4073 | if (!ufshcd_valid_tag(hba, tag)) { |
4074 | dev_err(hba->dev, | |
4075 | "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", | |
4076 | __func__, tag, cmd, cmd->request); | |
4077 | BUG(); | |
4078 | } | |
7a3e97b0 | 4079 | |
1ab27c9c | 4080 | ufshcd_hold(hba, false); |
14497328 | 4081 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
f20810d8 | 4082 | /* If command is already aborted/completed, return SUCCESS */ |
14497328 YG |
4083 | if (!(test_bit(tag, &hba->outstanding_reqs))) { |
4084 | dev_err(hba->dev, | |
4085 | "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", | |
4086 | __func__, tag, hba->outstanding_reqs, reg); | |
f20810d8 | 4087 | goto out; |
14497328 | 4088 | } |
7a3e97b0 | 4089 | |
e9d501b1 DR |
4090 | if (!(reg & (1 << tag))) { |
4091 | dev_err(hba->dev, | |
4092 | "%s: cmd was completed, but without a notifying intr, tag = %d", | |
4093 | __func__, tag); | |
4094 | } | |
4095 | ||
f20810d8 SRT |
4096 | lrbp = &hba->lrb[tag]; |
4097 | for (poll_cnt = 100; poll_cnt; poll_cnt--) { | |
4098 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, | |
4099 | UFS_QUERY_TASK, &resp); | |
4100 | if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { | |
4101 | /* cmd pending in the device */ | |
4102 | break; | |
4103 | } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
f20810d8 SRT |
4104 | /* |
4105 | * cmd not pending in the device, check if it is | |
4106 | * in transition. | |
4107 | */ | |
4108 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); | |
4109 | if (reg & (1 << tag)) { | |
4110 | /* sleep for max. 200us to stabilize */ | |
4111 | usleep_range(100, 200); | |
4112 | continue; | |
4113 | } | |
4114 | /* command completed already */ | |
4115 | goto out; | |
4116 | } else { | |
4117 | if (!err) | |
4118 | err = resp; /* service response error */ | |
4119 | goto out; | |
4120 | } | |
4121 | } | |
4122 | ||
4123 | if (!poll_cnt) { | |
4124 | err = -EBUSY; | |
7a3e97b0 SY |
4125 | goto out; |
4126 | } | |
7a3e97b0 | 4127 | |
e2933132 SRT |
4128 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, |
4129 | UFS_ABORT_TASK, &resp); | |
4130 | if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | |
f20810d8 SRT |
4131 | if (!err) |
4132 | err = resp; /* service response error */ | |
7a3e97b0 | 4133 | goto out; |
e2933132 | 4134 | } |
7a3e97b0 | 4135 | |
f20810d8 SRT |
4136 | err = ufshcd_clear_cmd(hba, tag); |
4137 | if (err) | |
4138 | goto out; | |
4139 | ||
7a3e97b0 SY |
4140 | scsi_dma_unmap(cmd); |
4141 | ||
4142 | spin_lock_irqsave(host->host_lock, flags); | |
a48353f6 | 4143 | ufshcd_outstanding_req_clear(hba, tag); |
7a3e97b0 SY |
4144 | hba->lrb[tag].cmd = NULL; |
4145 | spin_unlock_irqrestore(host->host_lock, flags); | |
5a0b0cb9 SRT |
4146 | |
4147 | clear_bit_unlock(tag, &hba->lrb_in_use); | |
4148 | wake_up(&hba->dev_cmd.tag_wq); | |
1ab27c9c | 4149 | |
7a3e97b0 | 4150 | out: |
f20810d8 SRT |
4151 | if (!err) { |
4152 | err = SUCCESS; | |
4153 | } else { | |
4154 | dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); | |
4155 | err = FAILED; | |
4156 | } | |
4157 | ||
1ab27c9c ST |
4158 | /* |
4159 | * This ufshcd_release() corresponds to the original scsi cmd that got | |
4160 | * aborted here (as we won't get any IRQ for it). | |
4161 | */ | |
4162 | ufshcd_release(hba); | |
7a3e97b0 SY |
4163 | return err; |
4164 | } | |
4165 | ||
3441da7d SRT |
4166 | /** |
4167 | * ufshcd_host_reset_and_restore - reset and restore host controller | |
4168 | * @hba: per-adapter instance | |
4169 | * | |
4170 | * Note that host controller reset may issue DME_RESET to | |
4171 | * local and remote (device) Uni-Pro stack and the attributes | |
4172 | * are reset to default state. | |
4173 | * | |
4174 | * Returns zero on success, non-zero on failure | |
4175 | */ | |
4176 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) | |
4177 | { | |
4178 | int err; | |
3441da7d SRT |
4179 | unsigned long flags; |
4180 | ||
4181 | /* Reset the host controller */ | |
4182 | spin_lock_irqsave(hba->host->host_lock, flags); | |
4183 | ufshcd_hba_stop(hba); | |
4184 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4185 | ||
4186 | err = ufshcd_hba_enable(hba); | |
4187 | if (err) | |
4188 | goto out; | |
4189 | ||
4190 | /* Establish the link again and restore the device */ | |
1d337ec2 SRT |
4191 | err = ufshcd_probe_hba(hba); |
4192 | ||
4193 | if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) | |
3441da7d SRT |
4194 | err = -EIO; |
4195 | out: | |
4196 | if (err) | |
4197 | dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); | |
4198 | ||
4199 | return err; | |
4200 | } | |
4201 | ||
4202 | /** | |
4203 | * ufshcd_reset_and_restore - reset and re-initialize host/device | |
4204 | * @hba: per-adapter instance | |
4205 | * | |
4206 | * Reset and recover device, host and re-establish link. This | |
4207 | * is helpful to recover the communication in fatal error conditions. | |
4208 | * | |
4209 | * Returns zero on success, non-zero on failure | |
4210 | */ | |
4211 | static int ufshcd_reset_and_restore(struct ufs_hba *hba) | |
4212 | { | |
4213 | int err = 0; | |
4214 | unsigned long flags; | |
1d337ec2 | 4215 | int retries = MAX_HOST_RESET_RETRIES; |
3441da7d | 4216 | |
1d337ec2 SRT |
4217 | do { |
4218 | err = ufshcd_host_reset_and_restore(hba); | |
4219 | } while (err && --retries); | |
3441da7d SRT |
4220 | |
4221 | /* | |
4222 | * After reset the door-bell might be cleared, complete | |
4223 | * outstanding requests in s/w here. | |
4224 | */ | |
4225 | spin_lock_irqsave(hba->host->host_lock, flags); | |
4226 | ufshcd_transfer_req_compl(hba); | |
4227 | ufshcd_tmc_handler(hba); | |
4228 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4229 | ||
4230 | return err; | |
4231 | } | |
4232 | ||
4233 | /** | |
4234 | * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer | |
4235 | * @cmd - SCSI command pointer | |
4236 | * | |
4237 | * Returns SUCCESS/FAILED | |
4238 | */ | |
4239 | static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | |
4240 | { | |
4241 | int err; | |
4242 | unsigned long flags; | |
4243 | struct ufs_hba *hba; | |
4244 | ||
4245 | hba = shost_priv(cmd->device->host); | |
4246 | ||
1ab27c9c | 4247 | ufshcd_hold(hba, false); |
3441da7d SRT |
4248 | /* |
4249 | * Check if there is any race with fatal error handling. | |
4250 | * If so, wait for it to complete. Even though fatal error | |
4251 | * handling does reset and restore in some cases, don't assume | |
4252 | * anything out of it. We are just avoiding race here. | |
4253 | */ | |
4254 | do { | |
4255 | spin_lock_irqsave(hba->host->host_lock, flags); | |
e8e7f271 | 4256 | if (!(work_pending(&hba->eh_work) || |
3441da7d SRT |
4257 | hba->ufshcd_state == UFSHCD_STATE_RESET)) |
4258 | break; | |
4259 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4260 | dev_dbg(hba->dev, "%s: reset in progress\n", __func__); | |
e8e7f271 | 4261 | flush_work(&hba->eh_work); |
3441da7d SRT |
4262 | } while (1); |
4263 | ||
4264 | hba->ufshcd_state = UFSHCD_STATE_RESET; | |
4265 | ufshcd_set_eh_in_progress(hba); | |
4266 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4267 | ||
4268 | err = ufshcd_reset_and_restore(hba); | |
4269 | ||
4270 | spin_lock_irqsave(hba->host->host_lock, flags); | |
4271 | if (!err) { | |
4272 | err = SUCCESS; | |
4273 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | |
4274 | } else { | |
4275 | err = FAILED; | |
4276 | hba->ufshcd_state = UFSHCD_STATE_ERROR; | |
4277 | } | |
4278 | ufshcd_clear_eh_in_progress(hba); | |
4279 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
4280 | ||
1ab27c9c | 4281 | ufshcd_release(hba); |
3441da7d SRT |
4282 | return err; |
4283 | } | |
4284 | ||
3a4bf06d YG |
4285 | /** |
4286 | * ufshcd_get_max_icc_level - calculate the ICC level | |
4287 | * @sup_curr_uA: max. current supported by the regulator | |
4288 | * @start_scan: row at the desc table to start scan from | |
4289 | * @buff: power descriptor buffer | |
4290 | * | |
4291 | * Returns calculated max ICC level for specific regulator | |
4292 | */ | |
4293 | static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) | |
4294 | { | |
4295 | int i; | |
4296 | int curr_uA; | |
4297 | u16 data; | |
4298 | u16 unit; | |
4299 | ||
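	/*
	 * Each 16-bit descriptor entry packs a unit field and a value field;
	 * the value is normalized to microamps below (for illustration, a
	 * value of 100 with a milliamp unit becomes 100,000 uA). Scanning
	 * from start_scan downwards picks the highest ICC level whose
	 * current draw still fits within the regulator's limit.
	 */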
4300 | for (i = start_scan; i >= 0; i--) { | |
4301 | data = be16_to_cpu(*((u16 *)(buff + 2*i))); | |
4302 | unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> | |
4303 | ATTR_ICC_LVL_UNIT_OFFSET; | |
4304 | curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; | |
4305 | switch (unit) { | |
4306 | case UFSHCD_NANO_AMP: | |
4307 | curr_uA = curr_uA / 1000; | |
4308 | break; | |
4309 | case UFSHCD_MILI_AMP: | |
4310 | curr_uA = curr_uA * 1000; | |
4311 | break; | |
4312 | case UFSHCD_AMP: | |
4313 | curr_uA = curr_uA * 1000 * 1000; | |
4314 | break; | |
4315 | case UFSHCD_MICRO_AMP: | |
4316 | default: | |
4317 | break; | |
4318 | } | |
4319 | if (sup_curr_uA >= curr_uA) | |
4320 | break; | |
4321 | } | |
4322 | if (i < 0) { | |
4323 | i = 0; | |
4324 | pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); | |
4325 | } | |
4326 | ||
4327 | return (u32)i; | |
4328 | } | |
4329 | ||
4330 | /** | |
4331 | * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level | |
4332 | * In case the regulators are not initialized, 0 is returned. | |
4333 | * @hba: per-adapter instance | |
4334 | * @desc_buf: power descriptor buffer to extract ICC levels from. | |
4335 | * @len: length of desc_buff | |
4336 | * | |
4337 | * Returns calculated ICC level | |
4338 | */ | |
4339 | static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, | |
4340 | u8 *desc_buf, int len) | |
4341 | { | |
4342 | u32 icc_level = 0; | |
4343 | ||
4344 | if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || | |
4345 | !hba->vreg_info.vccq2) { | |
4346 | dev_err(hba->dev, | |
4347 | "%s: Regulator capability was not set, actvIccLevel=%d", | |
4348 | __func__, icc_level); | |
4349 | goto out; | |
4350 | } | |
4351 | ||
4352 | if (hba->vreg_info.vcc) | |
4353 | icc_level = ufshcd_get_max_icc_level( | |
4354 | hba->vreg_info.vcc->max_uA, | |
4355 | POWER_DESC_MAX_ACTV_ICC_LVLS - 1, | |
4356 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); | |
4357 | ||
4358 | if (hba->vreg_info.vccq) | |
4359 | icc_level = ufshcd_get_max_icc_level( | |
4360 | hba->vreg_info.vccq->max_uA, | |
4361 | icc_level, | |
4362 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); | |
4363 | ||
4364 | if (hba->vreg_info.vccq2) | |
4365 | icc_level = ufshcd_get_max_icc_level( | |
4366 | hba->vreg_info.vccq2->max_uA, | |
4367 | icc_level, | |
4368 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); | |
4369 | out: | |
4370 | return icc_level; | |
4371 | } | |
4372 | ||
4373 | static void ufshcd_init_icc_levels(struct ufs_hba *hba) | |
4374 | { | |
4375 | int ret; | |
4376 | int buff_len = QUERY_DESC_POWER_MAX_SIZE; | |
4377 | u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; | |
4378 | ||
4379 | ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); | |
4380 | if (ret) { | |
4381 | dev_err(hba->dev, | |
4382 | "%s: Failed reading power descriptor.len = %d ret = %d", | |
4383 | __func__, buff_len, ret); | |
4384 | return; | |
4385 | } | |
4386 | ||
4387 | hba->init_prefetch_data.icc_level = | |
4388 | ufshcd_find_max_sup_active_icc_level(hba, | |
4389 | desc_buf, buff_len); | |
4390 | dev_dbg(hba->dev, "%s: setting icc_level 0x%x", | |
4391 | __func__, hba->init_prefetch_data.icc_level); | |
4392 | ||
5e86ae44 YG |
4393 | ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
4394 | QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, | |
4395 | &hba->init_prefetch_data.icc_level); | |
3a4bf06d YG |
4396 | |
4397 | if (ret) | |
4398 | dev_err(hba->dev, | |
4399 | "%s: Failed configuring bActiveICCLevel = %d ret = %d", | |
4400 | __func__, hba->init_prefetch_data.icc_level , ret); | |
4401 | ||
4402 | } | |
4403 | ||
2a8fa600 SJ |
4404 | /** |
4405 | * ufshcd_scsi_add_wlus - Adds required W-LUs | |
4406 | * @hba: per-adapter instance | |
4407 | * | |
4408 | * UFS device specification requires the UFS devices to support 4 well known | |
4409 | * logical units: | |
4410 | * "REPORT_LUNS" (address: 01h) | |
4411 | * "UFS Device" (address: 50h) | |
4412 | * "RPMB" (address: 44h) | |
4413 | * "BOOT" (address: 30h) | |
4414 | * UFS device's power management needs to be controlled by "POWER CONDITION" | |
4415 | * field of SSU (START STOP UNIT) command. But this "power condition" field | |
4416 | * will take effect only when it is sent to the "UFS device" well known logical | |
4417 | * unit, hence we require the scsi_device instance to represent this logical | |
4418 | * unit in order for the UFS host driver to send the SSU command for power management. | |
4419 | * | |
4420 | * We also require the scsi_device instance for the "RPMB" (Replay Protected | |
4421 | * Memory Block) LU so a user space process can control this LU. User space | |
4422 | * may also want to have access to the BOOT LU. | |
4423 | * | |
4424 | * This function adds scsi device instances for each of the well known LUs | |
4425 | * (except the "REPORT LUNS" LU). | |
4426 | * | |
4427 | * Returns zero on success (all required W-LUs are added successfully), | |
4428 | * non-zero error value on failure (if failed to add any of the required W-LU). | |
4429 | */ | |
4430 | static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) | |
4431 | { | |
4432 | int ret = 0; | |
7c48bfd0 AM |
4433 | struct scsi_device *sdev_rpmb; |
4434 | struct scsi_device *sdev_boot; | |
2a8fa600 SJ |
4435 | |
4436 | hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, | |
4437 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); | |
4438 | if (IS_ERR(hba->sdev_ufs_device)) { | |
4439 | ret = PTR_ERR(hba->sdev_ufs_device); | |
4440 | hba->sdev_ufs_device = NULL; | |
4441 | goto out; | |
4442 | } | |
7c48bfd0 | 4443 | scsi_device_put(hba->sdev_ufs_device); |
2a8fa600 | 4444 | |
7c48bfd0 | 4445 | sdev_boot = __scsi_add_device(hba->host, 0, 0, |
2a8fa600 | 4446 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); |
7c48bfd0 AM |
4447 | if (IS_ERR(sdev_boot)) { |
4448 | ret = PTR_ERR(sdev_boot); | |
2a8fa600 SJ |
4449 | goto remove_sdev_ufs_device; |
4450 | } | |
7c48bfd0 | 4451 | scsi_device_put(sdev_boot); |
2a8fa600 | 4452 | |
7c48bfd0 | 4453 | sdev_rpmb = __scsi_add_device(hba->host, 0, 0, |
2a8fa600 | 4454 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); |
7c48bfd0 AM |
4455 | if (IS_ERR(sdev_rpmb)) { |
4456 | ret = PTR_ERR(sdev_rpmb); | |
2a8fa600 SJ |
4457 | goto remove_sdev_boot; |
4458 | } | |
7c48bfd0 | 4459 | scsi_device_put(sdev_rpmb); |
2a8fa600 SJ |
4460 | goto out; |
4461 | ||
4462 | remove_sdev_boot: | |
7c48bfd0 | 4463 | scsi_remove_device(sdev_boot); |
2a8fa600 SJ |
4464 | remove_sdev_ufs_device: |
4465 | scsi_remove_device(hba->sdev_ufs_device); | |
4466 | out: | |
4467 | return ret; | |
4468 | } | |
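/*
 * Editor's sketch of the W-LUN addressing used above (assumptions, not part
 * of the original file): the addresses quoted in the comment (01h/50h/44h/30h)
 * appear to be the UPIU W-LUN IDs with the W-LUN indicator bit (bit 7)
 * cleared, while the SCSI-side LUN additionally carries the SAM well-known
 * LUN base. Assuming SCSI_W_LUN_BASE == 0xc100 and the UPIU W-LUN IDs
 * 0xd0/0xb0/0xc4, a conversion along the lines of
 * ufshcd_upiu_wlun_to_scsi_wlun() would yield:
 *
 *	(0xd0 & ~0x80) | 0xc100 == 0xc150	"UFS Device" W-LU (50h)
 *	(0xb0 & ~0x80) | 0xc100 == 0xc130	"BOOT" W-LU       (30h)
 *	(0xc4 & ~0x80) | 0xc100 == 0xc144	"RPMB" W-LU       (44h)
 */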
4469 | ||
6ccf44fe | 4470 | /** |
1d337ec2 SRT |
4471 | * ufshcd_probe_hba - probe hba to detect device and initialize |
4472 | * @hba: per-adapter instance | |
4473 | * | |
4474 | * Execute link-startup and verify device initialization | |
6ccf44fe | 4475 | */ |
1d337ec2 | 4476 | static int ufshcd_probe_hba(struct ufs_hba *hba) |
6ccf44fe | 4477 | { |
6ccf44fe SJ |
4478 | int ret; |
4479 | ||
4480 | ret = ufshcd_link_startup(hba); | |
5a0b0cb9 SRT |
4481 | if (ret) |
4482 | goto out; | |
4483 | ||
5064636c YG |
4484 | ufshcd_init_pwr_info(hba); |
4485 | ||
57d104c1 SJ |
4486 | /* UniPro link is active now */ |
4487 | ufshcd_set_link_active(hba); | |
d3e89bac | 4488 | |
5a0b0cb9 SRT |
4489 | ret = ufshcd_verify_dev_init(hba); |
4490 | if (ret) | |
4491 | goto out; | |
68078d5c DR |
4492 | |
4493 | ret = ufshcd_complete_dev_init(hba); | |
4494 | if (ret) | |
4495 | goto out; | |
5a0b0cb9 | 4496 | |
57d104c1 SJ |
4497 | /* UFS device is also active now */ |
4498 | ufshcd_set_ufs_dev_active(hba); | |
66ec6d59 | 4499 | ufshcd_force_reset_auto_bkops(hba); |
57d104c1 SJ |
4500 | hba->wlun_dev_clr_ua = true; |
4501 | ||
7eb584db DR |
4502 | if (ufshcd_get_max_pwr_mode(hba)) { |
4503 | dev_err(hba->dev, | |
4504 | "%s: Failed getting max supported power mode\n", | |
4505 | __func__); | |
4506 | } else { | |
4507 | ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); | |
4508 | if (ret) | |
4509 | dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", | |
4510 | __func__, ret); | |
4511 | } | |
57d104c1 | 4512 | |
53c12d0e YG |
4513 | /* set the state as operational after switching to desired gear */ |
4514 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | |
57d104c1 SJ |
4515 | /* |
4516 | * If we are in error handling context or in power management callbacks | |
4517 | * context, no need to scan the host | |
4518 | */ | |
4519 | if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { | |
4520 | bool flag; | |
4521 | ||
4522 | /* clear any previous UFS device information */ | |
4523 | memset(&hba->dev_info, 0, sizeof(hba->dev_info)); | |
dc3c8d3a YG |
4524 | if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, |
4525 | QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) | |
57d104c1 | 4526 | hba->dev_info.f_power_on_wp_en = flag; |
3441da7d | 4527 | |
3a4bf06d YG |
4528 | if (!hba->is_init_prefetch) |
4529 | ufshcd_init_icc_levels(hba); | |
4530 | ||
2a8fa600 SJ |
4531 | /* Add required well known logical units to scsi mid layer */ |
4532 | if (ufshcd_scsi_add_wlus(hba)) | |
4533 | goto out; | |
4534 | ||
3441da7d SRT |
4535 | scsi_scan_host(hba->host); |
4536 | pm_runtime_put_sync(hba->dev); | |
4537 | } | |
3a4bf06d YG |
4538 | |
4539 | if (!hba->is_init_prefetch) | |
4540 | hba->is_init_prefetch = true; | |
4541 | ||
856b3483 ST |
4542 | /* Resume devfreq after UFS device is detected */ |
4543 | if (ufshcd_is_clkscaling_enabled(hba)) | |
4544 | devfreq_resume_device(hba->devfreq); | |
4545 | ||
5a0b0cb9 | 4546 | out: |
1d337ec2 SRT |
4547 | /* |
4548 | * If we failed to initialize the device or the device is not | |
4549 | * present, turn off the power/clocks etc. | |
4550 | */ | |
57d104c1 SJ |
4551 | if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { |
4552 | pm_runtime_put_sync(hba->dev); | |
1d337ec2 | 4553 | ufshcd_hba_exit(hba); |
57d104c1 | 4554 | } |
1d337ec2 SRT |
4555 | |
4556 | return ret; | |
4557 | } | |
4558 | ||
4559 | /** | |
4560 | * ufshcd_async_scan - asynchronous execution for probing hba | |
4561 | * @data: data pointer to pass to this function | |
4562 | * @cookie: cookie data | |
4563 | */ | |
4564 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) | |
4565 | { | |
4566 | struct ufs_hba *hba = (struct ufs_hba *)data; | |
4567 | ||
4568 | ufshcd_probe_hba(hba); | |
6ccf44fe SJ |
4569 | } |
4570 | ||
7a3e97b0 SY |
4571 | static struct scsi_host_template ufshcd_driver_template = { |
4572 | .module = THIS_MODULE, | |
4573 | .name = UFSHCD, | |
4574 | .proc_name = UFSHCD, | |
4575 | .queuecommand = ufshcd_queuecommand, | |
4576 | .slave_alloc = ufshcd_slave_alloc, | |
eeda4749 | 4577 | .slave_configure = ufshcd_slave_configure, |
7a3e97b0 | 4578 | .slave_destroy = ufshcd_slave_destroy, |
4264fd61 | 4579 | .change_queue_depth = ufshcd_change_queue_depth, |
7a3e97b0 | 4580 | .eh_abort_handler = ufshcd_abort, |
3441da7d SRT |
4581 | .eh_device_reset_handler = ufshcd_eh_device_reset_handler, |
4582 | .eh_host_reset_handler = ufshcd_eh_host_reset_handler, | |
7a3e97b0 SY |
4583 | .this_id = -1, |
4584 | .sg_tablesize = SG_ALL, | |
4585 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | |
4586 | .can_queue = UFSHCD_CAN_QUEUE, | |
1ab27c9c | 4587 | .max_host_blocked = 1, |
c40ecc12 | 4588 | .track_queue_depth = 1, |
7a3e97b0 SY |
4589 | }; |
4590 | ||
57d104c1 SJ |
4591 | static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, |
4592 | int ua) | |
4593 | { | |
7b16a07c | 4594 | int ret; |
57d104c1 | 4595 | |
7b16a07c BA |
4596 | if (!vreg) |
4597 | return 0; | |
57d104c1 | 4598 | |
7b16a07c BA |
4599 | ret = regulator_set_load(vreg->reg, ua); |
4600 | if (ret < 0) { | |
4601 | dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n", | |
4602 | __func__, vreg->name, ua, ret); | |
57d104c1 SJ |
4603 | } |
4604 | ||
4605 | return ret; | |
4606 | } | |
4607 | ||
4608 | static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, | |
4609 | struct ufs_vreg *vreg) | |
4610 | { | |
4611 | return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); | |
4612 | } | |
4613 | ||
4614 | static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, | |
4615 | struct ufs_vreg *vreg) | |
4616 | { | |
4617 | return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); | |
4618 | } | |
4619 | ||
aa497613 SRT |
4620 | static int ufshcd_config_vreg(struct device *dev, |
4621 | struct ufs_vreg *vreg, bool on) | |
4622 | { | |
4623 | int ret = 0; | |
4624 | struct regulator *reg = vreg->reg; | |
4625 | const char *name = vreg->name; | |
4626 | int min_uV, uA_load; | |
4627 | ||
4628 | BUG_ON(!vreg); | |
4629 | ||
4630 | if (regulator_count_voltages(reg) > 0) { | |
4631 | min_uV = on ? vreg->min_uV : 0; | |
4632 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | |
4633 | if (ret) { | |
4634 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | |
4635 | __func__, name, ret); | |
4636 | goto out; | |
4637 | } | |
4638 | ||
4639 | uA_load = on ? vreg->max_uA : 0; | |
57d104c1 SJ |
4640 | ret = ufshcd_config_vreg_load(dev, vreg, uA_load); |
4641 | if (ret) | |
aa497613 | 4642 | goto out; |
aa497613 SRT |
4643 | } |
4644 | out: | |
4645 | return ret; | |
4646 | } | |
4647 | ||
4648 | static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) | |
4649 | { | |
4650 | int ret = 0; | |
4651 | ||
4652 | if (!vreg || vreg->enabled) | |
4653 | goto out; | |
4654 | ||
4655 | ret = ufshcd_config_vreg(dev, vreg, true); | |
4656 | if (!ret) | |
4657 | ret = regulator_enable(vreg->reg); | |
4658 | ||
4659 | if (!ret) | |
4660 | vreg->enabled = true; | |
4661 | else | |
4662 | dev_err(dev, "%s: %s enable failed, err=%d\n", | |
4663 | __func__, vreg->name, ret); | |
4664 | out: | |
4665 | return ret; | |
4666 | } | |
4667 | ||
4668 | static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) | |
4669 | { | |
4670 | int ret = 0; | |
4671 | ||
4672 | if (!vreg || !vreg->enabled) | |
4673 | goto out; | |
4674 | ||
4675 | ret = regulator_disable(vreg->reg); | |
4676 | ||
4677 | if (!ret) { | |
4678 | /* ignore errors on applying disable config */ | |
4679 | ufshcd_config_vreg(dev, vreg, false); | |
4680 | vreg->enabled = false; | |
4681 | } else { | |
4682 | dev_err(dev, "%s: %s disable failed, err=%d\n", | |
4683 | __func__, vreg->name, ret); | |
4684 | } | |
4685 | out: | |
4686 | return ret; | |
4687 | } | |
4688 | ||
4689 | static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) | |
4690 | { | |
4691 | int ret = 0; | |
4692 | struct device *dev = hba->dev; | |
4693 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4694 | ||
4695 | if (!info) | |
4696 | goto out; | |
4697 | ||
4698 | ret = ufshcd_toggle_vreg(dev, info->vcc, on); | |
4699 | if (ret) | |
4700 | goto out; | |
4701 | ||
4702 | ret = ufshcd_toggle_vreg(dev, info->vccq, on); | |
4703 | if (ret) | |
4704 | goto out; | |
4705 | ||
4706 | ret = ufshcd_toggle_vreg(dev, info->vccq2, on); | |
4707 | if (ret) | |
4708 | goto out; | |
4709 | ||
4710 | out: | |
4711 | if (ret) { | |
4712 | ufshcd_toggle_vreg(dev, info->vccq2, false); | |
4713 | ufshcd_toggle_vreg(dev, info->vccq, false); | |
4714 | ufshcd_toggle_vreg(dev, info->vcc, false); | |
4715 | } | |
4716 | return ret; | |
4717 | } | |
4718 | ||
6a771a65 RS |
4719 | static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) |
4720 | { | |
4721 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4722 | ||
4723 | if (info) | |
4724 | return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); | |
4725 | ||
4726 | return 0; | |
4727 | } | |
4728 | ||
aa497613 SRT |
4729 | static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) |
4730 | { | |
4731 | int ret = 0; | |
4732 | ||
4733 | if (!vreg) | |
4734 | goto out; | |
4735 | ||
4736 | vreg->reg = devm_regulator_get(dev, vreg->name); | |
4737 | if (IS_ERR(vreg->reg)) { | |
4738 | ret = PTR_ERR(vreg->reg); | |
4739 | dev_err(dev, "%s: %s get failed, err=%d\n", | |
4740 | __func__, vreg->name, ret); | |
4741 | } | |
4742 | out: | |
4743 | return ret; | |
4744 | } | |
4745 | ||
4746 | static int ufshcd_init_vreg(struct ufs_hba *hba) | |
4747 | { | |
4748 | int ret = 0; | |
4749 | struct device *dev = hba->dev; | |
4750 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4751 | ||
4752 | if (!info) | |
4753 | goto out; | |
4754 | ||
4755 | ret = ufshcd_get_vreg(dev, info->vcc); | |
4756 | if (ret) | |
4757 | goto out; | |
4758 | ||
4759 | ret = ufshcd_get_vreg(dev, info->vccq); | |
4760 | if (ret) | |
4761 | goto out; | |
4762 | ||
4763 | ret = ufshcd_get_vreg(dev, info->vccq2); | |
4764 | out: | |
4765 | return ret; | |
4766 | } | |
4767 | ||
6a771a65 RS |
4768 | static int ufshcd_init_hba_vreg(struct ufs_hba *hba) |
4769 | { | |
4770 | struct ufs_vreg_info *info = &hba->vreg_info; | |
4771 | ||
4772 | if (info) | |
4773 | return ufshcd_get_vreg(hba->dev, info->vdd_hba); | |
4774 | ||
4775 | return 0; | |
4776 | } | |
4777 | ||
57d104c1 SJ |
4778 | static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, |
4779 | bool skip_ref_clk) | |
c6e79dac SRT |
4780 | { |
4781 | int ret = 0; | |
4782 | struct ufs_clk_info *clki; | |
4783 | struct list_head *head = &hba->clk_list_head; | |
1ab27c9c | 4784 | unsigned long flags; |
c6e79dac SRT |
4785 | |
4786 | if (!head || list_empty(head)) | |
4787 | goto out; | |
4788 | ||
4789 | list_for_each_entry(clki, head, list) { | |
4790 | if (!IS_ERR_OR_NULL(clki->clk)) { | |
57d104c1 SJ |
4791 | if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) |
4792 | continue; | |
4793 | ||
c6e79dac SRT |
4794 | if (on && !clki->enabled) { |
4795 | ret = clk_prepare_enable(clki->clk); | |
4796 | if (ret) { | |
4797 | dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", | |
4798 | __func__, clki->name, ret); | |
4799 | goto out; | |
4800 | } | |
4801 | } else if (!on && clki->enabled) { | |
4802 | clk_disable_unprepare(clki->clk); | |
4803 | } | |
4804 | clki->enabled = on; | |
4805 | dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, | |
4806 | clki->name, on ? "en" : "dis"); | |
4807 | } | |
4808 | } | |
1ab27c9c | 4809 | |
0263bcd0 | 4810 | ret = ufshcd_vops_setup_clocks(hba, on); |
c6e79dac SRT |
4811 | out: |
4812 | if (ret) { | |
4813 | list_for_each_entry(clki, head, list) { | |
4814 | if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) | |
4815 | clk_disable_unprepare(clki->clk); | |
4816 | } | |
eda910e4 | 4817 | } else if (on) { |
1ab27c9c ST |
4818 | spin_lock_irqsave(hba->host->host_lock, flags); |
4819 | hba->clk_gating.state = CLKS_ON; | |
4820 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
c6e79dac SRT |
4821 | } |
4822 | return ret; | |
4823 | } | |
4824 | ||
57d104c1 SJ |
4825 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) |
4826 | { | |
4827 | return __ufshcd_setup_clocks(hba, on, false); | |
4828 | } | |
4829 | ||
c6e79dac SRT |
4830 | static int ufshcd_init_clocks(struct ufs_hba *hba) |
4831 | { | |
4832 | int ret = 0; | |
4833 | struct ufs_clk_info *clki; | |
4834 | struct device *dev = hba->dev; | |
4835 | struct list_head *head = &hba->clk_list_head; | |
4836 | ||
4837 | if (!head || list_empty(head)) | |
4838 | goto out; | |
4839 | ||
4840 | list_for_each_entry(clki, head, list) { | |
4841 | if (!clki->name) | |
4842 | continue; | |
4843 | ||
4844 | clki->clk = devm_clk_get(dev, clki->name); | |
4845 | if (IS_ERR(clki->clk)) { | |
4846 | ret = PTR_ERR(clki->clk); | |
4847 | dev_err(dev, "%s: %s clk get failed, %d\n", | |
4848 | __func__, clki->name, ret); | |
4849 | goto out; | |
4850 | } | |
4851 | ||
4852 | if (clki->max_freq) { | |
4853 | ret = clk_set_rate(clki->clk, clki->max_freq); | |
4854 | if (ret) { | |
4855 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
4856 | __func__, clki->name, | |
4857 | clki->max_freq, ret); | |
4858 | goto out; | |
4859 | } | |
856b3483 | 4860 | clki->curr_freq = clki->max_freq; |
c6e79dac SRT |
4861 | } |
4862 | dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, | |
4863 | clki->name, clk_get_rate(clki->clk)); | |
4864 | } | |
4865 | out: | |
4866 | return ret; | |
4867 | } | |
4868 | ||
5c0c28a8 SRT |
4869 | static int ufshcd_variant_hba_init(struct ufs_hba *hba) |
4870 | { | |
4871 | int err = 0; | |
4872 | ||
4873 | if (!hba->vops) | |
4874 | goto out; | |
4875 | ||
0263bcd0 YG |
4876 | err = ufshcd_vops_init(hba); |
4877 | if (err) | |
4878 | goto out; | |
5c0c28a8 | 4879 | |
0263bcd0 YG |
4880 | err = ufshcd_vops_setup_regulators(hba, true); |
4881 | if (err) | |
4882 | goto out_exit; | |
5c0c28a8 SRT |
4883 | |
4884 | goto out; | |
4885 | ||
5c0c28a8 | 4886 | out_exit: |
0263bcd0 | 4887 | ufshcd_vops_exit(hba); |
5c0c28a8 SRT |
4888 | out: |
4889 | if (err) | |
4890 | dev_err(hba->dev, "%s: variant %s init failed err %d\n", | |
0263bcd0 | 4891 | __func__, ufshcd_get_var_name(hba), err); |
5c0c28a8 SRT |
4892 | return err; |
4893 | } | |
4894 | ||
4895 | static void ufshcd_variant_hba_exit(struct ufs_hba *hba) | |
4896 | { | |
4897 | if (!hba->vops) | |
4898 | return; | |
4899 | ||
0263bcd0 | 4900 | ufshcd_vops_setup_clocks(hba, false); |
5c0c28a8 | 4901 | |
0263bcd0 | 4902 | ufshcd_vops_setup_regulators(hba, false); |
5c0c28a8 | 4903 | |
0263bcd0 | 4904 | ufshcd_vops_exit(hba); |
5c0c28a8 SRT |
4905 | } |
4906 | ||
aa497613 SRT |
4907 | static int ufshcd_hba_init(struct ufs_hba *hba) |
4908 | { | |
4909 | int err; | |
4910 | ||
6a771a65 RS |
4911 | /* |
4912 | * Handle host controller power separately from the UFS device power | |
4913 | * rails as it will help controlling the UFS host controller power | |
4914 | * collapse easily which is different than UFS device power collapse. | |
4915 | * Also, enable the host controller power before we go ahead with rest | |
4916 | * of the initialization here. | |
4917 | */ | |
4918 | err = ufshcd_init_hba_vreg(hba); | |
aa497613 SRT |
4919 | if (err) |
4920 | goto out; | |
4921 | ||
6a771a65 | 4922 | err = ufshcd_setup_hba_vreg(hba, true); |
aa497613 SRT |
4923 | if (err) |
4924 | goto out; | |
4925 | ||
6a771a65 RS |
4926 | err = ufshcd_init_clocks(hba); |
4927 | if (err) | |
4928 | goto out_disable_hba_vreg; | |
4929 | ||
4930 | err = ufshcd_setup_clocks(hba, true); | |
4931 | if (err) | |
4932 | goto out_disable_hba_vreg; | |
4933 | ||
c6e79dac SRT |
4934 | err = ufshcd_init_vreg(hba); |
4935 | if (err) | |
4936 | goto out_disable_clks; | |
4937 | ||
4938 | err = ufshcd_setup_vreg(hba, true); | |
4939 | if (err) | |
4940 | goto out_disable_clks; | |
4941 | ||
aa497613 SRT |
4942 | err = ufshcd_variant_hba_init(hba); |
4943 | if (err) | |
4944 | goto out_disable_vreg; | |
4945 | ||
1d337ec2 | 4946 | hba->is_powered = true; |
aa497613 SRT |
4947 | goto out; |
4948 | ||
4949 | out_disable_vreg: | |
4950 | ufshcd_setup_vreg(hba, false); | |
c6e79dac SRT |
4951 | out_disable_clks: |
4952 | ufshcd_setup_clocks(hba, false); | |
6a771a65 RS |
4953 | out_disable_hba_vreg: |
4954 | ufshcd_setup_hba_vreg(hba, false); | |
aa497613 SRT |
4955 | out: |
4956 | return err; | |
4957 | } | |
4958 | ||
4959 | static void ufshcd_hba_exit(struct ufs_hba *hba) | |
4960 | { | |
1d337ec2 SRT |
4961 | if (hba->is_powered) { |
4962 | ufshcd_variant_hba_exit(hba); | |
4963 | ufshcd_setup_vreg(hba, false); | |
4964 | ufshcd_setup_clocks(hba, false); | |
4965 | ufshcd_setup_hba_vreg(hba, false); | |
4966 | hba->is_powered = false; | |
4967 | } | |
aa497613 SRT |
4968 | } |
4969 | ||
57d104c1 SJ |
4970 | static int |
4971 | ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) | |
4972 | { | |
4973 | unsigned char cmd[6] = {REQUEST_SENSE, | |
4974 | 0, | |
4975 | 0, | |
4976 | 0, | |
4977 | SCSI_SENSE_BUFFERSIZE, | |
4978 | 0}; | |
4979 | char *buffer; | |
4980 | int ret; | |
4981 | ||
4982 | buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); | |
4983 | if (!buffer) { | |
4984 | ret = -ENOMEM; | |
4985 | goto out; | |
4986 | } | |
4987 | ||
4988 | ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, | |
4989 | SCSI_SENSE_BUFFERSIZE, NULL, | |
4990 | msecs_to_jiffies(1000), 3, NULL, REQ_PM); | |
4991 | if (ret) | |
4992 | pr_err("%s: failed with err %d\n", __func__, ret); | |
4993 | ||
4994 | kfree(buffer); | |
4995 | out: | |
4996 | return ret; | |
4997 | } | |
4998 | ||
4999 | /** | |
5000 | * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device | |
5001 | * power mode | |
5002 | * @hba: per adapter instance | |
5003 | * @pwr_mode: device power mode to set | |
5004 | * | |
5005 | * Returns 0 if requested power mode is set successfully | |
5006 | * Returns non-zero if failed to set the requested power mode | |
5007 | */ | |
5008 | static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | |
5009 | enum ufs_dev_pwr_mode pwr_mode) | |
5010 | { | |
5011 | unsigned char cmd[6] = { START_STOP }; | |
5012 | struct scsi_sense_hdr sshdr; | |
7c48bfd0 AM |
5013 | struct scsi_device *sdp; |
5014 | unsigned long flags; | |
57d104c1 SJ |
5015 | int ret; |
5016 | ||
7c48bfd0 AM |
5017 | spin_lock_irqsave(hba->host->host_lock, flags); |
5018 | sdp = hba->sdev_ufs_device; | |
5019 | if (sdp) { | |
5020 | ret = scsi_device_get(sdp); | |
5021 | if (!ret && !scsi_device_online(sdp)) { | |
5022 | ret = -ENODEV; | |
5023 | scsi_device_put(sdp); | |
5024 | } | |
5025 | } else { | |
5026 | ret = -ENODEV; | |
5027 | } | |
5028 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5029 | ||
5030 | if (ret) | |
5031 | return ret; | |
57d104c1 SJ |
5032 | |
5033 | /* | |
5034 | * If scsi commands fail, the scsi mid-layer schedules scsi error- | |
5035 | * handling, which would wait for host to be resumed. Since we know | |
5036 | * we are functional while we are here, skip host resume in error | |
5037 | * handling context. | |
5038 | */ | |
5039 | hba->host->eh_noresume = 1; | |
5040 | if (hba->wlun_dev_clr_ua) { | |
5041 | ret = ufshcd_send_request_sense(hba, sdp); | |
5042 | if (ret) | |
5043 | goto out; | |
5044 | /* Unit attention condition is cleared now */ | |
5045 | hba->wlun_dev_clr_ua = false; | |
5046 | } | |
5047 | ||
5048 | cmd[4] = pwr_mode << 4; | |
5049 | ||
5050 | /* | |
5051 | * This function is generally called from the power management | |
5052 | * callbacks, hence set the REQ_PM flag so that it doesn't resume the | |
5053 | * already suspended children. | |
5054 | */ | |
5055 | ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, | |
5056 | START_STOP_TIMEOUT, 0, NULL, REQ_PM); | |
5057 | if (ret) { | |
5058 | sdev_printk(KERN_WARNING, sdp, | |
ef61329d HR |
5059 | "START_STOP failed for power mode: %d, result %x\n", |
5060 | pwr_mode, ret); | |
21045519 HR |
5061 | if (driver_byte(ret) & DRIVER_SENSE) |
5062 | scsi_print_sense_hdr(sdp, NULL, &sshdr); | |
57d104c1 SJ |
5063 | } |
5064 | ||
5065 | if (!ret) | |
5066 | hba->curr_dev_pwr_mode = pwr_mode; | |
5067 | out: | |
7c48bfd0 | 5068 | scsi_device_put(sdp); |
57d104c1 SJ |
5069 | hba->host->eh_noresume = 0; |
5070 | return ret; | |
5071 | } | |
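/*
 * Editor's worked example (enum values assumed from ufs.h, verify before
 * relying on them): with UFS_ACTIVE_PWR_MODE == 1, UFS_SLEEP_PWR_MODE == 2
 * and UFS_POWERDOWN_PWR_MODE == 3, the "cmd[4] = pwr_mode << 4" above places
 * the POWER CONDITION code in bits 7:4 of CDB byte 4, i.e. 0x10, 0x20 and
 * 0x30 respectively.
 */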
5072 | ||
5073 | static int ufshcd_link_state_transition(struct ufs_hba *hba, | |
5074 | enum uic_link_state req_link_state, | |
5075 | int check_for_bkops) | |
5076 | { | |
5077 | int ret = 0; | |
5078 | ||
5079 | if (req_link_state == hba->uic_link_state) | |
5080 | return 0; | |
5081 | ||
5082 | if (req_link_state == UIC_LINK_HIBERN8_STATE) { | |
5083 | ret = ufshcd_uic_hibern8_enter(hba); | |
5084 | if (!ret) | |
5085 | ufshcd_set_link_hibern8(hba); | |
5086 | else | |
5087 | goto out; | |
5088 | } | |
5089 | /* | |
5090 | * If autobkops is enabled, link can't be turned off because | |
5091 | * turning off the link would also turn off the device. | |
5092 | */ | |
5093 | else if ((req_link_state == UIC_LINK_OFF_STATE) && | |
5094 | (!check_for_bkops || (check_for_bkops && | |
5095 | !hba->auto_bkops_enabled))) { | |
5096 | /* | |
5097 | * Change controller state to "reset state" which | |
5098 | * should also put the link in off/reset state | |
5099 | */ | |
5100 | ufshcd_hba_stop(hba); | |
5101 | /* | |
5102 | * TODO: Check if we need any delay to make sure that | |
5103 | * controller is reset | |
5104 | */ | |
5105 | ufshcd_set_link_off(hba); | |
5106 | } | |
5107 | ||
5108 | out: | |
5109 | return ret; | |
5110 | } | |
5111 | ||
5112 | static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) | |
5113 | { | |
5114 | /* | |
5115 | * If the UFS device is in UFS_Sleep state, turn off the VCC rail to | |
5116 | * save some power. | |
5117 | * | |
5118 | * If UFS device and link is in OFF state, all power supplies (VCC, | |
5119 | * VCCQ, VCCQ2) can be turned off if power on write protect is not | |
5120 | * required. If UFS link is inactive (Hibern8 or OFF state) and device | |
5121 | * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. | |
5122 | * | |
5123 | * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway | |
5124 | * in low power state which would save some power. | |
5125 | */ | |
5126 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | |
5127 | !hba->dev_info.is_lu_power_on_wp) { | |
5128 | ufshcd_setup_vreg(hba, false); | |
5129 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | |
5130 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | |
5131 | if (!ufshcd_is_link_active(hba)) { | |
5132 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | |
5133 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); | |
5134 | } | |
5135 | } | |
5136 | } | |
5137 | ||
5138 | static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) | |
5139 | { | |
5140 | int ret = 0; | |
5141 | ||
5142 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | |
5143 | !hba->dev_info.is_lu_power_on_wp) { | |
5144 | ret = ufshcd_setup_vreg(hba, true); | |
5145 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | |
5146 | ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); | |
5147 | if (!ret && !ufshcd_is_link_active(hba)) { | |
5148 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); | |
5149 | if (ret) | |
5150 | goto vcc_disable; | |
5151 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); | |
5152 | if (ret) | |
5153 | goto vccq_lpm; | |
5154 | } | |
5155 | } | |
5156 | goto out; | |
5157 | ||
5158 | vccq_lpm: | |
5159 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | |
5160 | vcc_disable: | |
5161 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | |
5162 | out: | |
5163 | return ret; | |
5164 | } | |
5165 | ||
5166 | static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) | |
5167 | { | |
5168 | if (ufshcd_is_link_off(hba)) | |
5169 | ufshcd_setup_hba_vreg(hba, false); | |
5170 | } | |
5171 | ||
5172 | static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) | |
5173 | { | |
5174 | if (ufshcd_is_link_off(hba)) | |
5175 | ufshcd_setup_hba_vreg(hba, true); | |
5176 | } | |
5177 | ||
7a3e97b0 | 5178 | /** |
57d104c1 | 5179 | * ufshcd_suspend - helper function for suspend operations |
3b1d0580 | 5180 | * @hba: per adapter instance |
57d104c1 SJ |
5181 | * @pm_op: desired low power operation type |
5182 | * | |
5183 | * This function will try to put the UFS device and link into low power | |
5184 | * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" | |
5185 | * (System PM level). | |
5186 | * | |
5187 | * If this function is called during shutdown, it will make sure that | |
5188 | * both the UFS device and the UFS link are powered off. | |
7a3e97b0 | 5189 | * |
57d104c1 SJ |
5190 | * NOTE: UFS device & link must be active before we enter this function. |
5191 | * | |
5192 | * Returns 0 for success and non-zero for failure | |
7a3e97b0 | 5193 | */ |
57d104c1 | 5194 | static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
7a3e97b0 | 5195 | { |
57d104c1 SJ |
5196 | int ret = 0; |
5197 | enum ufs_pm_level pm_lvl; | |
5198 | enum ufs_dev_pwr_mode req_dev_pwr_mode; | |
5199 | enum uic_link_state req_link_state; | |
5200 | ||
5201 | hba->pm_op_in_progress = 1; | |
5202 | if (!ufshcd_is_shutdown_pm(pm_op)) { | |
5203 | pm_lvl = ufshcd_is_runtime_pm(pm_op) ? | |
5204 | hba->rpm_lvl : hba->spm_lvl; | |
5205 | req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); | |
5206 | req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); | |
5207 | } else { | |
5208 | req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; | |
5209 | req_link_state = UIC_LINK_OFF_STATE; | |
5210 | } | |
5211 | ||
7a3e97b0 | 5212 | /* |
57d104c1 SJ |
5213 | * If we can't transition into any of the low power modes |
5214 | * just gate the clocks. | |
7a3e97b0 | 5215 | */ |
1ab27c9c ST |
5216 | ufshcd_hold(hba, false); |
5217 | hba->clk_gating.is_suspended = true; | |
5218 | ||
57d104c1 SJ |
5219 | if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && |
5220 | req_link_state == UIC_LINK_ACTIVE_STATE) { | |
5221 | goto disable_clks; | |
5222 | } | |
7a3e97b0 | 5223 | |
57d104c1 SJ |
5224 | if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && |
5225 | (req_link_state == hba->uic_link_state)) | |
5226 | goto out; | |
5227 | ||
5228 | /* UFS device & link must be active before we enter this function */ | |
5229 | if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { | |
5230 | ret = -EINVAL; | |
5231 | goto out; | |
5232 | } | |
5233 | ||
5234 | if (ufshcd_is_runtime_pm(pm_op)) { | |
374a246e SJ |
5235 | if (ufshcd_can_autobkops_during_suspend(hba)) { |
5236 | /* | |
5237 | * The device is idle with no requests in the queue, | |
5238 | * allow background operations if bkops status shows | |
5239 | * that performance might be impacted. | |
5240 | */ | |
5241 | ret = ufshcd_urgent_bkops(hba); | |
5242 | if (ret) | |
5243 | goto enable_gating; | |
5244 | } else { | |
5245 | /* make sure that auto bkops is disabled */ | |
5246 | ufshcd_disable_auto_bkops(hba); | |
5247 | } | |
57d104c1 SJ |
5248 | } |
5249 | ||
5250 | if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && | |
5251 | ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || | |
5252 | !ufshcd_is_runtime_pm(pm_op))) { | |
5253 | /* ensure that bkops is disabled */ | |
5254 | ufshcd_disable_auto_bkops(hba); | |
5255 | ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); | |
5256 | if (ret) | |
1ab27c9c | 5257 | goto enable_gating; |
57d104c1 SJ |
5258 | } |
5259 | ||
5260 | ret = ufshcd_link_state_transition(hba, req_link_state, 1); | |
5261 | if (ret) | |
5262 | goto set_dev_active; | |
5263 | ||
5264 | ufshcd_vreg_set_lpm(hba); | |
5265 | ||
5266 | disable_clks: | |
856b3483 ST |
5267 | /* |
5268 | * The clock scaling needs access to controller registers. Hence, wait | |
5269 | * for pending clock scaling work to be done before clocks are | |
5270 | * turned off. | |
5271 | */ | |
5272 | if (ufshcd_is_clkscaling_enabled(hba)) { | |
5273 | devfreq_suspend_device(hba->devfreq); | |
5274 | hba->clk_scaling.window_start_t = 0; | |
5275 | } | |
57d104c1 SJ |
5276 | /* |
5277 | * Call vendor specific suspend callback. As these callbacks may access | |
5278 | * vendor specific host controller register space call them before the | |
5279 | * host clocks are ON. | |
5280 | */ | |
0263bcd0 YG |
5281 | ret = ufshcd_vops_suspend(hba, pm_op); |
5282 | if (ret) | |
5283 | goto set_link_active; | |
57d104c1 | 5284 | |
0263bcd0 YG |
5285 | ret = ufshcd_vops_setup_clocks(hba, false); |
5286 | if (ret) | |
5287 | goto vops_resume; | |
57d104c1 SJ |
5288 | |
5289 | if (!ufshcd_is_link_active(hba)) | |
5290 | ufshcd_setup_clocks(hba, false); | |
5291 | else | |
5292 | /* If link is active, device ref_clk can't be switched off */ | |
5293 | __ufshcd_setup_clocks(hba, false, true); | |
5294 | ||
1ab27c9c | 5295 | hba->clk_gating.state = CLKS_OFF; |
57d104c1 SJ |
5296 | /* |
5297 | * Disable the host irq, as no host controller transactions are | |
0263bcd0 | 5298 | * expected until resume. |
57d104c1 SJ |
5299 | */ |
5300 | ufshcd_disable_irq(hba); | |
5301 | /* Put the host controller in low power mode if possible */ | |
5302 | ufshcd_hba_vreg_set_lpm(hba); | |
5303 | goto out; | |
5304 | ||
5305 | vops_resume: | |
0263bcd0 | 5306 | ufshcd_vops_resume(hba, pm_op); |
57d104c1 SJ |
5307 | set_link_active: |
5308 | ufshcd_vreg_set_hpm(hba); | |
5309 | if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) | |
5310 | ufshcd_set_link_active(hba); | |
5311 | else if (ufshcd_is_link_off(hba)) | |
5312 | ufshcd_host_reset_and_restore(hba); | |
5313 | set_dev_active: | |
5314 | if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) | |
5315 | ufshcd_disable_auto_bkops(hba); | |
1ab27c9c ST |
5316 | enable_gating: |
5317 | hba->clk_gating.is_suspended = false; | |
5318 | ufshcd_release(hba); | |
57d104c1 SJ |
5319 | out: |
5320 | hba->pm_op_in_progress = 0; | |
5321 | return ret; | |
7a3e97b0 SY |
5322 | } |
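/*
 * Editor's reference note (assumed from the ufs_pm_level definitions in
 * ufshcd.h of this vintage; treat as an assumption, not as documentation):
 * the rpm_lvl/spm_lvl values used above select a (device power mode, link
 * state) pair roughly as follows:
 *
 *	UFS_PM_LVL_0: UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE
 *	UFS_PM_LVL_1: UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_2: UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE
 *	UFS_PM_LVL_3: UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_4: UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE
 *	UFS_PM_LVL_5: UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE
 */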
5323 | ||
5324 | /** | |
57d104c1 | 5325 | * ufshcd_resume - helper function for resume operations |
3b1d0580 | 5326 | * @hba: per adapter instance |
57d104c1 | 5327 | * @pm_op: runtime PM or system PM |
7a3e97b0 | 5328 | * |
57d104c1 SJ |
5329 | * This function basically brings the UFS device, UniPro link and controller |
5330 | * to active state. | |
5331 | * | |
5332 | * Returns 0 for success and non-zero for failure | |
7a3e97b0 | 5333 | */ |
57d104c1 | 5334 | static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
7a3e97b0 | 5335 | { |
57d104c1 SJ |
5336 | int ret; |
5337 | enum uic_link_state old_link_state; | |
5338 | ||
5339 | hba->pm_op_in_progress = 1; | |
5340 | old_link_state = hba->uic_link_state; | |
5341 | ||
5342 | ufshcd_hba_vreg_set_hpm(hba); | |
5343 | /* Make sure clocks are enabled before accessing controller */ | |
5344 | ret = ufshcd_setup_clocks(hba, true); | |
5345 | if (ret) | |
5346 | goto out; | |
5347 | ||
57d104c1 SJ |
5348 | /* enable the host irq as host controller would be active soon */ |
5349 | ret = ufshcd_enable_irq(hba); | |
5350 | if (ret) | |
5351 | goto disable_irq_and_vops_clks; | |
5352 | ||
5353 | ret = ufshcd_vreg_set_hpm(hba); | |
5354 | if (ret) | |
5355 | goto disable_irq_and_vops_clks; | |
5356 | ||
7a3e97b0 | 5357 | /* |
57d104c1 SJ |
5358 | * Call vendor specific resume callback. As these callbacks may access |
5359 | * vendor specific host controller register space call them when the | |
5360 | * host clocks are ON. | |
7a3e97b0 | 5361 | */ |
0263bcd0 YG |
5362 | ret = ufshcd_vops_resume(hba, pm_op); |
5363 | if (ret) | |
5364 | goto disable_vreg; | |
57d104c1 SJ |
5365 | |
5366 | if (ufshcd_is_link_hibern8(hba)) { | |
5367 | ret = ufshcd_uic_hibern8_exit(hba); | |
5368 | if (!ret) | |
5369 | ufshcd_set_link_active(hba); | |
5370 | else | |
5371 | goto vendor_suspend; | |
5372 | } else if (ufshcd_is_link_off(hba)) { | |
5373 | ret = ufshcd_host_reset_and_restore(hba); | |
5374 | /* | |
5375 | * ufshcd_host_reset_and_restore() should have already | |
5376 | * set the link state as active | |
5377 | */ | |
5378 | if (ret || !ufshcd_is_link_active(hba)) | |
5379 | goto vendor_suspend; | |
5380 | } | |
5381 | ||
5382 | if (!ufshcd_is_ufs_dev_active(hba)) { | |
5383 | ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); | |
5384 | if (ret) | |
5385 | goto set_old_link_state; | |
5386 | } | |
5387 | ||
374a246e SJ |
5388 | /* |
5389 | * If BKOPs operations are urgently needed at this moment then | |
5390 | * keep auto-bkops enabled or else disable it. | |
5391 | */ | |
5392 | ufshcd_urgent_bkops(hba); | |
1ab27c9c ST |
5393 | hba->clk_gating.is_suspended = false; |
5394 | ||
856b3483 ST |
5395 | if (ufshcd_is_clkscaling_enabled(hba)) |
5396 | devfreq_resume_device(hba->devfreq); | |
5397 | ||
1ab27c9c ST |
5398 | /* Schedule clock gating in case of no access to UFS device yet */ |
5399 | ufshcd_release(hba); | |
57d104c1 SJ |
5400 | goto out; |
5401 | ||
5402 | set_old_link_state: | |
5403 | ufshcd_link_state_transition(hba, old_link_state, 0); | |
5404 | vendor_suspend: | |
0263bcd0 | 5405 | ufshcd_vops_suspend(hba, pm_op); |
57d104c1 SJ |
5406 | disable_vreg: |
5407 | ufshcd_vreg_set_lpm(hba); | |
5408 | disable_irq_and_vops_clks: | |
5409 | ufshcd_disable_irq(hba); | |
57d104c1 SJ |
5410 | ufshcd_setup_clocks(hba, false); |
5411 | out: | |
5412 | hba->pm_op_in_progress = 0; | |
5413 | return ret; | |
5414 | } | |
5415 | ||
5416 | /** | |
5417 | * ufshcd_system_suspend - system suspend routine | |
5418 | * @hba: per adapter instance | |
5420 | * | |
5421 | * Check the description of ufshcd_suspend() function for more details. | |
5422 | * | |
5423 | * Returns 0 for success and non-zero for failure | |
5424 | */ | |
5425 | int ufshcd_system_suspend(struct ufs_hba *hba) | |
5426 | { | |
5427 | int ret = 0; | |
5428 | ||
5429 | if (!hba || !hba->is_powered) | |
233b594b | 5430 | return 0; |
57d104c1 SJ |
5431 | |
5432 | if (pm_runtime_suspended(hba->dev)) { | |
5433 | if (hba->rpm_lvl == hba->spm_lvl) | |
5434 | /* | |
5435 | * There is possibility that device may still be in | |
5436 | * active state during the runtime suspend. | |
5437 | */ | |
5438 | if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == | |
5439 | hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) | |
5440 | goto out; | |
5441 | ||
5442 | /* | |
5443 | * UFS device and/or UFS link low power states during runtime | |
5444 | * suspend seem to be different from what is expected during | |
5445 | * system suspend. Hence runtime resume the device & link and | |
5446 | * let the system suspend low power states take effect. | |
5447 | * TODO: If resume takes a long time, we might optimize it in the | |
5448 | * future by not resuming everything if possible. | |
5449 | */ | |
5450 | ret = ufshcd_runtime_resume(hba); | |
5451 | if (ret) | |
5452 | goto out; | |
5453 | } | |
5454 | ||
5455 | ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); | |
5456 | out: | |
e785060e DR |
5457 | if (!ret) |
5458 | hba->is_sys_suspended = true; | |
57d104c1 SJ |
5459 | return ret; |
5460 | } | |
5461 | EXPORT_SYMBOL(ufshcd_system_suspend); | |
5462 | ||
5463 | /** | |
5464 | * ufshcd_system_resume - system resume routine | |
5465 | * @hba: per adapter instance | |
5466 | * | |
5467 | * Returns 0 for success and non-zero for failure | |
5468 | */ | |
7a3e97b0 | 5469 | |
57d104c1 SJ |
5470 | int ufshcd_system_resume(struct ufs_hba *hba) |
5471 | { | |
5472 | if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) | |
5473 | /* | |
5474 | * Let the runtime resume take care of resuming | |
5475 | * if runtime suspended. | |
5476 | */ | |
5477 | return 0; | |
5478 | ||
5479 | return ufshcd_resume(hba, UFS_SYSTEM_PM); | |
7a3e97b0 | 5480 | } |
57d104c1 | 5481 | EXPORT_SYMBOL(ufshcd_system_resume); |
3b1d0580 | 5482 | |
57d104c1 SJ |
5483 | /** |
5484 | * ufshcd_runtime_suspend - runtime suspend routine | |
5485 | * @hba: per adapter instance | |
5486 | * | |
5487 | * Check the description of ufshcd_suspend() function for more details. | |
5488 | * | |
5489 | * Returns 0 for success and non-zero for failure | |
5490 | */ | |
66ec6d59 SRT |
5491 | int ufshcd_runtime_suspend(struct ufs_hba *hba) |
5492 | { | |
57d104c1 | 5493 | if (!hba || !hba->is_powered) |
66ec6d59 SRT |
5494 | return 0; |
5495 | ||
57d104c1 | 5496 | return ufshcd_suspend(hba, UFS_RUNTIME_PM); |
66ec6d59 SRT |
5497 | } |
5498 | EXPORT_SYMBOL(ufshcd_runtime_suspend); | |
5499 | ||
57d104c1 SJ |
5500 | /** |
5501 | * ufshcd_runtime_resume - runtime resume routine | |
5502 | * @hba: per adapter instance | |
5503 | * | |
5504 | * This function basically brings the UFS device, UniPro link and controller | |
5505 | * to active state. Following operations are done in this function: | |
5506 | * | |
5507 | * 1. Turn on all the controller related clocks | |
5508 | * 2. Bring the UniPro link out of Hibernate state | |
5509 | * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device | |
5510 | * to active state. | |
5511 | * 4. If auto-bkops is enabled on the device, disable it. | |
5512 | * | |
5513 | * So the following would be the possible power state after this function | |
5514 | * returns successfully: | |
5515 | * S1: UFS device in Active state with VCC rail ON | |
5516 | * UniPro link in Active state | |
5517 | * All the UFS/UniPro controller clocks are ON | |
5518 | * | |
5519 | * Returns 0 for success and non-zero for failure | |
5520 | */ | |
66ec6d59 SRT |
5521 | int ufshcd_runtime_resume(struct ufs_hba *hba) |
5522 | { | |
57d104c1 | 5523 | if (!hba || !hba->is_powered) |
66ec6d59 | 5524 | return 0; |
57d104c1 SJ |
5525 | else |
5526 | return ufshcd_resume(hba, UFS_RUNTIME_PM); | |
66ec6d59 SRT |
5527 | } |
5528 | EXPORT_SYMBOL(ufshcd_runtime_resume); | |
5529 | ||
5530 | int ufshcd_runtime_idle(struct ufs_hba *hba) | |
5531 | { | |
5532 | return 0; | |
5533 | } | |
5534 | EXPORT_SYMBOL(ufshcd_runtime_idle); | |
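/*
 * Editor's usage sketch (hypothetical glue-driver code, not part of this
 * file): bus glue drivers typically stash the hba pointer with
 * dev_set_drvdata() and forward their dev_pm_ops callbacks to the helpers
 * exported above, along these lines:
 */
#if 0	/* illustration only */
static int example_ufs_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int example_ufs_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int example_ufs_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int example_ufs_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_ufs_suspend, example_ufs_resume)
	SET_RUNTIME_PM_OPS(example_ufs_runtime_suspend,
			   example_ufs_runtime_resume, NULL)
};
#endif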
5535 | ||
57d104c1 SJ |
5536 | /** |
5537 | * ufshcd_shutdown - shutdown routine | |
5538 | * @hba: per adapter instance | |
5539 | * | |
5540 | * This function would power off both UFS device and UFS link. | |
5541 | * | |
5542 | * Returns 0 always to allow force shutdown even in case of errors. | |
5543 | */ | |
5544 | int ufshcd_shutdown(struct ufs_hba *hba) | |
5545 | { | |
5546 | int ret = 0; | |
5547 | ||
5548 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) | |
5549 | goto out; | |
5550 | ||
5551 | if (pm_runtime_suspended(hba->dev)) { | |
5552 | ret = ufshcd_runtime_resume(hba); | |
5553 | if (ret) | |
5554 | goto out; | |
5555 | } | |
5556 | ||
5557 | ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); | |
5558 | out: | |
5559 | if (ret) | |
5560 | dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); | |
5561 | /* allow force shutdown even in case of errors */ | |
5562 | return 0; | |
5563 | } | |
5564 | EXPORT_SYMBOL(ufshcd_shutdown); | |
5565 | ||
7a3e97b0 | 5566 | /** |
3b1d0580 | 5567 | * ufshcd_remove - de-allocate SCSI host and host memory space |
7a3e97b0 | 5568 | * data structures |
3b1d0580 | 5569 | * @hba: per adapter instance |
7a3e97b0 | 5570 | */ |
3b1d0580 | 5571 | void ufshcd_remove(struct ufs_hba *hba) |
7a3e97b0 | 5572 | { |
cfdf9c91 | 5573 | scsi_remove_host(hba->host); |
7a3e97b0 | 5574 | /* disable interrupts */ |
2fbd009b | 5575 | ufshcd_disable_intr(hba, hba->intr_mask); |
7a3e97b0 | 5576 | ufshcd_hba_stop(hba); |
7a3e97b0 | 5577 | |
7a3e97b0 | 5578 | scsi_host_put(hba->host); |
5c0c28a8 | 5579 | |
1ab27c9c | 5580 | ufshcd_exit_clk_gating(hba); |
856b3483 ST |
5581 | if (ufshcd_is_clkscaling_enabled(hba)) |
5582 | devfreq_remove_device(hba->devfreq); | |
aa497613 | 5583 | ufshcd_hba_exit(hba); |
3b1d0580 VH |
5584 | } |
5585 | EXPORT_SYMBOL_GPL(ufshcd_remove); | |
5586 | ||
47555a5c YG |
5587 | /** |
5588 | * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) | |
5589 | * @hba: pointer to Host Bus Adapter (HBA) | |
5590 | */ | |
5591 | void ufshcd_dealloc_host(struct ufs_hba *hba) | |
5592 | { | |
5593 | scsi_host_put(hba->host); | |
5594 | } | |
5595 | EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); | |
5596 | ||
ca3d7bf9 AM |
5597 | /** |
5598 | * ufshcd_set_dma_mask - Set dma mask based on the controller | |
5599 | * addressing capability | |
5600 | * @hba: per adapter instance | |
5601 | * | |
5602 | * Returns 0 for success, non-zero for failure | |
5603 | */ | |
5604 | static int ufshcd_set_dma_mask(struct ufs_hba *hba) | |
5605 | { | |
5606 | if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { | |
5607 | if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) | |
5608 | return 0; | |
5609 | } | |
5610 | return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); | |
5611 | } | |
5612 | ||
7a3e97b0 | 5613 | /** |
5c0c28a8 | 5614 | * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) |
3b1d0580 VH |
5615 | * @dev: pointer to device handle |
5616 | * @hba_handle: driver private handle | |
7a3e97b0 SY |
5617 | * Returns 0 on success, non-zero value on failure |
5618 | */ | |
5c0c28a8 | 5619 | int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) |
7a3e97b0 SY |
5620 | { |
5621 | struct Scsi_Host *host; | |
5622 | struct ufs_hba *hba; | |
5c0c28a8 | 5623 | int err = 0; |
7a3e97b0 | 5624 | |
3b1d0580 VH |
5625 | if (!dev) { |
5626 | dev_err(dev, | |
5627 | "Invalid memory reference for dev is NULL\n"); | |
5628 | err = -ENODEV; | |
7a3e97b0 SY |
5629 | goto out_error; |
5630 | } | |
5631 | ||
7a3e97b0 SY |
5632 | host = scsi_host_alloc(&ufshcd_driver_template, |
5633 | sizeof(struct ufs_hba)); | |
5634 | if (!host) { | |
3b1d0580 | 5635 | dev_err(dev, "scsi_host_alloc failed\n"); |
7a3e97b0 | 5636 | err = -ENOMEM; |
3b1d0580 | 5637 | goto out_error; |
7a3e97b0 SY |
5638 | } |
5639 | hba = shost_priv(host); | |
7a3e97b0 | 5640 | hba->host = host; |
3b1d0580 | 5641 | hba->dev = dev; |
5c0c28a8 SRT |
5642 | *hba_handle = hba; |
5643 | ||
5644 | out_error: | |
5645 | return err; | |
5646 | } | |
5647 | EXPORT_SYMBOL(ufshcd_alloc_host); | |
5648 | ||
856b3483 ST |
5649 | static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) |
5650 | { | |
5651 | int ret = 0; | |
5652 | struct ufs_clk_info *clki; | |
5653 | struct list_head *head = &hba->clk_list_head; | |
5654 | ||
5655 | if (!head || list_empty(head)) | |
5656 | goto out; | |
5657 | ||
f06fcc71 YG |
5658 | ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); |
5659 | if (ret) | |
5660 | return ret; | |
5661 | ||
856b3483 ST |
5662 | list_for_each_entry(clki, head, list) { |
5663 | if (!IS_ERR_OR_NULL(clki->clk)) { | |
5664 | if (scale_up && clki->max_freq) { | |
5665 | if (clki->curr_freq == clki->max_freq) | |
5666 | continue; | |
5667 | ret = clk_set_rate(clki->clk, clki->max_freq); | |
5668 | if (ret) { | |
5669 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
5670 | __func__, clki->name, | |
5671 | clki->max_freq, ret); | |
5672 | break; | |
5673 | } | |
5674 | clki->curr_freq = clki->max_freq; | |
5675 | ||
5676 | } else if (!scale_up && clki->min_freq) { | |
5677 | if (clki->curr_freq == clki->min_freq) | |
5678 | continue; | |
5679 | ret = clk_set_rate(clki->clk, clki->min_freq); | |
5680 | if (ret) { | |
5681 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | |
5682 | __func__, clki->name, | |
5683 | clki->min_freq, ret); | |
5684 | break; | |
5685 | } | |
5686 | clki->curr_freq = clki->min_freq; | |
5687 | } | |
5688 | } | |
5689 | dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, | |
5690 | clki->name, clk_get_rate(clki->clk)); | |
5691 | } | |
f06fcc71 YG |
5692 | |
5693 | ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); | |
5694 | ||
856b3483 ST |
5695 | out: |
5696 | return ret; | |
5697 | } | |
5698 | ||
5699 | static int ufshcd_devfreq_target(struct device *dev, | |
5700 | unsigned long *freq, u32 flags) | |
5701 | { | |
5702 | int err = 0; | |
5703 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
5704 | ||
5705 | if (!ufshcd_is_clkscaling_enabled(hba)) | |
5706 | return -EINVAL; | |
5707 | ||
5708 | if (*freq == UINT_MAX) | |
5709 | err = ufshcd_scale_clks(hba, true); | |
5710 | else if (*freq == 0) | |
5711 | err = ufshcd_scale_clks(hba, false); | |
5712 | ||
5713 | return err; | |
5714 | } | |
5715 | ||
5716 | static int ufshcd_devfreq_get_dev_status(struct device *dev, | |
5717 | struct devfreq_dev_status *stat) | |
5718 | { | |
5719 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
5720 | struct ufs_clk_scaling *scaling = &hba->clk_scaling; | |
5721 | unsigned long flags; | |
5722 | ||
5723 | if (!ufshcd_is_clkscaling_enabled(hba)) | |
5724 | return -EINVAL; | |
5725 | ||
5726 | memset(stat, 0, sizeof(*stat)); | |
5727 | ||
5728 | spin_lock_irqsave(hba->host->host_lock, flags); | |
5729 | if (!scaling->window_start_t) | |
5730 | goto start_window; | |
5731 | ||
5732 | if (scaling->is_busy_started) | |
5733 | scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), | |
5734 | scaling->busy_start_t)); | |
5735 | ||
5736 | stat->total_time = jiffies_to_usecs((long)jiffies - | |
5737 | (long)scaling->window_start_t); | |
5738 | stat->busy_time = scaling->tot_busy_t; | |
5739 | start_window: | |
5740 | scaling->window_start_t = jiffies; | |
5741 | scaling->tot_busy_t = 0; | |
5742 | ||
5743 | if (hba->outstanding_reqs) { | |
5744 | scaling->busy_start_t = ktime_get(); | |
5745 | scaling->is_busy_started = true; | |
5746 | } else { | |
5747 | scaling->busy_start_t = ktime_set(0, 0); | |
5748 | scaling->is_busy_started = false; | |
5749 | } | |
5750 | spin_unlock_irqrestore(hba->host->host_lock, flags); | |
5751 | return 0; | |
5752 | } | |
5753 | ||
5754 | static struct devfreq_dev_profile ufs_devfreq_profile = { | |
5755 | .polling_ms = 100, | |
5756 | .target = ufshcd_devfreq_target, | |
5757 | .get_dev_status = ufshcd_devfreq_get_dev_status, | |
5758 | }; | |
5759 | ||
5c0c28a8 SRT |
5760 | /** |
5761 | * ufshcd_init - Driver initialization routine | |
5762 | * @hba: per-adapter instance | |
5763 | * @mmio_base: base register address | |
5764 | * @irq: Interrupt line of device | |
5765 | * Returns 0 on success, non-zero value on failure | |
5766 | */ | |
5767 | int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | |
5768 | { | |
5769 | int err; | |
5770 | struct Scsi_Host *host = hba->host; | |
5771 | struct device *dev = hba->dev; | |
5772 | ||
5773 | if (!mmio_base) { | |
5774 | dev_err(hba->dev, | |
5775 | "Invalid memory reference for mmio_base is NULL\n"); | |
5776 | err = -ENODEV; | |
5777 | goto out_error; | |
5778 | } | |
5779 | ||
3b1d0580 VH |
5780 | hba->mmio_base = mmio_base; |
5781 | hba->irq = irq; | |
7a3e97b0 | 5782 | |
aa497613 | 5783 | err = ufshcd_hba_init(hba); |
5c0c28a8 SRT |
5784 | if (err) |
5785 | goto out_error; | |
5786 | ||
7a3e97b0 SY |
5787 | /* Read capabilities registers */ |
5788 | ufshcd_hba_capabilities(hba); | |
5789 | ||
5790 | /* Get UFS version supported by the controller */ | |
5791 | hba->ufs_version = ufshcd_get_ufs_version(hba); | |
5792 | ||
2fbd009b SJ |
5793 | /* Get Interrupt bit mask per version */ |
5794 | hba->intr_mask = ufshcd_get_intr_mask(hba); | |
5795 | ||
ca3d7bf9 AM |
5796 | err = ufshcd_set_dma_mask(hba); |
5797 | if (err) { | |
5798 | dev_err(hba->dev, "set dma mask failed\n"); | |
5799 | goto out_disable; | |
5800 | } | |
5801 | ||
7a3e97b0 SY |
5802 | /* Allocate memory for host memory space */ |
5803 | err = ufshcd_memory_alloc(hba); | |
5804 | if (err) { | |
3b1d0580 VH |
5805 | dev_err(hba->dev, "Memory allocation failed\n"); |
5806 | goto out_disable; | |
7a3e97b0 SY |
5807 | } |
5808 | ||
5809 | /* Configure LRB */ | |
5810 | ufshcd_host_memory_configure(hba); | |
5811 | ||
5812 | host->can_queue = hba->nutrs; | |
5813 | host->cmd_per_lun = hba->nutrs; | |
5814 | host->max_id = UFSHCD_MAX_ID; | |
0ce147d4 | 5815 | host->max_lun = UFS_MAX_LUNS; |
7a3e97b0 SY |
5816 | host->max_channel = UFSHCD_MAX_CHANNEL; |
5817 | host->unique_id = host->host_no; | |
5818 | host->max_cmd_len = MAX_CDB_SIZE; | |
5819 | ||
7eb584db DR |
5820 | hba->max_pwr_info.is_valid = false; |
5821 | ||
7a3e97b0 | 5822 | /* Initialize wait queue for task management */ |
e2933132 SRT |
5823 | init_waitqueue_head(&hba->tm_wq); |
5824 | init_waitqueue_head(&hba->tm_tag_wq); | |
7a3e97b0 SY |
5825 | |
5826 | /* Initialize work queues */ | |
e8e7f271 | 5827 | INIT_WORK(&hba->eh_work, ufshcd_err_handler); |
66ec6d59 | 5828 | INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); |
7a3e97b0 | 5829 | |
6ccf44fe SJ |
5830 | /* Initialize UIC command mutex */ |
5831 | mutex_init(&hba->uic_cmd_mutex); | |
5832 | ||
5a0b0cb9 SRT |
5833 | /* Initialize mutex for device management commands */ |
5834 | mutex_init(&hba->dev_cmd.lock); | |
5835 | ||
5836 | /* Initialize device management tag acquire wait queue */ | |
5837 | init_waitqueue_head(&hba->dev_cmd.tag_wq); | |
5838 | ||
1ab27c9c | 5839 | ufshcd_init_clk_gating(hba); |
7a3e97b0 | 5840 | /* IRQ registration */ |
2953f850 | 5841 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); |
7a3e97b0 | 5842 | if (err) { |
3b1d0580 | 5843 | dev_err(hba->dev, "request irq failed\n"); |
1ab27c9c | 5844 | goto exit_gating; |
57d104c1 SJ |
5845 | } else { |
5846 | hba->is_irq_enabled = true; | |
7a3e97b0 SY |
5847 | } |
5848 | ||
3b1d0580 | 5849 | err = scsi_add_host(host, hba->dev); |
7a3e97b0 | 5850 | if (err) { |
3b1d0580 | 5851 | dev_err(hba->dev, "scsi_add_host failed\n"); |
1ab27c9c | 5852 | goto exit_gating; |
7a3e97b0 SY |
5853 | } |
5854 | ||
6ccf44fe SJ |
5855 | /* Host controller enable */ |
5856 | err = ufshcd_hba_enable(hba); | |
7a3e97b0 | 5857 | if (err) { |
6ccf44fe | 5858 | dev_err(hba->dev, "Host controller enable failed\n"); |
3b1d0580 | 5859 | goto out_remove_scsi_host; |
7a3e97b0 | 5860 | } |
6ccf44fe | 5861 | |
856b3483 ST |
5862 | if (ufshcd_is_clkscaling_enabled(hba)) { |
5863 | hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, | |
5864 | "simple_ondemand", NULL); | |
5865 | if (IS_ERR(hba->devfreq)) { | |
5866 | dev_err(hba->dev, "Unable to register with devfreq %ld\n", | |
5867 | PTR_ERR(hba->devfreq)); | |
5868 | goto out_remove_scsi_host; | |
5869 | } | |
5870 | /* Suspend devfreq until the UFS device is detected */ | |
5871 | devfreq_suspend_device(hba->devfreq); | |
5872 | hba->clk_scaling.window_start_t = 0; | |
5873 | } | |
5874 | ||
62694735 SRT |
5875 | /* Hold auto suspend until async scan completes */ |
5876 | pm_runtime_get_sync(dev); | |
5877 | ||
57d104c1 SJ |
5878 | /* |
5879 | * The device-initialize-sequence hasn't been invoked yet. | |
5880 | * Set the device to power-off state | |
5881 | */ | |
5882 | ufshcd_set_ufs_dev_poweroff(hba); | |
5883 | ||
6ccf44fe SJ |
5884 | async_schedule(ufshcd_async_scan, hba); |
5885 | ||
7a3e97b0 SY |
5886 | return 0; |
5887 | ||
3b1d0580 VH |
5888 | out_remove_scsi_host: |
5889 | scsi_remove_host(hba->host); | |
1ab27c9c ST |
5890 | exit_gating: |
5891 | ufshcd_exit_clk_gating(hba); | |
3b1d0580 | 5892 | out_disable: |
57d104c1 | 5893 | hba->is_irq_enabled = false; |
3b1d0580 | 5894 | scsi_host_put(host); |
aa497613 | 5895 | ufshcd_hba_exit(hba); |
3b1d0580 VH |
5896 | out_error: |
5897 | return err; | |
5898 | } | |
5899 | EXPORT_SYMBOL_GPL(ufshcd_init); | |
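/*
 * Editor's usage sketch (hypothetical glue-driver probe, not part of this
 * file): a bus glue driver is expected to allocate the hba with
 * ufshcd_alloc_host() and then hand the mapped registers and IRQ to
 * ufshcd_init(); the devm_* helpers and names below are illustrative
 * assumptions only.
 */
#if 0	/* illustration only */
static int example_ufs_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *mmio_base;
	struct ufs_hba *hba;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(dev, &hba);
	if (err)
		return err;

	err = ufshcd_init(hba, mmio_base, irq);
	if (err)
		ufshcd_dealloc_host(hba);

	return err;
}
#endif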
5900 | ||
3b1d0580 VH |
5901 | MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); |
5902 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); | |
e0eca63e | 5903 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); |
7a3e97b0 SY |
5904 | MODULE_LICENSE("GPL"); |
5905 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); |