Commit | Line | Data |
---|---|---|
67351119 | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
e0eca63e VH |
2 | /* |
3 | * Universal Flash Storage Host controller driver | |
e0eca63e | 4 | * Copyright (C) 2011-2013 Samsung India Software Operations |
dc3c8d3a | 5 | * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. |
e0eca63e VH |
6 | * |
7 | * Authors: | |
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | |
e0eca63e VH |
10 | */ |
11 | ||
12 | #ifndef _UFSHCD_H | |
13 | #define _UFSHCD_H | |
14 | ||
15 | #include <linux/module.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/init.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/io.h> | |
20 | #include <linux/delay.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/spinlock.h> | |
a3cd5ec5 | 23 | #include <linux/rwsem.h> |
e0eca63e VH |
24 | #include <linux/workqueue.h> |
25 | #include <linux/errno.h> | |
26 | #include <linux/types.h> | |
27 | #include <linux/wait.h> | |
28 | #include <linux/bitops.h> | |
29 | #include <linux/pm_runtime.h> | |
30 | #include <linux/clk.h> | |
6ccf44fe | 31 | #include <linux/completion.h> |
aa497613 | 32 | #include <linux/regulator/consumer.h> |
5a244e0e | 33 | #include <linux/bitfield.h> |
2c75f9a5 | 34 | #include <linux/devfreq.h> |
70297a8a | 35 | #include <linux/keyslot-manager.h> |
f37aabcf | 36 | #include "unipro.h" |
e0eca63e VH |
37 | |
38 | #include <asm/irq.h> | |
39 | #include <asm/byteorder.h> | |
40 | #include <scsi/scsi.h> | |
41 | #include <scsi/scsi_cmnd.h> | |
42 | #include <scsi/scsi_host.h> | |
43 | #include <scsi/scsi_tcq.h> | |
44 | #include <scsi/scsi_dbg.h> | |
45 | #include <scsi/scsi_eh.h> | |
46 | ||
47 | #include "ufs.h" | |
c28c00ba | 48 | #include "ufs_quirks.h" |
e0eca63e VH |
49 | #include "ufshci.h" |
50 | ||
51 | #define UFSHCD "ufshcd" | |
52 | #define UFSHCD_DRIVER_VERSION "0.2" | |
53 | ||
5c0c28a8 SRT |
54 | struct ufs_hba; |
55 | ||
/* Internal (non-SCSI) device management command types issued by the driver */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,	/* NOP OUT UPIU */
	DEV_CMD_TYPE_QUERY	= 0x1,	/* Query request UPIU */
};
60 | ||
/*
 * enum ufs_event_type - categories of events kept in the driver's event
 * history (one cyclic buffer per type, UFS_EVT_CNT buffers in total;
 * see struct ufs_event_hist / struct ufs_stats).
 */
enum ufs_event_type {
	/* uic specific errors */
	UFS_EVT_PA_ERR = 0,
	UFS_EVT_DL_ERR,
	UFS_EVT_NL_ERR,
	UFS_EVT_TL_ERR,
	UFS_EVT_DME_ERR,

	/* fatal errors */
	UFS_EVT_AUTO_HIBERN8_ERR,
	UFS_EVT_FATAL_ERR,
	UFS_EVT_LINK_STARTUP_FAIL,
	UFS_EVT_RESUME_ERR,
	UFS_EVT_SUSPEND_ERR,

	/* abnormal events */
	UFS_EVT_DEV_RESET,
	UFS_EVT_HOST_RESET,
	UFS_EVT_ABORT,

	/* number of event types; also sizes the per-type history array */
	UFS_EVT_CNT,
};
83 | ||
/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @done: UIC command completion, completed when the command finishes
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	struct completion done;
};
99 | ||
/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

/* Predicates over the enum ufs_pm_op passed to suspend/resume paths */
#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
110 | ||
/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
	UIC_LINK_BROKEN_STATE	= 3, /* Link is in broken state */
};
118 | ||
/* Query / set the UniPro link state cached in hba->uic_link_state */
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
				   UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
				    UIC_LINK_BROKEN_STATE)
/* Query / set the cached UFS device power mode (hba->curr_dev_pwr_mode) */
#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_set_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_deepsleep(h) \
	((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings, except DeepSleep
 * which is lower than PowerDown with power on but not PowerDown with
 * power off.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
	UFS_PM_LVL_6, /* UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE */
	UFS_PM_LVL_MAX
};

/**
 * struct ufs_pm_lvl_states - device power mode / link state pair that a
 * given enum ufs_pm_level maps to
 * @dev_state: UFS device power mode for this level
 * @link_state: UniPro link state for this level
 */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
172 | ||
/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
 * @data_unit_num: the data unit number for the first block for inline crypto
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;
#ifdef CONFIG_SCSI_UFS_CRYPTO
	int crypto_key_slot;
	u64 data_unit_num;
#endif

	bool req_abort_skip;
};
226 | ||
/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
238 | ||
/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: query request/response data for DEV_CMD_TYPE_QUERY commands
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};
e0eca63e | 251 | |
/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @keep_link_active: indicates that the clk should not be disabled if
 *		      link is active
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool keep_link_active;
	bool enabled;
};
274 | ||
/* Stage at which a variant-ops notify callback is invoked: before or after */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};
/* UniPro PA layer attributes describing a link power mode configuration */
struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

/* A PA layer attribute set plus a flag telling whether it is populated */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
294 | ||
/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *			is carried out to allow vendor specific capabilities
 *			to be set.
 * @setup_xfer_req: called before any transfer request is issued
 *                  to set some things
 * @setup_task_mgmt: called before any task management request is issued
 *                  to set some things
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @fixup_dev_quirks: called to modify device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 * @program_key: program or evict an inline encryption key
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void    (*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int     (*setup_regulators)(struct ufs_hba *, bool);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
					enum ufs_notify_change_status status,
					struct ufs_pa_layer_attr *,
					struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
					enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	void	(*fixup_dev_quirks)(struct ufs_hba *hba);
	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	int	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
					struct devfreq_dev_profile *profile,
					void *data);
	int	(*program_key)(struct ufs_hba *hba,
			       const union ufs_crypto_cfg_entry *cfg, int slot);
};
359 | ||
/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,	/* gating requested, gate_work pending */
	REQ_CLKS_ON,	/* ungating requested, ungate_work pending */
};
367 | ||
/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_attr
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks.
 * @clk_gating_workq: dedicated workqueue the gate/ungate work items run on
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};
396 | ||
/**
 * struct ufs_saved_pwr_info - power mode remembered across clock scaling
 * @info: PA layer attributes of the saved power mode
 * @is_valid: true when @info holds a previously captured mode
 */
struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};
401 | ||
/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @min_gear: lowest HS gear to scale down to
 * @is_allowed: tracks if scaling is currently allowed or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	ktime_t window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	u32 min_gear;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};
436 | ||
/* Number of entries kept per event-type history ring */
#define UFS_EVENT_HIST_LENGTH 8
/**
 * struct ufs_event_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @val: cyclic buffer of recorded event values
 * @tstamp: cyclic buffer for time stamp
 */
struct ufs_event_hist {
	int pos;
	u32 val[UFS_EVENT_HIST_LENGTH];
	ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
};
449 | ||
/**
 * struct ufs_stats - keeps usage/err statistics
 * @last_intr_status: record the last interrupt status.
 * @last_intr_ts: record the last interrupt timestamp.
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 * reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 * Clear after the first successful command completion.
 * @event: cyclic event history buffers, one per enum ufs_event_type
 */
struct ufs_stats {
	u32 last_intr_status;
	ktime_t last_intr_ts;

	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;
	struct ufs_event_hist event[UFS_EVT_CNT];
};
467 | ||
/* Host controller deviations from the UFSHCI standard (hba->quirks bits) */
enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,

	/*
	 * This quirk needs to be enabled if host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,

	/*
	 * This quirk needs to be enabled if host controller cannot be
	 * enabled via HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		= 1 << 11,

	/*
	 * This quirk needs to disable manual flush for write booster
	 */
	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,

};
550 | ||
/* Optional host controller driver capabilities (hba->caps bits) */
enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to be enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,

	/*
	 * This capability allows the host controller driver to turn-on
	 * WriteBooster, if the underlying device supports it and is
	 * provisioned to be used. This would increase the write performance.
	 */
	UFSHCD_CAP_WB_EN				= 1 << 7,

	/*
	 * This capability allows the host controller driver to use the
	 * inline crypto engine, if it is present
	 */
	UFSHCD_CAP_CRYPTO				= 1 << 8,

	/*
	 * This capability allows the controller regulators to be put into
	 * lpm mode aggressively during clock gating.
	 * This would increase power savings.
	 */
	UFSHCD_CAP_AGGR_POWER_COLLAPSE			= 1 << 9,

	/*
	 * This capability allows the host controller driver to use DeepSleep,
	 * if it is supported by the UFS device. The host controller driver must
	 * support device hardware reset via the hba->device_reset() callback,
	 * in order to exit DeepSleep state.
	 */
	UFSHCD_CAP_DEEPSLEEP				= 1 << 10,
};
615 | ||
/**
 * struct ufs_hba_variant_params - variant specific tunable parameters
 * @devfreq_profile: devfreq profile used for clock scaling
 * @ondemand_data: parameters for the devfreq simple-ondemand governor
 * @hba_enable_delay_us: host controller enable delay, in microseconds
 * @wb_flush_threshold: WriteBooster flush threshold (presumably the
 *	available-buffer level that triggers a flush — verify against users)
 */
struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};
622 | ||
e0eca63e VH |
623 | /** |
624 | * struct ufs_hba - per adapter private structure | |
625 | * @mmio_base: UFSHCI base register address | |
626 | * @ucdl_base_addr: UFS Command Descriptor base address | |
627 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address | |
628 | * @utmrdl_base_addr: UTP Task Management Descriptor base address | |
629 | * @ucdl_dma_addr: UFS Command Descriptor DMA address | |
630 | * @utrdl_dma_addr: UTRDL DMA address | |
631 | * @utmrdl_dma_addr: UTMRDL DMA address | |
632 | * @host: Scsi_Host instance of the driver | |
633 | * @dev: device handle | |
634 | * @lrb: local reference block | |
7252a360 | 635 | * @cmd_queue: Used to allocate command tags from hba->host->tag_set. |
e0eca63e VH |
636 | * @outstanding_tasks: Bits representing outstanding task requests |
637 | * @outstanding_reqs: Bits representing outstanding transfer requests | |
638 | * @capabilities: UFS Controller Capabilities | |
639 | * @nutrs: Transfer Request Queue depth supported by controller | |
640 | * @nutmrs: Task Management Queue depth supported by controller | |
641 | * @ufs_version: UFS Version to which controller complies | |
5c0c28a8 SRT |
642 | * @vops: pointer to variant specific operations |
643 | * @priv: pointer to variant specific private data | |
e0eca63e VH |
644 | * @irq: Irq number of the controller |
645 | * @active_uic_cmd: handle of active UIC command | |
6ccf44fe | 646 | * @uic_cmd_mutex: mutex for uic command |
69a6c269 BVA |
647 | * @tmf_tag_set: TMF tag set. |
648 | * @tmf_queue: Used to allocate TMF tags. | |
53b3d9c3 | 649 | * @pwr_done: completion for power mode change |
e0eca63e | 650 | * @ufshcd_state: UFSHCD states |
3441da7d | 651 | * @eh_flags: Error handling flags |
2fbd009b | 652 | * @intr_mask: Interrupt Mask Bits |
66ec6d59 | 653 | * @ee_ctrl_mask: Exception event control mask |
1d337ec2 | 654 | * @is_powered: flag to check if HBA is powered |
4db7a236 | 655 | * @eh_wq: Workqueue that eh_work works on |
e8e7f271 | 656 | * @eh_work: Worker to handle UFS errors that require s/w attention |
66ec6d59 | 657 | * @eeh_work: Worker to handle exception events |
e0eca63e | 658 | * @errors: HBA errors |
e8e7f271 SRT |
659 | * @uic_error: UFS interconnect layer error status |
660 | * @saved_err: sticky error mask | |
661 | * @saved_uic_err: sticky UIC error mask | |
4db7a236 | 662 | * @force_reset: flag to force eh_work perform a full reset |
2355b66e | 663 | * @force_pmc: flag to force a power mode change |
2df74b69 | 664 | * @silence_err_logs: flag to silence error logs |
5a0b0cb9 | 665 | * @dev_cmd: ufs device management command information |
cad2e03d | 666 | * @last_dme_cmd_tstamp: time stamp of the last completed DME command |
66ec6d59 | 667 | * @auto_bkops_enabled: to track whether bkops is enabled in device |
aa497613 | 668 | * @vreg_info: UFS device voltage regulator information |
c6e79dac | 669 | * @clk_list_head: UFS host controller clocks list node head |
7eb584db DR |
670 | * @pwr_info: holds current power mode |
671 | * @max_pwr_info: keeps the device max valid pwm | |
a4b0e8a4 | 672 | * @desc_size: descriptor sizes reported by device |
afdfff59 YG |
673 | * @urgent_bkops_lvl: keeps track of urgent bkops level for device |
674 | * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for | |
675 | * device is known or not. | |
38135535 | 676 | * @scsi_block_reqs_cnt: reference counting for scsi block requests |
70297a8a ST |
677 | * @crypto_capabilities: Content of crypto capabilities register (0x100) |
678 | * @crypto_cap_array: Array of crypto capabilities | |
679 | * @crypto_cfg_register: Start of the crypto cfg array | |
680 | * @ksm: the keyslot manager tied to this hba | |
e0eca63e VH |
681 | */ |
struct ufs_hba {
	/* UFSHCI register space; accessed via ufshcd_readl()/ufshcd_writel() */
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct request_queue *cmd_queue;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;
	struct scsi_device *sdev_rpmb;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	/* NOTE(review): presumably per-slot bitmaps of in-flight TM tasks and
	 * transfer requests — confirm against ufshcd.c usage. */
	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	unsigned char desc_size[QUERY_DESC_IDN_MAX];
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	bool wb_buf_flush_enabled;
	bool wb_enabled;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_keyslot_manager ksm;
#endif
};
815 | ||
1ab27c9c ST |
816 | /* Returns true if clocks can be gated. Otherwise false */ |
817 | static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba) | |
818 | { | |
819 | return hba->caps & UFSHCD_CAP_CLK_GATING; | |
820 | } | |
821 | static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba) | |
822 | { | |
823 | return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; | |
824 | } | |
fcb0c4b0 | 825 | static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba) |
856b3483 ST |
826 | { |
827 | return hba->caps & UFSHCD_CAP_CLK_SCALING; | |
828 | } | |
374a246e SJ |
829 | static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) |
830 | { | |
831 | return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; | |
832 | } | |
49615ba1 SC |
833 | static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba) |
834 | { | |
835 | return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND; | |
836 | } | |
374a246e | 837 | |
b852190e YG |
838 | static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) |
839 | { | |
4b9ffb5a JP |
840 | /* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/ |
841 | #ifndef CONFIG_SCSI_UFS_DWC | |
b852190e YG |
842 | if ((hba->caps & UFSHCD_CAP_INTR_AGGR) && |
843 | !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR)) | |
844 | return true; | |
845 | else | |
846 | return false; | |
4b9ffb5a JP |
847 | #else |
848 | return true; | |
849 | #endif | |
b852190e YG |
850 | } |
851 | ||
dd7143e2 CG |
852 | static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba) |
853 | { | |
854 | return !!(ufshcd_is_link_hibern8(hba) && | |
855 | (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE)); | |
856 | } | |
857 | ||
ee5f1042 SC |
858 | static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba) |
859 | { | |
8da76f71 AH |
860 | return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) && |
861 | !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8); | |
ee5f1042 SC |
862 | } |
863 | ||
5a244e0e SC |
864 | static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba) |
865 | { | |
866 | return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false; | |
867 | } | |
868 | ||
3d17b9b5 AD |
869 | static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba) |
870 | { | |
871 | return hba->caps & UFSHCD_CAP_WB_EN; | |
872 | } | |
873 | ||
b873a275 SJ |
/* MMIO accessors for the UFSHCI register space (offset from mmio_base) */
#define ufshcd_writel(hba, val, reg)   \
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))
878 | ||
e785060e DR |
/**
 * ufshcd_rmwl - read modify write into a register
 * @hba: per adapter instance
 * @mask: mask to apply on read value
 * @val: actual value to write
 * @reg: register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}
895 | ||
/* Host controller lifecycle and helper APIs (implemented in ufshcd.c) */
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
e0eca63e | 910 | |
68078d5c DR |
/*
 * Compile-time sanity check: the aligned UPIU slot must be large enough to
 * hold a general UPIU request plus the maximum query descriptor.  Generates
 * no runtime code; BUILD_BUG_ON fails the build if the condition holds.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
916 | ||
1ce5898a YG |
/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
927 | ||
/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
4e768e76 | 937 | static inline bool ufshcd_keep_autobkops_enabled_except_suspend( |
938 | struct ufs_hba *hba) | |
939 | { | |
940 | return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND; | |
941 | } | |
1ce5898a | 942 | |
e31011ab | 943 | static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba) |
6f8d5a6a SC |
944 | { |
945 | if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED) | |
946 | return hba->dev_info.wb_dedicated_lu; | |
947 | return 0; | |
948 | } | |
949 | ||
66ec6d59 SRT |
/* Power-management entry points and raw DME attribute accessors */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			      int agreed_gear,
			      int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */
971 | ||
972 | static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, | |
973 | u32 mib_val) | |
974 | { | |
975 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | |
976 | mib_val, DME_LOCAL); | |
977 | } | |
978 | ||
979 | static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, | |
980 | u32 mib_val) | |
981 | { | |
982 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | |
983 | mib_val, DME_LOCAL); | |
984 | } | |
985 | ||
986 | static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, | |
987 | u32 mib_val) | |
988 | { | |
989 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | |
990 | mib_val, DME_PEER); | |
991 | } | |
992 | ||
993 | static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, | |
994 | u32 mib_val) | |
995 | { | |
996 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | |
997 | mib_val, DME_PEER); | |
998 | } | |
999 | ||
1000 | static inline int ufshcd_dme_get(struct ufs_hba *hba, | |
1001 | u32 attr_sel, u32 *mib_val) | |
1002 | { | |
1003 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); | |
1004 | } | |
1005 | ||
1006 | static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, | |
1007 | u32 attr_sel, u32 *mib_val) | |
1008 | { | |
1009 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); | |
1010 | } | |
1011 | ||
f37aabcf YG |
1012 | static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info) |
1013 | { | |
1014 | return (pwr_info->pwr_rx == FAST_MODE || | |
1015 | pwr_info->pwr_rx == FASTAUTO_MODE) && | |
1016 | (pwr_info->pwr_tx == FAST_MODE || | |
1017 | pwr_info->pwr_tx == FASTAUTO_MODE); | |
1018 | } | |
1019 | ||
984eaac1 SC |
/* Disable the host's TX line-control-check (LCC) via PA_LOCAL_TX_LCC_ENABLE */
static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
1024 | ||
/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, u8 index, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);

/* String-descriptor decode mode for ufshcd_read_string_desc() */
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);
2238d31c | 1049 | |
1ab27c9c ST |
/* Clock-gating reference counting and miscellaneous exported helpers */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
	int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);
1066 | ||
0263bcd0 YG |
1067 | /* Wrapper functions for safely calling variant operations */ |
1068 | static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) | |
1069 | { | |
1070 | if (hba->vops) | |
1071 | return hba->vops->name; | |
1072 | return ""; | |
1073 | } | |
1074 | ||
1075 | static inline int ufshcd_vops_init(struct ufs_hba *hba) | |
1076 | { | |
1077 | if (hba->vops && hba->vops->init) | |
1078 | return hba->vops->init(hba); | |
1079 | ||
1080 | return 0; | |
1081 | } | |
1082 | ||
1083 | static inline void ufshcd_vops_exit(struct ufs_hba *hba) | |
1084 | { | |
1085 | if (hba->vops && hba->vops->exit) | |
1086 | return hba->vops->exit(hba); | |
1087 | } | |
1088 | ||
1089 | static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba) | |
1090 | { | |
1091 | if (hba->vops && hba->vops->get_ufs_hci_version) | |
1092 | return hba->vops->get_ufs_hci_version(hba); | |
1093 | ||
1094 | return ufshcd_readl(hba, REG_UFS_VERSION); | |
1095 | } | |
1096 | ||
f06fcc71 YG |
1097 | static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, |
1098 | bool up, enum ufs_notify_change_status status) | |
0263bcd0 YG |
1099 | { |
1100 | if (hba->vops && hba->vops->clk_scale_notify) | |
f06fcc71 YG |
1101 | return hba->vops->clk_scale_notify(hba, up, status); |
1102 | return 0; | |
0263bcd0 YG |
1103 | } |
1104 | ||
1e879e8f SJ |
1105 | static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on, |
1106 | enum ufs_notify_change_status status) | |
0263bcd0 YG |
1107 | { |
1108 | if (hba->vops && hba->vops->setup_clocks) | |
1e879e8f | 1109 | return hba->vops->setup_clocks(hba, on, status); |
0263bcd0 YG |
1110 | return 0; |
1111 | } | |
1112 | ||
1113 | static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status) | |
1114 | { | |
1115 | if (hba->vops && hba->vops->setup_regulators) | |
1116 | return hba->vops->setup_regulators(hba, status); | |
1117 | ||
1118 | return 0; | |
1119 | } | |
1120 | ||
1121 | static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba, | |
1122 | bool status) | |
1123 | { | |
1124 | if (hba->vops && hba->vops->hce_enable_notify) | |
1125 | return hba->vops->hce_enable_notify(hba, status); | |
1126 | ||
1127 | return 0; | |
1128 | } | |
1129 | static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba, | |
1130 | bool status) | |
1131 | { | |
1132 | if (hba->vops && hba->vops->link_startup_notify) | |
1133 | return hba->vops->link_startup_notify(hba, status); | |
1134 | ||
1135 | return 0; | |
1136 | } | |
1137 | ||
1138 | static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, | |
1139 | bool status, | |
1140 | struct ufs_pa_layer_attr *dev_max_params, | |
1141 | struct ufs_pa_layer_attr *dev_req_params) | |
1142 | { | |
1143 | if (hba->vops && hba->vops->pwr_change_notify) | |
1144 | return hba->vops->pwr_change_notify(hba, status, | |
1145 | dev_max_params, dev_req_params); | |
1146 | ||
1147 | return -ENOTSUPP; | |
1148 | } | |
1149 | ||
0e675efa KK |
1150 | static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, |
1151 | bool is_scsi_cmd) | |
1152 | { | |
1153 | if (hba->vops && hba->vops->setup_xfer_req) | |
1154 | return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); | |
1155 | } | |
1156 | ||
d2877be4 KK |
1157 | static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, |
1158 | int tag, u8 tm_function) | |
1159 | { | |
1160 | if (hba->vops && hba->vops->setup_task_mgmt) | |
1161 | return hba->vops->setup_task_mgmt(hba, tag, tm_function); | |
1162 | } | |
1163 | ||
ee32c909 KK |
1164 | static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba, |
1165 | enum uic_cmd_dme cmd, | |
1166 | enum ufs_notify_change_status status) | |
1167 | { | |
1168 | if (hba->vops && hba->vops->hibern8_notify) | |
1169 | return hba->vops->hibern8_notify(hba, cmd, status); | |
1170 | } | |
1171 | ||
09750066 | 1172 | static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba) |
56d4a186 SJ |
1173 | { |
1174 | if (hba->vops && hba->vops->apply_dev_quirks) | |
09750066 | 1175 | return hba->vops->apply_dev_quirks(hba); |
56d4a186 SJ |
1176 | return 0; |
1177 | } | |
1178 | ||
c28c00ba SC |
1179 | static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba) |
1180 | { | |
1181 | if (hba->vops && hba->vops->fixup_dev_quirks) | |
1182 | hba->vops->fixup_dev_quirks(hba); | |
1183 | } | |
1184 | ||
0263bcd0 YG |
1185 | static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op) |
1186 | { | |
1187 | if (hba->vops && hba->vops->suspend) | |
1188 | return hba->vops->suspend(hba, op); | |
1189 | ||
1190 | return 0; | |
1191 | } | |
1192 | ||
1193 | static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op) | |
1194 | { | |
1195 | if (hba->vops && hba->vops->resume) | |
1196 | return hba->vops->resume(hba, op); | |
1197 | ||
1198 | return 0; | |
1199 | } | |
1200 | ||
6e3fd44d YG |
1201 | static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) |
1202 | { | |
1203 | if (hba->vops && hba->vops->dbg_register_dump) | |
1204 | hba->vops->dbg_register_dump(hba); | |
1205 | } | |
1206 | ||
d8d9f793 BA |
1207 | static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) |
1208 | { | |
a5fe372d | 1209 | if (hba->vops && hba->vops->device_reset) { |
151f1b66 AH |
1210 | int err = hba->vops->device_reset(hba); |
1211 | ||
1212 | if (!err) | |
1213 | ufshcd_set_ufs_dev_active(hba); | |
1214 | if (err != -EOPNOTSUPP) | |
e965e5e0 | 1215 | ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); |
a5fe372d | 1216 | } |
d8d9f793 BA |
1217 | } |
1218 | ||
2c75f9a5 AD |
1219 | static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba, |
1220 | struct devfreq_dev_profile | |
1221 | *profile, void *data) | |
1222 | { | |
1223 | if (hba->vops && hba->vops->config_scaling_param) | |
1224 | hba->vops->config_scaling_param(hba, profile, data); | |
1225 | } | |
1226 | ||
cbb6813e SN |
1227 | extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; |
1228 | ||
d829fc8a SN |
1229 | /* |
1230 | * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN | |
1231 | * @scsi_lun: scsi LUN id | |
1232 | * | |
1233 | * Returns UPIU LUN id | |
1234 | */ | |
1235 | static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) | |
1236 | { | |
1237 | if (scsi_is_wlun(scsi_lun)) | |
1238 | return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) | |
1239 | | UFS_UPIU_WLUN_ID; | |
1240 | else | |
1241 | return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; | |
1242 | } | |
1243 | ||
ba80917d TW |
/* Hex-dump @len bytes of the HCI register space starting at @offset,
 * prefixing each output line with @prefix. */
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);
1246 | ||
e0eca63e | 1247 | #endif /* End of Header */ |