Commit | Line | Data |
---|---|---|
67351119 | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
e0eca63e VH |
2 | /* |
3 | * Universal Flash Storage Host controller driver | |
e0eca63e | 4 | * Copyright (C) 2011-2013 Samsung India Software Operations |
dc3c8d3a | 5 | * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. |
e0eca63e VH |
6 | * |
7 | * Authors: | |
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | |
e0eca63e VH |
10 | */ |
11 | ||
12 | #ifndef _UFSHCD_H | |
13 | #define _UFSHCD_H | |
14 | ||
5a244e0e | 15 | #include <linux/bitfield.h> |
1e8d44bd | 16 | #include <linux/blk-crypto-profile.h> |
3f06f780 BVA |
17 | #include <linux/blk-mq.h> |
18 | #include <linux/devfreq.h> | |
045da307 | 19 | #include <linux/fault-inject.h> |
6ce2082f | 20 | #include <linux/debugfs.h> |
e02288e0 | 21 | #include <linux/msi.h> |
3f06f780 | 22 | #include <linux/pm_runtime.h> |
f3e57da5 | 23 | #include <linux/dma-direction.h> |
3f06f780 | 24 | #include <scsi/scsi_device.h> |
cce9fd60 | 25 | #include <scsi/scsi_host.h> |
dd11376b BVA |
26 | #include <ufs/unipro.h> |
27 | #include <ufs/ufs.h> | |
28 | #include <ufs/ufs_quirks.h> | |
29 | #include <ufs/ufshci.h> | |
e0eca63e VH |
30 | |
31 | #define UFSHCD "ufshcd" | |
e0eca63e | 32 | |
858231bd | 33 | struct scsi_device; |
5c0c28a8 SRT |
34 | struct ufs_hba; |
35 | ||
5a0b0cb9 SRT |
36 | enum dev_cmd_type { |
37 | DEV_CMD_TYPE_NOP = 0x0, | |
68078d5c | 38 | DEV_CMD_TYPE_QUERY = 0x1, |
6ff265fc | 39 | DEV_CMD_TYPE_RPMB = 0x2, |
5a0b0cb9 SRT |
40 | }; |
41 | ||
e965e5e0 SC |
42 | enum ufs_event_type { |
43 | /* uic specific errors */ | |
44 | UFS_EVT_PA_ERR = 0, | |
45 | UFS_EVT_DL_ERR, | |
46 | UFS_EVT_NL_ERR, | |
47 | UFS_EVT_TL_ERR, | |
48 | UFS_EVT_DME_ERR, | |
49 | ||
50 | /* fatal errors */ | |
51 | UFS_EVT_AUTO_HIBERN8_ERR, | |
52 | UFS_EVT_FATAL_ERR, | |
53 | UFS_EVT_LINK_STARTUP_FAIL, | |
54 | UFS_EVT_RESUME_ERR, | |
55 | UFS_EVT_SUSPEND_ERR, | |
b294ff3e AD |
56 | UFS_EVT_WL_SUSP_ERR, |
57 | UFS_EVT_WL_RES_ERR, | |
e965e5e0 SC |
58 | |
59 | /* abnormal events */ | |
60 | UFS_EVT_DEV_RESET, | |
61 | UFS_EVT_HOST_RESET, | |
62 | UFS_EVT_ABORT, | |
63 | ||
64 | UFS_EVT_CNT, | |
65 | }; | |
66 | ||
e0eca63e VH |
67 | /** |
68 | * struct uic_command - UIC command structure | |
69 | * @command: UIC command | |
70 | * @argument1: UIC command argument 1 | |
71 | * @argument2: UIC command argument 2 | |
72 | * @argument3: UIC command argument 3 | |
0f52fcb9 | 73 | * @cmd_active: Indicate if UIC command is outstanding |
6ccf44fe | 74 | * @done: UIC command completion |
e0eca63e VH |
75 | */ |
76 | struct uic_command { | |
93ef12d9 BVA |
77 | const u32 command; |
78 | const u32 argument1; | |
e0eca63e VH |
79 | u32 argument2; |
80 | u32 argument3; | |
0f52fcb9 | 81 | int cmd_active; |
6ccf44fe | 82 | struct completion done; |
e0eca63e VH |
83 | }; |
84 | ||
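/*
 * Illustrative sketch (not part of this header): a DME_GET UIC command is
 * built by filling the fields above. example_dme_get_cmd() is a hypothetical
 * helper, UIC_CMD_DME_GET comes from <ufs/ufshci.h>, and the caller still
 * owns initialization of @done before issuing the command.
 */
static inline struct uic_command example_dme_get_cmd(u32 attr_sel)
{
	return (struct uic_command) {
		.command = UIC_CMD_DME_GET,
		.argument1 = attr_sel,	/* MIB attribute selector */
	};
}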
57d104c1 SJ |
85 | /* Used to differentiate the power management options */ |
86 | enum ufs_pm_op { | |
87 | UFS_RUNTIME_PM, | |
88 | UFS_SYSTEM_PM, | |
89 | UFS_SHUTDOWN_PM, | |
90 | }; | |
91 | ||
57d104c1 SJ |
92 | /* Host <-> Device UniPro Link state */ |
93 | enum uic_link_state { | |
94 | UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */ | |
95 | UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */ | |
96 | UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */ | |
4db7a236 | 97 | UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */ |
57d104c1 SJ |
98 | }; |
99 | ||
100 | #define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE) | |
101 | #define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \ | |
102 | UIC_LINK_ACTIVE_STATE) | |
103 | #define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \ | |
104 | UIC_LINK_HIBERN8_STATE) | |
4db7a236 CG |
105 | #define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \ |
106 | UIC_LINK_BROKEN_STATE) | |
57d104c1 SJ |
107 | #define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE) |
108 | #define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \ | |
109 | UIC_LINK_ACTIVE_STATE) | |
110 | #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \ | |
111 | UIC_LINK_HIBERN8_STATE) | |
4db7a236 CG |
112 | #define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \ |
113 | UIC_LINK_BROKEN_STATE) | |
57d104c1 | 114 | |
1764fa2a SC |
115 | #define ufshcd_set_ufs_dev_active(h) \ |
116 | ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) | |
117 | #define ufshcd_set_ufs_dev_sleep(h) \ | |
118 | ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) | |
119 | #define ufshcd_set_ufs_dev_poweroff(h) \ | |
120 | ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) | |
fe1d4c2e AH |
121 | #define ufshcd_set_ufs_dev_deepsleep(h) \ |
122 | ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE) | |
1764fa2a SC |
123 | #define ufshcd_is_ufs_dev_active(h) \ |
124 | ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) | |
125 | #define ufshcd_is_ufs_dev_sleep(h) \ | |
126 | ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) | |
127 | #define ufshcd_is_ufs_dev_poweroff(h) \ | |
128 | ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) | |
fe1d4c2e AH |
129 | #define ufshcd_is_ufs_dev_deepsleep(h) \ |
130 | ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE) | |
1764fa2a | 131 | |
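/*
 * Illustrative sketch: the helpers above are plain state tests and updates on
 * struct ufs_hba. A hypothetical example_link_is_usable() check could be
 * built from them like this (kept as a macro because struct ufs_hba is only
 * defined further down in this header).
 */
#define example_link_is_usable(hba) \
	(ufshcd_is_link_active(hba) || ufshcd_is_link_hibern8(hba))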
57d104c1 SJ |
132 | /* |
133 | * UFS Power management levels. | |
fe1d4c2e AH |
134 | * Each level is in increasing order of power savings, except DeepSleep |
135 | * which is lower than PowerDown with power on but not PowerDown with | |
136 | * power off. | |
57d104c1 SJ |
137 | */ |
138 | enum ufs_pm_level { | |
e2ac7ab2 BVA |
139 | UFS_PM_LVL_0, |
140 | UFS_PM_LVL_1, | |
141 | UFS_PM_LVL_2, | |
142 | UFS_PM_LVL_3, | |
143 | UFS_PM_LVL_4, | |
144 | UFS_PM_LVL_5, | |
145 | UFS_PM_LVL_6, | |
57d104c1 SJ |
146 | UFS_PM_LVL_MAX |
147 | }; | |
148 | ||
149 | struct ufs_pm_lvl_states { | |
150 | enum ufs_dev_pwr_mode dev_state; | |
151 | enum uic_link_state link_state; | |
152 | }; | |
153 | ||
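/*
 * Illustrative sketch: the core driver pairs each ufs_pm_level with a
 * (device power mode, link state) tuple roughly along these lines. This is a
 * hedged example table, not the authoritative mapping kept in ufshcd.c.
 */
static const struct ufs_pm_lvl_states example_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = { UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE },
	[UFS_PM_LVL_1] = { UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE },
	[UFS_PM_LVL_2] = { UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE },
	[UFS_PM_LVL_3] = { UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE },
	[UFS_PM_LVL_4] = { UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE },
	[UFS_PM_LVL_5] = { UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE },
	[UFS_PM_LVL_6] = { UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE },
};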
e0eca63e VH |
154 | /** |
155 | * struct ufshcd_lrb - local reference block | |
156 | * @utr_descriptor_ptr: UTRD address of the command | |
5a0b0cb9 | 157 | * @ucd_req_ptr: UCD address of the command |
e0eca63e VH |
158 | * @ucd_rsp_ptr: Response UPIU address for this command |
159 | * @ucd_prdt_ptr: PRDT address of the command | |
ff8e20c6 DR |
160 | * @utrd_dma_addr: UTRD dma address for debug |
161 | * @ucd_prdt_dma_addr: PRDT dma address for debug | |
162 | * @ucd_rsp_dma_addr: UPIU response dma address for debug | |
163 | * @ucd_req_dma_addr: UPIU request dma address for debug | |
e0eca63e | 164 | * @cmd: pointer to SCSI command |
e0eca63e VH |
165 | * @scsi_status: SCSI status of the command |
166 | * @command_type: SCSI, UFS, Query. | |
167 | * @task_tag: Task tag of the command | |
168 | * @lun: LUN of the command | |
5a0b0cb9 | 169 | * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) |
0f85e747 DL |
170 | * @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC) |
171 | * @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock) | |
172 | * @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC) | |
173 | * @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock) | |
df043c74 ST |
174 | * @crypto_key_slot: the key slot to use for inline crypto (-1 if none) |
175 | * @data_unit_num: the data unit number for the first block for inline crypto | |
e0b299e3 | 176 | * @req_abort_skip: skip request abort task flag |
e0eca63e VH |
177 | */ |
178 | struct ufshcd_lrb { | |
179 | struct utp_transfer_req_desc *utr_descriptor_ptr; | |
5a0b0cb9 | 180 | struct utp_upiu_req *ucd_req_ptr; |
e0eca63e VH |
181 | struct utp_upiu_rsp *ucd_rsp_ptr; |
182 | struct ufshcd_sg_entry *ucd_prdt_ptr; | |
183 | ||
ff8e20c6 DR |
184 | dma_addr_t utrd_dma_addr; |
185 | dma_addr_t ucd_req_dma_addr; | |
186 | dma_addr_t ucd_rsp_dma_addr; | |
187 | dma_addr_t ucd_prdt_dma_addr; | |
188 | ||
e0eca63e | 189 | struct scsi_cmnd *cmd; |
e0eca63e VH |
190 | int scsi_status; |
191 | ||
192 | int command_type; | |
193 | int task_tag; | |
0ce147d4 | 194 | u8 lun; /* UPIU LUN id field is only 8-bit wide */ |
5a0b0cb9 | 195 | bool intr_cmd; |
ff8e20c6 | 196 | ktime_t issue_time_stamp; |
0f85e747 | 197 | u64 issue_time_stamp_local_clock; |
09017188 | 198 | ktime_t compl_time_stamp; |
0f85e747 | 199 | u64 compl_time_stamp_local_clock; |
df043c74 ST |
200 | #ifdef CONFIG_SCSI_UFS_CRYPTO |
201 | int crypto_key_slot; | |
202 | u64 data_unit_num; | |
203 | #endif | |
e0b299e3 GB |
204 | |
205 | bool req_abort_skip; | |
e0eca63e VH |
206 | }; |
207 | ||
e2566e0b BVA |
208 | /** |
209 | * struct ufs_query_req - parameters for building a query request | |
210 | * @query_func: UPIU header query function | |
211 | * @upiu_req: the query request data | |
212 | */ | |
213 | struct ufs_query_req { | |
214 | u8 query_func; | |
215 | struct utp_upiu_query upiu_req; | |
216 | }; | |
217 | ||
218 | /** | |
219 | * struct ufs_query_res - UPIU QUERY response |
221 | * @upiu_res: query response data | |
222 | */ | |
223 | struct ufs_query_res { | |
224 | struct utp_upiu_query upiu_res; | |
225 | }; | |
226 | ||
68078d5c | 227 | /** |
a230c2f6 | 228 | * struct ufs_query - holds relevant data structures for query request |
68078d5c DR |
229 | * @request: request upiu and function |
230 | * @descriptor: buffer for sending/receiving descriptor | |
231 | * @response: response upiu and response | |
232 | */ | |
233 | struct ufs_query { | |
234 | struct ufs_query_req request; | |
235 | u8 *descriptor; | |
236 | struct ufs_query_res response; | |
237 | }; | |
238 | ||
5a0b0cb9 SRT |
239 | /** |
240 | * struct ufs_dev_cmd - all fields associated with device management commands |
241 | * @type: device management command type - Query, NOP OUT | |
242 | * @lock: lock to allow one command at a time | |
243 | * @complete: internal commands completion | |
cff91daf | 244 | * @query: Device management query information |
5a0b0cb9 SRT |
245 | */ |
246 | struct ufs_dev_cmd { | |
247 | enum dev_cmd_type type; | |
248 | struct mutex lock; | |
20b97acc | 249 | struct completion complete; |
68078d5c | 250 | struct ufs_query query; |
5a0b0cb9 | 251 | }; |
e0eca63e | 252 | |
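/*
 * Illustrative sketch: filling a struct ufs_query_req for a descriptor read.
 * example_fill_dev_desc_query() is hypothetical; the function/opcode/IDN
 * constants come from <ufs/ufs.h> and mirror what the core query path sets up.
 */
static inline void example_fill_dev_desc_query(struct ufs_query_req *rq,
					       u16 buf_len)
{
	rq->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
	rq->upiu_req.opcode = UPIU_QUERY_OPCODE_READ_DESC;
	rq->upiu_req.idn = QUERY_DESC_IDN_DEVICE;
	rq->upiu_req.index = 0;
	rq->upiu_req.selector = 0;
	rq->upiu_req.length = cpu_to_be16(buf_len);
}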
c6e79dac SRT |
253 | /** |
254 | * struct ufs_clk_info - UFS clock related info | |
255 | * @list: list headed by hba->clk_list_head | |
256 | * @clk: clock node | |
257 | * @name: clock name | |
258 | * @max_freq: maximum frequency supported by the clock | |
4cff6d99 | 259 | * @min_freq: min frequency that can be used for clock scaling |
856b3483 | 260 | * @curr_freq: indicates the current frequency that it is set to |
81309c24 | 261 | * @keep_link_active: indicates that the clk should not be disabled if |
cff91daf | 262 | * link is active |
c6e79dac SRT |
263 | * @enabled: variable to check against multiple enable/disable |
264 | */ | |
265 | struct ufs_clk_info { | |
266 | struct list_head list; | |
267 | struct clk *clk; | |
268 | const char *name; | |
269 | u32 max_freq; | |
4cff6d99 | 270 | u32 min_freq; |
856b3483 | 271 | u32 curr_freq; |
81309c24 | 272 | bool keep_link_active; |
c6e79dac SRT |
273 | bool enabled; |
274 | }; | |
275 | ||
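/*
 * Illustrative sketch: every clock the host uses hangs off
 * hba->clk_list_head as a struct ufs_clk_info. A hypothetical helper could
 * walk that list like this (it takes the list head directly because struct
 * ufs_hba is defined later in this header).
 */
static inline unsigned int example_count_enabled_clks(struct list_head *clk_list)
{
	struct ufs_clk_info *clki;
	unsigned int cnt = 0;

	list_for_each_entry(clki, clk_list, list) {
		if (clki->enabled)
			cnt++;
	}
	return cnt;
}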
f06fcc71 YG |
276 | enum ufs_notify_change_status { |
277 | PRE_CHANGE, | |
278 | POST_CHANGE, | |
279 | }; | |
7eb584db DR |
280 | |
281 | struct ufs_pa_layer_attr { | |
282 | u32 gear_rx; | |
283 | u32 gear_tx; | |
284 | u32 lane_rx; | |
285 | u32 lane_tx; | |
286 | u32 pwr_rx; | |
287 | u32 pwr_tx; | |
288 | u32 hs_rate; | |
289 | }; | |
290 | ||
291 | struct ufs_pwr_mode_info { | |
292 | bool is_valid; | |
293 | struct ufs_pa_layer_attr info; | |
294 | }; | |
295 | ||
5c0c28a8 SRT |
296 | /** |
297 | * struct ufs_hba_variant_ops - variant specific callbacks | |
298 | * @name: variant name | |
e75ff633 | 299 | * @max_num_rtt: maximum number of outstanding RTTs supported by the host |
5c0c28a8 SRT |
300 | * @init: called when the driver is initialized |
301 | * @exit: called to cleanup everything done in init | |
78bc671b BVA |
302 | * @set_dma_mask: For setting another DMA mask than indicated by the 64AS |
303 | * capability bit. | |
9949e702 | 304 | * @get_ufs_hci_version: called to get UFS HCI version |
856b3483 | 305 | * @clk_scale_notify: notifies that clks are scaled up/down |
5c0c28a8 | 306 | * @setup_clocks: called before touching any of the controller registers |
5c0c28a8 SRT |
307 | * @hce_enable_notify: called before and after HCE enable bit is set to allow |
308 | * variant specific Uni-Pro initialization. | |
309 | * @link_startup_notify: called before and after Link startup is carried out | |
310 | * to allow variant specific Uni-Pro initialization. | |
7eb584db DR |
311 | * @pwr_change_notify: called before and after a power mode change |
312 | * is carried out to allow vendor specific capabilities |
7f45ed5f PW |
313 | * to be set. PRE_CHANGE can modify final_params based |
314 | * on desired_pwr_mode, but POST_CHANGE must not alter | |
315 | * the final_params parameter | |
0e675efa KK |
316 | * @setup_xfer_req: called before any transfer request is issued |
317 | * to allow the variant to perform any needed setup |
d2877be4 KK |
318 | * @setup_task_mgmt: called before any task management request is issued |
319 | * to allow the variant to perform any needed setup |
ee32c909 | 320 | * @hibern8_notify: called around hibern8 enter/exit |
56d4a186 | 321 | * @apply_dev_quirks: called to apply device specific quirks |
cff91daf | 322 | * @fixup_dev_quirks: called to modify device specific quirks |
57d104c1 SJ |
323 | * @suspend: called during host controller PM callback |
324 | * @resume: called during host controller PM callback | |
6e3fd44d | 325 | * @dbg_register_dump: used to dump controller debug information |
4b9ffb5a | 326 | * @phy_initialization: used to initialize phys |
d8d9f793 | 327 | * @device_reset: called to issue a reset pulse on the UFS device |
cff91daf | 328 | * @config_scaling_param: called to configure clock scaling parameters |
8ecea3da | 329 | * @fill_crypto_prdt: initialize crypto-related fields in the PRDT |
172614a9 | 330 | * @event_notify: called to notify important events |
c263b4ef | 331 | * @mcq_config_resource: called to configure MCQ platform resources |
af568c7e BVA |
332 | * @get_hba_mac: reports maximum number of outstanding commands supported by |
333 | * the controller. Should be implemented for UFSHCI 4.0 or later | |
334 | * controllers that are not compliant with the UFSHCI 4.0 specification. | |
2468da61 | 335 | * @op_runtime_config: called to config Operation and runtime regs Pointers |
f87b2c41 | 336 | * @get_outstanding_cqs: called to get outstanding completion queues |
edb0db05 | 337 | * @config_esi: called to config Event Specific Interrupt |
db06ae7c | 338 | * @config_scsi_dev: called to configure SCSI device parameters |
d7bead60 | 339 | * @freq_to_gear_speed: called to map clock frequency to the max supported gear speed |
5c0c28a8 SRT |
340 | */ |
341 | struct ufs_hba_variant_ops { | |
342 | const char *name; | |
e75ff633 | 343 | int max_num_rtt; |
5c0c28a8 SRT |
344 | int (*init)(struct ufs_hba *); |
345 | void (*exit)(struct ufs_hba *); | |
9949e702 | 346 | u32 (*get_ufs_hci_version)(struct ufs_hba *); |
78bc671b | 347 | int (*set_dma_mask)(struct ufs_hba *); |
5e011fcc CG |
348 | int (*clk_scale_notify)(struct ufs_hba *, bool, unsigned long, |
349 | enum ufs_notify_change_status); | |
1e879e8f SJ |
350 | int (*setup_clocks)(struct ufs_hba *, bool, |
351 | enum ufs_notify_change_status); | |
f06fcc71 YG |
352 | int (*hce_enable_notify)(struct ufs_hba *, |
353 | enum ufs_notify_change_status); | |
354 | int (*link_startup_notify)(struct ufs_hba *, | |
355 | enum ufs_notify_change_status); | |
7eb584db | 356 | int (*pwr_change_notify)(struct ufs_hba *, |
3bcd901e BVA |
357 | enum ufs_notify_change_status status, |
358 | const struct ufs_pa_layer_attr *desired_pwr_mode, | |
359 | struct ufs_pa_layer_attr *final_params); | |
b427609e BVA |
360 | void (*setup_xfer_req)(struct ufs_hba *hba, int tag, |
361 | bool is_scsi_cmd); | |
d2877be4 | 362 | void (*setup_task_mgmt)(struct ufs_hba *, int, u8); |
ee32c909 | 363 | void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, |
56d4a186 | 364 | enum ufs_notify_change_status); |
09750066 | 365 | int (*apply_dev_quirks)(struct ufs_hba *hba); |
c28c00ba | 366 | void (*fixup_dev_quirks)(struct ufs_hba *hba); |
9561f584 PW |
367 | int (*suspend)(struct ufs_hba *, enum ufs_pm_op, |
368 | enum ufs_notify_change_status); | |
57d104c1 | 369 | int (*resume)(struct ufs_hba *, enum ufs_pm_op); |
6e3fd44d | 370 | void (*dbg_register_dump)(struct ufs_hba *hba); |
4b9ffb5a | 371 | int (*phy_initialization)(struct ufs_hba *); |
151f1b66 | 372 | int (*device_reset)(struct ufs_hba *hba); |
2c75f9a5 | 373 | void (*config_scaling_param)(struct ufs_hba *hba, |
c906e832 BVA |
374 | struct devfreq_dev_profile *profile, |
375 | struct devfreq_simple_ondemand_data *data); | |
8ecea3da EB |
376 | int (*fill_crypto_prdt)(struct ufs_hba *hba, |
377 | const struct bio_crypt_ctx *crypt_ctx, | |
378 | void *prdt, unsigned int num_segments); | |
172614a9 SC |
379 | void (*event_notify)(struct ufs_hba *hba, |
380 | enum ufs_event_type evt, void *data); | |
c263b4ef | 381 | int (*mcq_config_resource)(struct ufs_hba *hba); |
7224c806 | 382 | int (*get_hba_mac)(struct ufs_hba *hba); |
2468da61 | 383 | int (*op_runtime_config)(struct ufs_hba *hba); |
f87b2c41 AD |
384 | int (*get_outstanding_cqs)(struct ufs_hba *hba, |
385 | unsigned long *ocqs); | |
edb0db05 | 386 | int (*config_esi)(struct ufs_hba *hba); |
7670e74f | 387 | void (*config_scsi_dev)(struct scsi_device *sdev); |
d7bead60 | 388 | u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq); |
5c0c28a8 SRT |
389 | }; |
390 | ||
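/*
 * Illustrative sketch: a minimal vendor glue driver only has to populate the
 * hooks it actually needs. "example-vendor" and example_vendor_init() are
 * hypothetical names, not an existing variant.
 */
static inline int example_vendor_init(struct ufs_hba *hba)
{
	/* e.g. claim vendor clocks/regulators and stash private data */
	return 0;
}

static const struct ufs_hba_variant_ops example_vendor_hba_vops = {
	.name	= "example-vendor",
	.init	= example_vendor_init,
};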
1ab27c9c ST |
391 | /* clock gating state */ |
392 | enum clk_gating_state { | |
393 | CLKS_OFF, | |
394 | CLKS_ON, | |
395 | REQ_CLKS_OFF, | |
396 | REQ_CLKS_ON, | |
397 | }; | |
398 | ||
399 | /** | |
400 | * struct ufs_clk_gating - UFS clock gating related info | |
401 | * @gate_work: worker to turn off clocks after some delay as specified in | |
402 | * delay_ms | |
403 | * @ungate_work: worker to turn on clocks, used when ungating is requested |
404 | * from a context that cannot sleep (e.g. interrupt context) |
209f4e43 AA |
405 | * @clk_gating_workq: workqueue for clock gating work. |
406 | * @lock: serialize access to some struct ufs_clk_gating members. An outer lock | |
407 | * relative to the host lock | |
1ab27c9c ST |
408 | * @state: the current clocks state |
409 | * @delay_ms: gating delay in ms | |
410 | * @is_suspended: clk gating is suspended when set to 1 which can be used | |
411 | * during suspend/resume | |
412 | * @delay_attr: sysfs attribute to control delay_ms |
b427411a ST |
413 | * @enable_attr: sysfs attribute to enable/disable clock gating |
414 | * @is_enabled: Indicates the current status of clock gating | |
4543d9d7 | 415 | * @is_initialized: Indicates whether clock gating is initialized or not |
1ab27c9c ST |
416 | * @active_reqs: number of requests that are pending and should be waited for |
417 | * completion before gating clocks. | |
418 | */ | |
419 | struct ufs_clk_gating { | |
420 | struct delayed_work gate_work; | |
421 | struct work_struct ungate_work; | |
209f4e43 AA |
422 | struct workqueue_struct *clk_gating_workq; |
423 | ||
424 | spinlock_t lock; | |
425 | ||
1ab27c9c ST |
426 | enum clk_gating_state state; |
427 | unsigned long delay_ms; | |
428 | bool is_suspended; | |
429 | struct device_attribute delay_attr; | |
b427411a ST |
430 | struct device_attribute enable_attr; |
431 | bool is_enabled; | |
4543d9d7 | 432 | bool is_initialized; |
1ab27c9c ST |
433 | int active_reqs; |
434 | }; | |
435 | ||
401f1e44 | 436 | /** |
437 | * struct ufs_clk_scaling - UFS clock scaling related data | |
be769e5c AA |
438 | * @workq: workqueue to schedule devfreq suspend/resume work |
439 | * @suspend_work: worker to suspend devfreq | |
440 | * @resume_work: worker to resume devfreq | |
441 | * @lock: serialize access to some struct ufs_clk_scaling members | |
401f1e44 | 442 | * @active_reqs: number of requests that are pending. If this is zero when |
443 | * devfreq ->target() function is called then schedule "suspend_work" to | |
444 | * suspend devfreq. | |
445 | * @tot_busy_t: Total busy time in current polling window | |
446 | * @window_start_t: Start time (in jiffies) of the current polling window | |
447 | * @busy_start_t: Start time of current busy period | |
448 | * @enable_attr: sysfs attribute to enable/disable clock scaling | |
449 | * @saved_pwr_info: UFS power mode may also be changed during scaling and this | |
450 | * one keeps track of previous power mode. | |
930bd77e | 451 | * @target_freq: frequency requested by devfreq framework |
29b87e92 | 452 | * @min_gear: lowest HS gear to scale down to |
2a25cbaa CG |
453 | * @wb_gear: enable Write Booster when the HS gear scales up to this gear or |
454 | * above, otherwise disable Write Booster |
0e9d4ca4 | 455 | * @is_enabled: tracks if scaling is currently enabled or not, controlled by |
cff91daf | 456 | * clkscale_enable sysfs node |
0e9d4ca4 | 457 | * @is_allowed: tracks if scaling is currently allowed or not, used to block |
cff91daf | 458 | * clock scaling which is not invoked from devfreq governor |
4543d9d7 | 459 | * @is_initialized: Indicates whether clock scaling is initialized or not |
401f1e44 | 460 | * @is_busy_started: tracks if busy period has started or not |
461 | * @is_suspended: tracks if devfreq is suspended or not | |
462 | */ | |
856b3483 | 463 | struct ufs_clk_scaling { |
be769e5c AA |
464 | struct workqueue_struct *workq; |
465 | struct work_struct suspend_work; | |
466 | struct work_struct resume_work; | |
467 | ||
468 | spinlock_t lock; | |
469 | ||
401f1e44 | 470 | int active_reqs; |
471 | unsigned long tot_busy_t; | |
b1bf66d1 | 472 | ktime_t window_start_t; |
401f1e44 | 473 | ktime_t busy_start_t; |
fcb0c4b0 | 474 | struct device_attribute enable_attr; |
543a827b | 475 | struct ufs_pa_layer_attr saved_pwr_info; |
930bd77e | 476 | unsigned long target_freq; |
29b87e92 | 477 | u32 min_gear; |
2a25cbaa | 478 | u32 wb_gear; |
0e9d4ca4 | 479 | bool is_enabled; |
401f1e44 | 480 | bool is_allowed; |
4543d9d7 | 481 | bool is_initialized; |
401f1e44 | 482 | bool is_busy_started; |
483 | bool is_suspended; | |
50183ac2 | 484 | bool suspend_on_no_request; |
856b3483 ST |
485 | }; |
486 | ||
e965e5e0 | 487 | #define UFS_EVENT_HIST_LENGTH 8 |
ff8e20c6 | 488 | /** |
e965e5e0 | 489 | * struct ufs_event_hist - keeps history of errors |
ff8e20c6 | 490 | * @pos: index to indicate cyclic buffer position |
cff91daf | 491 | * @val: cyclic buffer for registers value |
ff8e20c6 | 492 | * @tstamp: cyclic buffer for time stamp |
b6cacaf2 | 493 | * @cnt: error counter |
ff8e20c6 | 494 | */ |
e965e5e0 | 495 | struct ufs_event_hist { |
ff8e20c6 | 496 | int pos; |
e965e5e0 | 497 | u32 val[UFS_EVENT_HIST_LENGTH]; |
0f85e747 | 498 | u64 tstamp[UFS_EVENT_HIST_LENGTH]; |
b6cacaf2 | 499 | unsigned long long cnt; |
ff8e20c6 DR |
500 | }; |
501 | ||
502 | /** | |
503 | * struct ufs_stats - keeps usage/err statistics | |
504 | * @hibern8_exit_cnt: Counter to keep track of number of exits, | |
505 | * reset this after link-startup. | |
506 | * @last_hibern8_exit_tstamp: Set time after the hibern8 exit. | |
507 | * Clear after the first successful command completion. | |
cff91daf | 508 | * @event: array with event history. |
ff8e20c6 DR |
509 | */ |
510 | struct ufs_stats { | |
511 | u32 hibern8_exit_cnt; | |
0f85e747 | 512 | u64 last_hibern8_exit_tstamp; |
e965e5e0 | 513 | struct ufs_event_hist event[UFS_EVT_CNT]; |
ff8e20c6 DR |
514 | }; |
515 | ||
9c202090 BVA |
516 | /** |
517 | * enum ufshcd_state - UFS host controller state | |
518 | * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command | |
519 | * processing. | |
520 | * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process | |
521 | * SCSI commands. | |
522 | * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled. | |
523 | * SCSI commands may be submitted to the controller. | |
524 | * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail | |
525 | * newly submitted SCSI commands with error code DID_BAD_TARGET. | |
526 | * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery | |
527 | * failed. Fail all SCSI commands with error code DID_ERROR. | |
528 | */ | |
529 | enum ufshcd_state { | |
530 | UFSHCD_STATE_RESET, | |
531 | UFSHCD_STATE_OPERATIONAL, | |
532 | UFSHCD_STATE_EH_SCHEDULED_NON_FATAL, | |
533 | UFSHCD_STATE_EH_SCHEDULED_FATAL, | |
534 | UFSHCD_STATE_ERROR, | |
535 | }; | |
536 | ||
c3f7d1fc CH |
537 | enum ufshcd_quirks { |
538 | /* Interrupt aggregation support is broken */ | |
539 | UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0, | |
540 | ||
541 | /* | |
542 | * delay before each dme command is required as the unipro | |
543 | * layer has shown instabilities | |
544 | */ | |
545 | UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1, | |
546 | ||
547 | /* | |
548 | * If UFS host controller is having issue in processing LCC (Line | |
549 | * Control Command) coming from device then enable this quirk. | |
550 | * When this quirk is enabled, host controller driver should disable | |
551 | * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE | |
552 | * attribute of device to 0). | |
553 | */ | |
554 | UFSHCD_QUIRK_BROKEN_LCC = 1 << 2, | |
555 | ||
556 | /* | |
557 | * The attribute PA_RXHSUNTERMCAP specifies whether or not the | |
558 | * inbound Link supports unterminated line in HS mode. Setting this | |
559 | * attribute to 1 fixes moving to HS gear. | |
560 | */ | |
561 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3, | |
562 | ||
563 | /* | |
564 | * This quirk needs to be enabled if the host controller only allows | |
565 | * accessing the peer dme attributes in AUTO mode (FAST AUTO or | |
566 | * SLOW AUTO). | |
567 | */ | |
568 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4, | |
569 | ||
570 | /* | |
571 | * This quirk needs to be enabled if the host controller doesn't | |
572 | * advertise the correct version in UFS_VER register. If this quirk | |
573 | * is enabled, standard UFS host driver will call the vendor specific | |
574 | * ops (get_ufs_hci_version) to get the correct version. | |
575 | */ | |
576 | UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5, | |
87183841 AA |
577 | |
578 | /* | |
579 | * Clear handling for the transfer/task request lists is inverted (opposite of the spec). |
580 | */ | |
581 | UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6, | |
b638b5eb AA |
582 | |
583 | /* | |
584 | * This quirk needs to be enabled if host controller doesn't allow | |
585 | * that the interrupt aggregation timer and counter are reset by s/w. | |
586 | */ | |
587 | UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7, | |
39bf2d83 AA |
588 | |
589 | /* | |
590 | * This quirk needs to be enabled if the host controller cannot be |
591 | * enabled via HCE register. | |
592 | */ | |
593 | UFSHCI_QUIRK_BROKEN_HCE = 1 << 8, | |
26f968d7 AA |
594 | |
595 | /* | |
596 | * This quirk needs to be enabled if the host controller interprets the |
597 | * PRDTO and PRDTL values in the UTRD with byte granularity. |
598 | */ | |
599 | UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9, | |
d779a6e9 KK |
600 | |
601 | /* | |
602 | * This quirk needs to be enabled if the host controller reports | |
603 | * OCS FATAL ERROR with device error through sense data | |
604 | */ | |
605 | UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10, | |
5df6f2de | 606 | |
8da76f71 AH |
607 | /* |
608 | * This quirk needs to be enabled if the host controller has | |
609 | * auto-hibernate capability but it doesn't work. | |
610 | */ | |
611 | UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11, | |
02f74150 | 612 | |
5df6f2de KK |
613 | /* |
614 | * This quirk needs to disable manual flush for write booster | |
615 | */ | |
02f74150 MP |
616 | UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12, |
617 | ||
b1d0d2eb KK |
618 | /* |
619 | * This quirk needs to disable unipro timeout values | |
620 | * before power mode change | |
621 | */ | |
622 | UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13, | |
623 | ||
a22bcfdb | 624 | /* |
625 | * This quirk needs to be enabled if the host controller does not | |
626 | * support UIC command | |
627 | */ | |
628 | UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15, | |
10fb4f87 | 629 | |
630 | /* | |
631 | * This quirk needs to be enabled if the host controller cannot | |
632 | * support physical host configuration. | |
633 | */ | |
634 | UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16, | |
6554400d | 635 | |
2f11bbc2 YS |
636 | /* |
637 | * This quirk needs to be enabled if the host controller has | |
638 | * auto-hibernate capability but it's FASTAUTO only. | |
639 | */ | |
640 | UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18, | |
96a7141d MS |
641 | |
642 | /* | |
643 | * This quirk needs to be enabled if the host controller needs | |
644 | * to reinit the device after switching to maximum gear. | |
645 | */ | |
646 | UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19, | |
c4ad4f2e PWK |
647 | |
648 | /* | |
649 | * Some hosts raise a per-queue interrupt in addition to |
650 | * CQES (traditional) when ESI is disabled. |
651 | * Enabling this quirk disables CQES and uses the per-queue interrupt. |
652 | */ | |
653 | UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20, | |
aa9d5d00 PWK |
654 | |
655 | /* | |
656 | * Some hosts do not implement the SQ Run Time Command (SQRTC) register |
657 | * and thus need this quirk to skip the related flow. |
658 | */ | |
659 | UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21, | |
c2a90eee EB |
660 | |
661 | /* | |
662 | * This quirk needs to be enabled if the host controller supports inline | |
663 | * encryption but it needs to initialize the crypto capabilities in a | |
664 | * nonstandard way and/or needs to override blk_crypto_ll_ops. If | |
665 | * enabled, the standard code won't initialize the blk_crypto_profile; | |
666 | * ufs_hba_variant_ops::init() must do it instead. | |
667 | */ | |
668 | UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22, | |
e95881e0 EB |
669 | |
670 | /* | |
671 | * This quirk needs to be enabled if the host controller supports inline | |
672 | * encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e. | |
673 | * host controller initialization fails if that bit is set. | |
674 | */ | |
675 | UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23, | |
4c45dba5 EB |
676 | |
677 | /* | |
678 | * This quirk needs to be enabled if the host controller driver copies | |
679 | * cryptographic keys into the PRDT in order to send them to hardware, | |
680 | * and therefore the PRDT should be zeroized after each request (as per | |
681 | * the standard best practice for managing keys). | |
682 | */ | |
683 | UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24, | |
cd06b713 MS |
684 | |
685 | /* | |
686 | * This quirk indicates that the controller reports the value 1 (not | |
687 | * supported) in the Legacy Single DoorBell Support (LSDBS) bit of the | |
688 | * Controller Capabilities register although it supports the legacy | |
689 | * single doorbell mode. | |
690 | */ | |
691 | UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25, | |
c3f7d1fc CH |
692 | }; |
693 | ||
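/*
 * Illustrative sketch: quirks are workarounds for controller deviations and
 * are normally OR-ed into hba->quirks from a variant's ->init() callback.
 * EXAMPLE_VENDOR_UFS_QUIRKS is a hypothetical aggregate, not a real platform.
 */
#define EXAMPLE_VENDOR_UFS_QUIRKS	(UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS | \
					 UFSHCD_QUIRK_BROKEN_LCC)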
c2014682 SC |
694 | enum ufshcd_caps { |
695 | /* Allow dynamic clk gating */ | |
696 | UFSHCD_CAP_CLK_GATING = 1 << 0, | |
697 | ||
698 | /* Allow hibern8 with clk gating */ |
699 | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1, | |
700 | ||
701 | /* Allow dynamic clk scaling */ | |
702 | UFSHCD_CAP_CLK_SCALING = 1 << 2, | |
703 | ||
704 | /* Allow auto bkops to be enabled during runtime suspend */ |
705 | UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3, | |
706 | ||
707 | /* | |
708 | * This capability allows host controller driver to use the UFS HCI's | |
709 | * interrupt aggregation capability. | |
710 | * CAUTION: Enabling this might reduce overall UFS throughput. | |
711 | */ | |
712 | UFSHCD_CAP_INTR_AGGR = 1 << 4, | |
713 | ||
714 | /* | |
715 | * This capability allows the device auto-bkops to be always enabled | |
716 | * except during suspend (both runtime and system suspend). |
717 | * Enabling this capability means that the device will always be allowed |
718 | * to do background operations when it's active but it might degrade |
719 | * the performance of ongoing read/write operations. | |
720 | */ | |
721 | UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5, | |
722 | ||
723 | /* | |
724 | * This capability allows host controller driver to automatically | |
725 | * enable runtime power management by itself instead of waiting | |
726 | * for userspace to control the power management. | |
727 | */ | |
728 | UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6, | |
3d17b9b5 AD |
729 | |
730 | /* | |
731 | * This capability allows the host controller driver to turn-on | |
732 | * WriteBooster, if the underlying device supports it and is | |
733 | * provisioned to be used. This would increase the write performance. | |
734 | */ | |
735 | UFSHCD_CAP_WB_EN = 1 << 7, | |
5e7341e1 ST |
736 | |
737 | /* | |
738 | * This capability allows the host controller driver to use the | |
739 | * inline crypto engine, if it is present | |
740 | */ | |
741 | UFSHCD_CAP_CRYPTO = 1 << 8, | |
dd7143e2 CG |
742 | |
743 | /* | |
744 | * This capability allows the controller regulators to be put into | |
745 | * lpm mode aggressively during clock gating. | |
746 | * This would increase power savings. | |
747 | */ | |
748 | UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9, | |
fe1d4c2e AH |
749 | |
750 | /* | |
751 | * This capability allows the host controller driver to use DeepSleep, | |
752 | * if it is supported by the UFS device. The host controller driver must | |
753 | * support device hardware reset via the hba->device_reset() callback, | |
754 | * in order to exit DeepSleep state. | |
755 | */ | |
756 | UFSHCD_CAP_DEEPSLEEP = 1 << 10, | |
e88e2d32 AA |
757 | |
758 | /* | |
759 | * This capability allows the host controller driver to use temperature | |
760 | * notification if it is supported by the UFS device. | |
761 | */ | |
762 | UFSHCD_CAP_TEMP_NOTIF = 1 << 11, | |
87bd0501 PW |
763 | |
764 | /* | |
765 | * Enable WriteBooster when scaling up the clock and disable | |
766 | * WriteBooster when scaling the clock down. | |
767 | */ | |
768 | UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12, | |
c2014682 SC |
769 | }; |
770 | ||
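/*
 * Illustrative sketch: unlike quirks (workarounds), caps opt in to optional
 * features; a variant's ->init() typically sets them on hba->caps.
 * EXAMPLE_VENDOR_UFS_CAPS is a hypothetical aggregate.
 */
#define EXAMPLE_VENDOR_UFS_CAPS	(UFSHCD_CAP_CLK_GATING | \
				 UFSHCD_CAP_RPM_AUTOSUSPEND | \
				 UFSHCD_CAP_WB_EN)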
90b8491c SC |
771 | struct ufs_hba_variant_params { |
772 | struct devfreq_dev_profile devfreq_profile; | |
773 | struct devfreq_simple_ondemand_data ondemand_data; | |
774 | u16 hba_enable_delay_us; | |
d14734ae | 775 | u32 wb_flush_threshold; |
90b8491c SC |
776 | }; |
777 | ||
1d8613a2 CG |
778 | struct ufs_hba_monitor { |
779 | unsigned long chunk_size; | |
780 | ||
781 | unsigned long nr_sec_rw[2]; | |
782 | ktime_t total_busy[2]; | |
783 | ||
784 | unsigned long nr_req[2]; | |
785 | /* latencies */ |
786 | ktime_t lat_sum[2]; | |
787 | ktime_t lat_max[2]; | |
788 | ktime_t lat_min[2]; | |
789 | ||
790 | u32 nr_queued[2]; | |
791 | ktime_t busy_start_ts[2]; | |
792 | ||
793 | ktime_t enabled_ts; | |
794 | bool enabled; | |
795 | }; | |
796 | ||
c263b4ef AD |
797 | /** |
798 | * struct ufshcd_res_info - MCQ related resource regions |
799 | * | |
800 | * @name: resource name | |
801 | * @resource: pointer to resource region | |
802 | * @base: register base address | |
803 | */ | |
804 | struct ufshcd_res_info { | |
805 | const char *name; | |
806 | struct resource *resource; | |
807 | void __iomem *base; | |
808 | }; | |
809 | ||
810 | enum ufshcd_res { | |
811 | RES_UFS, | |
812 | RES_MCQ, | |
813 | RES_MCQ_SQD, | |
814 | RES_MCQ_SQIS, | |
815 | RES_MCQ_CQD, | |
816 | RES_MCQ_CQIS, | |
817 | RES_MCQ_VS, | |
818 | RES_MAX, | |
819 | }; | |
820 | ||
2468da61 AD |
821 | /** |
822 | * struct ufshcd_mcq_opr_info_t - Operation and Runtime registers | |
823 | * | |
824 | * @offset: Doorbell Address Offset | |
825 | * @stride: Steps proportional to queue [0...31] | |
826 | * @base: base address | |
827 | */ | |
828 | struct ufshcd_mcq_opr_info_t { | |
829 | unsigned long offset; | |
830 | unsigned long stride; | |
831 | void __iomem *base; | |
832 | }; | |
833 | ||
834 | enum ufshcd_mcq_opr { | |
835 | OPR_SQD, | |
836 | OPR_SQIS, | |
837 | OPR_CQD, | |
838 | OPR_CQIS, | |
839 | OPR_MAX, | |
840 | }; | |
841 | ||
e0eca63e VH |
842 | /** |
843 | * struct ufs_hba - per adapter private structure | |
844 | * @mmio_base: UFSHCI base register address | |
845 | * @ucdl_base_addr: UFS Command Descriptor base address | |
846 | * @utrdl_base_addr: UTP Transfer Request Descriptor base address | |
847 | * @utmrdl_base_addr: UTP Task Management Descriptor base address | |
848 | * @ucdl_dma_addr: UFS Command Descriptor DMA address | |
849 | * @utrdl_dma_addr: UTRDL DMA address | |
850 | * @utmrdl_dma_addr: UTMRDL DMA address | |
851 | * @host: Scsi_Host instance of the driver | |
852 | * @dev: device handle | |
e2106584 | 853 | * @ufs_device_wlun: WLUN that controls the entire UFS device. |
cff91daf BVA |
854 | * @hwmon_device: device instance registered with the hwmon core. |
855 | * @curr_dev_pwr_mode: active UFS device power mode. | |
856 | * @uic_link_state: active state of the link to the UFS device. | |
857 | * @rpm_lvl: desired UFS power management level during runtime PM. | |
858 | * @spm_lvl: desired UFS power management level during system PM. | |
859 | * @pm_op_in_progress: whether or not a PM operation is in progress. | |
860 | * @ahit: value of Auto-Hibernate Idle Timer register. | |
e0eca63e VH |
861 | * @lrb: local reference block |
862 | * @outstanding_tasks: Bits representing outstanding task requests | |
169f5eb2 | 863 | * @outstanding_lock: Protects @outstanding_reqs. |
e0eca63e VH |
864 | * @outstanding_reqs: Bits representing outstanding transfer requests |
865 | * @capabilities: UFS Controller Capabilities | |
6e1d850a | 866 | * @mcq_capabilities: UFS Multi Circular Queue capabilities |
e0eca63e | 867 | * @nutrs: Transfer Request Queue depth supported by controller |
9ec54934 | 868 | * @nortt: Max outstanding RTTs supported by controller |
e0eca63e | 869 | * @nutmrs: Task Management Queue depth supported by controller |
945c3cca | 870 | * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock. |
e0eca63e | 871 | * @ufs_version: UFS Version to which controller complies |
5c0c28a8 | 872 | * @vops: pointer to variant specific operations |
cff91daf | 873 | * @vps: pointer to variant specific parameters |
5c0c28a8 | 874 | * @priv: pointer to variant specific private data |
ada1e653 | 875 | * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields) |
e0eca63e | 876 | * @irq: Irq number of the controller |
cff91daf BVA |
877 | * @is_irq_enabled: whether or not the UFS controller interrupt is enabled. |
878 | * @dev_ref_clk_freq: reference clock frequency | |
879 | * @quirks: bitmask with information about deviations from the UFSHCI standard. | |
880 | * @dev_quirks: bitmask with information about deviations from the UFS standard. | |
69a6c269 BVA |
881 | * @tmf_tag_set: TMF tag set. |
882 | * @tmf_queue: Used to allocate TMF tags. | |
cff91daf | 883 | * @tmf_rqs: array with pointers to TMF requests while these are in progress. |
22fbabe8 BVA |
884 | * @active_uic_cmd: pointer to active UIC command. |
885 | * @uic_cmd_mutex: mutex used for serializing UIC command processing. | |
886 | * @uic_async_done: completion used to wait for power mode or hibernation state | |
887 | * changes. | |
9c202090 | 888 | * @ufshcd_state: UFSHCD state |
3441da7d | 889 | * @eh_flags: Error handling flags |
2fbd009b | 890 | * @intr_mask: Interrupt Mask Bits |
66ec6d59 | 891 | * @ee_ctrl_mask: Exception event control mask |
cff91daf BVA |
892 | * @ee_drv_mask: Exception event mask for driver |
893 | * @ee_usr_mask: Exception event mask for user (set via debugfs) | |
894 | * @ee_ctrl_mutex: Used to serialize exception event information. | |
1d337ec2 | 895 | * @is_powered: flag to check if HBA is powered |
9cd20d3f CG |
896 | * @shutting_down: flag to check if shutdown has been invoked |
897 | * @host_sem: semaphore used to serialize concurrent contexts | |
88b09900 AH |
898 | * @eh_wq: Workqueue that eh_work works on |
899 | * @eh_work: Worker to handle UFS errors that require s/w attention | |
66ec6d59 | 900 | * @eeh_work: Worker to handle exception events |
e0eca63e | 901 | * @errors: HBA errors |
e8e7f271 SRT |
902 | * @uic_error: UFS interconnect layer error status |
903 | * @saved_err: sticky error mask | |
904 | * @saved_uic_err: sticky UIC error mask | |
cff91daf | 905 | * @ufs_stats: various error counters |
4db7a236 | 906 | * @force_reset: flag to force eh_work perform a full reset |
2355b66e | 907 | * @force_pmc: flag to force a power mode change |
2df74b69 | 908 | * @silence_err_logs: flag to silence error logs |
5a0b0cb9 | 909 | * @dev_cmd: ufs device management command information |
cad2e03d | 910 | * @last_dme_cmd_tstamp: time stamp of the last completed DME command |
cff91daf BVA |
911 | * @nop_out_timeout: NOP OUT timeout value |
912 | * @dev_info: information about the UFS device | |
66ec6d59 | 913 | * @auto_bkops_enabled: to track whether bkops is enabled in device |
aa497613 | 914 | * @vreg_info: UFS device voltage regulator information |
c6e79dac | 915 | * @clk_list_head: UFS host controller clocks list node head |
930bd77e | 916 | * @use_pm_opp: Indicates whether OPP based scaling is used or not |
cff91daf BVA |
917 | * @req_abort_count: number of times ufshcd_abort() has been called |
918 | * @lanes_per_direction: number of lanes per data direction between the UFS | |
919 | * controller and the UFS device. | |
7eb584db DR |
920 | * @pwr_info: holds current power mode |
921 | * @max_pwr_info: keeps the maximum valid power mode supported by the device |
cff91daf BVA |
922 | * @clk_gating: information related to clock gating |
923 | * @caps: bitmask with information about UFS controller capabilities | |
924 | * @devfreq: frequency scaling information owned by the devfreq core | |
925 | * @clk_scaling: frequency scaling information owned by the UFS driver | |
1a547cbc BVA |
926 | * @system_suspending: system suspend has been started and system resume has |
927 | * not yet finished. | |
928 | * @is_sys_suspended: UFS device has been suspended because of system suspend | |
afdfff59 YG |
929 | * @urgent_bkops_lvl: keeps track of urgent bkops level for device |
930 | * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for | |
931 | * device is known or not. | |
ba810437 | 932 | * @wb_mutex: used to serialize devfreq and sysfs write booster toggling |
cff91daf BVA |
933 | * @clk_scaling_lock: used to serialize device commands and clock scaling |
cff91daf BVA |
935 | * @bsg_dev: struct device associated with the BSG queue |
936 | * @bsg_queue: BSG queue associated with the UFS controller | |
937 | * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power | |
938 | * management) after the UFS device has finished a WriteBooster buffer | |
939 | * flush or auto BKOP. | |
cff91daf | 940 | * @monitor: statistics about UFS commands |
70297a8a ST |
941 | * @crypto_capabilities: Content of crypto capabilities register (0x100) |
942 | * @crypto_cap_array: Array of crypto capabilities | |
943 | * @crypto_cfg_register: Start of the crypto cfg array | |
cb77cb5a | 944 | * @crypto_profile: the crypto profile of this hba (if applicable) |
cff91daf BVA |
945 | * @debugfs_root: UFS controller debugfs root directory |
946 | * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay | |
947 | * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore | |
948 | * ee_ctrl_mask | |
949 | * @luns_avail: number of regular and well known LUNs supported by the UFS | |
950 | * device | |
57b1c0ef AD |
951 | * @nr_hw_queues: number of hardware queues configured |
952 | * @nr_queues: number of Queues of different queue types | |
cff91daf BVA |
953 | * @complete_put: whether or not to call ufshcd_rpm_put() from inside |
954 | * ufshcd_resume_complete() | |
305a357d | 955 | * @mcq_sup: is mcq supported by UFSHC |
2468da61 | 956 | * @mcq_enabled: is mcq ready to accept requests |
2d6c7bcc | 957 | * @mcq_esi_enabled: is mcq ESI configured |
c263b4ef AD |
958 | * @res: array of resource info of MCQ registers |
959 | * @mcq_base: Multi circular queue registers base address | |
4682abfa AD |
960 | * @uhq: array of supported hardware queues |
961 | * @dev_cmd_queue: Queue for issuing device management commands | |
6bf999e0 BH |
962 | * @mcq_opr: MCQ operation and runtime registers |
963 | * @ufs_rtc_update_work: A work for UFS RTC periodic update | |
2777e73f MN |
964 | * @pm_qos_req: PM QoS request handle |
965 | * @pm_qos_enabled: flag to check if pm qos is enabled | |
edfaf868 | 966 | * @critical_health_count: count of critical health exceptions |
1fd2e77b BN |
967 | * @dev_lvl_exception_count: count of device level exceptions since last reset |
968 | * @dev_lvl_exception_id: vendor specific information about the | |
969 | * device level exception event. | |
e0eca63e VH |
970 | */ |
971 | struct ufs_hba { | |
972 | void __iomem *mmio_base; | |
973 | ||
974 | /* Virtual memory reference */ | |
975 | struct utp_transfer_cmd_desc *ucdl_base_addr; | |
976 | struct utp_transfer_req_desc *utrdl_base_addr; | |
977 | struct utp_task_req_desc *utmrdl_base_addr; | |
978 | ||
979 | /* DMA memory reference */ | |
980 | dma_addr_t ucdl_dma_addr; | |
981 | dma_addr_t utrdl_dma_addr; | |
982 | dma_addr_t utmrdl_dma_addr; | |
983 | ||
984 | struct Scsi_Host *host; | |
985 | struct device *dev; | |
e2106584 | 986 | struct scsi_device *ufs_device_wlun; |
e0eca63e | 987 | |
e88e2d32 AA |
988 | #ifdef CONFIG_SCSI_UFS_HWMON |
989 | struct device *hwmon_device; | |
990 | #endif | |
991 | ||
57d104c1 SJ |
992 | enum ufs_dev_pwr_mode curr_dev_pwr_mode; |
993 | enum uic_link_state uic_link_state; | |
994 | /* Desired UFS power management level during runtime PM */ | |
995 | enum ufs_pm_level rpm_lvl; | |
996 | /* Desired UFS power management level during system PM */ | |
997 | enum ufs_pm_level spm_lvl; | |
998 | int pm_op_in_progress; | |
999 | ||
ad448378 AH |
1000 | /* Auto-Hibernate Idle Timer register value */ |
1001 | u32 ahit; | |
1002 | ||
e0eca63e VH |
1003 | struct ufshcd_lrb *lrb; |
1004 | ||
1005 | unsigned long outstanding_tasks; | |
169f5eb2 | 1006 | spinlock_t outstanding_lock; |
e0eca63e VH |
1007 | unsigned long outstanding_reqs; |
1008 | ||
1009 | u32 capabilities; | |
1010 | int nutrs; | |
9ec54934 | 1011 | int nortt; |
6e1d850a | 1012 | u32 mcq_capabilities; |
e0eca63e | 1013 | int nutmrs; |
945c3cca | 1014 | u32 reserved_slot; |
e0eca63e | 1015 | u32 ufs_version; |
176eb927 | 1016 | const struct ufs_hba_variant_ops *vops; |
90b8491c | 1017 | struct ufs_hba_variant_params *vps; |
5c0c28a8 | 1018 | void *priv; |
ada1e653 EB |
1019 | #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE |
1020 | size_t sg_entry_size; | |
1021 | #endif | |
e0eca63e | 1022 | unsigned int irq; |
57d104c1 | 1023 | bool is_irq_enabled; |
9e1e8a75 | 1024 | enum ufs_ref_clk_freq dev_ref_clk_freq; |
e0eca63e | 1025 | |
cad2e03d | 1026 | unsigned int quirks; /* Deviations from standard UFSHCI spec. */ |
6ccf44fe | 1027 | |
c58ab7aa YG |
1028 | /* Device deviations from standard UFS device spec. */ |
1029 | unsigned int dev_quirks; | |
1030 | ||
69a6c269 BVA |
1031 | struct blk_mq_tag_set tmf_tag_set; |
1032 | struct request_queue *tmf_queue; | |
f5ef336f | 1033 | struct request **tmf_rqs; |
e0eca63e | 1034 | |
57d104c1 SJ |
1035 | struct uic_command *active_uic_cmd; |
1036 | struct mutex uic_cmd_mutex; | |
1037 | struct completion *uic_async_done; | |
53b3d9c3 | 1038 | |
9c202090 | 1039 | enum ufshcd_state ufshcd_state; |
3441da7d | 1040 | u32 eh_flags; |
2fbd009b | 1041 | u32 intr_mask; |
cff91daf BVA |
1042 | u16 ee_ctrl_mask; |
1043 | u16 ee_drv_mask; | |
1044 | u16 ee_usr_mask; | |
cd469475 | 1045 | struct mutex ee_ctrl_mutex; |
1d337ec2 | 1046 | bool is_powered; |
9cd20d3f CG |
1047 | bool shutting_down; |
1048 | struct semaphore host_sem; | |
e0eca63e VH |
1049 | |
1050 | /* Work Queues */ | |
88b09900 AH |
1051 | struct workqueue_struct *eh_wq; |
1052 | struct work_struct eh_work; | |
66ec6d59 | 1053 | struct work_struct eeh_work; |
e0eca63e VH |
1054 | |
1055 | /* HBA Errors */ | |
1056 | u32 errors; | |
e8e7f271 SRT |
1057 | u32 uic_error; |
1058 | u32 saved_err; | |
1059 | u32 saved_uic_err; | |
ff8e20c6 | 1060 | struct ufs_stats ufs_stats; |
4db7a236 | 1061 | bool force_reset; |
2355b66e | 1062 | bool force_pmc; |
2df74b69 | 1063 | bool silence_err_logs; |
5a0b0cb9 SRT |
1064 | |
1065 | /* Device management request data */ | |
1066 | struct ufs_dev_cmd dev_cmd; | |
cad2e03d | 1067 | ktime_t last_dme_cmd_tstamp; |
1cbc9ad3 | 1068 | int nop_out_timeout; |
66ec6d59 | 1069 | |
57d104c1 SJ |
1070 | /* Keeps information of the UFS device connected to this host */ |
1071 | struct ufs_dev_info dev_info; | |
66ec6d59 | 1072 | bool auto_bkops_enabled; |
aa497613 | 1073 | struct ufs_vreg_info vreg_info; |
c6e79dac | 1074 | struct list_head clk_list_head; |
930bd77e | 1075 | bool use_pm_opp; |
57d104c1 | 1076 | |
7fabb77b GB |
1077 | /* Number of requests aborts */ |
1078 | int req_abort_count; | |
1079 | ||
54b879b7 YG |
1080 | /* Number of lanes available (1 or 2) for Rx/Tx */ |
1081 | u32 lanes_per_direction; | |
7eb584db DR |
1082 | struct ufs_pa_layer_attr pwr_info; |
1083 | struct ufs_pwr_mode_info max_pwr_info; | |
1ab27c9c ST |
1084 | |
1085 | struct ufs_clk_gating clk_gating; | |
1086 | /* Control to enable/disable host capabilities */ | |
1087 | u32 caps; | |
856b3483 ST |
1088 | |
1089 | struct devfreq *devfreq; | |
1090 | struct ufs_clk_scaling clk_scaling; | |
1a547cbc | 1091 | bool system_suspending; |
e785060e | 1092 | bool is_sys_suspended; |
afdfff59 YG |
1093 | |
1094 | enum bkops_status urgent_bkops_lvl; | |
1095 | bool is_urgent_bkops_lvl_checked; | |
a3cd5ec5 | 1096 | |
ba810437 | 1097 | struct mutex wb_mutex; |
a3cd5ec5 | 1098 | struct rw_semaphore clk_scaling_lock; |
df032bf2 AA |
1099 | |
1100 | struct device bsg_dev; | |
1101 | struct request_queue *bsg_queue; | |
51dd905b | 1102 | struct delayed_work rpm_dev_flush_recheck_work; |
70297a8a | 1103 | |
1d8613a2 CG |
1104 | struct ufs_hba_monitor monitor; |
1105 | ||
70297a8a ST |
1106 | #ifdef CONFIG_SCSI_UFS_CRYPTO |
1107 | union ufs_crypto_capabilities crypto_capabilities; | |
1108 | union ufs_crypto_cap_entry *crypto_cap_array; | |
1109 | u32 crypto_cfg_register; | |
cb77cb5a | 1110 | struct blk_crypto_profile crypto_profile; |
70297a8a | 1111 | #endif |
b6cacaf2 AH |
1112 | #ifdef CONFIG_DEBUG_FS |
1113 | struct dentry *debugfs_root; | |
7deedfda AH |
1114 | struct delayed_work debugfs_ee_work; |
1115 | u32 debugfs_ee_rate_limit_ms; | |
045da307 AM |
1116 | #endif |
1117 | #ifdef CONFIG_SCSI_UFS_FAULT_INJECTION | |
1118 | struct fault_attr trigger_eh_attr; | |
1119 | struct fault_attr timeout_attr; | |
b6cacaf2 | 1120 | #endif |
b294ff3e | 1121 | u32 luns_avail; |
57b1c0ef AD |
1122 | unsigned int nr_hw_queues; |
1123 | unsigned int nr_queues[HCTX_MAX_TYPES]; | |
b294ff3e | 1124 | bool complete_put; |
0cab4023 | 1125 | bool scsi_host_added; |
305a357d | 1126 | bool mcq_sup; |
0c60eb0c | 1127 | bool lsdb_sup; |
2468da61 | 1128 | bool mcq_enabled; |
2d6c7bcc | 1129 | bool mcq_esi_enabled; |
c263b4ef AD |
1130 | struct ufshcd_res_info res[RES_MAX]; |
1131 | void __iomem *mcq_base; | |
4682abfa AD |
1132 | struct ufs_hw_queue *uhq; |
1133 | struct ufs_hw_queue *dev_cmd_queue; | |
2468da61 | 1134 | struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX]; |
6bf999e0 BH |
1135 | |
1136 | struct delayed_work ufs_rtc_update_work; | |
2777e73f MN |
1137 | struct pm_qos_request pm_qos_req; |
1138 | bool pm_qos_enabled; | |
edfaf868 AA |
1139 | |
1140 | int critical_health_count; | |
1fd2e77b BN |
1141 | atomic_t dev_lvl_exception_count; |
1142 | u64 dev_lvl_exception_id; | |
4682abfa AD |
1143 | }; |
1144 | ||
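/*
 * Illustrative sketch: the hba is allocated as the private data of its
 * Scsi_Host (see ufshcd_alloc_host()), so code holding a Scsi_Host or a
 * scsi_device can get back to it; example_shost_to_hba() is a hypothetical
 * wrapper around the standard shost_priv() accessor.
 */
static inline struct ufs_hba *example_shost_to_hba(struct Scsi_Host *host)
{
	return shost_priv(host);
}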
1145 | /** | |
1146 | * struct ufs_hw_queue - per hardware queue structure | |
2468da61 AD |
1147 | * @mcq_sq_head: base address of submission queue head pointer |
1148 | * @mcq_sq_tail: base address of submission queue tail pointer | |
1149 | * @mcq_cq_head: base address of completion queue head pointer | |
1150 | * @mcq_cq_tail: base address of completion queue tail pointer | |
4682abfa AD |
1151 | * @sqe_base_addr: submission queue entry base address |
1152 | * @sqe_dma_addr: submission queue dma address | |
1153 | * @cqe_base_addr: completion queue base address | |
1154 | * @cqe_dma_addr: completion queue dma address | |
1155 | * @max_entries: max number of slots in this hardware queue | |
2468da61 | 1156 | * @id: hardware queue ID |
22a2d563 AD |
1157 | * @sq_tail_slot: current slot to which SQ tail pointer is pointing |
1158 | * @sq_lock: serialize submission queue access | |
f87b2c41 AD |
1159 | * @cq_tail_slot: current slot to which CQ tail pointer is pointing |
1160 | * @cq_head_slot: current slot to which CQ head pointer is pointing | |
ed975065 | 1161 | * @cq_lock: Synchronize between multiple polling instances |
8d729034 | 1162 | * @sq_mutex: prevent submission queue concurrent access |
4682abfa AD |
1163 | */ |
1164 | struct ufs_hw_queue { | |
2468da61 AD |
1165 | void __iomem *mcq_sq_head; |
1166 | void __iomem *mcq_sq_tail; | |
1167 | void __iomem *mcq_cq_head; | |
1168 | void __iomem *mcq_cq_tail; | |
1169 | ||
3c85f087 | 1170 | struct utp_transfer_req_desc *sqe_base_addr; |
4682abfa AD |
1171 | dma_addr_t sqe_dma_addr; |
1172 | struct cq_entry *cqe_base_addr; | |
1173 | dma_addr_t cqe_dma_addr; | |
1174 | u32 max_entries; | |
2468da61 | 1175 | u32 id; |
22a2d563 AD |
1176 | u32 sq_tail_slot; |
1177 | spinlock_t sq_lock; | |
f87b2c41 AD |
1178 | u32 cq_tail_slot; |
1179 | u32 cq_head_slot; | |
ed975065 | 1180 | spinlock_t cq_lock; |
8d729034 BN |
1181 | /* prevent concurrent access to submission queue */ |
1182 | struct mutex sq_mutex; | |
e0eca63e VH |
1183 | }; |
1184 | ||
e8a1d87b MI |
1185 | #define MCQ_QCFG_SIZE 0x40 |
1186 | ||
2fc39848 MI |
1187 | static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba, |
1188 | enum ufshcd_mcq_opr opr, int idx) | |
1189 | { | |
1190 | return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx; | |
1191 | } | |
1192 | ||
e8a1d87b MI |
1193 | static inline unsigned int ufshcd_mcq_cfg_offset(unsigned int reg, int idx) |
1194 | { | |
1195 | return reg + MCQ_QCFG_SIZE * idx; | |
1196 | } | |
1197 | ||
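/*
 * Illustrative sketch: the per-queue MCQ config registers are laid out at a
 * fixed MCQ_QCFG_SIZE stride from hba->mcq_base, so (assuming that layout)
 * the register block of queue 'i' can be addressed like this.
 */
static inline void __iomem *example_mcq_cfg_addr(struct ufs_hba *hba,
						 unsigned int reg, int i)
{
	return hba->mcq_base + ufshcd_mcq_cfg_offset(reg, i);
}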
ada1e653 EB |
1198 | #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE |
1199 | static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) | |
1200 | { | |
1201 | return hba->sg_entry_size; | |
1202 | } | |
1203 | ||
1204 | static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size) | |
1205 | { | |
1206 | WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry)); | |
1207 | hba->sg_entry_size = sg_entry_size; | |
1208 | } | |
1209 | #else | |
1210 | static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) | |
1211 | { | |
1212 | return sizeof(struct ufshcd_sg_entry); | |
1213 | } | |
1214 | ||
1215 | #define ufshcd_set_sg_entry_size(hba, sg_entry_size) \ | |
1216 | ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); }) | |
1217 | #endif | |
1218 | ||
75d0c649 EB |
1219 | #ifdef CONFIG_SCSI_UFS_CRYPTO |
1220 | static inline struct ufs_hba * | |
1221 | ufs_hba_from_crypto_profile(struct blk_crypto_profile *profile) | |
1222 | { | |
1223 | return container_of(profile, struct ufs_hba, crypto_profile); | |
1224 | } | |
1225 | #endif | |
1226 | ||
06caeb53 | 1227 | static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba) |
ada1e653 EB |
1228 | { |
1229 | return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba); | |
1230 | } | |
1231 | ||
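/*
 * Illustrative sketch: with a possibly variant-sized SG entry, the UCD array
 * covering all general-purpose slots would be sized as below; this mirrors
 * how the core computes its allocation, stated here only as a hedged example.
 */
static inline size_t example_ucdl_alloc_size(const struct ufs_hba *hba)
{
	return ufshcd_get_ucd_size(hba) * hba->nutrs;
}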
1ab27c9c ST |
1232 | /* Returns true if clocks can be gated. Otherwise false */ |
1233 | static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba) | |
1234 | { | |
1235 | return hba->caps & UFSHCD_CAP_CLK_GATING; | |
1236 | } | |
1237 | static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba) | |
1238 | { | |
1239 | return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; | |
1240 | } | |
fcb0c4b0 | 1241 | static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba) |
856b3483 ST |
1242 | { |
1243 | return hba->caps & UFSHCD_CAP_CLK_SCALING; | |
1244 | } | |
374a246e SJ |
1245 | static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) |
1246 | { | |
1247 | return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; | |
1248 | } | |
49615ba1 SC |
1249 | static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba) |
1250 | { | |
1251 | return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND; | |
1252 | } | |
374a246e | 1253 | |
b852190e YG |
1254 | static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) |
1255 | { | |
1c0810e7 KP |
1256 | return (hba->caps & UFSHCD_CAP_INTR_AGGR) && |
1257 | !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR); | |
b852190e YG |
1258 | } |
1259 | ||
dd7143e2 CG |
1260 | static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba) |
1261 | { | |
1262 | return !!(ufshcd_is_link_hibern8(hba) && | |
1263 | (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE)); | |
1264 | } | |
1265 | ||
ee5f1042 SC |
1266 | static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba) |
1267 | { | |
8da76f71 AH |
1268 | return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) && |
1269 | !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8); | |
ee5f1042 SC |
1270 | } |
1271 | ||
5a244e0e SC |
1272 | static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba) |
1273 | { | |
51d1628f | 1274 | return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit); |
5a244e0e SC |
1275 | } |
1276 | ||
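Illustration (hypothetical helper, not upstream code): hba->ahit mirrors the Auto-Hibernate Idle Timer register, so a zero timer field means auto-hibern8 is effectively off. To change the timer count while leaving the other bits (such as the scale field) alone, the same mask can be used with FIELD_PREP() and the result applied via ufshcd_auto_hibern8_update(), declared later in this header:

static inline void ufs_example_set_ah8_timer(struct ufs_hba *hba, u32 count)
{
	u32 ahit = hba->ahit;

	ahit &= ~UFSHCI_AHIBERN8_TIMER_MASK;
	ahit |= FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, count);
	ufshcd_auto_hibern8_update(hba, ahit);	/* applies the new value to the controller */
}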
3d17b9b5 AD |
1277 | static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba) |
1278 | { | |
1279 | return hba->caps & UFSHCD_CAP_WB_EN; | |
1280 | } | |
1281 | ||
87bd0501 PW |
1282 | static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba) |
1283 | { | |
1284 | return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING; | |
1285 | } | |
1286 | ||
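The capability predicates above only report what the platform driver opted into via hba->caps; they do not keep the link or the clocks up by themselves. As a sketch (hypothetical helper, assuming the usual pairing), code that touches controller registers outside the normal request path brackets the access with ufshcd_hold()/ufshcd_release(), declared further down in this header; when clock gating is not allowed the pair degenerates to a no-op:

static inline u32 ufs_example_read_version(struct ufs_hba *hba)
{
	u32 ver;

	ufshcd_hold(hba);		/* may sleep until the clocks are ungated */
	ver = ufshcd_readl(hba, REG_UFS_VERSION);
	ufshcd_release(hba);		/* let the gating timer run again */
	return ver;
}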
2468da61 AD |
1287 | #define ufsmcq_writel(hba, val, reg) \ |
1288 | writel((val), (hba)->mcq_base + (reg)) | |
1289 | #define ufsmcq_readl(hba, reg) \ | |
1290 | readl((hba)->mcq_base + (reg)) | |
1291 | ||
1292 | #define ufsmcq_writelx(hba, val, reg) \ | |
1293 | writel_relaxed((val), (hba)->mcq_base + (reg)) | |
1294 | #define ufsmcq_readlx(hba, reg) \ | |
1295 | readl_relaxed((hba)->mcq_base + (reg)) | |
1296 | ||
b873a275 SJ |
1297 | #define ufshcd_writel(hba, val, reg) \ |
1298 | writel((val), (hba)->mmio_base + (reg)) | |
1299 | #define ufshcd_readl(hba, reg) \ | |
1300 | readl((hba)->mmio_base + (reg)) | |
1301 | ||
e785060e | 1302 | /** |
cff91daf BVA |
1303 | * ufshcd_rmwl - perform read/modify/write for a controller register |
1304 | * @hba: per adapter instance | |
1305 | * @mask: bits of the register to update | |
1306 | * @val: new value for the bits selected by @mask (other bits are preserved) | |
1307 | * @reg: register address | |
e785060e DR |
1308 | */ |
1309 | static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) | |
1310 | { | |
1311 | u32 tmp; | |
1312 | ||
1313 | tmp = ufshcd_readl(hba, reg); | |
1314 | tmp &= ~mask; | |
1315 | tmp |= (val & mask); | |
1316 | ufshcd_writel(hba, tmp, reg); | |
1317 | } | |
1318 | ||
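For example (hypothetical register and field, shown only to illustrate the call), updating a multi-bit field without disturbing the rest of the register:

#define UFS_EXAMPLE_VENDOR_REG	0x100		/* hypothetical register offset */
#define UFS_EXAMPLE_MODE_MASK	GENMASK(3, 0)	/* hypothetical 4-bit field */

static inline void ufs_example_set_mode(struct ufs_hba *hba, u32 mode)
{
	/* only the bits inside the mask change; the rest are read back unchanged */
	ufshcd_rmwl(hba, UFS_EXAMPLE_MODE_MASK,
		    FIELD_PREP(UFS_EXAMPLE_MODE_MASK, mode),
		    UFS_EXAMPLE_VENDOR_REG);
}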
0ae7a027 MS |
1319 | void ufshcd_enable_irq(struct ufs_hba *hba); |
1320 | void ufshcd_disable_irq(struct ufs_hba *hba); | |
5c0c28a8 | 1321 | int ufshcd_alloc_host(struct device *, struct ufs_hba **); |
9d19bf7a | 1322 | int ufshcd_hba_enable(struct ufs_hba *hba); |
ecd7beb3 | 1323 | int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int); |
087c5efa | 1324 | int ufshcd_link_recovery(struct ufs_hba *hba); |
9d19bf7a | 1325 | int ufshcd_make_hba_operational(struct ufs_hba *hba); |
e0eca63e | 1326 | void ufshcd_remove(struct ufs_hba *); |
525943a5 | 1327 | int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); |
9d19bf7a | 1328 | int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); |
5c955c10 | 1329 | void ufshcd_delay_us(unsigned long us, unsigned long tolerance); |
9e1e8a75 | 1330 | void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); |
e965e5e0 | 1331 | void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val); |
3a95f5b3 | 1332 | void ufshcd_hba_stop(struct ufs_hba *hba); |
267a59f6 | 1333 | void ufshcd_schedule_eh_work(struct ufs_hba *hba); |
11afb65c | 1334 | void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); |
175d1825 | 1335 | unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba); |
11afb65c | 1336 | u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i); |
e02288e0 | 1337 | void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); |
57d6ef46 | 1338 | unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, |
e02288e0 | 1339 | struct ufs_hw_queue *hwq); |
11afb65c | 1340 | void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); |
ab3e6c4e | 1341 | void ufshcd_mcq_enable(struct ufs_hba *hba); |
a085e037 | 1342 | void ufshcd_mcq_enable_esi(struct ufs_hba *hba); |
e02288e0 | 1343 | void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg); |
e0eca63e | 1344 | |
72208ebe MS |
1345 | int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table, |
1346 | struct dev_pm_opp *opp, void *data, | |
1347 | bool scaling_down); | |
1ce5898a YG |
1348 | /** |
1349 | * ufshcd_set_variant - attach variant specific data to the hba | |
cff91daf BVA |
1350 | * @hba: per adapter instance |
1351 | * @variant: pointer to variant specific data | |
1ce5898a YG |
1352 | */ |
1353 | static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant) | |
1354 | { | |
1355 | BUG_ON(!hba); | |
1356 | hba->priv = variant; | |
1357 | } | |
1358 | ||
1359 | /** | |
1360 | * ufshcd_get_variant - get variant specific data from the hba | |
cff91daf | 1361 | * @hba: per adapter instance |
1ce5898a YG |
1362 | */ |
1363 | static inline void *ufshcd_get_variant(struct ufs_hba *hba) | |
1364 | { | |
1365 | BUG_ON(!hba); | |
1366 | return hba->priv; | |
1367 | } | |
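A sketch of the usual pattern (all names below are hypothetical): a platform glue driver allocates its private state in its ->init() callback, stores it with ufshcd_set_variant(), and fetches it back with ufshcd_get_variant() from every other callback:

struct ufs_example_host {
	bool limit_to_hs_g3;			/* illustrative field */
};

static int ufs_example_init(struct ufs_hba *hba)
{
	struct ufs_example_host *host;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	ufshcd_set_variant(hba, host);
	return 0;
}

static bool ufs_example_wants_hs_g3(struct ufs_hba *hba)
{
	struct ufs_example_host *host = ufshcd_get_variant(hba);

	return host->limit_to_hs_g3;
}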
e88e2d32 | 1368 | |
9bb25e5d | 1369 | #ifdef CONFIG_PM |
f1ecbe1e BVA |
1370 | extern int ufshcd_runtime_suspend(struct device *dev); |
1371 | extern int ufshcd_runtime_resume(struct device *dev); | |
9bb25e5d BVA |
1372 | #endif |
1373 | #ifdef CONFIG_PM_SLEEP | |
f1ecbe1e BVA |
1374 | extern int ufshcd_system_suspend(struct device *dev); |
1375 | extern int ufshcd_system_resume(struct device *dev); | |
88441a8d AH |
1376 | extern int ufshcd_system_freeze(struct device *dev); |
1377 | extern int ufshcd_system_thaw(struct device *dev); | |
1378 | extern int ufshcd_system_restore(struct device *dev); | |
9bb25e5d | 1379 | #endif |
88441a8d | 1380 | |
6b070711 SL |
1381 | extern int ufshcd_dme_reset(struct ufs_hba *hba); |
1382 | extern int ufshcd_dme_enable(struct ufs_hba *hba); | |
fc85a74e SC |
1383 | extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba, |
1384 | int agreed_gear, | |
1385 | int adapt_val); | |
12b4fdb4 SJ |
1386 | extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, |
1387 | u8 attr_set, u32 mib_val, u8 peer); | |
1388 | extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | |
1389 | u32 *mib_val, u8 peer); | |
0d846e70 AA |
1390 | extern int ufshcd_config_pwr_mode(struct ufs_hba *hba, |
1391 | struct ufs_pa_layer_attr *desired_pwr_mode); | |
fc53683b | 1392 | extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode); |
12b4fdb4 SJ |
1393 | |
1394 | /* UIC command interfaces for DME primitives */ | |
1395 | #define DME_LOCAL 0 | |
1396 | #define DME_PEER 1 | |
1397 | #define ATTR_SET_NOR 0 /* NORMAL */ | |
1398 | #define ATTR_SET_ST 1 /* STATIC */ | |
1399 | ||
1400 | static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, | |
1401 | u32 mib_val) | |
1402 | { | |
1403 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | |
1404 | mib_val, DME_LOCAL); | |
1405 | } | |
1406 | ||
1407 | static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, | |
1408 | u32 mib_val) | |
1409 | { | |
1410 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | |
1411 | mib_val, DME_LOCAL); | |
1412 | } | |
1413 | ||
1414 | static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, | |
1415 | u32 mib_val) | |
1416 | { | |
1417 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | |
1418 | mib_val, DME_PEER); | |
1419 | } | |
1420 | ||
1421 | static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, | |
1422 | u32 mib_val) | |
1423 | { | |
1424 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | |
1425 | mib_val, DME_PEER); | |
1426 | } | |
1427 | ||
1428 | static inline int ufshcd_dme_get(struct ufs_hba *hba, | |
1429 | u32 attr_sel, u32 *mib_val) | |
1430 | { | |
1431 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); | |
1432 | } | |
1433 | ||
1434 | static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, | |
1435 | u32 attr_sel, u32 *mib_val) | |
1436 | { | |
1437 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); | |
1438 | } | |
1439 | ||
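Sketch (the surrounding function and the +1 adjustment are illustrative; PA_TACTIVATE and UIC_ARG_MIB() are existing identifiers from the ufs/unipro.h and ufs/ufshci.h headers included above): reading a peer (device-side) UniPro attribute and writing an adjusted value back through these wrappers:

static int ufs_example_stretch_tactivate(struct ufs_hba *hba)
{
	u32 peer_tactivate;
	int ret;

	/* read the current device-side value */
	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_tactivate);
	if (ret)
		return ret;

	/* write back a slightly larger value, purely as an example */
	return ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), peer_tactivate + 1);
}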
3bcd901e | 1440 | static inline bool ufshcd_is_hs_mode(const struct ufs_pa_layer_attr *pwr_info) |
f37aabcf YG |
1441 | { |
1442 | return (pwr_info->pwr_rx == FAST_MODE || | |
1443 | pwr_info->pwr_rx == FASTAUTO_MODE) && | |
1444 | (pwr_info->pwr_tx == FAST_MODE || | |
1445 | pwr_info->pwr_tx == FASTAUTO_MODE); | |
1446 | } | |
1447 | ||
984eaac1 SC |
1448 | static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba) |
1449 | { | |
1450 | return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); | |
1451 | } | |
1452 | ||
ba7af5ec | 1453 | void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); |
aead21f3 BVA |
1454 | void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, |
1455 | const struct ufs_dev_quirk *fixups); | |
4b828fe1 TW |
1456 | #define SD_ASCII_STD true |
1457 | #define SD_RAW false | |
1458 | int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, | |
1459 | u8 **buf, bool ascii); | |
2238d31c | 1460 | |
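Usage sketch (hypothetical caller; the descriptor index would normally come from the device descriptor): ufshcd_read_string_desc() allocates the output buffer on behalf of the caller, so the caller frees it with kfree(); with SD_ASCII_STD the string is returned as printable ASCII:

static void ufs_example_log_product_name(struct ufs_hba *hba, u8 name_index)
{
	u8 *name = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, name_index, &name, SD_ASCII_STD);
	if (ret > 0)
		dev_info(hba->dev, "product name: %s\n", name);

	kfree(name);	/* buffer was allocated by the helper */
}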
078f4f4b | 1461 | void ufshcd_hold(struct ufs_hba *hba); |
1ab27c9c | 1462 | void ufshcd_release(struct ufs_hba *hba); |
a4b0e8a4 | 1463 | |
ad8a647e BVA |
1464 | void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value); |
1465 | ||
1d6f9dec SC |
1466 | int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg); |
1467 | ||
e77044c5 AA |
1468 | int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); |
1469 | ||
6ff265fc BH |
1470 | int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, |
1471 | struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req, | |
1472 | struct ufs_ehs *ehs_rsp, int sg_cnt, | |
1473 | struct scatterlist *sg_list, enum dma_data_direction dir); | |
3b5f3c0d | 1474 | int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); |
6c4148ce | 1475 | int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable); |
500d4b74 | 1476 | int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode); |
b294ff3e | 1477 | int ufshcd_suspend_prepare(struct device *dev); |
ddba1cf7 | 1478 | int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm); |
b294ff3e | 1479 | void ufshcd_resume_complete(struct device *dev); |
548fdf77 | 1480 | bool ufshcd_is_hba_active(struct ufs_hba *hba); |
2777e73f MN |
1481 | void ufshcd_pm_qos_init(struct ufs_hba *hba); |
1482 | void ufshcd_pm_qos_exit(struct ufs_hba *hba); | |
8e834ca5 | 1483 | |
0263bcd0 | 1484 | /* Wrapper functions for safely calling variant operations */ |
0263bcd0 YG |
1485 | static inline int ufshcd_vops_init(struct ufs_hba *hba) |
1486 | { | |
1487 | if (hba->vops && hba->vops->init) | |
1488 | return hba->vops->init(hba); | |
1489 | ||
1490 | return 0; | |
1491 | } | |
1492 | ||
92bcebe4 SC |
1493 | static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba) |
1494 | { | |
1495 | if (hba->vops && hba->vops->phy_initialization) | |
1496 | return hba->vops->phy_initialization(hba); | |
1497 | ||
1498 | return 0; | |
1499 | } | |
1500 | ||
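These NULL-safe wrappers let the core invoke optional hooks without checking hba->vops at every call site. A sketch of the other side of the contract (all names hypothetical): a glue driver fills in a struct ufs_hba_variant_ops, setting only the callbacks it implements, and typically hands it to the platform init helper:

static int ufs_example_phy_init(struct ufs_hba *hba)
{
	return 0;	/* platform-specific PHY bring-up would go here */
}

static const struct ufs_hba_variant_ops ufs_example_hba_vops = {
	.name			= "ufs-example",
	.init			= ufs_example_init,	/* see the earlier sketch */
	.phy_initialization	= ufs_example_phy_init,
};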
35d11ec2 | 1501 | extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; |
cbb6813e | 1502 | |
ba80917d TW |
1503 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, |
1504 | const char *prefix); | |
1505 | ||
7deedfda AH |
1506 | int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask); |
1507 | int ufshcd_write_ee_control(struct ufs_hba *hba); | |
35d11ec2 KK |
1508 | int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, |
1509 | const u16 *other_mask, u16 set, u16 clr); | |
cd469475 | 1510 | |
e0eca63e | 1511 | #endif /* End of Header */ |