Commit | Line | Data |
---|---|---|
0cbf2608 MS |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | |
3 | * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. | |
4 | * | |
5 | */ | |
6 | #ifndef _MHI_H_ | |
7 | #define _MHI_H_ | |
8 | ||
9 | #include <linux/device.h> | |
10 | #include <linux/dma-direction.h> | |
11 | #include <linux/mutex.h> | |
189ff97c | 12 | #include <linux/skbuff.h> |
0cbf2608 | 13 | #include <linux/slab.h> |
e1427f32 | 14 | #include <linux/spinlock.h> |
0cbf2608 MS |
15 | #include <linux/wait.h> |
16 | #include <linux/workqueue.h> | |
17 | ||
/* Number of 32-bit OEM PK hash segments exposed via @oem_pk_hash in struct mhi_controller */
#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16

/* Opaque MHI core types; their definitions live in the MHI core, not in this header */
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct mhi_buf_info;
/**
 * enum mhi_callback - MHI callback reasons passed to client status_cb handlers
 * @MHI_CB_IDLE: MHI entered idle state
 * @MHI_CB_PENDING_DATA: New data available for client to process
 * @MHI_CB_LPM_ENTER: MHI host entered low power mode
 * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
 * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
 * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
 * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
 * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
 */
enum mhi_callback {
	MHI_CB_IDLE,
	MHI_CB_PENDING_DATA,
	MHI_CB_LPM_ENTER,
	MHI_CB_LPM_EXIT,
	MHI_CB_EE_RDDM,
	MHI_CB_EE_MISSION_MODE,
	MHI_CB_SYS_ERROR,
	MHI_CB_FATAL_ERROR,
	MHI_CB_BW_REQ,
};
49 | ||
/**
 * enum mhi_flags - Transfer flags (bitmask, may be OR'd together)
 * @MHI_EOB: End of buffer for bulk transfer
 * @MHI_EOT: End of transfer
 * @MHI_CHAIN: Linked transfer
 */
enum mhi_flags {
	MHI_EOB = BIT(0),
	MHI_EOT = BIT(1),
	MHI_CHAIN = BIT(2),
};
61 | ||
/**
 * enum mhi_device_type - Device types
 * @MHI_DEVICE_XFER: Handles data transfer
 * @MHI_DEVICE_CONTROLLER: Control device
 */
enum mhi_device_type {
	MHI_DEVICE_XFER,
	MHI_DEVICE_CONTROLLER,
};
71 | ||
/**
 * enum mhi_ch_type - Channel types
 * @MHI_CH_TYPE_INVALID: Invalid channel type
 * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
 * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
 * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
 *				   multiple packets and send them as a single
 *				   large packet to reduce CPU consumption
 *
 * The OUTBOUND/INBOUND values deliberately alias the DMA direction values so a
 * channel type can double as its dma_data_direction.
 */
enum mhi_ch_type {
	MHI_CH_TYPE_INVALID = 0,
	MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
	MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
	MHI_CH_TYPE_INBOUND_COALESCED = 3,
};
87 | ||
/**
 * struct image_info - Firmware and RDDM table
 * @mhi_buf: Buffer for firmware and RDDM table
 * @entries: # of entries in table
 */
struct image_info {
	struct mhi_buf *mhi_buf;
	/* private: from internal.h */
	struct bhi_vec_entry *bhi_vec;
	/* public: */
	u32 entries;
};
100 | ||
1d3173a3 MS |
/**
 * struct mhi_link_info - BW requirement
 * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
 * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
 */
struct mhi_link_info {
	unsigned int target_link_speed;
	unsigned int target_link_width;
};
110 | ||
0cbf2608 MS |
/**
 * enum mhi_ee_type - Execution environment types
 * @MHI_EE_PBL: Primary Bootloader
 * @MHI_EE_SBL: Secondary Bootloader
 * @MHI_EE_AMSS: Modem, aka the primary runtime EE
 * @MHI_EE_RDDM: Ram dump download mode
 * @MHI_EE_WFW: WLAN firmware mode
 * @MHI_EE_PTHRU: Passthrough
 * @MHI_EE_EDL: Embedded downloader
 * @MHI_EE_FP: Flash Programmer Environment
 * @MHI_EE_MAX_SUPPORTED: Highest device-reported EE value handled by the host
 * @MHI_EE_DISABLE_TRANSITION: Local EE used for disable transitions, not part
 *			       of the MHI spec
 * @MHI_EE_NOT_SUPPORTED: Device EE not supported by the host stack
 * @MHI_EE_MAX: Sentinel marking the end of the EE list
 */
enum mhi_ee_type {
	MHI_EE_PBL,
	MHI_EE_SBL,
	MHI_EE_AMSS,
	MHI_EE_RDDM,
	MHI_EE_WFW,
	MHI_EE_PTHRU,
	MHI_EE_EDL,
	MHI_EE_FP,
	MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
	MHI_EE_NOT_SUPPORTED,
	MHI_EE_MAX,
};
136 | ||
a6e2e352 MS |
/**
 * enum mhi_state - MHI states
 * @MHI_STATE_RESET: Reset state
 * @MHI_STATE_READY: Ready state
 * @MHI_STATE_M0: M0 state
 * @MHI_STATE_M1: M1 state
 * @MHI_STATE_M2: M2 state
 * @MHI_STATE_M3: M3 state
 * @MHI_STATE_M3_FAST: M3 Fast state
 * @MHI_STATE_BHI: BHI state
 * @MHI_STATE_SYS_ERR: System Error state
 * @MHI_STATE_MAX: Sentinel marking the end of the state list
 */
enum mhi_state {
	MHI_STATE_RESET = 0x0,
	MHI_STATE_READY = 0x1,
	MHI_STATE_M0 = 0x2,
	MHI_STATE_M1 = 0x3,
	MHI_STATE_M2 = 0x4,
	MHI_STATE_M3 = 0x5,
	MHI_STATE_M3_FAST = 0x6,
	MHI_STATE_BHI = 0x7,
	MHI_STATE_SYS_ERR = 0xFF,
	MHI_STATE_MAX,
};
161 | ||
0cbf2608 MS |
/**
 * enum mhi_ch_ee_mask - Execution environment mask for channel
 * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
 * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
 * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
 * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
 * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
 * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
 * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
 *
 * Each bit position corresponds to the matching enum mhi_ee_type value.
 */
enum mhi_ch_ee_mask {
	MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
	MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
	MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
	MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
	MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
	MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
	MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
};
181 | ||
/**
 * enum mhi_er_data_type - Event ring data types
 * @MHI_ER_DATA: Only client data over this ring
 * @MHI_ER_CTRL: MHI control data and client data
 */
enum mhi_er_data_type {
	MHI_ER_DATA,
	MHI_ER_CTRL,
};
191 | ||
/**
 * enum mhi_db_brst_mode - Doorbell mode
 * @MHI_DB_BRST_DISABLE: Burst mode disable
 * @MHI_DB_BRST_ENABLE: Burst mode enable
 */
enum mhi_db_brst_mode {
	MHI_DB_BRST_DISABLE = 0x2,
	MHI_DB_BRST_ENABLE = 0x3,
};
201 | ||
/**
 * struct mhi_channel_config - Channel configuration structure for controller
 * @name: The name of this channel
 * @num: The number assigned to this channel
 * @num_elements: The number of elements that can be queued to this channel
 * @local_elements: The local ring length of the channel
 * @event_ring: The event ring index that services this channel
 * @dir: Direction that data may flow on this channel
 * @type: Channel type
 * @ee_mask: Execution Environment mask for this channel
 * @pollcfg: Polling configuration for burst mode. 0 is default. milliseconds
 *	     for UL channels, multiple of 8 ring elements for DL channels
 * @doorbell: Doorbell mode
 * @lpm_notify: The channel master requires low power mode notifications
 * @offload_channel: The client manages the channel completely
 * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
 * @auto_queue: Framework will automatically queue buffers for DL traffic
 * @wake_capable: Channel capable of waking up the system
 */
struct mhi_channel_config {
	char *name;
	u32 num;
	u32 num_elements;
	u32 local_elements;
	u32 event_ring;
	enum dma_data_direction dir;
	enum mhi_ch_type type;
	u32 ee_mask;
	u32 pollcfg;
	enum mhi_db_brst_mode doorbell;
	bool lpm_notify;
	bool offload_channel;
	bool doorbell_mode_switch;
	bool auto_queue;
	bool wake_capable;
};
238 | ||
/**
 * struct mhi_event_config - Event ring configuration structure for controller
 * @num_elements: The number of elements that can be queued to this ring
 * @irq_moderation_ms: Delay irq for additional events to be aggregated
 * @irq: IRQ associated with this ring
 * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
 * @priority: Priority of this ring. Use 1 for now
 * @mode: Doorbell mode
 * @data_type: Type of data this ring will process
 * @hardware_event: This ring is associated with hardware channels
 * @client_managed: This ring is client managed
 * @offload_channel: This ring is associated with an offloaded channel
 */
struct mhi_event_config {
	u32 num_elements;
	u32 irq_moderation_ms;
	u32 irq;
	u32 channel;
	u32 priority;
	enum mhi_db_brst_mode mode;
	enum mhi_er_data_type data_type;
	bool hardware_event;
	bool client_managed;
	bool offload_channel;
};
264 | ||
/**
 * struct mhi_controller_config - Root MHI controller configuration
 * @max_channels: Maximum number of channels supported
 * @timeout_ms: Timeout value for operations. 0 means use default
 * @ready_timeout_ms: Timeout value for waiting device to be ready (optional)
 * @buf_len: Size of automatically allocated buffers. 0 means use default
 * @num_channels: Number of channels defined in @ch_cfg
 * @ch_cfg: Array of defined channels
 * @num_events: Number of event rings defined in @event_cfg
 * @event_cfg: Array of defined event rings
 * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
 * @m2_no_db: Host is not allowed to ring DB in M2 state
 */
struct mhi_controller_config {
	u32 max_channels;
	u32 timeout_ms;
	u32 ready_timeout_ms;
	u32 buf_len;
	u32 num_channels;
	const struct mhi_channel_config *ch_cfg;
	u32 num_events;
	struct mhi_event_config *event_cfg;
	bool use_bounce_buf;
	bool m2_no_db;
};
290 | ||
/**
 * struct mhi_controller - Master MHI controller structure
 * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
 *            controller (required)
 * @mhi_dev: MHI device instance for the controller
 * @debugfs_dentry: MHI controller debugfs directory
 * @regs: Base address of MHI MMIO register space (required)
 * @bhi: Points to base of MHI BHI register space
 * @bhie: Points to base of MHI BHIe register space
 * @wake_db: MHI WAKE doorbell register address
 * @iova_start: IOMMU starting address for data (required)
 * @iova_stop: IOMMU stop address for data (required)
 * @fw_image: Firmware image name for normal booting (optional)
 * @fw_data: Firmware image data content for normal booting, used only
 *           if fw_image is NULL and fbc_download is true (optional)
 * @fw_sz: Firmware image data size for normal booting, used only if fw_image
 *         is NULL and fbc_download is true (optional)
 * @edl_image: Firmware image name for emergency download mode (optional)
 * @rddm_size: RAM dump size that host should allocate for debugging purpose
 * @sbl_size: SBL image size downloaded through BHIe (optional)
 * @seg_len: BHIe vector size (optional)
 * @reg_len: Length of the MHI MMIO region (required)
 * @fbc_image: Points to firmware image buffer
 * @rddm_image: Points to RAM dump buffer
 * @mhi_chan: Points to the channel configuration table
 * @lpm_chans: List of channels that require LPM notifications
 * @irq: base irq # to request (required)
 * @max_chan: Maximum number of channels the controller supports
 * @total_ev_rings: Total # of event rings allocated
 * @hw_ev_rings: Number of hardware event rings
 * @sw_ev_rings: Number of software event rings
 * @nr_irqs: Number of IRQ allocated by bus master (required)
 * @family_number: MHI controller family number
 * @device_number: MHI controller device number
 * @major_version: MHI controller major revision number
 * @minor_version: MHI controller minor revision number
 * @serial_number: MHI controller serial number obtained from BHI
 * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI
 * @mhi_event: MHI event ring configurations table
 * @mhi_cmd: MHI command ring configurations table
 * @mhi_ctxt: MHI device context, shared memory between host and device
 * @pm_mutex: Mutex for suspend/resume operation
 * @pm_lock: Lock for protecting MHI power management state
 * @timeout_ms: Timeout in ms for state transitions
 * @ready_timeout_ms: Timeout in ms for waiting device to be ready (optional)
 * @pm_state: MHI power management state
 * @db_access: DB access states
 * @ee: MHI device execution environment
 * @dev_state: MHI device state
 * @dev_wake: Device wakeup count
 * @pending_pkts: Pending packets for the controller
 * @M0: Counter tracking number of device transitions into the M0 state
 * @M2: Counter tracking number of device transitions into the M2 state
 * @M3: Counter tracking number of device transitions into the M3 state
 * @transition_list: List of MHI state transitions
 * @transition_lock: Lock for protecting MHI state transition list
 * @wlock: Lock for protecting device wakeup
 * @mhi_link_info: Device bandwidth info
 * @st_worker: State transition worker
 * @hiprio_wq: High priority workqueue for MHI work such as state transitions
 * @state_event: State change event
 * @status_cb: CB function to notify power states of the device (required)
 * @wake_get: CB function to assert device wake (optional)
 * @wake_put: CB function to de-assert device wake (optional)
 * @wake_toggle: CB function to assert and de-assert device wake (optional)
 * @runtime_get: CB function to controller runtime resume (required)
 * @runtime_put: CB function to decrement pm usage (required)
 * @map_single: CB function to create TRE buffer
 * @unmap_single: CB function to destroy TRE buffer
 * @read_reg: Read a MHI register via the physical link (required)
 * @write_reg: Write a MHI register via the physical link (required)
 * @reset: Controller specific reset function (optional)
 * @buffer_len: Bounce buffer length
 * @index: Index of the MHI controller instance
 * @bounce_buf: Use of bounce buffer
 * @fbc_download: MHI host needs to do complete image transfer (optional)
 * @wake_set: Device wakeup set flag
 * @irq_flags: irq flags passed to request_irq (optional)
 * @mru: the default MRU for the MHI device
 *
 * Fields marked as (required) need to be populated by the controller driver
 * before calling mhi_register_controller(). For the fields marked as (optional)
 * they can be populated depending on the usecase.
 *
 * The following fields are present for the purpose of implementing any device
 * specific quirks or customizations for specific MHI revisions used in device
 * by the controller drivers. The MHI stack will just populate these fields
 * during mhi_register_controller():
 *  family_number
 *  device_number
 *  major_version
 *  minor_version
 */
struct mhi_controller {
	struct device *cntrl_dev;
	struct mhi_device *mhi_dev;
	struct dentry *debugfs_dentry;
	void __iomem *regs;
	void __iomem *bhi;
	void __iomem *bhie;
	void __iomem *wake_db;

	dma_addr_t iova_start;
	dma_addr_t iova_stop;
	const char *fw_image;
	const u8 *fw_data;
	size_t fw_sz;
	const char *edl_image;
	size_t rddm_size;
	size_t sbl_size;
	size_t seg_len;
	size_t reg_len;
	struct image_info *fbc_image;
	struct image_info *rddm_image;
	struct mhi_chan *mhi_chan;
	struct list_head lpm_chans;
	int *irq;
	u32 max_chan;
	u32 total_ev_rings;
	u32 hw_ev_rings;
	u32 sw_ev_rings;
	u32 nr_irqs;
	u32 family_number;
	u32 device_number;
	u32 major_version;
	u32 minor_version;
	u32 serial_number;
	u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS];

	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	struct mhi_ctxt *mhi_ctxt;

	struct mutex pm_mutex;
	rwlock_t pm_lock;
	u32 timeout_ms;
	u32 ready_timeout_ms;
	u32 pm_state;
	u32 db_access;
	enum mhi_ee_type ee;
	enum mhi_state dev_state;
	atomic_t dev_wake;
	atomic_t pending_pkts;
	u32 M0, M2, M3;
	struct list_head transition_list;
	spinlock_t transition_lock;
	spinlock_t wlock;
	struct mhi_link_info mhi_link_info;
	struct work_struct st_worker;
	struct workqueue_struct *hiprio_wq;
	wait_queue_head_t state_event;

	void (*status_cb)(struct mhi_controller *mhi_cntrl,
			  enum mhi_callback cb);
	void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
	int (*runtime_get)(struct mhi_controller *mhi_cntrl);
	void (*runtime_put)(struct mhi_controller *mhi_cntrl);
	int (*map_single)(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf);
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
	int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			u32 *out);
	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			  u32 val);
	void (*reset)(struct mhi_controller *mhi_cntrl);

	size_t buffer_len;
	int index;
	bool bounce_buf;
	bool fbc_download;
	bool wake_set;
	unsigned long irq_flags;
	u32 mru;
};
466 | ||
/**
 * struct mhi_device - Structure representing an MHI device which binds
 *                     to channels or is associated with controllers
 * @id: Pointer to MHI device ID struct
 * @name: Name of the associated MHI device
 * @mhi_cntrl: Controller the device belongs to
 * @ul_chan: UL channel for the device
 * @dl_chan: DL channel for the device
 * @dev: Driver model device node for the MHI device
 * @dev_type: MHI device type
 * @ul_chan_id: MHI channel id for UL transfer
 * @dl_chan_id: MHI channel id for DL transfer
 * @dev_wake: Device wakeup counter
 */
struct mhi_device {
	const struct mhi_device_id *id;
	const char *name;
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *ul_chan;
	struct mhi_chan *dl_chan;
	struct device dev;
	enum mhi_device_type dev_type;
	int ul_chan_id;
	int dl_chan_id;
	u32 dev_wake;
};
493 | ||
/**
 * struct mhi_result - Completed buffer information
 * @buf_addr: Address of data buffer
 * @bytes_xferd: # of bytes transferred
 * @dir: Channel direction
 * @transaction_status: Status of last transaction
 */
struct mhi_result {
	void *buf_addr;
	size_t bytes_xferd;
	enum dma_data_direction dir;
	int transaction_status;
};
507 | ||
a6e2e352 MS |
/**
 * struct mhi_buf - MHI Buffer description
 * @buf: Virtual address of the buffer
 * @name: Buffer label. For offload channel, configurations name must be:
 *        ECA - Event context array data
 *        CCA - Channel context array data
 * @dma_addr: IOMMU address of the buffer
 * @len: # of bytes
 */
struct mhi_buf {
	void *buf;
	const char *name;
	dma_addr_t dma_addr;
	size_t len;
};
523 | ||
e755cadb MS |
524 | /** |
525 | * struct mhi_driver - Structure representing a MHI client driver | |
526 | * @probe: CB function for client driver probe function | |
527 | * @remove: CB function for client driver remove function | |
528 | * @ul_xfer_cb: CB function for UL data transfer | |
529 | * @dl_xfer_cb: CB function for DL data transfer | |
530 | * @status_cb: CB functions for asynchronous status | |
531 | * @driver: Device driver model driver | |
532 | */ | |
533 | struct mhi_driver { | |
534 | const struct mhi_device_id *id_table; | |
535 | int (*probe)(struct mhi_device *mhi_dev, | |
536 | const struct mhi_device_id *id); | |
537 | void (*remove)(struct mhi_device *mhi_dev); | |
538 | void (*ul_xfer_cb)(struct mhi_device *mhi_dev, | |
539 | struct mhi_result *result); | |
540 | void (*dl_xfer_cb)(struct mhi_device *mhi_dev, | |
541 | struct mhi_result *result); | |
542 | void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb); | |
543 | struct device_driver driver; | |
544 | }; | |
545 | ||
/* Convert an embedded struct device_driver / struct device back to its MHI wrapper */
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
548 | ||
f42dfbe8 BB |
/**
 * mhi_alloc_controller - Allocate the MHI Controller structure
 * Allocate the mhi_controller structure using zero initialized memory
 */
struct mhi_controller *mhi_alloc_controller(void);

/**
 * mhi_free_controller - Free the MHI Controller structure
 * Free the mhi_controller structure which was previously allocated
 */
void mhi_free_controller(struct mhi_controller *mhi_cntrl);

/**
 * mhi_register_controller - Register MHI controller
 * @mhi_cntrl: MHI controller to register
 * @config: Configuration to use for the controller
 */
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config);

/**
 * mhi_unregister_controller - Unregister MHI controller
 * @mhi_cntrl: MHI controller to unregister
 */
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
574 | ||
82174738 MS |
/*
 * module_mhi_driver() - Helper macro for drivers that don't do
 * anything special other than using default mhi_driver_register() and
 * mhi_driver_unregister().  This eliminates a lot of boilerplate.
 * Each module may only use this macro once.
 */
#define module_mhi_driver(mhi_drv) \
	module_driver(mhi_drv, mhi_driver_register, \
		      mhi_driver_unregister)

/*
 * Macro to avoid include chaining to get THIS_MODULE
 */
#define mhi_driver_register(mhi_drv) \
	__mhi_driver_register(mhi_drv, THIS_MODULE)

/**
 * __mhi_driver_register - Register driver with MHI framework
 * @mhi_drv: Driver associated with the device
 * @owner: The module owner
 */
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);

/**
 * mhi_driver_unregister - Unregister a driver for mhi_devices
 * @mhi_drv: Driver associated with the device
 */
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
603 | ||
a6e2e352 MS |
/**
 * mhi_set_mhi_state - Set MHI device state
 * @mhi_cntrl: MHI controller
 * @state: State to set
 */
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
		       enum mhi_state state);

/**
 * mhi_notify - Notify the MHI client driver about client device status
 * @mhi_dev: MHI device instance
 * @cb_reason: MHI callback reason
 */
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);

/**
 * mhi_get_free_desc_count - Get transfer ring length
 * Get # of TD available to queue buffers
 * @mhi_dev: Device associated with the channels
 * @dir: Direction of the channel
 */
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
			    enum dma_data_direction dir);
627 | ||
3000f85b MS |
/**
 * mhi_prepare_for_power_up - Do pre-initialization before power up.
 *                            This is optional, call this before power up if
 *                            the controller does not want bus framework to
 *                            automatically free any allocated memory during
 *                            shutdown process.
 * @mhi_cntrl: MHI controller
 */
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_async_power_up - Start MHI power up sequence
 * @mhi_cntrl: MHI controller
 */
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_sync_power_up - Start MHI power up sequence and wait till the device
 *                     enters valid EE state
 * @mhi_cntrl: MHI controller
 */
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_power_down - Start MHI power down sequence
 * @mhi_cntrl: MHI controller
 * @graceful: Link is still accessible, so do a graceful shutdown process
 */
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);

/**
 * mhi_unprepare_after_power_down - Free any allocated memory after power down
 * @mhi_cntrl: MHI controller
 */
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_suspend - Move MHI into a suspended state
 * @mhi_cntrl: MHI controller
 */
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_resume - Resume MHI from suspended state
 * @mhi_cntrl: MHI controller
 */
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_resume_force - Force resume MHI from suspended state
 * @mhi_cntrl: MHI controller
 *
 * Resume the device irrespective of its MHI state. As per the MHI spec, devices
 * has to be in M3 state during resume. But some devices seem to be in a
 * different MHI state other than M3 but they continue working fine if allowed.
 * This API is intented to be used for such devices.
 *
 * Return: 0 if the resume succeeds, a negative error code otherwise
 */
int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
688 | ||
6fdfdd27 | 689 | /** |
9e1660e5 BB |
690 | * mhi_download_rddm_image - Download ramdump image from device for |
691 | * debugging purpose. | |
6fdfdd27 MS |
692 | * @mhi_cntrl: MHI controller |
693 | * @in_panic: Download rddm image during kernel panic | |
694 | */ | |
9e1660e5 | 695 | int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic); |
6fdfdd27 MS |
696 | |
697 | /** | |
698 | * mhi_force_rddm_mode - Force device into rddm mode | |
699 | * @mhi_cntrl: MHI controller | |
700 | */ | |
701 | int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); | |
702 | ||
78e1d226 BB |
703 | /** |
704 | * mhi_get_exec_env - Get BHI execution environment of the device | |
705 | * @mhi_cntrl: MHI controller | |
706 | */ | |
707 | enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); | |
708 | ||
6fdfdd27 MS |
709 | /** |
710 | * mhi_get_mhi_state - Get MHI state of the device | |
711 | * @mhi_cntrl: MHI controller | |
712 | */ | |
713 | enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); | |
714 | ||
b5a8d233 LP |
/**
 * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
 *                 to reset and recover a device.
 * @mhi_cntrl: MHI controller
 */
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);

/**
 * mhi_device_get - Disable device low power mode
 * @mhi_dev: Device associated with the channel
 */
void mhi_device_get(struct mhi_device *mhi_dev);

/**
 * mhi_device_get_sync - Disable device low power mode. Synchronously
 *                       take the controller out of suspended state
 * @mhi_dev: Device associated with the channel
 */
int mhi_device_get_sync(struct mhi_device *mhi_dev);

/**
 * mhi_device_put - Re-enable device low power mode
 * @mhi_dev: Device associated with the channel
 */
void mhi_device_put(struct mhi_device *mhi_dev);
740 | ||
/**
 * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
 * @mhi_dev: Device associated with the channels
 *
 * Allocate and initialize the channel context and also issue the START channel
 * command to both channels. Channels can be started only if both host and
 * device execution environments match and channels are in a DISABLED state.
 */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
 *                                      buffers for DL traffic
 * @mhi_dev: Device associated with the channels
 *
 * Allocate and initialize the channel context and also issue the START channel
 * command to both channels. Channels can be started only if both host and
 * device execution environments match and channels are in a DISABLED state.
 * The MHI core will automatically allocate and queue buffers for the DL traffic.
 */
int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);

/**
 * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
 *                               Issue the RESET channel command and let the
 *                               device clean-up the context so no incoming
 *                               transfers are seen on the host. Free memory
 *                               associated with the context on host. If device
 *                               is unresponsive, only perform a host side
 *                               clean-up. Channels can be reset only if both
 *                               host and device execution environments match
 *                               and channels are in an ENABLED, STOPPED or
 *                               SUSPENDED state.
 * @mhi_dev: Device associated with the channels
 */
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_queue_dma - Send or receive DMA mapped buffers from client device
 *                 over MHI channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @mhi_buf: Buffer for holding the DMA mapped data
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 */
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_buf - Send or receive raw buffers from client device over MHI
 *                 channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @buf: Buffer for holding the data
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 */
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @skb: Buffer for holding SKBs
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 */
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_is_full - Determine whether queueing new elements is possible
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 */
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);

0cbf2608 | 820 | #endif /* _MHI_H_ */ |