Commit | Line | Data |
---|---|---|
0cbf2608 MS |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | |
3 | * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. | |
4 | * | |
5 | */ | |
6 | #ifndef _MHI_H_ | |
7 | #define _MHI_H_ | |
8 | ||
9 | #include <linux/device.h> | |
10 | #include <linux/dma-direction.h> | |
11 | #include <linux/mutex.h> | |
189ff97c | 12 | #include <linux/skbuff.h> |
0cbf2608 | 13 | #include <linux/slab.h> |
e1427f32 | 14 | #include <linux/spinlock.h> |
0cbf2608 MS |
15 | #include <linux/wait.h> |
16 | #include <linux/workqueue.h> | |
17 | ||
8e3729bf BB |
18 | #define MHI_MAX_OEM_PK_HASH_SEGMENTS 16 |
19 | ||
0cbf2608 MS |
20 | struct mhi_chan; |
21 | struct mhi_event; | |
22 | struct mhi_ctxt; | |
23 | struct mhi_cmd; | |
24 | struct mhi_buf_info; | |
25 | ||
26 | /** | |
27 | * enum mhi_callback - MHI callback | |
28 | * @MHI_CB_IDLE: MHI entered idle state | |
29 | * @MHI_CB_PENDING_DATA: New data available for client to process | |
30 | * @MHI_CB_LPM_ENTER: MHI host entered low power mode | |
31 | * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode | |
32 | * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env | |
33 | * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env | |
34 | * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover) | |
35 | * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state | |
1d3173a3 | 36 | * @MHI_CB_BW_REQ: Received a bandwidth switch request from device |
0cbf2608 MS |
37 | */ |
38 | enum mhi_callback { | |
39 | MHI_CB_IDLE, | |
40 | MHI_CB_PENDING_DATA, | |
41 | MHI_CB_LPM_ENTER, | |
42 | MHI_CB_LPM_EXIT, | |
43 | MHI_CB_EE_RDDM, | |
44 | MHI_CB_EE_MISSION_MODE, | |
45 | MHI_CB_SYS_ERROR, | |
46 | MHI_CB_FATAL_ERROR, | |
1d3173a3 | 47 | MHI_CB_BW_REQ, |
0cbf2608 MS |
48 | }; |
49 | ||
50 | /** | |
51 | * enum mhi_flags - Transfer flags | |
52 | * @MHI_EOB: End of buffer for bulk transfer | |
53 | * @MHI_EOT: End of transfer | |
54 | * @MHI_CHAIN: Linked transfer | |
55 | */ | |
56 | enum mhi_flags { | |
115f3251 MS |
57 | MHI_EOB = BIT(0), |
58 | MHI_EOT = BIT(1), | |
59 | MHI_CHAIN = BIT(2), | |
0cbf2608 MS |
60 | }; |
61 | ||
62 | /** | |
63 | * enum mhi_device_type - Device types | |
64 | * @MHI_DEVICE_XFER: Handles data transfer | |
65 | * @MHI_DEVICE_CONTROLLER: Control device | |
66 | */ | |
67 | enum mhi_device_type { | |
68 | MHI_DEVICE_XFER, | |
69 | MHI_DEVICE_CONTROLLER, | |
70 | }; | |
71 | ||
72 | /** | |
73 | * enum mhi_ch_type - Channel types | |
74 | * @MHI_CH_TYPE_INVALID: Invalid channel type | |
75 | * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device | |
76 | * @MHI_CH_TYPE_INBOUND: Inbound channel from the device | |
77 | * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine | |
78 | * multiple packets and send them as a single | |
79 | * large packet to reduce CPU consumption | |
80 | */ | |
81 | enum mhi_ch_type { | |
82 | MHI_CH_TYPE_INVALID = 0, | |
83 | MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, | |
84 | MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, | |
85 | MHI_CH_TYPE_INBOUND_COALESCED = 3, | |
86 | }; | |
87 | ||
3000f85b | 88 | /** |
4d12a897 RD |
89 | * struct image_info - Firmware and RDDM table |
90 | * @mhi_buf: Buffer for firmware and RDDM table | |
91 | * @entries: # of entries in table | |
3000f85b MS |
92 | */ |
93 | struct image_info { | |
94 | struct mhi_buf *mhi_buf; | |
4d12a897 | 95 | /* private: from internal.h */ |
3000f85b | 96 | struct bhi_vec_entry *bhi_vec; |
4d12a897 | 97 | /* public: */ |
3000f85b MS |
98 | u32 entries; |
99 | }; | |
100 | ||
1d3173a3 MS |
101 | /** |
102 | * struct mhi_link_info - BW requirement | |
103 | * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg | |
104 | * @target_link_width: Link width as defined by NLW bits in LinkStatus reg | |
105 | */ | |
106 | struct mhi_link_info { | |
107 | unsigned int target_link_speed; | |
108 | unsigned int target_link_width; | |
109 | }; | |
110 | ||
0cbf2608 MS |
111 | /** |
112 | * enum mhi_ee_type - Execution environment types | |
113 | * @MHI_EE_PBL: Primary Bootloader | |
114 | * @MHI_EE_SBL: Secondary Bootloader | |
115 | * @MHI_EE_AMSS: Modem, aka the primary runtime EE | |
116 | * @MHI_EE_RDDM: Ram dump download mode | |
117 | * @MHI_EE_WFW: WLAN firmware mode | |
118 | * @MHI_EE_PTHRU: Passthrough | |
119 | * @MHI_EE_EDL: Embedded downloader | |
66ac7985 | 120 | * @MHI_EE_FP: Flash Programmer Environment |
0cbf2608 MS |
121 | */ |
122 | enum mhi_ee_type { | |
123 | MHI_EE_PBL, | |
124 | MHI_EE_SBL, | |
125 | MHI_EE_AMSS, | |
126 | MHI_EE_RDDM, | |
127 | MHI_EE_WFW, | |
128 | MHI_EE_PTHRU, | |
129 | MHI_EE_EDL, | |
66ac7985 CY |
130 | MHI_EE_FP, |
131 | MHI_EE_MAX_SUPPORTED = MHI_EE_FP, | |
0cbf2608 MS |
132 | MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ |
133 | MHI_EE_NOT_SUPPORTED, | |
134 | MHI_EE_MAX, | |
135 | }; | |
136 | ||
a6e2e352 MS |
137 | /** |
138 | * enum mhi_state - MHI states | |
139 | * @MHI_STATE_RESET: Reset state | |
140 | * @MHI_STATE_READY: Ready state | |
141 | * @MHI_STATE_M0: M0 state | |
142 | * @MHI_STATE_M1: M1 state | |
143 | * @MHI_STATE_M2: M2 state | |
144 | * @MHI_STATE_M3: M3 state | |
145 | * @MHI_STATE_M3_FAST: M3 Fast state | |
146 | * @MHI_STATE_BHI: BHI state | |
147 | * @MHI_STATE_SYS_ERR: System Error state | |
148 | */ | |
149 | enum mhi_state { | |
150 | MHI_STATE_RESET = 0x0, | |
151 | MHI_STATE_READY = 0x1, | |
152 | MHI_STATE_M0 = 0x2, | |
153 | MHI_STATE_M1 = 0x3, | |
154 | MHI_STATE_M2 = 0x4, | |
155 | MHI_STATE_M3 = 0x5, | |
156 | MHI_STATE_M3_FAST = 0x6, | |
157 | MHI_STATE_BHI = 0x7, | |
158 | MHI_STATE_SYS_ERR = 0xFF, | |
159 | MHI_STATE_MAX, | |
160 | }; | |
161 | ||
0cbf2608 MS |
162 | /** |
163 | * enum mhi_ch_ee_mask - Execution environment mask for channel | |
164 | * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE | |
165 | * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE | |
166 | * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE | |
167 | * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE | |
168 | * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE | |
169 | * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE | |
170 | * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE | |
171 | */ | |
172 | enum mhi_ch_ee_mask { | |
173 | MHI_CH_EE_PBL = BIT(MHI_EE_PBL), | |
174 | MHI_CH_EE_SBL = BIT(MHI_EE_SBL), | |
175 | MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), | |
176 | MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), | |
177 | MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), | |
178 | MHI_CH_EE_WFW = BIT(MHI_EE_WFW), | |
179 | MHI_CH_EE_EDL = BIT(MHI_EE_EDL), | |
180 | }; | |
181 | ||
182 | /** | |
183 | * enum mhi_er_data_type - Event ring data types | |
184 | * @MHI_ER_DATA: Only client data over this ring | |
185 | * @MHI_ER_CTRL: MHI control data and client data | |
186 | */ | |
187 | enum mhi_er_data_type { | |
188 | MHI_ER_DATA, | |
189 | MHI_ER_CTRL, | |
190 | }; | |
191 | ||
192 | /** | |
193 | * enum mhi_db_brst_mode - Doorbell mode | |
194 | * @MHI_DB_BRST_DISABLE: Burst mode disable | |
195 | * @MHI_DB_BRST_ENABLE: Burst mode enable | |
196 | */ | |
197 | enum mhi_db_brst_mode { | |
198 | MHI_DB_BRST_DISABLE = 0x2, | |
199 | MHI_DB_BRST_ENABLE = 0x3, | |
200 | }; | |
201 | ||
202 | /** | |
203 | * struct mhi_channel_config - Channel configuration structure for controller | |
204 | * @name: The name of this channel | |
205 | * @num: The number assigned to this channel | |
206 | * @num_elements: The number of elements that can be queued to this channel | |
207 | * @local_elements: The local ring length of the channel | |
a503d162 | 208 | * @event_ring: The event ring index that services this channel |
0cbf2608 MS |
209 | * @dir: Direction that data may flow on this channel |
210 | * @type: Channel type | |
211 | * @ee_mask: Execution Environment mask for this channel | |
212 | * @pollcfg: Polling configuration for burst mode. 0 is default. milliseconds | |
213 | * for UL channels, multiple of 8 ring elements for DL channels | |
214 | * @doorbell: Doorbell mode | |
215 | * @lpm_notify: The channel master requires low power mode notifications | |
216 | * @offload_channel: The client manages the channel completely | |
217 | * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition | |
218 | * @auto_queue: Framework will automatically queue buffers for DL traffic | |
da1c4f85 | 219 | * @wake_capable: Channel capable of waking up the system |
0cbf2608 MS |
220 | */ |
221 | struct mhi_channel_config { | |
222 | char *name; | |
223 | u32 num; | |
224 | u32 num_elements; | |
225 | u32 local_elements; | |
226 | u32 event_ring; | |
227 | enum dma_data_direction dir; | |
228 | enum mhi_ch_type type; | |
229 | u32 ee_mask; | |
230 | u32 pollcfg; | |
231 | enum mhi_db_brst_mode doorbell; | |
232 | bool lpm_notify; | |
233 | bool offload_channel; | |
234 | bool doorbell_mode_switch; | |
235 | bool auto_queue; | |
da1c4f85 | 236 | bool wake_capable; |
0cbf2608 MS |
237 | }; |
238 | ||
239 | /** | |
240 | * struct mhi_event_config - Event ring configuration structure for controller | |
241 | * @num_elements: The number of elements that can be queued to this ring | |
242 | * @irq_moderation_ms: Delay irq for additional events to be aggregated | |
243 | * @irq: IRQ associated with this ring | |
244 | * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring | |
245 | * @priority: Priority of this ring. Use 1 for now | |
246 | * @mode: Doorbell mode | |
247 | * @data_type: Type of data this ring will process | |
248 | * @hardware_event: This ring is associated with hardware channels | |
249 | * @client_managed: This ring is client managed | |
250 | * @offload_channel: This ring is associated with an offloaded channel | |
251 | */ | |
252 | struct mhi_event_config { | |
253 | u32 num_elements; | |
254 | u32 irq_moderation_ms; | |
255 | u32 irq; | |
256 | u32 channel; | |
257 | u32 priority; | |
258 | enum mhi_db_brst_mode mode; | |
259 | enum mhi_er_data_type data_type; | |
260 | bool hardware_event; | |
261 | bool client_managed; | |
262 | bool offload_channel; | |
263 | }; | |
264 | ||
265 | /** | |
266 | * struct mhi_controller_config - Root MHI controller configuration | |
267 | * @max_channels: Maximum number of channels supported | |
268 | * @timeout_ms: Timeout value for operations. 0 means use default | |
269 | * @buf_len: Size of automatically allocated buffers. 0 means use default | |
270 | * @num_channels: Number of channels defined in @ch_cfg | |
271 | * @ch_cfg: Array of defined channels | |
272 | * @num_events: Number of event rings defined in @event_cfg | |
273 | * @event_cfg: Array of defined event rings | |
274 | * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access | |
275 | * @m2_no_db: Host is not allowed to ring DB in M2 state | |
276 | */ | |
277 | struct mhi_controller_config { | |
278 | u32 max_channels; | |
279 | u32 timeout_ms; | |
280 | u32 buf_len; | |
281 | u32 num_channels; | |
f38173a7 | 282 | const struct mhi_channel_config *ch_cfg; |
0cbf2608 | 283 | u32 num_events; |
fcba4b20 | 284 | struct mhi_event_config *event_cfg; |
0cbf2608 MS |
285 | bool use_bounce_buf; |
286 | bool m2_no_db; | |
287 | }; | |
288 | ||
289 | /** | |
290 | * struct mhi_controller - Master MHI controller structure | |
291 | * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI | |
292 | * controller (required) | |
293 | * @mhi_dev: MHI device instance for the controller | |
c7bd825e | 294 | * @debugfs_dentry: MHI controller debugfs directory |
0cbf2608 | 295 | * @regs: Base address of MHI MMIO register space (required) |
6cd330ae | 296 | * @bhi: Points to base of MHI BHI register space |
3000f85b | 297 | * @bhie: Points to base of MHI BHIe register space |
6cd330ae | 298 | * @wake_db: MHI WAKE doorbell register address |
0cbf2608 MS |
299 | * @iova_start: IOMMU starting address for data (required) |
300 | * @iova_stop: IOMMU stop address for data (required) | |
4d5f5283 | 301 | * @fw_image: Firmware image name for normal booting (optional) |
0cbf2608 | 302 | * @edl_image: Firmware image name for emergency download mode (optional) |
6fdfdd27 | 303 | * @rddm_size: RAM dump size that host should allocate for debugging purpose |
0cbf2608 MS |
304 | * @sbl_size: SBL image size downloaded through BHIe (optional) |
305 | * @seg_len: BHIe vector size (optional) | |
3000f85b | 306 | * @fbc_image: Points to firmware image buffer |
6fdfdd27 | 307 | * @rddm_image: Points to RAM dump buffer |
0cbf2608 MS |
308 | * @mhi_chan: Points to the channel configuration table |
309 | * @lpm_chans: List of channels that require LPM notifications | |
310 | * @irq: base irq # to request (required) | |
311 | * @max_chan: Maximum number of channels the controller supports | |
312 | * @total_ev_rings: Total # of event rings allocated | |
313 | * @hw_ev_rings: Number of hardware event rings | |
314 | * @sw_ev_rings: Number of software event rings | |
0cbf2608 | 315 | * @nr_irqs: Number of IRQ allocated by bus master (required) |
3316ab2b MS |
316 | * @family_number: MHI controller family number |
317 | * @device_number: MHI controller device number | |
318 | * @major_version: MHI controller major revision number | |
319 | * @minor_version: MHI controller minor revision number | |
8e3729bf BB |
320 | * @serial_number: MHI controller serial number obtained from BHI |
321 | * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI | |
0cbf2608 MS |
322 | * @mhi_event: MHI event ring configurations table |
323 | * @mhi_cmd: MHI command ring configurations table | |
324 | * @mhi_ctxt: MHI device context, shared memory between host and device | |
325 | * @pm_mutex: Mutex for suspend/resume operation | |
326 | * @pm_lock: Lock for protecting MHI power management state | |
327 | * @timeout_ms: Timeout in ms for state transitions | |
328 | * @pm_state: MHI power management state | |
329 | * @db_access: DB access states | |
330 | * @ee: MHI device execution environment | |
a6e2e352 | 331 | * @dev_state: MHI device state |
0cbf2608 MS |
332 | * @dev_wake: Device wakeup count |
333 | * @pending_pkts: Pending packets for the controller | |
601455da | 334 | * @M0, M2, M3: Counters to track number of device MHI state changes |
0cbf2608 MS |
335 | * @transition_list: List of MHI state transitions |
336 | * @transition_lock: Lock for protecting MHI state transition list | |
337 | * @wlock: Lock for protecting device wakeup | |
1d3173a3 | 338 | * @mhi_link_info: Device bandwidth info |
0cbf2608 | 339 | * @st_worker: State transition worker |
8f703978 | 340 | * @hiprio_wq: High priority workqueue for MHI work such as state transitions |
0cbf2608 MS |
341 | * @state_event: State change event |
342 | * @status_cb: CB function to notify power states of the device (required) | |
0cbf2608 MS |
343 | * @wake_get: CB function to assert device wake (optional) |
344 | * @wake_put: CB function to de-assert device wake (optional) | |
345 | * @wake_toggle: CB function to assert and de-assert device wake (optional) | |
346 | * @runtime_get: CB function to controller runtime resume (required) | |
af2e5881 | 347 | * @runtime_put: CB function to decrement pm usage (required) |
189ff97c MS |
348 | * @map_single: CB function to create TRE buffer |
349 | * @unmap_single: CB function to destroy TRE buffer | |
45723a44 JH |
350 | * @read_reg: Read a MHI register via the physical link (required) |
351 | * @write_reg: Write a MHI register via the physical link (required) | |
b5a8d233 | 352 | * @reset: Controller specific reset function (optional) |
0cbf2608 | 353 | * @buffer_len: Bounce buffer length |
206e7383 | 354 | * @index: Index of the MHI controller instance |
0cbf2608 MS |
355 | * @bounce_buf: Use of bounce buffer |
356 | * @fbc_download: MHI host needs to do complete image transfer (optional) | |
0cbf2608 | 357 | * @wake_set: Device wakeup set flag |
6ffcc18d | 358 | * @irq_flags: irq flags passed to request_irq (optional) |
0cbf2608 MS |
359 | * |
360 | * Fields marked as (required) need to be populated by the controller driver | |
361 | * before calling mhi_register_controller(). For the fields marked as (optional) | |
362 | * they can be populated depending on the usecase. | |
3316ab2b MS |
363 | * |
364 | * The following fields are present for the purpose of implementing any device | |
365 | * specific quirks or customizations for specific MHI revisions used in device | |
366 | * by the controller drivers. The MHI stack will just populate these fields | |
367 | * during mhi_register_controller(): | |
368 | * family_number | |
369 | * device_number | |
370 | * major_version | |
371 | * minor_version | |
0cbf2608 MS |
372 | */ |
373 | struct mhi_controller { | |
374 | struct device *cntrl_dev; | |
375 | struct mhi_device *mhi_dev; | |
c7bd825e | 376 | struct dentry *debugfs_dentry; |
0cbf2608 | 377 | void __iomem *regs; |
6cd330ae | 378 | void __iomem *bhi; |
3000f85b | 379 | void __iomem *bhie; |
6cd330ae | 380 | void __iomem *wake_db; |
a6e2e352 | 381 | |
0cbf2608 MS |
382 | dma_addr_t iova_start; |
383 | dma_addr_t iova_stop; | |
384 | const char *fw_image; | |
385 | const char *edl_image; | |
6fdfdd27 | 386 | size_t rddm_size; |
0cbf2608 MS |
387 | size_t sbl_size; |
388 | size_t seg_len; | |
3000f85b | 389 | struct image_info *fbc_image; |
6fdfdd27 | 390 | struct image_info *rddm_image; |
0cbf2608 MS |
391 | struct mhi_chan *mhi_chan; |
392 | struct list_head lpm_chans; | |
393 | int *irq; | |
394 | u32 max_chan; | |
395 | u32 total_ev_rings; | |
396 | u32 hw_ev_rings; | |
397 | u32 sw_ev_rings; | |
0cbf2608 | 398 | u32 nr_irqs; |
3316ab2b MS |
399 | u32 family_number; |
400 | u32 device_number; | |
401 | u32 major_version; | |
402 | u32 minor_version; | |
8e3729bf BB |
403 | u32 serial_number; |
404 | u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS]; | |
0cbf2608 MS |
405 | |
406 | struct mhi_event *mhi_event; | |
407 | struct mhi_cmd *mhi_cmd; | |
408 | struct mhi_ctxt *mhi_ctxt; | |
409 | ||
410 | struct mutex pm_mutex; | |
411 | rwlock_t pm_lock; | |
412 | u32 timeout_ms; | |
413 | u32 pm_state; | |
414 | u32 db_access; | |
415 | enum mhi_ee_type ee; | |
a6e2e352 | 416 | enum mhi_state dev_state; |
0cbf2608 MS |
417 | atomic_t dev_wake; |
418 | atomic_t pending_pkts; | |
601455da | 419 | u32 M0, M2, M3; |
0cbf2608 MS |
420 | struct list_head transition_list; |
421 | spinlock_t transition_lock; | |
422 | spinlock_t wlock; | |
1d3173a3 | 423 | struct mhi_link_info mhi_link_info; |
0cbf2608 | 424 | struct work_struct st_worker; |
8f703978 | 425 | struct workqueue_struct *hiprio_wq; |
0cbf2608 MS |
426 | wait_queue_head_t state_event; |
427 | ||
428 | void (*status_cb)(struct mhi_controller *mhi_cntrl, | |
429 | enum mhi_callback cb); | |
0cbf2608 MS |
430 | void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); |
431 | void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); | |
432 | void (*wake_toggle)(struct mhi_controller *mhi_cntrl); | |
433 | int (*runtime_get)(struct mhi_controller *mhi_cntrl); | |
434 | void (*runtime_put)(struct mhi_controller *mhi_cntrl); | |
189ff97c MS |
435 | int (*map_single)(struct mhi_controller *mhi_cntrl, |
436 | struct mhi_buf_info *buf); | |
437 | void (*unmap_single)(struct mhi_controller *mhi_cntrl, | |
438 | struct mhi_buf_info *buf); | |
45723a44 JH |
439 | int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, |
440 | u32 *out); | |
441 | void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, | |
442 | u32 val); | |
b5a8d233 | 443 | void (*reset)(struct mhi_controller *mhi_cntrl); |
0cbf2608 MS |
444 | |
445 | size_t buffer_len; | |
206e7383 | 446 | int index; |
0cbf2608 MS |
447 | bool bounce_buf; |
448 | bool fbc_download; | |
0cbf2608 | 449 | bool wake_set; |
6ffcc18d | 450 | unsigned long irq_flags; |
0cbf2608 MS |
451 | }; |
452 | ||
453 | /** | |
5aa93f05 BB |
454 | * struct mhi_device - Structure representing an MHI device which binds |
455 | * to channels or is associated with controllers | |
0cbf2608 | 456 | * @id: Pointer to MHI device ID struct |
5aa93f05 | 457 | * @name: Name of the associated MHI device |
0cbf2608 MS |
458 | * @mhi_cntrl: Controller the device belongs to |
459 | * @ul_chan: UL channel for the device | |
460 | * @dl_chan: DL channel for the device | |
461 | * @dev: Driver model device node for the MHI device | |
462 | * @dev_type: MHI device type | |
e755cadb MS |
463 | * @ul_chan_id: MHI channel id for UL transfer |
464 | * @dl_chan_id: MHI channel id for DL transfer | |
0cbf2608 MS |
465 | * @dev_wake: Device wakeup counter |
466 | */ | |
467 | struct mhi_device { | |
468 | const struct mhi_device_id *id; | |
5aa93f05 | 469 | const char *name; |
0cbf2608 MS |
470 | struct mhi_controller *mhi_cntrl; |
471 | struct mhi_chan *ul_chan; | |
472 | struct mhi_chan *dl_chan; | |
473 | struct device dev; | |
474 | enum mhi_device_type dev_type; | |
e755cadb MS |
475 | int ul_chan_id; |
476 | int dl_chan_id; | |
0cbf2608 MS |
477 | u32 dev_wake; |
478 | }; | |
479 | ||
480 | /** | |
481 | * struct mhi_result - Completed buffer information | |
482 | * @buf_addr: Address of data buffer | |
483 | * @bytes_xferd: # of bytes transferred | |
484 | * @dir: Channel direction | |
485 | * @transaction_status: Status of last transaction | |
486 | */ | |
487 | struct mhi_result { | |
488 | void *buf_addr; | |
489 | size_t bytes_xferd; | |
490 | enum dma_data_direction dir; | |
491 | int transaction_status; | |
492 | }; | |
493 | ||
a6e2e352 MS |
494 | /** |
495 | * struct mhi_buf - MHI Buffer description | |
496 | * @buf: Virtual address of the buffer | |
497 | * @name: Buffer label. For offload channel, configurations name must be: | |
498 | * ECA - Event context array data | |
499 | * CCA - Channel context array data | |
500 | * @dma_addr: IOMMU address of the buffer | |
501 | * @len: # of bytes | |
502 | */ | |
503 | struct mhi_buf { | |
504 | void *buf; | |
505 | const char *name; | |
506 | dma_addr_t dma_addr; | |
507 | size_t len; | |
508 | }; | |
509 | ||
e755cadb MS |
510 | /** |
511 | * struct mhi_driver - Structure representing a MHI client driver | |
512 | * @probe: CB function for client driver probe function | |
513 | * @remove: CB function for client driver remove function | |
514 | * @ul_xfer_cb: CB function for UL data transfer | |
515 | * @dl_xfer_cb: CB function for DL data transfer | |
516 | * @status_cb: CB functions for asynchronous status | |
517 | * @driver: Device driver model driver | |
518 | */ | |
519 | struct mhi_driver { | |
520 | const struct mhi_device_id *id_table; | |
521 | int (*probe)(struct mhi_device *mhi_dev, | |
522 | const struct mhi_device_id *id); | |
523 | void (*remove)(struct mhi_device *mhi_dev); | |
524 | void (*ul_xfer_cb)(struct mhi_device *mhi_dev, | |
525 | struct mhi_result *result); | |
526 | void (*dl_xfer_cb)(struct mhi_device *mhi_dev, | |
527 | struct mhi_result *result); | |
528 | void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb); | |
529 | struct device_driver driver; | |
530 | }; | |
531 | ||
532 | #define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) | |
0cbf2608 MS |
533 | #define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) |
534 | ||
f42dfbe8 BB |
535 | /** |
536 | * mhi_alloc_controller - Allocate the MHI Controller structure | |
537 | * Allocate the mhi_controller structure using zero initialized memory | |
538 | */ | |
539 | struct mhi_controller *mhi_alloc_controller(void); | |
540 | ||
541 | /** | |
542 | * mhi_free_controller - Free the MHI Controller structure | |
543 | * Free the mhi_controller structure which was previously allocated | |
544 | */ | |
545 | void mhi_free_controller(struct mhi_controller *mhi_cntrl); | |
546 | ||
0cbf2608 MS |
547 | /** |
548 | * mhi_register_controller - Register MHI controller | |
549 | * @mhi_cntrl: MHI controller to register | |
550 | * @config: Configuration to use for the controller | |
551 | */ | |
552 | int mhi_register_controller(struct mhi_controller *mhi_cntrl, | |
f38173a7 | 553 | const struct mhi_controller_config *config); |
0cbf2608 MS |
554 | |
555 | /** | |
556 | * mhi_unregister_controller - Unregister MHI controller | |
557 | * @mhi_cntrl: MHI controller to unregister | |
558 | */ | |
559 | void mhi_unregister_controller(struct mhi_controller *mhi_cntrl); | |
560 | ||
82174738 MS |
561 | /* |
562 | * module_mhi_driver() - Helper macro for drivers that don't do | |
563 | * anything special other than using default mhi_driver_register() and | |
564 | * mhi_driver_unregister(). This eliminates a lot of boilerplate. | |
565 | * Each module may only use this macro once. | |
566 | */ | |
567 | #define module_mhi_driver(mhi_drv) \ | |
568 | module_driver(mhi_drv, mhi_driver_register, \ | |
569 | mhi_driver_unregister) | |
570 | ||
571 | /* | |
572 | * Macro to avoid include chaining to get THIS_MODULE | |
573 | */ | |
574 | #define mhi_driver_register(mhi_drv) \ | |
575 | __mhi_driver_register(mhi_drv, THIS_MODULE) | |
576 | ||
e755cadb | 577 | /** |
82174738 | 578 | * __mhi_driver_register - Register driver with MHI framework |
e755cadb | 579 | * @mhi_drv: Driver associated with the device |
82174738 | 580 | * @owner: The module owner |
e755cadb | 581 | */ |
82174738 | 582 | int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner); |
e755cadb MS |
583 | |
584 | /** | |
585 | * mhi_driver_unregister - Unregister a driver for mhi_devices | |
586 | * @mhi_drv: Driver associated with the device | |
587 | */ | |
588 | void mhi_driver_unregister(struct mhi_driver *mhi_drv); | |
589 | ||
a6e2e352 MS |
590 | /** |
591 | * mhi_set_mhi_state - Set MHI device state | |
592 | * @mhi_cntrl: MHI controller | |
593 | * @state: State to set | |
594 | */ | |
595 | void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, | |
596 | enum mhi_state state); | |
597 | ||
0c6b20a1 MS |
598 | /** |
599 | * mhi_notify - Notify the MHI client driver about client device status | |
600 | * @mhi_dev: MHI device instance | |
601 | * @cb_reason: MHI callback reason | |
602 | */ | |
603 | void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); | |
604 | ||
87baa23e HK |
605 | /** |
606 | * mhi_get_free_desc_count - Get transfer ring length | |
607 | * Get # of TD available to queue buffers | |
608 | * @mhi_dev: Device associated with the channels | |
609 | * @dir: Direction of the channel | |
610 | */ | |
611 | int mhi_get_free_desc_count(struct mhi_device *mhi_dev, | |
612 | enum dma_data_direction dir); | |
613 | ||
3000f85b MS |
614 | /** |
615 | * mhi_prepare_for_power_up - Do pre-initialization before power up. | |
616 | * This is optional, call this before power up if | |
617 | * the controller does not want bus framework to | |
618 | * automatically free any allocated memory during | |
619 | * shutdown process. | |
620 | * @mhi_cntrl: MHI controller | |
621 | */ | |
622 | int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); | |
623 | ||
624 | /** | |
625 | * mhi_async_power_up - Start MHI power up sequence | |
626 | * @mhi_cntrl: MHI controller | |
627 | */ | |
628 | int mhi_async_power_up(struct mhi_controller *mhi_cntrl); | |
629 | ||
630 | /** | |
631 | * mhi_sync_power_up - Start MHI power up sequence and wait till the device | |
4d12a897 | 632 | * enters valid EE state |
3000f85b MS |
633 | * @mhi_cntrl: MHI controller |
634 | */ | |
635 | int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); | |
636 | ||
637 | /** | |
638 | * mhi_power_down - Start MHI power down sequence | |
639 | * @mhi_cntrl: MHI controller | |
640 | * @graceful: Link is still accessible, so do a graceful shutdown process | |
641 | */ | |
642 | void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); | |
643 | ||
644 | /** | |
645 | * mhi_unprepare_after_power_down - Free any allocated memory after power down | |
646 | * @mhi_cntrl: MHI controller | |
647 | */ | |
648 | void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); | |
0c6b20a1 MS |
649 | |
650 | /** | |
651 | * mhi_pm_suspend - Move MHI into a suspended state | |
652 | * @mhi_cntrl: MHI controller | |
653 | */ | |
654 | int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); | |
655 | ||
656 | /** | |
657 | * mhi_pm_resume - Resume MHI from suspended state | |
658 | * @mhi_cntrl: MHI controller | |
659 | */ | |
660 | int mhi_pm_resume(struct mhi_controller *mhi_cntrl); | |
3000f85b | 661 | |
6fdfdd27 | 662 | /** |
9e1660e5 BB |
663 | * mhi_download_rddm_image - Download ramdump image from device for |
664 | * debugging purpose. | |
6fdfdd27 MS |
665 | * @mhi_cntrl: MHI controller |
666 | * @in_panic: Download rddm image during kernel panic | |
667 | */ | |
9e1660e5 | 668 | int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic); |
6fdfdd27 MS |
669 | |
670 | /** | |
671 | * mhi_force_rddm_mode - Force device into rddm mode | |
672 | * @mhi_cntrl: MHI controller | |
673 | */ | |
674 | int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); | |
675 | ||
78e1d226 BB |
676 | /** |
677 | * mhi_get_exec_env - Get BHI execution environment of the device | |
678 | * @mhi_cntrl: MHI controller | |
679 | */ | |
680 | enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); | |
681 | ||
6fdfdd27 MS |
682 | /** |
683 | * mhi_get_mhi_state - Get MHI state of the device | |
684 | * @mhi_cntrl: MHI controller | |
685 | */ | |
686 | enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); | |
687 | ||
b5a8d233 LP |
688 | /** |
689 | * mhi_soc_reset - Trigger a device reset. This can be used as a last resort | |
690 | * to reset and recover a device. | |
691 | * @mhi_cntrl: MHI controller | |
692 | */ | |
693 | void mhi_soc_reset(struct mhi_controller *mhi_cntrl); | |
694 | ||
189ff97c MS |
695 | /** |
696 | * mhi_device_get - Disable device low power mode | |
697 | * @mhi_dev: Device associated with the channel | |
698 | */ | |
699 | void mhi_device_get(struct mhi_device *mhi_dev); | |
700 | ||
701 | /** | |
702 | * mhi_device_get_sync - Disable device low power mode. Synchronously | |
703 | * take the controller out of suspended state | |
704 | * @mhi_dev: Device associated with the channel | |
705 | */ | |
706 | int mhi_device_get_sync(struct mhi_device *mhi_dev); | |
707 | ||
708 | /** | |
709 | * mhi_device_put - Re-enable device low power mode | |
710 | * @mhi_dev: Device associated with the channel | |
711 | */ | |
712 | void mhi_device_put(struct mhi_device *mhi_dev); | |
713 | ||
714 | /** | |
6731fefd BB |
715 | * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer. |
716 | * Allocate and initialize the channel context and | |
717 | * also issue the START channel command to both | |
718 | * channels. Channels can be started only if both | |
719 | * host and device execution environments match and | |
720 | * channels are in a DISABLED state. | |
189ff97c | 721 | * @mhi_dev: Device associated with the channels |
ce78ffa3 | 722 | * @flags: MHI channel flags |
189ff97c | 723 | */ |
ce78ffa3 DM |
724 | int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, |
725 | unsigned int flags); | |
726 | ||
727 | /* Automatically allocate and queue inbound buffers */ | |
728 | #define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) | |
189ff97c MS |
729 | |
730 | /** | |
6731fefd BB |
731 | * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. |
732 | * Issue the RESET channel command and let the | |
733 | * device clean-up the context so no incoming | |
734 | * transfers are seen on the host. Free memory | |
735 | * associated with the context on host. If device | |
736 | * is unresponsive, only perform a host side | |
737 | * clean-up. Channels can be reset only if both | |
738 | * host and device execution environments match | |
739 | * and channels are in an ENABLED, STOPPED or | |
740 | * SUSPENDED state. | |
189ff97c MS |
741 | * @mhi_dev: Device associated with the channels |
742 | */ | |
743 | void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); | |
744 | ||
745 | /** | |
746 | * mhi_poll - Poll for any available data in DL direction | |
747 | * @mhi_dev: Device associated with the channels | |
748 | * @budget: # of events to process | |
749 | */ | |
750 | int mhi_poll(struct mhi_device *mhi_dev, u32 budget); | |
751 | ||
752 | /** | |
753 | * mhi_queue_dma - Send or receive DMA mapped buffers from client device | |
754 | * over MHI channel | |
755 | * @mhi_dev: Device associated with the channels | |
756 | * @dir: DMA direction for the channel | |
757 | * @mhi_buf: Buffer for holding the DMA mapped data | |
758 | * @len: Buffer length | |
759 | * @mflags: MHI transfer flags used for the transfer | |
760 | */ | |
761 | int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, | |
762 | struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags); | |
763 | ||
764 | /** | |
765 | * mhi_queue_buf - Send or receive raw buffers from client device over MHI | |
766 | * channel | |
767 | * @mhi_dev: Device associated with the channels | |
768 | * @dir: DMA direction for the channel | |
769 | * @buf: Buffer for holding the data | |
770 | * @len: Buffer length | |
771 | * @mflags: MHI transfer flags used for the transfer | |
772 | */ | |
773 | int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, | |
774 | void *buf, size_t len, enum mhi_flags mflags); | |
775 | ||
776 | /** | |
777 | * mhi_queue_skb - Send or receive SKBs from client device over MHI channel | |
778 | * @mhi_dev: Device associated with the channels | |
779 | * @dir: DMA direction for the channel | |
780 | * @skb: Buffer for holding SKBs | |
781 | * @len: Buffer length | |
782 | * @mflags: MHI transfer flags used for the transfer | |
783 | */ | |
784 | int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, | |
785 | struct sk_buff *skb, size_t len, enum mhi_flags mflags); | |
786 | ||
d8c4a223 LP |
787 | /** |
788 | * mhi_queue_is_full - Determine whether queueing new elements is possible | |
789 | * @mhi_dev: Device associated with the channels | |
790 | * @dir: DMA direction for the channel | |
791 | */ | |
792 | bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir); | |
793 | ||
0cbf2608 | 794 | #endif /* _MHI_H_ */ |