// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);
27 | ||
961aeb68 MS |
28 | static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, |
29 | struct mhi_ring_element *el, bool bei) | |
30 | { | |
31 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
32 | union mhi_ep_ring_ctx *ctx; | |
33 | struct mhi_ep_ring *ring; | |
34 | int ret; | |
35 | ||
36 | mutex_lock(&mhi_cntrl->event_lock); | |
37 | ring = &mhi_cntrl->mhi_event[ring_idx].ring; | |
38 | ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; | |
39 | if (!ring->started) { | |
40 | ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); | |
41 | if (ret) { | |
42 | dev_err(dev, "Error starting event ring (%u)\n", ring_idx); | |
43 | goto err_unlock; | |
44 | } | |
45 | } | |
46 | ||
47 | /* Add element to the event ring */ | |
48 | ret = mhi_ep_ring_add_element(ring, el); | |
49 | if (ret) { | |
50 | dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); | |
51 | goto err_unlock; | |
52 | } | |
53 | ||
54 | mutex_unlock(&mhi_cntrl->event_lock); | |
55 | ||
56 | /* | |
57 | * Raise IRQ to host only if the BEI flag is not set in TRE. Host might | |
58 | * set this flag for interrupt moderation as per MHI protocol. | |
59 | */ | |
60 | if (!bei) | |
61 | mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); | |
62 | ||
63 | return 0; | |
64 | ||
65 | err_unlock: | |
66 | mutex_unlock(&mhi_cntrl->event_lock); | |
67 | ||
68 | return ret; | |
69 | } | |
70 | ||
71 | static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, | |
72 | struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) | |
73 | { | |
74 | struct mhi_ring_element event = {}; | |
75 | ||
76 | event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); | |
77 | event.dword[0] = MHI_TRE_EV_DWORD0(code, len); | |
78 | event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); | |
79 | ||
80 | return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); | |
81 | } | |
82 | ||
83 | int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) | |
84 | { | |
85 | struct mhi_ring_element event = {}; | |
86 | ||
87 | event.dword[0] = MHI_SC_EV_DWORD0(state); | |
88 | event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); | |
89 | ||
90 | return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); | |
91 | } | |
92 | ||
93 | int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) | |
94 | { | |
95 | struct mhi_ring_element event = {}; | |
96 | ||
97 | event.dword[0] = MHI_EE_EV_DWORD0(exec_env); | |
98 | event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); | |
99 | ||
100 | return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); | |
101 | } | |
102 | ||
103 | static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) | |
104 | { | |
105 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; | |
106 | struct mhi_ring_element event = {}; | |
107 | ||
108 | event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); | |
109 | event.dword[0] = MHI_CC_EV_DWORD0(code); | |
110 | event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); | |
111 | ||
112 | return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); | |
113 | } | |
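
/*
 * Note on the event layout used above (descriptive only, not used by the
 * code): 'ptr' carries the host address of the ring element the event refers
 * to (ring base + read offset * element size), dword[0] carries the
 * completion code and transferred length, and dword[1] carries the channel
 * ID and/or the packet type, depending on the event kind.
 */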
114 | ||
e8275690 MS |
115 | static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) |
116 | { | |
117 | struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; | |
118 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
119 | struct mhi_result result = {}; | |
120 | struct mhi_ep_chan *mhi_chan; | |
121 | struct mhi_ep_ring *ch_ring; | |
122 | u32 tmp, ch_id; | |
123 | int ret; | |
124 | ||
125 | ch_id = MHI_TRE_GET_CMD_CHID(el); | |
126 | mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; | |
127 | ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; | |
128 | ||
129 | switch (MHI_TRE_GET_CMD_TYPE(el)) { | |
130 | case MHI_PKT_TYPE_START_CHAN_CMD: | |
131 | dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); | |
132 | ||
133 | mutex_lock(&mhi_chan->lock); | |
134 | /* Initialize and configure the corresponding channel ring */ | |
135 | if (!ch_ring->started) { | |
136 | ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, | |
137 | (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); | |
138 | if (ret) { | |
139 | dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); | |
140 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, | |
141 | MHI_EV_CC_UNDEFINED_ERR); | |
142 | if (ret) | |
143 | dev_err(dev, "Error sending completion event: %d\n", ret); | |
144 | ||
145 | goto err_unlock; | |
146 | } | |
147 | } | |
148 | ||
149 | /* Set channel state to RUNNING */ | |
150 | mhi_chan->state = MHI_CH_STATE_RUNNING; | |
151 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); | |
152 | tmp &= ~CHAN_CTX_CHSTATE_MASK; | |
153 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); | |
154 | mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); | |
155 | ||
156 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); | |
157 | if (ret) { | |
158 | dev_err(dev, "Error sending command completion event (%u)\n", | |
159 | MHI_EV_CC_SUCCESS); | |
160 | goto err_unlock; | |
161 | } | |
162 | ||
163 | mutex_unlock(&mhi_chan->lock); | |
164 | ||
165 | /* | |
166 | * Create MHI device only during UL channel start. Since the MHI | |
167 | * channels operate in a pair, we'll associate both UL and DL | |
168 | * channels to the same device. | |
169 | * | |
170 | * We also need to check for mhi_dev != NULL because, the host | |
171 | * will issue START_CHAN command during resume and we don't | |
172 | * destroy the device during suspend. | |
173 | */ | |
174 | if (!(ch_id % 2) && !mhi_chan->mhi_dev) { | |
175 | ret = mhi_ep_create_device(mhi_cntrl, ch_id); | |
176 | if (ret) { | |
177 | dev_err(dev, "Error creating device for channel (%u)\n", ch_id); | |
178 | mhi_ep_handle_syserr(mhi_cntrl); | |
179 | return ret; | |
180 | } | |
181 | } | |
182 | ||
183 | /* Finally, enable DB for the channel */ | |
184 | mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); | |
185 | ||
186 | break; | |
187 | case MHI_PKT_TYPE_STOP_CHAN_CMD: | |
188 | dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); | |
189 | if (!ch_ring->started) { | |
190 | dev_err(dev, "Channel (%u) not opened\n", ch_id); | |
191 | return -ENODEV; | |
192 | } | |
193 | ||
194 | mutex_lock(&mhi_chan->lock); | |
195 | /* Disable DB for the channel */ | |
196 | mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); | |
197 | ||
198 | /* Send channel disconnect status to client drivers */ | |
199 | result.transaction_status = -ENOTCONN; | |
200 | result.bytes_xferd = 0; | |
201 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); | |
202 | ||
203 | /* Set channel state to STOP */ | |
204 | mhi_chan->state = MHI_CH_STATE_STOP; | |
205 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); | |
206 | tmp &= ~CHAN_CTX_CHSTATE_MASK; | |
207 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); | |
208 | mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); | |
209 | ||
210 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); | |
211 | if (ret) { | |
212 | dev_err(dev, "Error sending command completion event (%u)\n", | |
213 | MHI_EV_CC_SUCCESS); | |
214 | goto err_unlock; | |
215 | } | |
216 | ||
217 | mutex_unlock(&mhi_chan->lock); | |
218 | break; | |
219 | case MHI_PKT_TYPE_RESET_CHAN_CMD: | |
220 | dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); | |
221 | if (!ch_ring->started) { | |
222 | dev_err(dev, "Channel (%u) not opened\n", ch_id); | |
223 | return -ENODEV; | |
224 | } | |
225 | ||
226 | mutex_lock(&mhi_chan->lock); | |
227 | /* Stop and reset the transfer ring */ | |
228 | mhi_ep_ring_reset(mhi_cntrl, ch_ring); | |
229 | ||
230 | /* Send channel disconnect status to client driver */ | |
231 | result.transaction_status = -ENOTCONN; | |
232 | result.bytes_xferd = 0; | |
233 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); | |
234 | ||
235 | /* Set channel state to DISABLED */ | |
236 | mhi_chan->state = MHI_CH_STATE_DISABLED; | |
237 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); | |
238 | tmp &= ~CHAN_CTX_CHSTATE_MASK; | |
239 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); | |
240 | mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); | |
241 | ||
242 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); | |
243 | if (ret) { | |
244 | dev_err(dev, "Error sending command completion event (%u)\n", | |
245 | MHI_EV_CC_SUCCESS); | |
246 | goto err_unlock; | |
247 | } | |
248 | ||
249 | mutex_unlock(&mhi_chan->lock); | |
250 | break; | |
251 | default: | |
252 | dev_err(dev, "Invalid command received: %lu for channel (%u)\n", | |
253 | MHI_TRE_GET_CMD_TYPE(el), ch_id); | |
254 | return -EINVAL; | |
255 | } | |
256 | ||
257 | return 0; | |
258 | ||
259 | err_unlock: | |
260 | mutex_unlock(&mhi_chan->lock); | |
261 | ||
262 | return ret; | |
263 | } | |
264 | ||
53012588 MS |
265 | bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) |
266 | { | |
267 | struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : | |
268 | mhi_dev->ul_chan; | |
269 | struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; | |
270 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; | |
271 | ||
272 | return !!(ring->rd_offset == ring->wr_offset); | |
273 | } | |
274 | EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); | |
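
/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * driver): a client may use mhi_ep_queue_is_empty() to check whether the
 * host has queued any DL buffers before attempting a transmit. 'my_dev' is
 * an assumed mhi_ep_device handle.
 *
 *	static bool my_host_has_dl_buffers(struct mhi_ep_device *my_dev)
 *	{
 *		// An empty DL transfer ring means no host TREs to write into
 *		return !mhi_ep_queue_is_empty(my_dev, DMA_FROM_DEVICE);
 *	}
 */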
275 | ||
276 | static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, | |
277 | struct mhi_ep_ring *ring, | |
278 | struct mhi_result *result, | |
279 | u32 len) | |
280 | { | |
281 | struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; | |
282 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
283 | size_t tr_len, read_offset, write_offset; | |
284 | struct mhi_ring_element *el; | |
285 | bool tr_done = false; | |
286 | void *write_addr; | |
287 | u64 read_addr; | |
288 | u32 buf_left; | |
289 | int ret; | |
290 | ||
291 | buf_left = len; | |
292 | ||
293 | do { | |
294 | /* Don't process the transfer ring if the channel is not in RUNNING state */ | |
295 | if (mhi_chan->state != MHI_CH_STATE_RUNNING) { | |
296 | dev_err(dev, "Channel not available\n"); | |
297 | return -ENODEV; | |
298 | } | |
299 | ||
300 | el = &ring->ring_cache[ring->rd_offset]; | |
301 | ||
302 | /* Check if there is data pending to be read from previous read operation */ | |
303 | if (mhi_chan->tre_bytes_left) { | |
304 | dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); | |
305 | tr_len = min(buf_left, mhi_chan->tre_bytes_left); | |
306 | } else { | |
307 | mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); | |
308 | mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); | |
309 | mhi_chan->tre_bytes_left = mhi_chan->tre_size; | |
310 | ||
311 | tr_len = min(buf_left, mhi_chan->tre_size); | |
312 | } | |
313 | ||
314 | read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; | |
315 | write_offset = len - buf_left; | |
316 | read_addr = mhi_chan->tre_loc + read_offset; | |
317 | write_addr = result->buf_addr + write_offset; | |
318 | ||
319 | dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); | |
320 | ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); | |
321 | if (ret < 0) { | |
322 | dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); | |
323 | return ret; | |
324 | } | |
325 | ||
326 | buf_left -= tr_len; | |
327 | mhi_chan->tre_bytes_left -= tr_len; | |
328 | ||
329 | /* | |
330 | * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been | |
331 | * read completely: | |
332 | * | |
333 | * 1. Send completion event to the host based on the flags set in TRE. | |
334 | * 2. Increment the local read offset of the transfer ring. | |
335 | */ | |
336 | if (!mhi_chan->tre_bytes_left) { | |
337 | /* | |
338 | * The host will split the data packet into multiple TREs if it can't fit | |
339 | * the packet in a single TRE. In that case, CHAIN flag will be set by the | |
340 | * host for all TREs except the last one. | |
341 | */ | |
342 | if (MHI_TRE_DATA_GET_CHAIN(el)) { | |
343 | /* | |
344 | * IEOB (Interrupt on End of Block) flag will be set by the host if | |
345 | * it expects the completion event for all TREs of a TD. | |
346 | */ | |
347 | if (MHI_TRE_DATA_GET_IEOB(el)) { | |
348 | ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, | |
349 | MHI_TRE_DATA_GET_LEN(el), | |
350 | MHI_EV_CC_EOB); | |
351 | if (ret < 0) { | |
352 | dev_err(&mhi_chan->mhi_dev->dev, | |
353 | "Error sending transfer compl. event\n"); | |
354 | return ret; | |
355 | } | |
356 | } | |
357 | } else { | |
358 | /* | |
359 | * IEOT (Interrupt on End of Transfer) flag will be set by the host | |
360 | * for the last TRE of the TD and expects the completion event for | |
361 | * the same. | |
362 | */ | |
363 | if (MHI_TRE_DATA_GET_IEOT(el)) { | |
364 | ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, | |
365 | MHI_TRE_DATA_GET_LEN(el), | |
366 | MHI_EV_CC_EOT); | |
367 | if (ret < 0) { | |
368 | dev_err(&mhi_chan->mhi_dev->dev, | |
369 | "Error sending transfer compl. event\n"); | |
370 | return ret; | |
371 | } | |
372 | } | |
373 | ||
374 | tr_done = true; | |
375 | } | |
376 | ||
377 | mhi_ep_ring_inc_index(ring); | |
378 | } | |
379 | ||
380 | result->bytes_xferd += tr_len; | |
381 | } while (buf_left && !tr_done); | |
382 | ||
383 | return 0; | |
384 | } | |
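
/*
 * Worked example of the chaining rules above (illustrative): if the host
 * splits an 8 KB packet across two 4 KB TREs, the first TRE carries CHAIN=1
 * (plus IEOB if the host wants an EOB completion for it), while the last TRE
 * carries CHAIN=0 and IEOT=1, requesting the EOT completion that marks the
 * end of the transfer descriptor.
 */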
385 | ||
03c0bb8e MS |
386 | static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) |
387 | { | |
388 | struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; | |
389 | struct mhi_result result = {}; | |
390 | u32 len = MHI_EP_DEFAULT_MTU; | |
391 | struct mhi_ep_chan *mhi_chan; | |
392 | int ret; | |
393 | ||
394 | mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; | |
395 | ||
396 | /* | |
397 | * Bail out if transfer callback is not registered for the channel. | |
398 | * This is most likely due to the client driver not loaded at this point. | |
399 | */ | |
400 | if (!mhi_chan->xfer_cb) { | |
401 | dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); | |
402 | return -ENODEV; | |
403 | } | |
404 | ||
405 | if (ring->ch_id % 2) { | |
406 | /* DL channel */ | |
407 | result.dir = mhi_chan->dir; | |
408 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); | |
409 | } else { | |
410 | /* UL channel */ | |
411 | result.buf_addr = kzalloc(len, GFP_KERNEL); | |
412 | if (!result.buf_addr) | |
413 | return -ENOMEM; | |
414 | ||
415 | do { | |
416 | ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); | |
417 | if (ret < 0) { | |
418 | dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); | |
419 | kfree(result.buf_addr); | |
420 | return ret; | |
421 | } | |
422 | ||
423 | result.dir = mhi_chan->dir; | |
424 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); | |
425 | result.bytes_xferd = 0; | |
426 | memset(result.buf_addr, 0, len); | |
427 | ||
428 | /* Read until the ring becomes empty */ | |
429 | } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); | |
430 | ||
431 | kfree(result.buf_addr); | |
432 | } | |
433 | ||
434 | return 0; | |
435 | } | |
436 | ||
2d945a39 MS |
437 | /* TODO: Handle partially formed TDs */ |
438 | int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) | |
439 | { | |
440 | struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; | |
441 | struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; | |
442 | struct device *dev = &mhi_chan->mhi_dev->dev; | |
443 | struct mhi_ring_element *el; | |
444 | u32 buf_left, read_offset; | |
445 | struct mhi_ep_ring *ring; | |
446 | enum mhi_ev_ccs code; | |
447 | void *read_addr; | |
448 | u64 write_addr; | |
449 | size_t tr_len; | |
450 | u32 tre_len; | |
451 | int ret; | |
452 | ||
453 | buf_left = skb->len; | |
454 | ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; | |
455 | ||
456 | mutex_lock(&mhi_chan->lock); | |
457 | ||
458 | do { | |
459 | /* Don't process the transfer ring if the channel is not in RUNNING state */ | |
460 | if (mhi_chan->state != MHI_CH_STATE_RUNNING) { | |
461 | dev_err(dev, "Channel not available\n"); | |
462 | ret = -ENODEV; | |
463 | goto err_exit; | |
464 | } | |
465 | ||
466 | if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { | |
467 | dev_err(dev, "TRE not available!\n"); | |
468 | ret = -ENOSPC; | |
469 | goto err_exit; | |
470 | } | |
471 | ||
472 | el = &ring->ring_cache[ring->rd_offset]; | |
473 | tre_len = MHI_TRE_DATA_GET_LEN(el); | |
474 | ||
475 | tr_len = min(buf_left, tre_len); | |
476 | read_offset = skb->len - buf_left; | |
477 | read_addr = skb->data + read_offset; | |
478 | write_addr = MHI_TRE_DATA_GET_PTR(el); | |
479 | ||
480 | dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); | |
481 | ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); | |
482 | if (ret < 0) { | |
483 | dev_err(dev, "Error writing to the channel\n"); | |
484 | goto err_exit; | |
485 | } | |
486 | ||
487 | buf_left -= tr_len; | |
488 | /* | |
489 | * For all TREs queued by the host for DL channel, only the EOT flag will be set. | |
490 | * If the packet doesn't fit into a single TRE, send the OVERFLOW event to | |
491 | * the host so that the host can adjust the packet boundary to next TREs. Else send | |
492 | * the EOT event to the host indicating the packet boundary. | |
493 | */ | |
494 | if (buf_left) | |
495 | code = MHI_EV_CC_OVERFLOW; | |
496 | else | |
497 | code = MHI_EV_CC_EOT; | |
498 | ||
499 | ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); | |
500 | if (ret) { | |
501 | dev_err(dev, "Error sending transfer completion event\n"); | |
502 | goto err_exit; | |
503 | } | |
504 | ||
505 | mhi_ep_ring_inc_index(ring); | |
506 | } while (buf_left); | |
507 | ||
508 | mutex_unlock(&mhi_chan->lock); | |
509 | ||
510 | return 0; | |
511 | ||
512 | err_exit: | |
513 | mutex_unlock(&mhi_chan->lock); | |
514 | ||
515 | return ret; | |
516 | } | |
517 | EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); | |
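
/*
 * Illustrative DL transmit sketch (hypothetical client code, not part of
 * this driver). 'my_dev' is an assumed mhi_ep_device handle bound to a
 * UL/DL channel pair.
 *
 *	static int my_send(struct mhi_ep_device *my_dev, struct sk_buff *skb)
 *	{
 *		// Back off if the host has not queued DL TREs yet
 *		if (mhi_ep_queue_is_empty(my_dev, DMA_FROM_DEVICE))
 *			return -EAGAIN;
 *
 *		return mhi_ep_queue_skb(my_dev, skb);
 *	}
 */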
518 | ||
fb3a26b7 MS |
519 | static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) |
520 | { | |
521 | size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; | |
522 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
523 | int ret; | |
524 | ||
525 | /* Update the number of event rings (NER) programmed by the host */ | |
526 | mhi_ep_mmio_update_ner(mhi_cntrl); | |
527 | ||
528 | dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", | |
529 | mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); | |
530 | ||
531 | ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; | |
532 | ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; | |
533 | cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; | |
534 | ||
535 | /* Get the channel context base pointer from host */ | |
536 | mhi_ep_mmio_get_chc_base(mhi_cntrl); | |
537 | ||
538 | /* Allocate and map memory for caching host channel context */ | |
539 | ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, | |
540 | &mhi_cntrl->ch_ctx_cache_phys, | |
541 | (void __iomem **) &mhi_cntrl->ch_ctx_cache, | |
542 | ch_ctx_host_size); | |
543 | if (ret) { | |
544 | dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); | |
545 | return ret; | |
546 | } | |
547 | ||
548 | /* Get the event context base pointer from host */ | |
549 | mhi_ep_mmio_get_erc_base(mhi_cntrl); | |
550 | ||
551 | /* Allocate and map memory for caching host event context */ | |
552 | ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, | |
553 | &mhi_cntrl->ev_ctx_cache_phys, | |
554 | (void __iomem **) &mhi_cntrl->ev_ctx_cache, | |
555 | ev_ctx_host_size); | |
556 | if (ret) { | |
557 | dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); | |
558 | goto err_ch_ctx; | |
559 | } | |
560 | ||
561 | /* Get the command context base pointer from host */ | |
562 | mhi_ep_mmio_get_crc_base(mhi_cntrl); | |
563 | ||
564 | /* Allocate and map memory for caching host command context */ | |
565 | ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, | |
566 | &mhi_cntrl->cmd_ctx_cache_phys, | |
567 | (void __iomem **) &mhi_cntrl->cmd_ctx_cache, | |
568 | cmd_ctx_host_size); | |
569 | if (ret) { | |
570 | dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); | |
571 | goto err_ev_ctx; | |
572 | } | |
573 | ||
574 | /* Initialize command ring */ | |
575 | ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, | |
576 | (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); | |
577 | if (ret) { | |
578 | dev_err(dev, "Failed to start the command ring\n"); | |
579 | goto err_cmd_ctx; | |
580 | } | |
581 | ||
582 | return ret; | |
583 | ||
584 | err_cmd_ctx: | |
585 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, | |
586 | (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); | |
587 | ||
588 | err_ev_ctx: | |
589 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, | |
590 | (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); | |
591 | ||
592 | err_ch_ctx: | |
593 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, | |
594 | (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); | |
595 | ||
596 | return ret; | |
597 | } | |
598 | ||
599 | static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) | |
600 | { | |
601 | size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; | |
602 | ||
603 | ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; | |
604 | ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; | |
605 | cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; | |
606 | ||
607 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, | |
608 | (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); | |
609 | ||
610 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, | |
611 | (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); | |
612 | ||
613 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, | |
614 | (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); | |
615 | } | |
616 | ||
617 | static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) | |
618 | { | |
619 | /* | |
620 | * Doorbell interrupts are enabled when the corresponding channel gets started. | |
621 | * Enabling all interrupts here triggers spurious irqs as some of the interrupts | |
622 | * associated with hw channels always get triggered. | |
623 | */ | |
624 | mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); | |
625 | mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); | |
626 | } | |
627 | ||
628 | static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) | |
629 | { | |
630 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
631 | enum mhi_state state; | |
632 | bool mhi_reset; | |
633 | u32 count = 0; | |
634 | int ret; | |
635 | ||
636 | /* Wait for Host to set the M0 state */ | |
637 | do { | |
638 | msleep(M0_WAIT_DELAY_MS); | |
639 | mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); | |
640 | if (mhi_reset) { | |
641 | /* Clear the MHI reset if host is in reset state */ | |
642 | mhi_ep_mmio_clear_reset(mhi_cntrl); | |
643 | dev_info(dev, "Detected Host reset while waiting for M0\n"); | |
644 | } | |
645 | count++; | |
646 | } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); | |
647 | ||
648 | if (state != MHI_STATE_M0) { | |
649 | dev_err(dev, "Host failed to enter M0\n"); | |
650 | return -ETIMEDOUT; | |
651 | } | |
652 | ||
653 | ret = mhi_ep_cache_host_cfg(mhi_cntrl); | |
654 | if (ret) { | |
655 | dev_err(dev, "Failed to cache host config\n"); | |
656 | return ret; | |
657 | } | |
658 | ||
659 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); | |
660 | ||
661 | /* Enable all interrupts now */ | |
662 | mhi_ep_enable_int(mhi_cntrl); | |
663 | ||
664 | return 0; | |
665 | } | |
666 | ||
e8275690 MS |
667 | static void mhi_ep_cmd_ring_worker(struct work_struct *work) |
668 | { | |
669 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); | |
670 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; | |
671 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
672 | struct mhi_ring_element *el; | |
673 | int ret; | |
674 | ||
675 | /* Update the write offset for the ring */ | |
676 | ret = mhi_ep_update_wr_offset(ring); | |
677 | if (ret) { | |
678 | dev_err(dev, "Error updating write offset for ring\n"); | |
679 | return; | |
680 | } | |
681 | ||
682 | /* Sanity check to make sure there are elements in the ring */ | |
683 | if (ring->rd_offset == ring->wr_offset) | |
684 | return; | |
685 | ||
686 | /* | |
687 | * Process command ring element till write offset. In case of an error, just try to | |
688 | * process next element. | |
689 | */ | |
690 | while (ring->rd_offset != ring->wr_offset) { | |
691 | el = &ring->ring_cache[ring->rd_offset]; | |
692 | ||
693 | ret = mhi_ep_process_cmd_ring(ring, el); | |
694 | if (ret) | |
695 | dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); | |
696 | ||
697 | mhi_ep_ring_inc_index(ring); | |
698 | } | |
699 | } | |
700 | ||
03c0bb8e MS |
701 | static void mhi_ep_ch_ring_worker(struct work_struct *work) |
702 | { | |
703 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); | |
704 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
705 | struct mhi_ep_ring_item *itr, *tmp; | |
706 | struct mhi_ring_element *el; | |
707 | struct mhi_ep_ring *ring; | |
708 | struct mhi_ep_chan *chan; | |
709 | unsigned long flags; | |
710 | LIST_HEAD(head); | |
711 | int ret; | |
712 | ||
713 | spin_lock_irqsave(&mhi_cntrl->list_lock, flags); | |
714 | list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); | |
715 | spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); | |
716 | ||
717 | /* Process each queued channel ring. In case of an error, just process next element. */ | |
718 | list_for_each_entry_safe(itr, tmp, &head, node) { | |
719 | list_del(&itr->node); | |
720 | ring = itr->ring; | |
721 | ||
722 | /* Update the write offset for the ring */ | |
723 | ret = mhi_ep_update_wr_offset(ring); | |
724 | if (ret) { | |
725 | dev_err(dev, "Error updating write offset for ring\n"); | |
726 | kfree(itr); | |
727 | continue; | |
728 | } | |
729 | ||
730 | /* Sanity check to make sure there are elements in the ring */ | |
731 | if (ring->rd_offset == ring->wr_offset) { | |
732 | kfree(itr); | |
733 | continue; | |
734 | } | |
735 | ||
736 | el = &ring->ring_cache[ring->rd_offset]; | |
737 | chan = &mhi_cntrl->mhi_chan[ring->ch_id]; | |
738 | ||
739 | mutex_lock(&chan->lock); | |
740 | dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); | |
741 | ret = mhi_ep_process_ch_ring(ring, el); | |
742 | if (ret) { | |
743 | dev_err(dev, "Error processing ring for channel (%u): %d\n", | |
744 | ring->ch_id, ret); | |
745 | mutex_unlock(&chan->lock); | |
746 | kfree(itr); | |
747 | continue; | |
748 | } | |
749 | ||
750 | mutex_unlock(&chan->lock); | |
751 | kfree(itr); | |
752 | } | |
753 | } | |
754 | ||
f9baa4f7 MS |
755 | static void mhi_ep_state_worker(struct work_struct *work) |
756 | { | |
757 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); | |
758 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
759 | struct mhi_ep_state_transition *itr, *tmp; | |
760 | unsigned long flags; | |
761 | LIST_HEAD(head); | |
762 | int ret; | |
763 | ||
764 | spin_lock_irqsave(&mhi_cntrl->list_lock, flags); | |
765 | list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); | |
766 | spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); | |
767 | ||
768 | list_for_each_entry_safe(itr, tmp, &head, node) { | |
769 | list_del(&itr->node); | |
770 | dev_dbg(dev, "Handling MHI state transition to %s\n", | |
771 | mhi_state_str(itr->state)); | |
772 | ||
773 | switch (itr->state) { | |
774 | case MHI_STATE_M0: | |
775 | ret = mhi_ep_set_m0_state(mhi_cntrl); | |
776 | if (ret) | |
777 | dev_err(dev, "Failed to transition to M0 state\n"); | |
778 | break; | |
779 | case MHI_STATE_M3: | |
780 | ret = mhi_ep_set_m3_state(mhi_cntrl); | |
781 | if (ret) | |
782 | dev_err(dev, "Failed to transition to M3 state\n"); | |
783 | break; | |
784 | default: | |
785 | dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); | |
786 | break; | |
787 | } | |
788 | kfree(itr); | |
789 | } | |
790 | } | |
791 | ||
4799e71b MS |
792 | static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, |
793 | u32 ch_idx) | |
794 | { | |
795 | struct mhi_ep_ring_item *item; | |
796 | struct mhi_ep_ring *ring; | |
797 | bool work = !!ch_int; | |
798 | LIST_HEAD(head); | |
799 | u32 i; | |
800 | ||
801 | /* First add the ring items to a local list */ | |
802 | for_each_set_bit(i, &ch_int, 32) { | |
803 | /* Channel index varies for each register: 0, 32, 64, 96 */ | |
804 | u32 ch_id = ch_idx + i; | |
805 | ||
806 | ring = &mhi_cntrl->mhi_chan[ch_id].ring; | |
807 | item = kzalloc(sizeof(*item), GFP_ATOMIC); | |
808 | if (!item) | |
809 | return; | |
810 | ||
811 | item->ring = ring; | |
812 | list_add_tail(&item->node, &head); | |
813 | } | |
814 | ||
815 | /* Now, splice the local list into ch_db_list and queue the work item */ | |
816 | if (work) { | |
817 | spin_lock(&mhi_cntrl->list_lock); | |
818 | list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); | |
819 | spin_unlock(&mhi_cntrl->list_lock); | |
03c0bb8e MS |
820 | |
821 | queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); | |
4799e71b MS |
822 | } |
823 | } | |
824 | ||
825 | /* | |
826 | * Channel interrupt statuses are contained in 4 registers each of 32bit length. | |
827 | * For checking all interrupts, we need to loop through each registers and then | |
828 | * check for bits set. | |
829 | */ | |
830 | static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) | |
831 | { | |
832 | u32 ch_int, ch_idx, i; | |
833 | ||
834 | /* Bail out if there is no channel doorbell interrupt */ | |
835 | if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) | |
836 | return; | |
837 | ||
838 | for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { | |
839 | ch_idx = i * MHI_MASK_CH_LEN; | |
840 | ||
841 | /* Only process channel interrupt if the mask is enabled */ | |
842 | ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; | |
843 | if (ch_int) { | |
844 | mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); | |
845 | mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), | |
846 | mhi_cntrl->chdb[i].status); | |
847 | } | |
848 | } | |
849 | } | |
850 | ||
851 | static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, | |
852 | enum mhi_state state) | |
853 | { | |
854 | struct mhi_ep_state_transition *item; | |
855 | ||
856 | item = kzalloc(sizeof(*item), GFP_ATOMIC); | |
857 | if (!item) | |
858 | return; | |
859 | ||
860 | item->state = state; | |
861 | spin_lock(&mhi_cntrl->list_lock); | |
862 | list_add_tail(&item->node, &mhi_cntrl->st_transition_list); | |
863 | spin_unlock(&mhi_cntrl->list_lock); | |
864 | ||
865 | queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); | |
866 | } | |
867 | ||
868 | /* | |
869 | * Interrupt handler that services interrupts raised by the host writing to | |
870 | * MHICTRL and Command ring doorbell (CRDB) registers for state change and | |
871 | * channel interrupts. | |
872 | */ | |
873 | static irqreturn_t mhi_ep_irq(int irq, void *data) | |
874 | { | |
875 | struct mhi_ep_cntrl *mhi_cntrl = data; | |
876 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
877 | enum mhi_state state; | |
878 | u32 int_value; | |
7a97b6b4 | 879 | bool mhi_reset; |
4799e71b MS |
880 | |
881 | /* Acknowledge the ctrl interrupt */ | |
882 | int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); | |
883 | mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); | |
884 | ||
885 | /* Check for ctrl interrupt */ | |
886 | if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { | |
887 | dev_dbg(dev, "Processing ctrl interrupt\n"); | |
7a97b6b4 MS |
888 | mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); |
889 | if (mhi_reset) { | |
890 | dev_info(dev, "Host triggered MHI reset!\n"); | |
891 | disable_irq_nosync(mhi_cntrl->irq); | |
892 | schedule_work(&mhi_cntrl->reset_work); | |
893 | return IRQ_HANDLED; | |
894 | } | |
895 | ||
4799e71b MS |
896 | mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); |
897 | } | |
898 | ||
899 | /* Check for command doorbell interrupt */ | |
e8275690 | 900 | if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { |
4799e71b | 901 | dev_dbg(dev, "Processing command doorbell interrupt\n"); |
e8275690 MS |
902 | queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); |
903 | } | |
4799e71b MS |
904 | |
905 | /* Check for channel interrupts */ | |
906 | mhi_ep_check_channel_interrupt(mhi_cntrl); | |
907 | ||
908 | return IRQ_HANDLED; | |
909 | } | |
910 | ||
5d507ee0 MS |
911 | static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) |
912 | { | |
913 | struct mhi_ep_ring *ch_ring, *ev_ring; | |
914 | struct mhi_result result = {}; | |
915 | struct mhi_ep_chan *mhi_chan; | |
916 | int i; | |
917 | ||
918 | /* Stop all the channels */ | |
919 | for (i = 0; i < mhi_cntrl->max_chan; i++) { | |
920 | mhi_chan = &mhi_cntrl->mhi_chan[i]; | |
921 | if (!mhi_chan->ring.started) | |
922 | continue; | |
923 | ||
924 | mutex_lock(&mhi_chan->lock); | |
925 | /* Send channel disconnect status to client drivers */ | |
926 | if (mhi_chan->xfer_cb) { | |
927 | result.transaction_status = -ENOTCONN; | |
928 | result.bytes_xferd = 0; | |
929 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); | |
930 | } | |
931 | ||
932 | mhi_chan->state = MHI_CH_STATE_DISABLED; | |
933 | mutex_unlock(&mhi_chan->lock); | |
934 | } | |
935 | ||
936 | flush_workqueue(mhi_cntrl->wq); | |
937 | ||
938 | /* Destroy devices associated with all channels */ | |
939 | device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); | |
940 | ||
941 | /* Stop and reset the transfer rings */ | |
942 | for (i = 0; i < mhi_cntrl->max_chan; i++) { | |
943 | mhi_chan = &mhi_cntrl->mhi_chan[i]; | |
944 | if (!mhi_chan->ring.started) | |
945 | continue; | |
946 | ||
947 | ch_ring = &mhi_cntrl->mhi_chan[i].ring; | |
948 | mutex_lock(&mhi_chan->lock); | |
949 | mhi_ep_ring_reset(mhi_cntrl, ch_ring); | |
950 | mutex_unlock(&mhi_chan->lock); | |
951 | } | |
952 | ||
953 | /* Stop and reset the event rings */ | |
954 | for (i = 0; i < mhi_cntrl->event_rings; i++) { | |
955 | ev_ring = &mhi_cntrl->mhi_event[i].ring; | |
956 | if (!ev_ring->started) | |
957 | continue; | |
958 | ||
959 | mutex_lock(&mhi_cntrl->event_lock); | |
960 | mhi_ep_ring_reset(mhi_cntrl, ev_ring); | |
961 | mutex_unlock(&mhi_cntrl->event_lock); | |
962 | } | |
963 | ||
964 | /* Stop and reset the command ring */ | |
965 | mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); | |
966 | ||
967 | mhi_ep_free_host_cfg(mhi_cntrl); | |
968 | mhi_ep_mmio_mask_interrupts(mhi_cntrl); | |
969 | ||
970 | mhi_cntrl->enabled = false; | |
971 | } | |
972 | ||
7a97b6b4 MS |
973 | static void mhi_ep_reset_worker(struct work_struct *work) |
974 | { | |
975 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); | |
976 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
977 | enum mhi_state cur_state; | |
978 | int ret; | |
979 | ||
980 | mhi_ep_abort_transfer(mhi_cntrl); | |
981 | ||
982 | spin_lock_bh(&mhi_cntrl->state_lock); | |
983 | /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ | |
984 | mhi_ep_mmio_reset(mhi_cntrl); | |
985 | cur_state = mhi_cntrl->mhi_state; | |
986 | spin_unlock_bh(&mhi_cntrl->state_lock); | |
987 | ||
988 | /* | |
989 | * Only proceed further if the reset is due to SYS_ERR. The host will | |
990 | * issue reset during shutdown also and we don't need to do re-init in | |
991 | * that case. | |
992 | */ | |
993 | if (cur_state == MHI_STATE_SYS_ERR) { | |
994 | mhi_ep_mmio_init(mhi_cntrl); | |
995 | ||
996 | /* Set AMSS EE before signaling ready state */ | |
997 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); | |
998 | ||
999 | /* All set, notify the host that we are ready */ | |
1000 | ret = mhi_ep_set_ready_state(mhi_cntrl); | |
1001 | if (ret) | |
1002 | return; | |
1003 | ||
1004 | dev_dbg(dev, "READY state notification sent to the host\n"); | |
1005 | ||
1006 | ret = mhi_ep_enable(mhi_cntrl); | |
1007 | if (ret) { | |
1008 | dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); | |
1009 | return; | |
1010 | } | |
1011 | ||
1012 | enable_irq(mhi_cntrl->irq); | |
1013 | } | |
1014 | } | |
1015 | ||
f7d0806b MS |
1016 | /* |
1017 | * We don't need to do anything special other than setting the MHI SYS_ERR | |
1018 | * state. The host will reset all contexts and issue MHI RESET so that we | |
1019 | * could also recover from error state. | |
1020 | */ | |
1021 | void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) | |
1022 | { | |
1023 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
1024 | int ret; | |
1025 | ||
1026 | ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); | |
1027 | if (ret) | |
1028 | return; | |
1029 | ||
1030 | /* Signal host that the device went to SYS_ERR state */ | |
1031 | ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); | |
1032 | if (ret) | |
1033 | dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); | |
1034 | } | |
1035 | ||
fb3a26b7 MS |
1036 | int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) |
1037 | { | |
1038 | struct device *dev = &mhi_cntrl->mhi_dev->dev; | |
1039 | int ret, i; | |
1040 | ||
1041 | /* | |
1042 | * Mask all interrupts until the state machine is ready. Interrupts will | |
1043 | * be enabled later with mhi_ep_enable(). | |
1044 | */ | |
1045 | mhi_ep_mmio_mask_interrupts(mhi_cntrl); | |
1046 | mhi_ep_mmio_init(mhi_cntrl); | |
1047 | ||
1048 | mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), | |
1049 | GFP_KERNEL); | |
1050 | if (!mhi_cntrl->mhi_event) | |
1051 | return -ENOMEM; | |
1052 | ||
1053 | /* Initialize command, channel and event rings */ | |
1054 | mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); | |
1055 | for (i = 0; i < mhi_cntrl->max_chan; i++) | |
1056 | mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); | |
1057 | for (i = 0; i < mhi_cntrl->event_rings; i++) | |
1058 | mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); | |
1059 | ||
1060 | mhi_cntrl->mhi_state = MHI_STATE_RESET; | |
1061 | ||
1062 | /* Set AMSS EE before signaling ready state */ | |
1063 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); | |
1064 | ||
1065 | /* All set, notify the host that we are ready */ | |
1066 | ret = mhi_ep_set_ready_state(mhi_cntrl); | |
1067 | if (ret) | |
1068 | goto err_free_event; | |
1069 | ||
1070 | dev_dbg(dev, "READY state notification sent to the host\n"); | |
1071 | ||
1072 | ret = mhi_ep_enable(mhi_cntrl); | |
1073 | if (ret) { | |
1074 | dev_err(dev, "Failed to enable MHI endpoint\n"); | |
1075 | goto err_free_event; | |
1076 | } | |
1077 | ||
1078 | enable_irq(mhi_cntrl->irq); | |
1079 | mhi_cntrl->enabled = true; | |
1080 | ||
1081 | return 0; | |
1082 | ||
1083 | err_free_event: | |
1084 | kfree(mhi_cntrl->mhi_event); | |
1085 | ||
1086 | return ret; | |
1087 | } | |
1088 | EXPORT_SYMBOL_GPL(mhi_ep_power_up); | |
1089 | ||
5d507ee0 MS |
1090 | void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) |
1091 | { | |
1092 | if (mhi_cntrl->enabled) | |
1093 | mhi_ep_abort_transfer(mhi_cntrl); | |
1094 | ||
1095 | kfree(mhi_cntrl->mhi_event); | |
1096 | disable_irq(mhi_cntrl->irq); | |
1097 | } | |
1098 | EXPORT_SYMBOL_GPL(mhi_ep_power_down); | |
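
/*
 * Illustrative power sequencing sketch (hypothetical controller code, not
 * part of this driver): a controller driver typically powers the stack up
 * once the link to the host is up and powers it down before the link goes
 * away. 'my_cntrl' is an assumed, already registered mhi_ep_cntrl.
 *
 *	static int my_link_up(struct mhi_ep_cntrl *my_cntrl)
 *	{
 *		return mhi_ep_power_up(my_cntrl);
 *	}
 *
 *	static void my_link_down(struct mhi_ep_cntrl *my_cntrl)
 *	{
 *		mhi_ep_power_down(my_cntrl);
 *	}
 */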
1099 | ||
e4b7b5f0 MS |
1100 | void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) |
1101 | { | |
1102 | struct mhi_ep_chan *mhi_chan; | |
1103 | u32 tmp; | |
1104 | int i; | |
1105 | ||
1106 | for (i = 0; i < mhi_cntrl->max_chan; i++) { | |
1107 | mhi_chan = &mhi_cntrl->mhi_chan[i]; | |
1108 | ||
1109 | if (!mhi_chan->mhi_dev) | |
1110 | continue; | |
1111 | ||
1112 | mutex_lock(&mhi_chan->lock); | |
1113 | /* Skip if the channel is not currently running */ | |
1114 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); | |
1115 | if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { | |
1116 | mutex_unlock(&mhi_chan->lock); | |
1117 | continue; | |
1118 | } | |
1119 | ||
1120 | dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); | |
1121 | /* Set channel state to SUSPENDED */ | |
1122 | tmp &= ~CHAN_CTX_CHSTATE_MASK; | |
1123 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); | |
1124 | mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); | |
1125 | mutex_unlock(&mhi_chan->lock); | |
1126 | } | |
1127 | } | |
1128 | ||
1129 | void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) | |
1130 | { | |
1131 | struct mhi_ep_chan *mhi_chan; | |
1132 | u32 tmp; | |
1133 | int i; | |
1134 | ||
1135 | for (i = 0; i < mhi_cntrl->max_chan; i++) { | |
1136 | mhi_chan = &mhi_cntrl->mhi_chan[i]; | |
1137 | ||
1138 | if (!mhi_chan->mhi_dev) | |
1139 | continue; | |
1140 | ||
1141 | mutex_lock(&mhi_chan->lock); | |
1142 | /* Skip if the channel is not currently suspended */ | |
1143 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); | |
1144 | if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { | |
1145 | mutex_unlock(&mhi_chan->lock); | |
1146 | continue; | |
1147 | } | |
1148 | ||
1149 | dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); | |
1150 | /* Set channel state to RUNNING */ | |
1151 | tmp &= ~CHAN_CTX_CHSTATE_MASK; | |
1152 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); | |
1153 | mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); | |
1154 | mutex_unlock(&mhi_chan->lock); | |
1155 | } | |
1156 | } | |
1157 | ||
d434743e MS |
1158 | static void mhi_ep_release_device(struct device *dev) |
1159 | { | |
1160 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); | |
1161 | ||
1162 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) | |
1163 | mhi_dev->mhi_cntrl->mhi_dev = NULL; | |
1164 | ||
1165 | /* | |
1166 | * We need to set the mhi_chan->mhi_dev to NULL here since the MHI | |
1167 | * devices for the channels will only get created in mhi_ep_create_device() | |
1168 | * if the mhi_dev associated with it is NULL. | |
1169 | */ | |
1170 | if (mhi_dev->ul_chan) | |
1171 | mhi_dev->ul_chan->mhi_dev = NULL; | |
1172 | ||
1173 | if (mhi_dev->dl_chan) | |
1174 | mhi_dev->dl_chan->mhi_dev = NULL; | |
1175 | ||
1176 | kfree(mhi_dev); | |
1177 | } | |
1178 | ||
1179 | static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, | |
1180 | enum mhi_device_type dev_type) | |
1181 | { | |
1182 | struct mhi_ep_device *mhi_dev; | |
1183 | struct device *dev; | |
1184 | ||
1185 | mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); | |
1186 | if (!mhi_dev) | |
1187 | return ERR_PTR(-ENOMEM); | |
1188 | ||
1189 | dev = &mhi_dev->dev; | |
1190 | device_initialize(dev); | |
1191 | dev->bus = &mhi_ep_bus_type; | |
1192 | dev->release = mhi_ep_release_device; | |
1193 | ||
1194 | /* Controller device is always allocated first */ | |
1195 | if (dev_type == MHI_DEVICE_CONTROLLER) | |
1196 | /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */ | |
1197 | dev->parent = mhi_cntrl->cntrl_dev; | |
1198 | else | |
1199 | /* for MHI client devices, parent is the MHI controller device */ | |
1200 | dev->parent = &mhi_cntrl->mhi_dev->dev; | |
1201 | ||
1202 | mhi_dev->mhi_cntrl = mhi_cntrl; | |
1203 | mhi_dev->dev_type = dev_type; | |
1204 | ||
1205 | return mhi_dev; | |
1206 | } | |
1207 | ||
297c77a0 MS |
1208 | /* |
1209 | * MHI channels are always defined in pairs with UL as the even numbered | |
1210 | * channel and DL as odd numbered one. This function gets UL channel (primary) | |
1211 | * as the ch_id and always looks after the next entry in channel list for | |
1212 | * the corresponding DL channel (secondary). | |
1213 | */ | |
1214 | static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) | |
1215 | { | |
1216 | struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; | |
1217 | struct device *dev = mhi_cntrl->cntrl_dev; | |
1218 | struct mhi_ep_device *mhi_dev; | |
1219 | int ret; | |
1220 | ||
1221 | /* Check if the channel name is same for both UL and DL */ | |
1222 | if (strcmp(mhi_chan->name, mhi_chan[1].name)) { | |
1223 | dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n", | |
1224 | mhi_chan->name, mhi_chan[1].name); | |
1225 | return -EINVAL; | |
1226 | } | |
1227 | ||
1228 | mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); | |
1229 | if (IS_ERR(mhi_dev)) | |
1230 | return PTR_ERR(mhi_dev); | |
1231 | ||
1232 | /* Configure primary channel */ | |
1233 | mhi_dev->ul_chan = mhi_chan; | |
1234 | get_device(&mhi_dev->dev); | |
1235 | mhi_chan->mhi_dev = mhi_dev; | |
1236 | ||
1237 | /* Configure secondary channel as well */ | |
1238 | mhi_chan++; | |
1239 | mhi_dev->dl_chan = mhi_chan; | |
1240 | get_device(&mhi_dev->dev); | |
1241 | mhi_chan->mhi_dev = mhi_dev; | |
1242 | ||
1243 | /* Channel name is same for both UL and DL */ | |
1244 | mhi_dev->name = mhi_chan->name; | |
2ebb36ea | 1245 | ret = dev_set_name(&mhi_dev->dev, "%s_%s", |
297c77a0 MS |
1246 | dev_name(&mhi_cntrl->mhi_dev->dev), |
1247 | mhi_dev->name); | |
2ebb36ea BL |
1248 | if (ret) { |
1249 | put_device(&mhi_dev->dev); | |
1250 | return ret; | |
1251 | } | |
297c77a0 MS |
1252 | |
1253 | ret = device_add(&mhi_dev->dev); | |
1254 | if (ret) | |
1255 | put_device(&mhi_dev->dev); | |
1256 | ||
1257 | return ret; | |
1258 | } | |
1259 | ||
1260 | static int mhi_ep_destroy_device(struct device *dev, void *data) | |
1261 | { | |
1262 | struct mhi_ep_device *mhi_dev; | |
1263 | struct mhi_ep_cntrl *mhi_cntrl; | |
1264 | struct mhi_ep_chan *ul_chan, *dl_chan; | |
1265 | ||
1266 | if (dev->bus != &mhi_ep_bus_type) | |
1267 | return 0; | |
1268 | ||
1269 | mhi_dev = to_mhi_ep_device(dev); | |
1270 | mhi_cntrl = mhi_dev->mhi_cntrl; | |
1271 | ||
1272 | /* Only destroy devices created for channels */ | |
1273 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) | |
1274 | return 0; | |
1275 | ||
1276 | ul_chan = mhi_dev->ul_chan; | |
1277 | dl_chan = mhi_dev->dl_chan; | |
1278 | ||
1279 | if (ul_chan) | |
1280 | put_device(&ul_chan->mhi_dev->dev); | |
1281 | ||
1282 | if (dl_chan) | |
1283 | put_device(&dl_chan->mhi_dev->dev); | |
1284 | ||
1285 | dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", | |
1286 | mhi_dev->name); | |
1287 | ||
1288 | /* Notify the client and remove the device from MHI bus */ | |
1289 | device_del(dev); | |
1290 | put_device(dev); | |
1291 | ||
1292 | return 0; | |
1293 | } | |
1294 | ||
d434743e MS |
1295 | static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, |
1296 | const struct mhi_ep_cntrl_config *config) | |
1297 | { | |
1298 | const struct mhi_ep_channel_config *ch_cfg; | |
1299 | struct device *dev = mhi_cntrl->cntrl_dev; | |
1300 | u32 chan, i; | |
1301 | int ret = -EINVAL; | |
1302 | ||
1303 | mhi_cntrl->max_chan = config->max_channels; | |
1304 | ||
1305 | /* | |
1306 | * Allocate max_channels supported by the MHI endpoint and populate | |
1307 | * only the defined channels | |
1308 | */ | |
1309 | mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), | |
1310 | GFP_KERNEL); | |
1311 | if (!mhi_cntrl->mhi_chan) | |
1312 | return -ENOMEM; | |
1313 | ||
1314 | for (i = 0; i < config->num_channels; i++) { | |
1315 | struct mhi_ep_chan *mhi_chan; | |
1316 | ||
1317 | ch_cfg = &config->ch_cfg[i]; | |
1318 | ||
1319 | chan = ch_cfg->num; | |
1320 | if (chan >= mhi_cntrl->max_chan) { | |
1321 | dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", | |
1322 | chan, mhi_cntrl->max_chan); | |
1323 | goto error_chan_cfg; | |
1324 | } | |
1325 | ||
1326 | /* Bi-directional and direction less channels are not supported */ | |
1327 | if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { | |
1328 | dev_err(dev, "Invalid direction (%u) for channel (%u)\n", | |
1329 | ch_cfg->dir, chan); | |
1330 | goto error_chan_cfg; | |
1331 | } | |
1332 | ||
1333 | mhi_chan = &mhi_cntrl->mhi_chan[chan]; | |
1334 | mhi_chan->name = ch_cfg->name; | |
1335 | mhi_chan->chan = chan; | |
1336 | mhi_chan->dir = ch_cfg->dir; | |
1337 | mutex_init(&mhi_chan->lock); | |
1338 | } | |
1339 | ||
1340 | return 0; | |
1341 | ||
1342 | error_chan_cfg: | |
1343 | kfree(mhi_cntrl->mhi_chan); | |
1344 | ||
1345 | return ret; | |
1346 | } | |
1347 | ||
1348 | /* | |
1349 | * Allocate channel and command rings here. Event rings will be allocated | |
1350 | * in mhi_ep_power_up() as the config comes from the host. | |
1351 | */ | |
1352 | int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, | |
1353 | const struct mhi_ep_cntrl_config *config) | |
1354 | { | |
1355 | struct mhi_ep_device *mhi_dev; | |
1356 | int ret; | |
1357 | ||
4799e71b | 1358 | if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) |
d434743e MS |
1359 | return -EINVAL; |
1360 | ||
1361 | ret = mhi_ep_chan_init(mhi_cntrl, config); | |
1362 | if (ret) | |
1363 | return ret; | |
1364 | ||
1365 | mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); | |
1366 | if (!mhi_cntrl->mhi_cmd) { | |
1367 | ret = -ENOMEM; | |
1368 | goto err_free_ch; | |
1369 | } | |
1370 | ||
f9baa4f7 | 1371 | INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); |
7a97b6b4 | 1372 | INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); |
e8275690 | 1373 | INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); |
03c0bb8e | 1374 | INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); |
f9baa4f7 MS |
1375 | |
1376 | mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); | |
1377 | if (!mhi_cntrl->wq) { | |
1378 | ret = -ENOMEM; | |
1379 | goto err_free_cmd; | |
1380 | } | |
1381 | ||
1382 | INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); | |
4799e71b | 1383 | INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); |
f9baa4f7 MS |
1384 | spin_lock_init(&mhi_cntrl->state_lock); |
1385 | spin_lock_init(&mhi_cntrl->list_lock); | |
961aeb68 MS |
1386 | mutex_init(&mhi_cntrl->event_lock); |
1387 | ||
e9e4da23 MS |
1388 | /* Set MHI version and AMSS EE before enumeration */ |
1389 | mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); | |
1390 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); | |
1391 | ||
d434743e MS |
1392 | /* Set controller index */ |
1393 | ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); | |
1394 | if (ret < 0) | |
f9baa4f7 | 1395 | goto err_destroy_wq; |
d434743e MS |
1396 | |
1397 | mhi_cntrl->index = ret; | |
1398 | ||
4799e71b MS |
1399 | irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); |
1400 | ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, | |
1401 | "doorbell_irq", mhi_cntrl); | |
1402 | if (ret) { | |
1403 | dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); | |
1404 | goto err_ida_free; | |
1405 | } | |
1406 | ||
d434743e MS |
1407 | /* Allocate the controller device */ |
1408 | mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); | |
1409 | if (IS_ERR(mhi_dev)) { | |
1410 | dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); | |
1411 | ret = PTR_ERR(mhi_dev); | |
4799e71b | 1412 | goto err_free_irq; |
d434743e MS |
1413 | } |
1414 | ||
2ebb36ea BL |
1415 | ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); |
1416 | if (ret) | |
1417 | goto err_put_dev; | |
1418 | ||
d434743e MS |
1419 | mhi_dev->name = dev_name(&mhi_dev->dev); |
1420 | mhi_cntrl->mhi_dev = mhi_dev; | |
1421 | ||
1422 | ret = device_add(&mhi_dev->dev); | |
1423 | if (ret) | |
1424 | goto err_put_dev; | |
1425 | ||
1426 | dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); | |
1427 | ||
1428 | return 0; | |
1429 | ||
1430 | err_put_dev: | |
1431 | put_device(&mhi_dev->dev); | |
4799e71b MS |
1432 | err_free_irq: |
1433 | free_irq(mhi_cntrl->irq, mhi_cntrl); | |
d434743e MS |
1434 | err_ida_free: |
1435 | ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); | |
f9baa4f7 MS |
1436 | err_destroy_wq: |
1437 | destroy_workqueue(mhi_cntrl->wq); | |
d434743e MS |
1438 | err_free_cmd: |
1439 | kfree(mhi_cntrl->mhi_cmd); | |
1440 | err_free_ch: | |
1441 | kfree(mhi_cntrl->mhi_chan); | |
1442 | ||
1443 | return ret; | |
1444 | } | |
1445 | EXPORT_SYMBOL_GPL(mhi_ep_register_controller); | |
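
/*
 * Illustrative registration sketch (hypothetical controller code, not part
 * of this driver). The controller driver is expected to fill in the MMIO
 * base, IRQ and the host-memory access callbacks before registering; the
 * names below are assumptions, only the mhi_ep_cntrl/mhi_ep_cntrl_config
 * fields themselves come from <linux/mhi_ep.h>.
 *
 *	static const struct mhi_ep_channel_config my_channels[] = {
 *		{ .name = "LOOPBACK", .num = 0, .num_elements = 64, .dir = DMA_TO_DEVICE },
 *		{ .name = "LOOPBACK", .num = 1, .num_elements = 64, .dir = DMA_FROM_DEVICE },
 *	};
 *
 *	static const struct mhi_ep_cntrl_config my_config = {
 *		.mhi_version = 0x1000000,
 *		.max_channels = 128,
 *		.num_channels = ARRAY_SIZE(my_channels),
 *		.ch_cfg = my_channels,
 *	};
 *
 *	// mhi_cntrl->cntrl_dev, ->mmio, ->irq and the raise_irq/alloc_map/
 *	// unmap_free/read_from_host/write_to_host callbacks must be set first
 *	ret = mhi_ep_register_controller(mhi_cntrl, &my_config);
 */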
1446 | ||
5d507ee0 MS |
1447 | /* |
1448 | * It is expected that the controller drivers will power down the MHI EP stack | |
1449 | * using "mhi_ep_power_down()" before calling this function to unregister themselves. | |
1450 | */ | |
d434743e MS |
1451 | void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) |
1452 | { | |
1453 | struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; | |
1454 | ||
f9baa4f7 MS |
1455 | destroy_workqueue(mhi_cntrl->wq); |
1456 | ||
4799e71b MS |
1457 | free_irq(mhi_cntrl->irq, mhi_cntrl); |
1458 | ||
d434743e MS |
1459 | kfree(mhi_cntrl->mhi_cmd); |
1460 | kfree(mhi_cntrl->mhi_chan); | |
1461 | ||
1462 | device_del(&mhi_dev->dev); | |
1463 | put_device(&mhi_dev->dev); | |
1464 | ||
1465 | ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); | |
1466 | } | |
1467 | EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); | |
1468 | ||
ee0360b2 MS |
1469 | static int mhi_ep_driver_probe(struct device *dev) |
1470 | { | |
1471 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); | |
1472 | struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); | |
1473 | struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; | |
1474 | struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; | |
1475 | ||
1476 | ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; | |
1477 | dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; | |
1478 | ||
1479 | return mhi_drv->probe(mhi_dev, mhi_dev->id); | |
1480 | } | |
1481 | ||
1482 | static int mhi_ep_driver_remove(struct device *dev) | |
1483 | { | |
1484 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); | |
1485 | struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); | |
1486 | struct mhi_result result = {}; | |
1487 | struct mhi_ep_chan *mhi_chan; | |
1488 | int dir; | |
1489 | ||
1490 | /* Skip if it is a controller device */ | |
1491 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) | |
1492 | return 0; | |
1493 | ||
1494 | /* Disconnect the channels associated with the driver */ | |
1495 | for (dir = 0; dir < 2; dir++) { | |
1496 | mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; | |
1497 | ||
1498 | if (!mhi_chan) | |
1499 | continue; | |
1500 | ||
1501 | mutex_lock(&mhi_chan->lock); | |
1502 | /* Send channel disconnect status to the client driver */ | |
1503 | if (mhi_chan->xfer_cb) { | |
1504 | result.transaction_status = -ENOTCONN; | |
1505 | result.bytes_xferd = 0; | |
1506 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); | |
1507 | } | |
1508 | ||
1509 | mhi_chan->state = MHI_CH_STATE_DISABLED; | |
1510 | mhi_chan->xfer_cb = NULL; | |
1511 | mutex_unlock(&mhi_chan->lock); | |
1512 | } | |
1513 | ||
1514 | /* Remove the client driver now */ | |
1515 | mhi_drv->remove(mhi_dev); | |
1516 | ||
1517 | return 0; | |
1518 | } | |
1519 | ||
1520 | int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) | |
1521 | { | |
1522 | struct device_driver *driver = &mhi_drv->driver; | |
1523 | ||
1524 | if (!mhi_drv->probe || !mhi_drv->remove) | |
1525 | return -EINVAL; | |
1526 | ||
1527 | /* Client drivers should have callbacks defined for both channels */ | |
1528 | if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) | |
1529 | return -EINVAL; | |
1530 | ||
1531 | driver->bus = &mhi_ep_bus_type; | |
1532 | driver->owner = owner; | |
1533 | driver->probe = mhi_ep_driver_probe; | |
1534 | driver->remove = mhi_ep_driver_remove; | |
1535 | ||
1536 | return driver_register(driver); | |
1537 | } | |
1538 | EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); | |
1539 | ||
1540 | void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) | |
1541 | { | |
1542 | driver_unregister(&mhi_drv->driver); | |
1543 | } | |
1544 | EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); | |
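
/*
 * Illustrative client driver skeleton (hypothetical, not part of this
 * driver). The id_table terminator and the two transfer callbacks are
 * required by mhi_ep_match()/__mhi_ep_driver_register() below/above; the
 * module_mhi_ep_driver() convenience macro is assumed to be the
 * <linux/mhi_ep.h> wrapper around __mhi_ep_driver_register().
 *
 *	static const struct mhi_device_id my_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *
 *	static struct mhi_ep_driver my_driver = {
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.ul_xfer_cb = my_ul_cb,
 *		.dl_xfer_cb = my_dl_cb,
 *		.driver = { .name = "my_mhi_ep_client" },
 *	};
 *	module_mhi_ep_driver(my_driver);
 */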
1545 | ||
c268c0a8 MS |
1546 | static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) |
1547 | { | |
1548 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); | |
1549 | ||
1550 | return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, | |
1551 | mhi_dev->name); | |
1552 | } | |
1553 | ||
d434743e MS |
1554 | static int mhi_ep_match(struct device *dev, struct device_driver *drv) |
1555 | { | |
1556 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); | |
ee0360b2 MS |
1557 | struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); |
1558 | const struct mhi_device_id *id; | |
d434743e MS |
1559 | |
1560 | /* | |
1561 | * If the device is a controller type then there is no client driver | |
1562 | * associated with it | |
1563 | */ | |
1564 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) | |
1565 | return 0; | |
1566 | ||
ee0360b2 MS |
1567 | for (id = mhi_drv->id_table; id->chan[0]; id++) |
1568 | if (!strcmp(mhi_dev->name, id->chan)) { | |
1569 | mhi_dev->id = id; | |
1570 | return 1; | |
1571 | } | |
1572 | ||
d434743e MS |
1573 | return 0; |
1574 | }; | |
1575 | ||
1576 | struct bus_type mhi_ep_bus_type = { | |
1577 | .name = "mhi_ep", | |
1578 | .dev_name = "mhi_ep", | |
1579 | .match = mhi_ep_match, | |
c268c0a8 | 1580 | .uevent = mhi_ep_uevent, |
d434743e MS |
1581 | }; |
1582 | ||
1583 | static int __init mhi_ep_init(void) | |
1584 | { | |
1585 | return bus_register(&mhi_ep_bus_type); | |
1586 | } | |
1587 | ||
1588 | static void __exit mhi_ep_exit(void) | |
1589 | { | |
1590 | bus_unregister(&mhi_ep_bus_type); | |
1591 | } | |
1592 | ||
1593 | postcore_initcall(mhi_ep_init); | |
1594 | module_exit(mhi_ep_exit); | |
1595 | ||
1596 | MODULE_LICENSE("GPL v2"); | |
1597 | MODULE_DESCRIPTION("MHI Bus Endpoint stack"); | |
1598 | MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); |