bus: mhi: ep: Fix off by one in mhi_ep_process_cmd_ring()
drivers/bus/mhi/ep/main.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * MHI Endpoint bus stack
4 *
5 * Copyright (C) 2022 Linaro Ltd.
6 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
7 */
8
9#include <linux/bitfield.h>
10#include <linux/delay.h>
11#include <linux/dma-direction.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
 14#include <linux/irq.h>
15#include <linux/mhi_ep.h>
16#include <linux/mod_devicetable.h>
17#include <linux/module.h>
18#include "internal.h"
19
20#define M0_WAIT_DELAY_MS 100
21#define M0_WAIT_COUNT 100
22
23static DEFINE_IDA(mhi_ep_cntrl_ida);
24
 25static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
26static int mhi_ep_destroy_device(struct device *dev, void *data);
27
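/*
 * Common helper for sending an event to the host: starts the event ring on
 * first use, queues the element under event_lock and rings the doorbell IRQ
 * unless the host asked for interrupt moderation via the BEI flag.
 */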
28static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
29 struct mhi_ring_element *el, bool bei)
30{
31 struct device *dev = &mhi_cntrl->mhi_dev->dev;
32 union mhi_ep_ring_ctx *ctx;
33 struct mhi_ep_ring *ring;
34 int ret;
35
36 mutex_lock(&mhi_cntrl->event_lock);
37 ring = &mhi_cntrl->mhi_event[ring_idx].ring;
38 ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
39 if (!ring->started) {
40 ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
41 if (ret) {
42 dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
43 goto err_unlock;
44 }
45 }
46
47 /* Add element to the event ring */
48 ret = mhi_ep_ring_add_element(ring, el);
49 if (ret) {
50 dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
51 goto err_unlock;
52 }
53
54 mutex_unlock(&mhi_cntrl->event_lock);
55
56 /*
57 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
58 * set this flag for interrupt moderation as per MHI protocol.
59 */
60 if (!bei)
61 mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
62
63 return 0;
64
65err_unlock:
66 mutex_unlock(&mhi_cntrl->event_lock);
67
68 return ret;
69}
70
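/* Send a transfer completion (TX) event for the TRE at the current read offset */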
71static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
72 struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
73{
74 struct mhi_ring_element event = {};
75
76 event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
77 event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
78 event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
79
80 return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
81}
82
83int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
84{
85 struct mhi_ring_element event = {};
86
87 event.dword[0] = MHI_SC_EV_DWORD0(state);
88 event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
89
90 return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
91}
92
93int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
94{
95 struct mhi_ring_element event = {};
96
97 event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
98 event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
99
100 return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
101}
102
103static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
104{
105 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
106 struct mhi_ring_element event = {};
107
108 event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
109 event.dword[0] = MHI_CC_EV_DWORD0(code);
110 event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
111
112 return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
113}
114
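/*
 * Process a single command ring element from the host. Only the channel
 * START, STOP and RESET commands are handled; each updates the cached channel
 * context and acknowledges the host with a command completion event.
 */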
115static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
116{
117 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
118 struct device *dev = &mhi_cntrl->mhi_dev->dev;
119 struct mhi_result result = {};
120 struct mhi_ep_chan *mhi_chan;
121 struct mhi_ep_ring *ch_ring;
122 u32 tmp, ch_id;
123 int ret;
124
125 ch_id = MHI_TRE_GET_CMD_CHID(el);
126
127 /* Check if the channel is supported by the controller */
 128 if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
129 dev_err(dev, "Channel (%u) not supported!\n", ch_id);
130 return -ENODEV;
131 }
132
133 mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
134 ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
135
136 switch (MHI_TRE_GET_CMD_TYPE(el)) {
137 case MHI_PKT_TYPE_START_CHAN_CMD:
138 dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
139
140 mutex_lock(&mhi_chan->lock);
141 /* Initialize and configure the corresponding channel ring */
142 if (!ch_ring->started) {
143 ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
144 (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
145 if (ret) {
146 dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
147 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
148 MHI_EV_CC_UNDEFINED_ERR);
149 if (ret)
150 dev_err(dev, "Error sending completion event: %d\n", ret);
151
152 goto err_unlock;
153 }
154 }
155
156 /* Set channel state to RUNNING */
157 mhi_chan->state = MHI_CH_STATE_RUNNING;
158 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
159 tmp &= ~CHAN_CTX_CHSTATE_MASK;
160 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
161 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
162
163 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
164 if (ret) {
165 dev_err(dev, "Error sending command completion event (%u)\n",
166 MHI_EV_CC_SUCCESS);
167 goto err_unlock;
168 }
169
170 mutex_unlock(&mhi_chan->lock);
171
172 /*
173 * Create MHI device only during UL channel start. Since the MHI
174 * channels operate in a pair, we'll associate both UL and DL
175 * channels to the same device.
176 *
 177 * We also need to check for mhi_dev != NULL because the host
178 * will issue START_CHAN command during resume and we don't
179 * destroy the device during suspend.
180 */
181 if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
182 ret = mhi_ep_create_device(mhi_cntrl, ch_id);
183 if (ret) {
184 dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
185 mhi_ep_handle_syserr(mhi_cntrl);
186 return ret;
187 }
188 }
189
190 /* Finally, enable DB for the channel */
191 mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
192
193 break;
194 case MHI_PKT_TYPE_STOP_CHAN_CMD:
195 dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
196 if (!ch_ring->started) {
197 dev_err(dev, "Channel (%u) not opened\n", ch_id);
198 return -ENODEV;
199 }
200
201 mutex_lock(&mhi_chan->lock);
202 /* Disable DB for the channel */
203 mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
204
205 /* Send channel disconnect status to client drivers */
206 if (mhi_chan->xfer_cb) {
207 result.transaction_status = -ENOTCONN;
208 result.bytes_xferd = 0;
209 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
210 }
211
212 /* Set channel state to STOP */
213 mhi_chan->state = MHI_CH_STATE_STOP;
214 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
215 tmp &= ~CHAN_CTX_CHSTATE_MASK;
216 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
217 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
218
219 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
220 if (ret) {
221 dev_err(dev, "Error sending command completion event (%u)\n",
222 MHI_EV_CC_SUCCESS);
223 goto err_unlock;
224 }
225
226 mutex_unlock(&mhi_chan->lock);
227 break;
228 case MHI_PKT_TYPE_RESET_CHAN_CMD:
 229 dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
230 if (!ch_ring->started) {
231 dev_err(dev, "Channel (%u) not opened\n", ch_id);
232 return -ENODEV;
233 }
234
235 mutex_lock(&mhi_chan->lock);
236 /* Stop and reset the transfer ring */
237 mhi_ep_ring_reset(mhi_cntrl, ch_ring);
238
239 /* Send channel disconnect status to client driver */
240 if (mhi_chan->xfer_cb) {
241 result.transaction_status = -ENOTCONN;
242 result.bytes_xferd = 0;
243 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
244 }
245
246 /* Set channel state to DISABLED */
247 mhi_chan->state = MHI_CH_STATE_DISABLED;
248 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
249 tmp &= ~CHAN_CTX_CHSTATE_MASK;
250 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
251 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
252
253 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
254 if (ret) {
255 dev_err(dev, "Error sending command completion event (%u)\n",
256 MHI_EV_CC_SUCCESS);
257 goto err_unlock;
258 }
259
260 mutex_unlock(&mhi_chan->lock);
261 break;
262 default:
263 dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
264 MHI_TRE_GET_CMD_TYPE(el), ch_id);
265 return -EINVAL;
266 }
267
268 return 0;
269
270err_unlock:
271 mutex_unlock(&mhi_chan->lock);
272
273 return ret;
274}
275
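/* Return true if the transfer ring for the given direction has no elements to process */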
276bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
277{
278 struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
279 mhi_dev->ul_chan;
280 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
281 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
282
283 return !!(ring->rd_offset == ring->wr_offset);
284}
285EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
286
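/*
 * Copy up to "len" bytes of a TD from host memory into result->buf_addr,
 * walking the transfer ring one TRE at a time and sending EOB/EOT completion
 * events to the host as requested by the flags in each TRE.
 */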
287static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
288 struct mhi_ep_ring *ring,
289 struct mhi_result *result,
290 u32 len)
291{
292 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
293 struct device *dev = &mhi_cntrl->mhi_dev->dev;
294 size_t tr_len, read_offset, write_offset;
295 struct mhi_ring_element *el;
296 bool tr_done = false;
297 void *write_addr;
298 u64 read_addr;
299 u32 buf_left;
300 int ret;
301
302 buf_left = len;
303
304 do {
305 /* Don't process the transfer ring if the channel is not in RUNNING state */
306 if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
307 dev_err(dev, "Channel not available\n");
308 return -ENODEV;
309 }
310
311 el = &ring->ring_cache[ring->rd_offset];
312
313 /* Check if there is data pending to be read from previous read operation */
314 if (mhi_chan->tre_bytes_left) {
315 dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
316 tr_len = min(buf_left, mhi_chan->tre_bytes_left);
317 } else {
318 mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
319 mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
320 mhi_chan->tre_bytes_left = mhi_chan->tre_size;
321
322 tr_len = min(buf_left, mhi_chan->tre_size);
323 }
324
325 read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
326 write_offset = len - buf_left;
327 read_addr = mhi_chan->tre_loc + read_offset;
328 write_addr = result->buf_addr + write_offset;
329
330 dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
331 ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
332 if (ret < 0) {
333 dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
334 return ret;
335 }
336
337 buf_left -= tr_len;
338 mhi_chan->tre_bytes_left -= tr_len;
339
340 /*
341 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
342 * read completely:
343 *
344 * 1. Send completion event to the host based on the flags set in TRE.
345 * 2. Increment the local read offset of the transfer ring.
346 */
347 if (!mhi_chan->tre_bytes_left) {
348 /*
349 * The host will split the data packet into multiple TREs if it can't fit
350 * the packet in a single TRE. In that case, CHAIN flag will be set by the
351 * host for all TREs except the last one.
352 */
353 if (MHI_TRE_DATA_GET_CHAIN(el)) {
354 /*
355 * IEOB (Interrupt on End of Block) flag will be set by the host if
356 * it expects the completion event for all TREs of a TD.
357 */
358 if (MHI_TRE_DATA_GET_IEOB(el)) {
359 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
360 MHI_TRE_DATA_GET_LEN(el),
361 MHI_EV_CC_EOB);
362 if (ret < 0) {
363 dev_err(&mhi_chan->mhi_dev->dev,
364 "Error sending transfer compl. event\n");
365 return ret;
366 }
367 }
368 } else {
369 /*
370 * IEOT (Interrupt on End of Transfer) flag will be set by the host
371 * for the last TRE of the TD and expects the completion event for
372 * the same.
373 */
374 if (MHI_TRE_DATA_GET_IEOT(el)) {
375 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
376 MHI_TRE_DATA_GET_LEN(el),
377 MHI_EV_CC_EOT);
378 if (ret < 0) {
379 dev_err(&mhi_chan->mhi_dev->dev,
380 "Error sending transfer compl. event\n");
381 return ret;
382 }
383 }
384
385 tr_done = true;
386 }
387
388 mhi_ep_ring_inc_index(ring);
389 }
390
391 result->bytes_xferd += tr_len;
392 } while (buf_left && !tr_done);
393
394 return 0;
395}
396
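/*
 * Process a channel ring doorbell: for DL channels only the client callback
 * is invoked, while UL channels are drained by reading TDs from the host and
 * passing them to the client's xfer_cb() until the ring becomes empty.
 */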
397static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
398{
399 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
400 struct mhi_result result = {};
401 u32 len = MHI_EP_DEFAULT_MTU;
402 struct mhi_ep_chan *mhi_chan;
403 int ret;
404
405 mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
406
407 /*
408 * Bail out if transfer callback is not registered for the channel.
409 * This is most likely due to the client driver not loaded at this point.
410 */
411 if (!mhi_chan->xfer_cb) {
412 dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
413 return -ENODEV;
414 }
415
416 if (ring->ch_id % 2) {
417 /* DL channel */
418 result.dir = mhi_chan->dir;
419 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
420 } else {
421 /* UL channel */
422 result.buf_addr = kzalloc(len, GFP_KERNEL);
423 if (!result.buf_addr)
424 return -ENOMEM;
425
426 do {
427 ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
428 if (ret < 0) {
429 dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
430 kfree(result.buf_addr);
431 return ret;
432 }
433
434 result.dir = mhi_chan->dir;
435 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
436 result.bytes_xferd = 0;
437 memset(result.buf_addr, 0, len);
438
439 /* Read until the ring becomes empty */
440 } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
441
442 kfree(result.buf_addr);
443 }
444
445 return 0;
446}
447
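/*
 * Send an SKB to the host over the DL channel, splitting it across as many
 * TREs as needed and reporting EOT (or OVERFLOW) completion for each TRE used.
 */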
448/* TODO: Handle partially formed TDs */
449int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
450{
451 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
452 struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
453 struct device *dev = &mhi_chan->mhi_dev->dev;
454 struct mhi_ring_element *el;
455 u32 buf_left, read_offset;
456 struct mhi_ep_ring *ring;
457 enum mhi_ev_ccs code;
458 void *read_addr;
459 u64 write_addr;
460 size_t tr_len;
461 u32 tre_len;
462 int ret;
463
464 buf_left = skb->len;
465 ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
466
467 mutex_lock(&mhi_chan->lock);
468
469 do {
470 /* Don't process the transfer ring if the channel is not in RUNNING state */
471 if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
472 dev_err(dev, "Channel not available\n");
473 ret = -ENODEV;
474 goto err_exit;
475 }
476
477 if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
478 dev_err(dev, "TRE not available!\n");
479 ret = -ENOSPC;
480 goto err_exit;
481 }
482
483 el = &ring->ring_cache[ring->rd_offset];
484 tre_len = MHI_TRE_DATA_GET_LEN(el);
485
486 tr_len = min(buf_left, tre_len);
487 read_offset = skb->len - buf_left;
488 read_addr = skb->data + read_offset;
489 write_addr = MHI_TRE_DATA_GET_PTR(el);
490
491 dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
492 ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
493 if (ret < 0) {
494 dev_err(dev, "Error writing to the channel\n");
495 goto err_exit;
496 }
497
498 buf_left -= tr_len;
499 /*
500 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
501 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
502 * the host so that the host can adjust the packet boundary to next TREs. Else send
503 * the EOT event to the host indicating the packet boundary.
504 */
505 if (buf_left)
506 code = MHI_EV_CC_OVERFLOW;
507 else
508 code = MHI_EV_CC_EOT;
509
510 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
511 if (ret) {
512 dev_err(dev, "Error sending transfer completion event\n");
513 goto err_exit;
514 }
515
516 mhi_ep_ring_inc_index(ring);
517 } while (buf_left);
518
519 mutex_unlock(&mhi_chan->lock);
520
521 return 0;
522
523err_exit:
524 mutex_unlock(&mhi_chan->lock);
525
526 return ret;
527}
528EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
529
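/*
 * Map and cache the host supplied channel, event and command context arrays,
 * then start the command ring. Called once the host has moved MHI to M0.
 */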
530static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
531{
532 size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
533 struct device *dev = &mhi_cntrl->mhi_dev->dev;
534 int ret;
535
536 /* Update the number of event rings (NER) programmed by the host */
537 mhi_ep_mmio_update_ner(mhi_cntrl);
538
539 dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
540 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
541
542 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
543 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
544 cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
545
546 /* Get the channel context base pointer from host */
547 mhi_ep_mmio_get_chc_base(mhi_cntrl);
548
549 /* Allocate and map memory for caching host channel context */
550 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
551 &mhi_cntrl->ch_ctx_cache_phys,
552 (void __iomem **) &mhi_cntrl->ch_ctx_cache,
553 ch_ctx_host_size);
554 if (ret) {
555 dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
556 return ret;
557 }
558
559 /* Get the event context base pointer from host */
560 mhi_ep_mmio_get_erc_base(mhi_cntrl);
561
562 /* Allocate and map memory for caching host event context */
563 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
564 &mhi_cntrl->ev_ctx_cache_phys,
565 (void __iomem **) &mhi_cntrl->ev_ctx_cache,
566 ev_ctx_host_size);
567 if (ret) {
568 dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
569 goto err_ch_ctx;
570 }
571
572 /* Get the command context base pointer from host */
573 mhi_ep_mmio_get_crc_base(mhi_cntrl);
574
575 /* Allocate and map memory for caching host command context */
576 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
577 &mhi_cntrl->cmd_ctx_cache_phys,
578 (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
579 cmd_ctx_host_size);
580 if (ret) {
581 dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
582 goto err_ev_ctx;
583 }
584
585 /* Initialize command ring */
586 ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
587 (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
588 if (ret) {
589 dev_err(dev, "Failed to start the command ring\n");
590 goto err_cmd_ctx;
591 }
592
593 return ret;
594
595err_cmd_ctx:
596 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
597 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
598
599err_ev_ctx:
600 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
601 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
602
603err_ch_ctx:
604 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
605 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
606
607 return ret;
608}
609
610static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
611{
612 size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
613
614 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
615 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
616 cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
617
618 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
619 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
620
621 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
622 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
623
624 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
625 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
626}
627
628static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
629{
630 /*
631 * Doorbell interrupts are enabled when the corresponding channel gets started.
632 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
633 * associated with hw channels always get triggered.
634 */
635 mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
636 mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
637}
638
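/*
 * Wait for the host to set M0, cache the host configuration, move the
 * execution environment to AMSS and enable the control/command doorbell
 * interrupts.
 */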
639static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
640{
641 struct device *dev = &mhi_cntrl->mhi_dev->dev;
642 enum mhi_state state;
643 bool mhi_reset;
644 u32 count = 0;
645 int ret;
646
647 /* Wait for Host to set the M0 state */
648 do {
649 msleep(M0_WAIT_DELAY_MS);
650 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
651 if (mhi_reset) {
652 /* Clear the MHI reset if host is in reset state */
653 mhi_ep_mmio_clear_reset(mhi_cntrl);
654 dev_info(dev, "Detected Host reset while waiting for M0\n");
655 }
656 count++;
657 } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
658
659 if (state != MHI_STATE_M0) {
660 dev_err(dev, "Host failed to enter M0\n");
661 return -ETIMEDOUT;
662 }
663
664 ret = mhi_ep_cache_host_cfg(mhi_cntrl);
665 if (ret) {
666 dev_err(dev, "Failed to cache host config\n");
667 return ret;
668 }
669
670 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
671
672 /* Enable all interrupts now */
673 mhi_ep_enable_int(mhi_cntrl);
674
675 return 0;
676}
677
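/* Bottom half for command ring doorbell interrupts */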
678static void mhi_ep_cmd_ring_worker(struct work_struct *work)
679{
680 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
681 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
682 struct device *dev = &mhi_cntrl->mhi_dev->dev;
683 struct mhi_ring_element *el;
684 int ret;
685
686 /* Update the write offset for the ring */
687 ret = mhi_ep_update_wr_offset(ring);
688 if (ret) {
689 dev_err(dev, "Error updating write offset for ring\n");
690 return;
691 }
692
693 /* Sanity check to make sure there are elements in the ring */
694 if (ring->rd_offset == ring->wr_offset)
695 return;
696
697 /*
 698 * Process command ring elements till the write offset. In case of an error,
 699 * just try to process the next element.
700 */
701 while (ring->rd_offset != ring->wr_offset) {
702 el = &ring->ring_cache[ring->rd_offset];
703
704 ret = mhi_ep_process_cmd_ring(ring, el);
705 if (ret)
706 dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
707
708 mhi_ep_ring_inc_index(ring);
709 }
710}
711
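/* Bottom half for the channel doorbells queued by mhi_ep_queue_channel_db() */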
712static void mhi_ep_ch_ring_worker(struct work_struct *work)
713{
714 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
715 struct device *dev = &mhi_cntrl->mhi_dev->dev;
716 struct mhi_ep_ring_item *itr, *tmp;
717 struct mhi_ring_element *el;
718 struct mhi_ep_ring *ring;
719 struct mhi_ep_chan *chan;
720 unsigned long flags;
721 LIST_HEAD(head);
722 int ret;
723
724 spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
725 list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
726 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
727
728 /* Process each queued channel ring. In case of an error, just process next element. */
729 list_for_each_entry_safe(itr, tmp, &head, node) {
730 list_del(&itr->node);
731 ring = itr->ring;
732
733 chan = &mhi_cntrl->mhi_chan[ring->ch_id];
734 mutex_lock(&chan->lock);
735
736 /*
737 * The ring could've stopped while we waited to grab the (chan->lock), so do
738 * a sanity check before going further.
739 */
740 if (!ring->started) {
741 mutex_unlock(&chan->lock);
742 kfree(itr);
743 continue;
744 }
745
746 /* Update the write offset for the ring */
747 ret = mhi_ep_update_wr_offset(ring);
748 if (ret) {
749 dev_err(dev, "Error updating write offset for ring\n");
 750 mutex_unlock(&chan->lock);
751 kfree(itr);
752 continue;
753 }
754
755 /* Sanity check to make sure there are elements in the ring */
756 if (ring->rd_offset == ring->wr_offset) {
 757 mutex_unlock(&chan->lock);
758 kfree(itr);
759 continue;
760 }
761
762 el = &ring->ring_cache[ring->rd_offset];
 763
764 dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
765 ret = mhi_ep_process_ch_ring(ring, el);
766 if (ret) {
767 dev_err(dev, "Error processing ring for channel (%u): %d\n",
768 ring->ch_id, ret);
769 mutex_unlock(&chan->lock);
770 kfree(itr);
771 continue;
772 }
773
774 mutex_unlock(&chan->lock);
775 kfree(itr);
776 }
777}
778
779static void mhi_ep_state_worker(struct work_struct *work)
780{
781 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
782 struct device *dev = &mhi_cntrl->mhi_dev->dev;
783 struct mhi_ep_state_transition *itr, *tmp;
784 unsigned long flags;
785 LIST_HEAD(head);
786 int ret;
787
788 spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
789 list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
790 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
791
792 list_for_each_entry_safe(itr, tmp, &head, node) {
793 list_del(&itr->node);
794 dev_dbg(dev, "Handling MHI state transition to %s\n",
795 mhi_state_str(itr->state));
796
797 switch (itr->state) {
798 case MHI_STATE_M0:
799 ret = mhi_ep_set_m0_state(mhi_cntrl);
800 if (ret)
801 dev_err(dev, "Failed to transition to M0 state\n");
802 break;
803 case MHI_STATE_M3:
804 ret = mhi_ep_set_m3_state(mhi_cntrl);
805 if (ret)
806 dev_err(dev, "Failed to transition to M3 state\n");
807 break;
808 default:
809 dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
810 break;
811 }
812 kfree(itr);
813 }
814}
815
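/*
 * Queue a ring item for every channel doorbell bit set in ch_int and kick the
 * channel ring worker to process them.
 */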
816static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
817 u32 ch_idx)
818{
819 struct mhi_ep_ring_item *item;
820 struct mhi_ep_ring *ring;
821 bool work = !!ch_int;
822 LIST_HEAD(head);
823 u32 i;
824
825 /* First add the ring items to a local list */
826 for_each_set_bit(i, &ch_int, 32) {
827 /* Channel index varies for each register: 0, 32, 64, 96 */
828 u32 ch_id = ch_idx + i;
829
830 ring = &mhi_cntrl->mhi_chan[ch_id].ring;
831 item = kzalloc(sizeof(*item), GFP_ATOMIC);
832 if (!item)
833 return;
834
835 item->ring = ring;
836 list_add_tail(&item->node, &head);
837 }
838
839 /* Now, splice the local list into ch_db_list and queue the work item */
840 if (work) {
841 spin_lock(&mhi_cntrl->list_lock);
842 list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
843 spin_unlock(&mhi_cntrl->list_lock);
844
845 queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
846 }
847}
848
849/*
 850 * Channel interrupt statuses are contained in 4 registers, each of 32-bit length.
 851 * For checking all interrupts, we need to loop through each register and then
852 * check for bits set.
853 */
854static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
855{
856 u32 ch_int, ch_idx, i;
857
858 /* Bail out if there is no channel doorbell interrupt */
859 if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
860 return;
861
862 for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
863 ch_idx = i * MHI_MASK_CH_LEN;
864
865 /* Only process channel interrupt if the mask is enabled */
866 ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
867 if (ch_int) {
868 mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
869 mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
870 mhi_cntrl->chdb[i].status);
871 }
872 }
873}
874
875static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
876 enum mhi_state state)
877{
878 struct mhi_ep_state_transition *item;
879
880 item = kzalloc(sizeof(*item), GFP_ATOMIC);
881 if (!item)
882 return;
883
884 item->state = state;
885 spin_lock(&mhi_cntrl->list_lock);
886 list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
887 spin_unlock(&mhi_cntrl->list_lock);
888
889 queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
890}
891
892/*
893 * Interrupt handler that services interrupts raised by the host writing to
894 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
895 * channel interrupts.
896 */
897static irqreturn_t mhi_ep_irq(int irq, void *data)
898{
899 struct mhi_ep_cntrl *mhi_cntrl = data;
900 struct device *dev = &mhi_cntrl->mhi_dev->dev;
901 enum mhi_state state;
902 u32 int_value;
 903 bool mhi_reset;
904
905 /* Acknowledge the ctrl interrupt */
906 int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
907 mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
908
909 /* Check for ctrl interrupt */
910 if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
911 dev_dbg(dev, "Processing ctrl interrupt\n");
912 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
913 if (mhi_reset) {
914 dev_info(dev, "Host triggered MHI reset!\n");
915 disable_irq_nosync(mhi_cntrl->irq);
916 schedule_work(&mhi_cntrl->reset_work);
917 return IRQ_HANDLED;
918 }
919
920 mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
921 }
922
923 /* Check for command doorbell interrupt */
 924 if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
 925 dev_dbg(dev, "Processing command doorbell interrupt\n");
926 queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
927 }
928
929 /* Check for channel interrupts */
930 mhi_ep_check_channel_interrupt(mhi_cntrl);
931
932 return IRQ_HANDLED;
933}
934
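/*
 * Tear down all channel, event and command rings, notify the client drivers
 * of the disconnect and release the cached host context. Used on power down.
 */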
935static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
936{
937 struct mhi_ep_ring *ch_ring, *ev_ring;
938 struct mhi_result result = {};
939 struct mhi_ep_chan *mhi_chan;
940 int i;
941
942 /* Stop all the channels */
943 for (i = 0; i < mhi_cntrl->max_chan; i++) {
944 mhi_chan = &mhi_cntrl->mhi_chan[i];
945 if (!mhi_chan->ring.started)
946 continue;
947
948 mutex_lock(&mhi_chan->lock);
949 /* Send channel disconnect status to client drivers */
950 if (mhi_chan->xfer_cb) {
951 result.transaction_status = -ENOTCONN;
952 result.bytes_xferd = 0;
953 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
954 }
955
956 mhi_chan->state = MHI_CH_STATE_DISABLED;
957 mutex_unlock(&mhi_chan->lock);
958 }
959
960 flush_workqueue(mhi_cntrl->wq);
961
962 /* Destroy devices associated with all channels */
963 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
964
965 /* Stop and reset the transfer rings */
966 for (i = 0; i < mhi_cntrl->max_chan; i++) {
967 mhi_chan = &mhi_cntrl->mhi_chan[i];
968 if (!mhi_chan->ring.started)
969 continue;
970
971 ch_ring = &mhi_cntrl->mhi_chan[i].ring;
972 mutex_lock(&mhi_chan->lock);
973 mhi_ep_ring_reset(mhi_cntrl, ch_ring);
974 mutex_unlock(&mhi_chan->lock);
975 }
976
977 /* Stop and reset the event rings */
978 for (i = 0; i < mhi_cntrl->event_rings; i++) {
979 ev_ring = &mhi_cntrl->mhi_event[i].ring;
980 if (!ev_ring->started)
981 continue;
982
983 mutex_lock(&mhi_cntrl->event_lock);
984 mhi_ep_ring_reset(mhi_cntrl, ev_ring);
985 mutex_unlock(&mhi_cntrl->event_lock);
986 }
987
988 /* Stop and reset the command ring */
989 mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
990
991 mhi_ep_free_host_cfg(mhi_cntrl);
992 mhi_ep_mmio_mask_interrupts(mhi_cntrl);
993
994 mhi_cntrl->enabled = false;
995}
996
997static void mhi_ep_reset_worker(struct work_struct *work)
998{
999 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
 1000 enum mhi_state cur_state;
 1001
 1002 mhi_ep_power_down(mhi_cntrl);
 1003
1004 mutex_lock(&mhi_cntrl->state_lock);
1005
1006 /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
1007 mhi_ep_mmio_reset(mhi_cntrl);
1008 cur_state = mhi_cntrl->mhi_state;
1009
1010 /*
1011 * Only proceed further if the reset is due to SYS_ERR. The host will
1012 * issue reset during shutdown also and we don't need to do re-init in
1013 * that case.
1014 */
1015 if (cur_state == MHI_STATE_SYS_ERR)
1016 mhi_ep_power_up(mhi_cntrl);
1017
1018 mutex_unlock(&mhi_cntrl->state_lock);
1019}
1020
1021/*
1022 * We don't need to do anything special other than setting the MHI SYS_ERR
1023 * state. The host will reset all contexts and issue MHI RESET so that we
1024 * could also recover from error state.
1025 */
1026void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
1027{
1028 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1029 int ret;
1030
1031 ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1032 if (ret)
1033 return;
1034
1035 /* Signal host that the device went to SYS_ERR state */
1036 ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
1037 if (ret)
1038 dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
1039}
1040
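/*
 * Bring up the MHI endpoint stack: initialize MMIO and rings, signal READY to
 * the host and complete the M0 handshake via mhi_ep_enable().
 */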
1041int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
1042{
1043 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1044 int ret, i;
1045
1046 /*
1047 * Mask all interrupts until the state machine is ready. Interrupts will
1048 * be enabled later with mhi_ep_enable().
1049 */
1050 mhi_ep_mmio_mask_interrupts(mhi_cntrl);
1051 mhi_ep_mmio_init(mhi_cntrl);
1052
1053 mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
1054 GFP_KERNEL);
1055 if (!mhi_cntrl->mhi_event)
1056 return -ENOMEM;
1057
1058 /* Initialize command, channel and event rings */
1059 mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
1060 for (i = 0; i < mhi_cntrl->max_chan; i++)
1061 mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
1062 for (i = 0; i < mhi_cntrl->event_rings; i++)
1063 mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
1064
1065 mhi_cntrl->mhi_state = MHI_STATE_RESET;
1066
1067 /* Set AMSS EE before signaling ready state */
1068 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
1069
1070 /* All set, notify the host that we are ready */
1071 ret = mhi_ep_set_ready_state(mhi_cntrl);
1072 if (ret)
1073 goto err_free_event;
1074
1075 dev_dbg(dev, "READY state notification sent to the host\n");
1076
1077 ret = mhi_ep_enable(mhi_cntrl);
1078 if (ret) {
1079 dev_err(dev, "Failed to enable MHI endpoint\n");
1080 goto err_free_event;
1081 }
1082
1083 enable_irq(mhi_cntrl->irq);
1084 mhi_cntrl->enabled = true;
1085
1086 return 0;
1087
1088err_free_event:
1089 kfree(mhi_cntrl->mhi_event);
1090
1091 return ret;
1092}
1093EXPORT_SYMBOL_GPL(mhi_ep_power_up);
1094
1095void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
1096{
 1097 if (mhi_cntrl->enabled) {
 1098 mhi_ep_abort_transfer(mhi_cntrl);
1099 kfree(mhi_cntrl->mhi_event);
1100 disable_irq(mhi_cntrl->irq);
1101 }
1102}
1103EXPORT_SYMBOL_GPL(mhi_ep_power_down);
1104
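/* Move every channel that is currently RUNNING to the SUSPENDED state */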
1105void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
1106{
1107 struct mhi_ep_chan *mhi_chan;
1108 u32 tmp;
1109 int i;
1110
1111 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1112 mhi_chan = &mhi_cntrl->mhi_chan[i];
1113
1114 if (!mhi_chan->mhi_dev)
1115 continue;
1116
1117 mutex_lock(&mhi_chan->lock);
1118 /* Skip if the channel is not currently running */
1119 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
1120 if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
1121 mutex_unlock(&mhi_chan->lock);
1122 continue;
1123 }
1124
1125 dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
1126 /* Set channel state to SUSPENDED */
 1127 mhi_chan->state = MHI_CH_STATE_SUSPENDED;
1128 tmp &= ~CHAN_CTX_CHSTATE_MASK;
1129 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
1130 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
1131 mutex_unlock(&mhi_chan->lock);
1132 }
1133}
1134
1135void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
1136{
1137 struct mhi_ep_chan *mhi_chan;
1138 u32 tmp;
1139 int i;
1140
1141 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1142 mhi_chan = &mhi_cntrl->mhi_chan[i];
1143
1144 if (!mhi_chan->mhi_dev)
1145 continue;
1146
1147 mutex_lock(&mhi_chan->lock);
1148 /* Skip if the channel is not currently suspended */
1149 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
1150 if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
1151 mutex_unlock(&mhi_chan->lock);
1152 continue;
1153 }
1154
1155 dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
1156 /* Set channel state to RUNNING */
 1157 mhi_chan->state = MHI_CH_STATE_RUNNING;
1158 tmp &= ~CHAN_CTX_CHSTATE_MASK;
1159 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
1160 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
1161 mutex_unlock(&mhi_chan->lock);
1162 }
1163}
1164
1165static void mhi_ep_release_device(struct device *dev)
1166{
1167 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1168
1169 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1170 mhi_dev->mhi_cntrl->mhi_dev = NULL;
1171
1172 /*
1173 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
1174 * devices for the channels will only get created in mhi_ep_create_device()
1175 * if the mhi_dev associated with it is NULL.
1176 */
1177 if (mhi_dev->ul_chan)
1178 mhi_dev->ul_chan->mhi_dev = NULL;
1179
1180 if (mhi_dev->dl_chan)
1181 mhi_dev->dl_chan->mhi_dev = NULL;
1182
1183 kfree(mhi_dev);
1184}
1185
1186static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
1187 enum mhi_device_type dev_type)
1188{
1189 struct mhi_ep_device *mhi_dev;
1190 struct device *dev;
1191
1192 mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
1193 if (!mhi_dev)
1194 return ERR_PTR(-ENOMEM);
1195
1196 dev = &mhi_dev->dev;
1197 device_initialize(dev);
1198 dev->bus = &mhi_ep_bus_type;
1199 dev->release = mhi_ep_release_device;
1200
1201 /* Controller device is always allocated first */
1202 if (dev_type == MHI_DEVICE_CONTROLLER)
1203 /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
1204 dev->parent = mhi_cntrl->cntrl_dev;
1205 else
1206 /* for MHI client devices, parent is the MHI controller device */
1207 dev->parent = &mhi_cntrl->mhi_dev->dev;
1208
1209 mhi_dev->mhi_cntrl = mhi_cntrl;
1210 mhi_dev->dev_type = dev_type;
1211
1212 return mhi_dev;
1213}
1214
1215/*
1216 * MHI channels are always defined in pairs with UL as the even numbered
1217 * channel and DL as odd numbered one. This function gets UL channel (primary)
 1218 * as the ch_id and looks at the next entry in the channel list for
1219 * the corresponding DL channel (secondary).
1220 */
1221static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
1222{
1223 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
1224 struct device *dev = mhi_cntrl->cntrl_dev;
1225 struct mhi_ep_device *mhi_dev;
1226 int ret;
1227
1228 /* Check if the channel name is same for both UL and DL */
1229 if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
1230 dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
1231 mhi_chan->name, mhi_chan[1].name);
1232 return -EINVAL;
1233 }
1234
1235 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
1236 if (IS_ERR(mhi_dev))
1237 return PTR_ERR(mhi_dev);
1238
1239 /* Configure primary channel */
1240 mhi_dev->ul_chan = mhi_chan;
1241 get_device(&mhi_dev->dev);
1242 mhi_chan->mhi_dev = mhi_dev;
1243
1244 /* Configure secondary channel as well */
1245 mhi_chan++;
1246 mhi_dev->dl_chan = mhi_chan;
1247 get_device(&mhi_dev->dev);
1248 mhi_chan->mhi_dev = mhi_dev;
1249
1250 /* Channel name is same for both UL and DL */
1251 mhi_dev->name = mhi_chan->name;
 1252 ret = dev_set_name(&mhi_dev->dev, "%s_%s",
1253 dev_name(&mhi_cntrl->mhi_dev->dev),
1254 mhi_dev->name);
1255 if (ret) {
1256 put_device(&mhi_dev->dev);
1257 return ret;
1258 }
1259
1260 ret = device_add(&mhi_dev->dev);
1261 if (ret)
1262 put_device(&mhi_dev->dev);
1263
1264 return ret;
1265}
1266
1267static int mhi_ep_destroy_device(struct device *dev, void *data)
1268{
1269 struct mhi_ep_device *mhi_dev;
1270 struct mhi_ep_cntrl *mhi_cntrl;
1271 struct mhi_ep_chan *ul_chan, *dl_chan;
1272
1273 if (dev->bus != &mhi_ep_bus_type)
1274 return 0;
1275
1276 mhi_dev = to_mhi_ep_device(dev);
1277 mhi_cntrl = mhi_dev->mhi_cntrl;
1278
1279 /* Only destroy devices created for channels */
1280 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1281 return 0;
1282
1283 ul_chan = mhi_dev->ul_chan;
1284 dl_chan = mhi_dev->dl_chan;
1285
1286 if (ul_chan)
1287 put_device(&ul_chan->mhi_dev->dev);
1288
1289 if (dl_chan)
1290 put_device(&dl_chan->mhi_dev->dev);
1291
1292 dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
1293 mhi_dev->name);
1294
1295 /* Notify the client and remove the device from MHI bus */
1296 device_del(dev);
1297 put_device(dev);
1298
1299 return 0;
1300}
1301
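/* Populate the controller's channel table from the static channel configuration */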
1302static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
1303 const struct mhi_ep_cntrl_config *config)
1304{
1305 const struct mhi_ep_channel_config *ch_cfg;
1306 struct device *dev = mhi_cntrl->cntrl_dev;
1307 u32 chan, i;
1308 int ret = -EINVAL;
1309
1310 mhi_cntrl->max_chan = config->max_channels;
1311
1312 /*
1313 * Allocate max_channels supported by the MHI endpoint and populate
1314 * only the defined channels
1315 */
1316 mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
1317 GFP_KERNEL);
1318 if (!mhi_cntrl->mhi_chan)
1319 return -ENOMEM;
1320
1321 for (i = 0; i < config->num_channels; i++) {
1322 struct mhi_ep_chan *mhi_chan;
1323
1324 ch_cfg = &config->ch_cfg[i];
1325
1326 chan = ch_cfg->num;
1327 if (chan >= mhi_cntrl->max_chan) {
1328 dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
1329 chan, mhi_cntrl->max_chan);
1330 goto error_chan_cfg;
1331 }
1332
 1333 /* Bi-directional and directionless channels are not supported */
1334 if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
1335 dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
1336 ch_cfg->dir, chan);
1337 goto error_chan_cfg;
1338 }
1339
1340 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1341 mhi_chan->name = ch_cfg->name;
1342 mhi_chan->chan = chan;
1343 mhi_chan->dir = ch_cfg->dir;
1344 mutex_init(&mhi_chan->lock);
1345 }
1346
1347 return 0;
1348
1349error_chan_cfg:
1350 kfree(mhi_cntrl->mhi_chan);
1351
1352 return ret;
1353}
1354
1355/*
1356 * Allocate channel and command rings here. Event rings will be allocated
1357 * in mhi_ep_power_up() as the config comes from the host.
1358 */
1359int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
1360 const struct mhi_ep_cntrl_config *config)
1361{
1362 struct mhi_ep_device *mhi_dev;
1363 int ret;
1364
 1365 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
1366 return -EINVAL;
1367
1368 ret = mhi_ep_chan_init(mhi_cntrl, config);
1369 if (ret)
1370 return ret;
1371
1372 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
1373 if (!mhi_cntrl->mhi_cmd) {
1374 ret = -ENOMEM;
1375 goto err_free_ch;
1376 }
1377
 1378 INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
 1379 INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
 1380 INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
 1381 INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
1382
1383 mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
1384 if (!mhi_cntrl->wq) {
1385 ret = -ENOMEM;
1386 goto err_free_cmd;
1387 }
1388
1389 INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
 1390 INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
 1391 spin_lock_init(&mhi_cntrl->list_lock);
 1392 mutex_init(&mhi_cntrl->state_lock);
1393 mutex_init(&mhi_cntrl->event_lock);
1394
1395 /* Set MHI version and AMSS EE before enumeration */
1396 mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
1397 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
1398
1399 /* Set controller index */
1400 ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
1401 if (ret < 0)
 1402 goto err_destroy_wq;
1403
1404 mhi_cntrl->index = ret;
1405
1406 irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
1407 ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
1408 "doorbell_irq", mhi_cntrl);
1409 if (ret) {
1410 dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
1411 goto err_ida_free;
1412 }
1413
1414 /* Allocate the controller device */
1415 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
1416 if (IS_ERR(mhi_dev)) {
1417 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
1418 ret = PTR_ERR(mhi_dev);
 1419 goto err_free_irq;
1420 }
1421
1422 ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
1423 if (ret)
1424 goto err_put_dev;
1425
1426 mhi_dev->name = dev_name(&mhi_dev->dev);
1427 mhi_cntrl->mhi_dev = mhi_dev;
1428
1429 ret = device_add(&mhi_dev->dev);
1430 if (ret)
1431 goto err_put_dev;
1432
1433 dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
1434
1435 return 0;
1436
1437err_put_dev:
1438 put_device(&mhi_dev->dev);
1439err_free_irq:
1440 free_irq(mhi_cntrl->irq, mhi_cntrl);
1441err_ida_free:
1442 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
1443err_destroy_wq:
1444 destroy_workqueue(mhi_cntrl->wq);
1445err_free_cmd:
1446 kfree(mhi_cntrl->mhi_cmd);
1447err_free_ch:
1448 kfree(mhi_cntrl->mhi_chan);
1449
1450 return ret;
1451}
1452EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
1453
1454/*
1455 * It is expected that the controller drivers will power down the MHI EP stack
1456 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
1457 */
1458void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
1459{
1460 struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
1461
1462 destroy_workqueue(mhi_cntrl->wq);
1463
1464 free_irq(mhi_cntrl->irq, mhi_cntrl);
1465
1466 kfree(mhi_cntrl->mhi_cmd);
1467 kfree(mhi_cntrl->mhi_chan);
1468
1469 device_del(&mhi_dev->dev);
1470 put_device(&mhi_dev->dev);
1471
1472 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
1473}
1474EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
1475
1476static int mhi_ep_driver_probe(struct device *dev)
1477{
1478 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1479 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
1480 struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
1481 struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
1482
1483 ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
1484 dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
1485
1486 return mhi_drv->probe(mhi_dev, mhi_dev->id);
1487}
1488
1489static int mhi_ep_driver_remove(struct device *dev)
1490{
1491 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1492 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
1493 struct mhi_result result = {};
1494 struct mhi_ep_chan *mhi_chan;
1495 int dir;
1496
1497 /* Skip if it is a controller device */
1498 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1499 return 0;
1500
1501 /* Disconnect the channels associated with the driver */
1502 for (dir = 0; dir < 2; dir++) {
1503 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1504
1505 if (!mhi_chan)
1506 continue;
1507
1508 mutex_lock(&mhi_chan->lock);
1509 /* Send channel disconnect status to the client driver */
1510 if (mhi_chan->xfer_cb) {
1511 result.transaction_status = -ENOTCONN;
1512 result.bytes_xferd = 0;
1513 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1514 }
1515
1516 mhi_chan->state = MHI_CH_STATE_DISABLED;
1517 mhi_chan->xfer_cb = NULL;
1518 mutex_unlock(&mhi_chan->lock);
1519 }
1520
1521 /* Remove the client driver now */
1522 mhi_drv->remove(mhi_dev);
1523
1524 return 0;
1525}
1526
1527int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
1528{
1529 struct device_driver *driver = &mhi_drv->driver;
1530
1531 if (!mhi_drv->probe || !mhi_drv->remove)
1532 return -EINVAL;
1533
1534 /* Client drivers should have callbacks defined for both channels */
1535 if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
1536 return -EINVAL;
1537
1538 driver->bus = &mhi_ep_bus_type;
1539 driver->owner = owner;
1540 driver->probe = mhi_ep_driver_probe;
1541 driver->remove = mhi_ep_driver_remove;
1542
1543 return driver_register(driver);
1544}
1545EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
1546
1547void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
1548{
1549 driver_unregister(&mhi_drv->driver);
1550}
1551EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
1552
1553static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
1554{
1555 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1556
1557 return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
1558 mhi_dev->name);
1559}
1560
1561static int mhi_ep_match(struct device *dev, struct device_driver *drv)
1562{
1563 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1564 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
1565 const struct mhi_device_id *id;
1566
1567 /*
1568 * If the device is a controller type then there is no client driver
1569 * associated with it
1570 */
1571 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1572 return 0;
1573
1574 for (id = mhi_drv->id_table; id->chan[0]; id++)
1575 if (!strcmp(mhi_dev->name, id->chan)) {
1576 mhi_dev->id = id;
1577 return 1;
1578 }
1579
1580 return 0;
1581};
1582
1583struct bus_type mhi_ep_bus_type = {
1584 .name = "mhi_ep",
1585 .dev_name = "mhi_ep",
1586 .match = mhi_ep_match,
 1587 .uevent = mhi_ep_uevent,
1588};
1589
1590static int __init mhi_ep_init(void)
1591{
1592 return bus_register(&mhi_ep_bus_type);
1593}
1594
1595static void __exit mhi_ep_exit(void)
1596{
1597 bus_unregister(&mhi_ep_bus_type);
1598}
1599
1600postcore_initcall(mhi_ep_init);
1601module_exit(mhi_ep_exit);
1602
1603MODULE_LICENSE("GPL v2");
1604MODULE_DESCRIPTION("MHI Bus Endpoint stack");
1605MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");