// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 * [Diagram: the AP and Modem execution environments each connect through
 *  the GSI layer to the IPA core, which transfers data between them.]
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data, also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big-endian CPU).
 */

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

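/* Worked check of the moderation delay above, for reference: at the
 * nominal 32KHz tick rate, 32 ticks is 32 / 32000 s = 1 ms, matching
 * the "1ms" note in the definition.
 */
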
#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes).  This determines the amount of prefetch
 *	performed by the hardware.  We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

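/* Illustrative values for the fields above, assuming the GPI programming
 * done in gsi_channel_program() below: with 16-byte ring elements and a
 * TLV FIFO holding (say) 8 TREs, max_outstanding_tre would be
 * 8 * 16 = 128 bytes and outstanding_threshold would be 2 * 16 = 32
 * bytes.  The FIFO depth here is a made-up example, not a hardware value.
 */
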
/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* The inter-EE registers are in the non-adjusted address range */
	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Event ring commands are performed one at a time.  Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command.  Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time.  Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command.  Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

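/* Worked example of the accessors above, with made-up numbers: for a
 * ring whose DMA base address is 0x1000 and whose elements are 16 bytes,
 * gsi_ring_addr(ring, 3) yields 0x1000 + 3 * 16 = 0x1030, and
 * gsi_ring_index(ring, 0x1030) recovers index 3.  Note gsi_ring_addr()
 * does not apply the modulo; callers pass an index already reduced
 * modulo the ring count.
 */
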
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

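/* Worked example of the "one less" doorbell arithmetic above: for an
 * 8-entry ring an index of 0 refers back to slot 7, because the u32
 * subtraction underflows (0 - 1 == 0xffffffff) and 0xffffffff % 8 == 7.
 * The value written is therefore the DMA address of slot 7.
 */
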
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = ev_r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = lower_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
	val = upper_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data; the MSI address high and low registers are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	/* All channels set DB_IN_BYTES */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= DB_IN_BYTES;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
		GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	if (!start)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, true);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	if (!stop)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, stop);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, start);
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel: Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

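/* A sketch of the delta-counting pattern used above, with hypothetical
 * values: if byte_count is 5000 and queued_byte_count is 3000, then 2000
 * bytes were queued since the last call.  Because the counters are
 * unsigned, the subtraction stays correct even after the running
 * counters wrap around.
 */
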
/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel: Channel that has completed transmitting packets
 * @trans: Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq: Interrupt number (ignored)
 * @dev_id: GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ.  Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0)
		return ret ? : -EINVAL;
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

/* Inverse of gsi_irq_init() */
static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring: Event ring associated with channel that received packets
 * @index: Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 event_avail;
	u32 old_index;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The size is a power of 2, so we can check alignment using just
	 * the bottom 32 bits for a DMA address of any size.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && lower_32_bits(addr) % size) {
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%x-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

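/* Worked example of the alignment check in gsi_ring_alloc() above, with
 * made-up numbers: a ring of 64 16-byte elements has size 0x400, so a
 * DMA address of 0x80000800 is acceptably aligned (0x800 % 0x400 == 0)
 * while 0x80000200 is not (0x200 % 0x400 != 0).  Only the low 32 bits
 * of the address need to be examined, since the required alignment is
 * itself a 32-bit quantity.
 */
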
/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return NULL;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);

	return gsi_channel_trans_complete(channel);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel: Channel to be polled
 *
 * Return: Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans)	/* List is empty; see if there's more to do */
		trans = gsi_channel_update(channel);

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi: NAPI structure for the channel
 * @budget: Budget supplied by NAPI core
 *
 * Return: Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}

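/* Worked example with an illustrative evt_ring_max of 20: the first
 * GENMASK() marks event ids 20 and above unavailable, and the second
 * marks the reserved MHI ids 10 through 16.  Ids 0-9 and 17-19 remain
 * allocatable, so the first gsi_evt_ring_id_alloc() call (which uses
 * ffz() to find the lowest clear bit) would return 0.
 */
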
/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!gsi_channel_initialized(channel))
		return 0;

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!gsi_channel_initialized(channel))
		return;

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * teardown), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel) and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!gsi_channel_initialized(channel))
			continue;

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, data->channel.tlv_count,
			data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}

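/* A worked example of the checks above, with hypothetical values (not
 * taken from any real platform configuration):  if tlv_count is 16,
 * the requirement tre_count >= 2 * tlv_count - 1 means tre_count must
 * be at least 31, so the smallest valid power-of-2 ring size is 32.
 * That leaves gsi_channel_tre_max() = 32 - (16 - 1) = 17, enough for
 * one maximally-sized (16-TRE) transaction to be outstanding.  An
 * endpoint data entry along these lines would pass validation:
 *
 *	static const struct ipa_gsi_endpoint_data example = {
 *		.ee_id		= GSI_EE_AP,
 *		.channel_id	= 0,
 *		.channel	= {
 *			.tlv_count	= 16,
 *			.tre_count	= 32,
 *			.event_count	= 32,
 *		},
 *	};
 */
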
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

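/* To illustrate the clamp at the top of gsi_channel_init_one() with
 * hypothetical numbers:  if platform data supplied tre_count = 512 but
 * event_count = 256, the channel would be limited to 256 TREs, because
 * in the worst case every outstanding TRE needs its own event ring
 * slot for completion reporting.
 */
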
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		/* Note: index with data[i].channel_id, not data->channel_id */
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}

/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels.  Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_iounmap;

	mutex_init(&gsi->mutex);

	return 0;

err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}

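/* To see why the adjusted pointer computed in gsi_init() works, take
 * some illustrative numbers (not the real GSI_EE_REG_ADJUST value):
 * suppose adjust = 0x1000 and a register's defined offset is 0x1038.
 * Then:
 *
 *	gsi->virt + 0x1038 == gsi->virt_raw + (0x1038 - 0x1000)
 *
 * so an ioread32() at the defined offset lands 0x38 bytes into the
 * mapped region, where IPA v4.5+ hardware actually places the
 * register.  For earlier hardware adjust is 0 and the two pointers
 * coincide.
 */
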
/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	iounmap(gsi->virt_raw);
}

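/* A minimal sketch of how a caller pairs these entry points (error
 * handling omitted; the functions are the real ones above, but this
 * calling sequence is illustrative):
 *
 *	ret = gsi_init(gsi, pdev, version, count, data);  // at probe
 *	...
 *	ret = gsi_setup(gsi);	// once GSI firmware is up
 *	...			// channels carry traffic
 *	gsi_teardown(gsi);	// inverse of gsi_setup()
 *	...
 *	gsi_exit(gsi);		// inverse of gsi_init(), at remove
 */
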
/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

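/* Worked example with hypothetical ring sizes:  for tre_count = 256
 * and tlv_count = 16 this returns 256 - 15 = 241.  When the pool code
 * adds the same slack back (241 + 15 = 256), the pool stays within the
 * power-of-2 ring size.  Using the hardware limit of 255 instead would
 * give a padded pool of 255 + 15 = 270 entries, and an allocation
 * rounded up to the next power of 2 would nearly double the memory
 * used.
 */
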
/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}

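/* Callers use this to bound scatter/gather size per transaction.  A
 * hedged sketch of such a check (illustrative, not code from this
 * driver):  a transmit path might refuse, or first linearize, a socket
 * buffer whose fragments will not fit in one transaction:
 *
 *	if (1 + skb_shinfo(skb)->nr_frags >
 *	    gsi_channel_trans_tre_max(gsi, channel_id))
 *		return -E2BIG;	// or skb_linearize(skb) first
 */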