// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"
18 static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
20 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
22 void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
23 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
24 struct mbox_hdr *tx_hdr, *rx_hdr;
26 tx_hdr = hw_mbase + mbox->tx_start;
27 rx_hdr = hw_mbase + mbox->rx_start;
29 spin_lock(&mdev->mbox_lock);
36 spin_unlock(&mdev->mbox_lock);
38 EXPORT_SYMBOL(otx2_mbox_reset);
40 void otx2_mbox_destroy(struct otx2_mbox *mbox)
42 mbox->reg_base = NULL;
48 EXPORT_SYMBOL(otx2_mbox_destroy);
50 int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
51 void *reg_base, int direction, int ndevs)
53 struct otx2_mbox_dev *mdev;
59 mbox->tx_start = MBOX_DOWN_TX_START;
60 mbox->rx_start = MBOX_DOWN_RX_START;
61 mbox->tx_size = MBOX_DOWN_TX_SIZE;
62 mbox->rx_size = MBOX_DOWN_RX_SIZE;
66 mbox->tx_start = MBOX_DOWN_RX_START;
67 mbox->rx_start = MBOX_DOWN_TX_START;
68 mbox->tx_size = MBOX_DOWN_RX_SIZE;
69 mbox->rx_size = MBOX_DOWN_TX_SIZE;
71 case MBOX_DIR_AFPF_UP:
72 case MBOX_DIR_PFVF_UP:
73 mbox->tx_start = MBOX_UP_TX_START;
74 mbox->rx_start = MBOX_UP_RX_START;
75 mbox->tx_size = MBOX_UP_TX_SIZE;
76 mbox->rx_size = MBOX_UP_RX_SIZE;
78 case MBOX_DIR_PFAF_UP:
79 case MBOX_DIR_VFPF_UP:
80 mbox->tx_start = MBOX_UP_RX_START;
81 mbox->rx_start = MBOX_UP_TX_START;
82 mbox->tx_size = MBOX_UP_RX_SIZE;
83 mbox->rx_size = MBOX_UP_TX_SIZE;
91 case MBOX_DIR_AFPF_UP:
92 mbox->trigger = RVU_AF_AFPF_MBOX0;
96 case MBOX_DIR_PFAF_UP:
97 mbox->trigger = RVU_PF_PFAF_MBOX1;
101 case MBOX_DIR_PFVF_UP:
102 mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
106 case MBOX_DIR_VFPF_UP:
107 mbox->trigger = RVU_VF_VFPF_MBOX1;
114 mbox->reg_base = reg_base;
115 mbox->hwbase = hwbase;
118 mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
120 otx2_mbox_destroy(mbox);
125 for (devid = 0; devid < ndevs; devid++) {
126 mdev = &mbox->dev[devid];
127 mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
128 spin_lock_init(&mdev->mbox_lock);
129 /* Init header to reset value */
130 otx2_mbox_reset(mbox, devid);
135 EXPORT_SYMBOL(otx2_mbox_init);
137 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
139 unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
140 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
141 struct device *sender = &mbox->pdev->dev;
143 while (!time_after(jiffies, timeout)) {
144 if (mdev->num_msgs == mdev->msgs_acked)
146 usleep_range(800, 1000);
148 dev_dbg(sender, "timed out while waiting for rsp\n");
151 EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
153 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
155 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
156 unsigned long timeout = jiffies + 1 * HZ;
158 while (!time_after(jiffies, timeout)) {
159 if (mdev->num_msgs == mdev->msgs_acked)
165 EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
167 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
169 void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
170 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
171 struct mbox_hdr *tx_hdr, *rx_hdr;
173 tx_hdr = hw_mbase + mbox->tx_start;
174 rx_hdr = hw_mbase + mbox->rx_start;
176 /* If bounce buffer is implemented copy mbox messages from
177 * bounce buffer to hw mbox memory.
179 if (mdev->mbase != hw_mbase)
180 memcpy(hw_mbase + mbox->tx_start + msgs_offset,
181 mdev->mbase + mbox->tx_start + msgs_offset,
184 spin_lock(&mdev->mbox_lock);
186 tx_hdr->msg_size = mdev->msg_size;
188 /* Reset header for next messages */
191 mdev->msgs_acked = 0;
193 /* Sync mbox data into memory */
196 /* num_msgs != 0 signals to the peer that the buffer has a number of
197 * messages. So this should be written after writing all the messages
198 * to the shared memory.
200 tx_hdr->num_msgs = mdev->num_msgs;
201 rx_hdr->num_msgs = 0;
202 spin_unlock(&mdev->mbox_lock);
204 /* The interrupt should be fired after num_msgs is written
205 * to the shared memory
207 writeq(1, (void __iomem *)mbox->reg_base +
208 (mbox->trigger | (devid << mbox->tr_shift)));
210 EXPORT_SYMBOL(otx2_mbox_msg_send);
212 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
213 int size, int size_rsp)
215 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
216 struct mbox_msghdr *msghdr = NULL;
218 spin_lock(&mdev->mbox_lock);
219 size = ALIGN(size, MBOX_MSG_ALIGN);
220 size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
221 /* Check if there is space in mailbox */
222 if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
224 if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
227 if (mdev->msg_size == 0)
231 msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
233 /* Clear the whole msg region */
234 memset(msghdr, 0, size);
235 /* Init message header with reset values */
236 msghdr->ver = OTX2_MBOX_VERSION;
237 mdev->msg_size += size;
238 mdev->rsp_size += size_rsp;
239 msghdr->next_msgoff = mdev->msg_size + msgs_offset;
241 spin_unlock(&mdev->mbox_lock);
245 EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
247 struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
248 struct mbox_msghdr *msg)
250 unsigned long imsg = mbox->tx_start + msgs_offset;
251 unsigned long irsp = mbox->rx_start + msgs_offset;
252 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
255 spin_lock(&mdev->mbox_lock);
257 if (mdev->num_msgs != mdev->msgs_acked)
260 for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
261 struct mbox_msghdr *pmsg = mdev->mbase + imsg;
262 struct mbox_msghdr *prsp = mdev->mbase + irsp;
265 if (pmsg->id != prsp->id)
267 spin_unlock(&mdev->mbox_lock);
271 imsg = mbox->tx_start + pmsg->next_msgoff;
272 irsp = mbox->rx_start + prsp->next_msgoff;
276 spin_unlock(&mdev->mbox_lock);
277 return ERR_PTR(-ENODEV);
279 EXPORT_SYMBOL(otx2_mbox_get_rsp);
281 int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
283 unsigned long ireq = mbox->tx_start + msgs_offset;
284 unsigned long irsp = mbox->rx_start + msgs_offset;
285 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
289 spin_lock(&mdev->mbox_lock);
291 if (mdev->num_msgs != mdev->msgs_acked)
294 for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
295 struct mbox_msghdr *preq = mdev->mbase + ireq;
296 struct mbox_msghdr *prsp = mdev->mbase + irsp;
298 if (preq->id != prsp->id)
305 ireq = mbox->tx_start + preq->next_msgoff;
306 irsp = mbox->rx_start + prsp->next_msgoff;
310 spin_unlock(&mdev->mbox_lock);
313 EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
316 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
320 rsp = (struct msg_rsp *)
321 otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
325 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
326 rsp->hdr.rc = MBOX_MSG_INVALID;
327 rsp->hdr.pcifunc = pcifunc;
330 EXPORT_SYMBOL(otx2_reply_invalid_msg);
332 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
334 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
337 spin_lock(&mdev->mbox_lock);
338 ret = mdev->num_msgs != 0;
339 spin_unlock(&mdev->mbox_lock);
343 EXPORT_SYMBOL(otx2_mbox_nonempty);
345 const char *otx2_mbox_id2name(u16 id)
348 #define M(_name, _id, _1, _2, _3) case _id: return # _name;
355 EXPORT_SYMBOL(otx2_mbox_id2name);
357 MODULE_AUTHOR("Marvell International Ltd.");
358 MODULE_LICENSE("GPL v2");