// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"
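
/*
 * Mark every workqueue on the device disabled. Callers must hold
 * idxd->dev_lock, which the lockdep assertion below checks.
 */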
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->state = IDXD_WQ_DISABLED;
	}
}
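
/*
 * Work item scheduled from the misc interrupt thread when the device
 * halts with a software-recoverable reset type: reset the device,
 * reapply the configuration, and re-enable any previously enabled wqs.
 */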
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0)
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
		}
	}

	return;

out:
	idxd_device_wqs_clear_state(idxd);
}
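
/*
 * Hard interrupt handler shared by all vectors: mask the vector and
 * defer all real work to the corresponding threaded handler.
 */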
irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}
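
/*
 * Threaded handler for the misc vector: decode the interrupt cause
 * register, report software errors, complete device commands, and kick
 * off recovery if the device has halted.
 */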
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 cause, val = 0;
	int i;
	bool err = false;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause & IDXD_INTC_ERR) {
		/* Snapshot and acknowledge the software error record. */
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));
		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			/* No wq index in the record; wake every user wq. */
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}
	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}
	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}
	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}
	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (!err)
		goto out;
	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on a system workqueue in order to allow interrupts
			 * for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
		}
	}
out:
	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}
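
/*
 * Walk the lockless pending list: complete any descriptor hardware has
 * finished, and move the rest onto work_list for the next pass. Returns
 * the number of descriptors still waiting on hardware.
 */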
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			list_add_tail(&desc->list, &irq_entry->work_list);
			queued++;
		}
	}

	return queued;
}
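
/*
 * Walk the ordinary work_list, completing and freeing any descriptor
 * whose completion record has been written. Returns the number of
 * descriptors left on the list.
 */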
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed)
{
	struct list_head *node, *next;
	int queued = 0;

	*processed = 0;
	if (list_empty(&irq_entry->work_list))
		return 0;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		if (desc->completion->status) {
			list_del(&desc->list);
			/* process and callback */
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			queued++;
		}
	}

	return queued;
}
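
/*
 * Drain both descriptor lists until neither has work left, per the
 * scheme described in the comment below. Returns the total number of
 * descriptors completed.
 */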
static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;
	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending
	 * them to the workqueue. It's a lockless singly linked list. The
	 * work_list is the common Linux doubly linked list. We are in a
	 * scenario of multiple producers and a single consumer. The
	 * producers are all the kernel submitters of descriptors, and the
	 * consumer is the kernel irq handler thread for the msix vector
	 * when using threaded irq. To work with the restrictions of llist
	 * to remain lockless, we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed);
		total += processed;
	} while (rc != 0);

	return total;
}
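
/*
 * Threaded handler for a wq completion vector: reap completed
 * descriptors and unmask the vector again.
 */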
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}