// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "dma.h"
#include "registers.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit. If it is set, then bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF
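
/*
 * Illustrative decoding of individual ver.3 tag map entries, using the
 * DCA3_* encodings above as implemented by ioat_dca_get_tag() below
 * (example byte values chosen for this comment only):
 *   0x43 = BIT_TO_SEL | 3 -> tag bit follows APIC ID bit 3
 *   0x82 = BIT_TO_INV | 2 -> tag bit is the inverse of APIC ID bit 2
 *   0x01 = LITERAL_VAL    -> tag bit is the literal value 1
 *   0x00                  -> tag bit is the literal value 0
 */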

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82

/* verify if tag map matches expected values */
static inline int dca2_tag_map_valid(u8 *tag_map)
{
	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
}

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device. Software needs direct support for their tag mappings.
 */

#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}
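
/*
 * Illustrative packing (values chosen for this comment only): bus 0x03 and
 * devfn 0x22 (device 4, function 2) yield a requester id of 0x0322.
 */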

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}

struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			max_requesters;
	int			requester_count;
	u8			tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	req_slots[];
};
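
/*
 * Note: req_slots[] is a flexible array member; the provider's private data
 * is sized at allocation time in ioat_dca_init() via
 * struct_size(ioatdca, req_slots, slots), one ioat_dca_slot per hardware
 * requester slot found by ioat_dca_count_dca_slots().
 */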

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}
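
/*
 * dca_ops callback: claim the next free requester slot for the device, record
 * its PCI requester id, and program that id (with IOAT_DCA_GREQID_VALID) into
 * the device's global requester id table. Returns the slot index, -ENODEV if
 * the device is not PCI or no slot is free, or -EFAULT if the bookkeeping is
 * inconsistent.
 */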
static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
				readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
				readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}
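
/*
 * Worked example (hypothetical values, for illustration only): with
 * apic_id = 0x05 (binary 101) and tag_map = { 0x43, 0x82, 0x01, 0, 0, 0, 0, 0 }
 * the loop above produces:
 *   tag bit 0: 0x43 selects APIC ID bit 3        -> 0
 *   tag bit 1: 0x82 inverts APIC ID bit 2        -> 0
 *   tag bit 2: 0x01 is the literal value 1       -> 1
 *   tag bits 3-7: 0x00 entries are the literal 0 -> 0
 * giving tag = 0x04.
 */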

static const struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

static inline int dca3_tag_map_invalid(u8 *tag_map)
{
	/*
	 * If the tag map is not programmed by the BIOS the default is:
	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
	 *
	 * This is an invalid map and will result in only 2 possible tags,
	 * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
	 * this entire definition is invalid.
	 */
	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
		(tag_map[1] == DCA_TAG_MAP_VALID) &&
		(tag_map[2] == DCA_TAG_MAP_VALID) &&
		(tag_map[3] == DCA_TAG_MAP_VALID) &&
		(tag_map[4] == DCA_TAG_MAP_VALID));
}
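
/*
 * Probe and set up the DCA provider for an I/OAT device: verify DCA is
 * enabled, locate the DCA register block, count requester slots, enable the
 * prefetch and memory-write hints if the BIOS left them off, copy and
 * validate the APIC-to-tag map, then register with the dca core. Returns the
 * provider, or NULL if DCA cannot be used on this device.
 */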
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat_dca_ops,
				 struct_size(ioatdca, req_slots, slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some BIOSes might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
			     dev_driver_string(&pdev->dev),
			     dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}