Commit | Line | Data |
---|---|---|
4cdadfd5 DW |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ | |
4faf31b4 | 3 | #include <linux/io-64-nonatomic-lo-hi.h> |
4cdadfd5 | 4 | #include <linux/module.h> |
fae8817a | 5 | #include <linux/sizes.h> |
b39cb105 | 6 | #include <linux/mutex.h> |
30af9729 | 7 | #include <linux/list.h> |
4cdadfd5 DW |
8 | #include <linux/pci.h> |
9 | #include <linux/io.h> | |
5161a55c | 10 | #include "cxlmem.h" |
4cdadfd5 | 11 | #include "pci.h" |
8adaf747 BW |
12 | #include "cxl.h" |
13 | ||
14 | /** | |
21e9f767 | 15 | * DOC: cxl pci |
8adaf747 | 16 | * |
21e9f767 BW |
17 | * This implements the PCI exclusive functionality for a CXL device as it is |
18 | * defined by the Compute Express Link specification. CXL devices may surface | |
ed97afb5 BW |
19 | * certain functionality even if it isn't CXL enabled. While this driver is |
20 | * focused around the PCI specific aspects of a CXL device, it binds to the | |
21 | * specific CXL memory device class code, and therefore the implementation of | |
22 | * cxl_pci is focused around CXL memory devices. | |
8adaf747 BW |
23 | * |
24 | * The driver has several responsibilities, mainly: | |
 * - Create the memX device and register on the CXL bus.
 * - Enumerate the device's register interface and map it.
 * - Register an nvdimm bridge device with cxl_core.
 * - Register a CXL mailbox with cxl_core.
8adaf747 BW |
29 | */ |
30 | ||
/* Doorbell bit set in the MB Control Register means the device owns the mbox */
#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
/* NOTE(review): value is in jiffies (2 * HZ == 2s) despite the _MS suffix — confirm */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
37 | ||
ed97afb5 | 38 | static int cxl_pci_mbox_wait_for_doorbell(struct cxl_mem *cxlm) |
8adaf747 BW |
39 | { |
40 | const unsigned long start = jiffies; | |
41 | unsigned long end = start; | |
42 | ||
43 | while (cxl_doorbell_busy(cxlm)) { | |
44 | end = jiffies; | |
45 | ||
46 | if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { | |
47 | /* Check again in case preempted before timeout test */ | |
48 | if (!cxl_doorbell_busy(cxlm)) | |
49 | break; | |
50 | return -ETIMEDOUT; | |
51 | } | |
52 | cpu_relax(); | |
53 | } | |
54 | ||
99e222a5 | 55 | dev_dbg(cxlm->dev, "Doorbell wait took %dms", |
8adaf747 BW |
56 | jiffies_to_msecs(end) - jiffies_to_msecs(start)); |
57 | return 0; | |
58 | } | |
59 | ||
ed97afb5 | 60 | static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm, |
b64955a9 | 61 | struct cxl_mbox_cmd *mbox_cmd) |
8adaf747 | 62 | { |
99e222a5 | 63 | struct device *dev = cxlm->dev; |
8adaf747 BW |
64 | |
65 | dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", | |
66 | mbox_cmd->opcode, mbox_cmd->size_in); | |
67 | } | |
68 | ||
/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlm->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		/* Should be impossible: cxl_pci_mbox_get() waited for idle */
		dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_pci_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	/*
	 * A non-zero return code is a command-level failure, not a transport
	 * failure: report transport success and let the caller inspect
	 * mbox_cmd->return_code.
	 */
	if (mbox_cmd->return_code != 0) {
		dev_dbg(dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}
184 | ||
/**
 * cxl_pci_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired; on success the mbox_mutex is
 *         held and must be released with cxl_pci_mbox_put(). On any non-zero
 *         return the mutex has already been dropped.
 */
static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = cxlm->dev;
	u64 md_status;
	int rc;

	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
	 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
	 * bit is to allow firmware running on the device to notify the driver
	 * that it's ready to receive commands. It is unclear if the bit needs
	 * to be read for each transaction mailbox, ie. the firmware can switch
	 * it on and off as needed. Second, there is no defined timeout for
	 * mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	/* Cross-check memdev status: both MBOX_IF_READY and device ready */
	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error, this should be a bug report
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}
255 | ||
/**
 * cxl_pci_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held (acquired via
 * cxl_pci_mbox_get()).
 */
static void cxl_pci_mbox_put(struct cxl_mem *cxlm)
{
	mutex_unlock(&cxlm->mbox_mutex);
}
266 | ||
/*
 * Locked mailbox transaction: acquire exclusive access, run the command,
 * release. This is the ->mbox_send operation installed by
 * cxl_pci_setup_mailbox().
 */
static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	int rc = cxl_pci_mbox_get(cxlm);

	if (rc)
		return rc;

	rc = __cxl_pci_mbox_send_cmd(cxlm, cmd);
	cxl_pci_mbox_put(cxlm);

	return rc;
}
280 | ||
ed97afb5 | 281 | static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm) |
8adaf747 | 282 | { |
8ac75dd6 | 283 | const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); |
8adaf747 | 284 | |
b64955a9 | 285 | cxlm->mbox_send = cxl_pci_mbox_send; |
8adaf747 BW |
286 | cxlm->payload_size = |
287 | 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); | |
288 | ||
289 | /* | |
290 | * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register | |
291 | * | |
292 | * If the size is too small, mandatory commands will not work and so | |
293 | * there's no point in going forward. If the size is too large, there's | |
294 | * no harm is soft limiting it. | |
295 | */ | |
296 | cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); | |
297 | if (cxlm->payload_size < 256) { | |
99e222a5 | 298 | dev_err(cxlm->dev, "Mailbox is too small (%zub)", |
8adaf747 BW |
299 | cxlm->payload_size); |
300 | return -ENXIO; | |
301 | } | |
302 | ||
99e222a5 | 303 | dev_dbg(cxlm->dev, "Mailbox payload sized %zu", |
8adaf747 BW |
304 | cxlm->payload_size); |
305 | ||
306 | return 0; | |
307 | } | |
308 | ||
a261e9a1 | 309 | static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map) |
1b0a1a2a | 310 | { |
f8a7e8c2 | 311 | void __iomem *addr; |
7dc7a64d BW |
312 | int bar = map->barno; |
313 | struct device *dev = &pdev->dev; | |
314 | resource_size_t offset = map->block_offset; | |
1b0a1a2a | 315 | |
8adaf747 BW |
316 | /* Basic sanity check that BAR is big enough */ |
317 | if (pci_resource_len(pdev, bar) < offset) { | |
7dc7a64d BW |
318 | dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar, |
319 | &pdev->resource[bar], &offset); | |
a261e9a1 | 320 | return -ENXIO; |
8adaf747 BW |
321 | } |
322 | ||
30af9729 | 323 | addr = pci_iomap(pdev, bar, 0); |
f8a7e8c2 | 324 | if (!addr) { |
8adaf747 | 325 | dev_err(dev, "failed to map registers\n"); |
a261e9a1 | 326 | return -ENOMEM; |
8adaf747 | 327 | } |
8adaf747 | 328 | |
7dc7a64d BW |
329 | dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n", |
330 | bar, &offset); | |
6630d31c | 331 | |
a261e9a1 DW |
332 | map->base = addr + map->block_offset; |
333 | return 0; | |
30af9729 IW |
334 | } |
335 | ||
/* Undo cxl_map_regblock(): unmap the BAR and clear the cached base. */
static void cxl_unmap_regblock(struct pci_dev *pdev,
			       struct cxl_register_map *map)
{
	/* base points block_offset bytes into the BAR mapping; rewind first */
	pci_iounmap(pdev, map->base - map->block_offset);
	map->base = NULL;
}
4cdadfd5 | 342 | |
ed97afb5 | 343 | static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec) |
4cdadfd5 DW |
344 | { |
345 | int pos; | |
346 | ||
347 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); | |
348 | if (!pos) | |
349 | return 0; | |
350 | ||
351 | while (pos) { | |
352 | u16 vendor, id; | |
353 | ||
354 | pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); | |
355 | pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); | |
356 | if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) | |
357 | return pos; | |
358 | ||
359 | pos = pci_find_next_ext_capability(pdev, pos, | |
360 | PCI_EXT_CAP_ID_DVSEC); | |
361 | } | |
362 | ||
363 | return 0; | |
364 | } | |
365 | ||
/*
 * cxl_probe_regs() - Discover the sub-register layout within a mapped block.
 * @pdev: PCI device owning the register block.
 * @map: Decoded register block; map->base must already be mapped.
 *
 * Return: 0 on success (and for unhandled block types), -ENXIO when a
 * mandatory capability is missing from the block.
 */
static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = &pdev->dev;
	void __iomem *base = map->base;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		/* The HDM decoder capability is mandatory for this driver */
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		/* status, mbox, and memdev regions are all required */
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		/* Unknown block types are filtered out by the caller */
		break;
	}

	return 0;
}
404 | ||
405 | static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) | |
406 | { | |
99e222a5 DW |
407 | struct device *dev = cxlm->dev; |
408 | struct pci_dev *pdev = to_pci_dev(dev); | |
30af9729 IW |
409 | |
410 | switch (map->reg_type) { | |
08422378 BW |
411 | case CXL_REGLOC_RBI_COMPONENT: |
412 | cxl_map_component_regs(pdev, &cxlm->regs.component, map); | |
413 | dev_dbg(dev, "Mapping component registers...\n"); | |
414 | break; | |
30af9729 IW |
415 | case CXL_REGLOC_RBI_MEMDEV: |
416 | cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map); | |
417 | dev_dbg(dev, "Probing device registers...\n"); | |
418 | break; | |
419 | default: | |
420 | break; | |
421 | } | |
422 | ||
423 | return 0; | |
424 | } | |
425 | ||
7dc7a64d BW |
426 | static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi, |
427 | struct cxl_register_map *map) | |
07d62eac | 428 | { |
7dc7a64d BW |
429 | map->block_offset = |
430 | ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); | |
431 | map->barno = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); | |
432 | map->reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); | |
07d62eac IW |
433 | } |
434 | ||
/**
 * cxl_pci_setup_regs() - Setup necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
 */
static int cxl_pci_setup_regs(struct cxl_mem *cxlm)
{
	u32 regloc_size, regblocks;
	int regloc, i, n_maps, ret = 0;
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];

	regloc = cxl_pci_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
	if (!regloc) {
		dev_err(dev, "register location dvsec not found\n");
		return -ENXIO;
	}

	/* Get the size of the Register Locator DVSEC */
	pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
	regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

	/* Register block entries are 8 bytes each, after the block1 offset */
	regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
	regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;

	/*
	 * Phase 1: decode and probe each recognized block, compacting the
	 * accepted ones into maps[0..n_maps). Each block is mapped only long
	 * enough to probe its layout, then unmapped again.
	 */
	for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
		u32 reg_lo, reg_hi;

		pci_read_config_dword(pdev, regloc, &reg_lo);
		pci_read_config_dword(pdev, regloc + 4, &reg_hi);

		map = &maps[n_maps];
		cxl_decode_regblock(reg_lo, reg_hi, map);

		/* Ignore unknown register block types */
		if (map->reg_type > CXL_REGLOC_RBI_MEMDEV)
			continue;

		ret = cxl_map_regblock(pdev, map);
		if (ret)
			return ret;

		ret = cxl_probe_regs(pdev, map);
		cxl_unmap_regblock(pdev, map);
		if (ret)
			return ret;

		n_maps++;
	}

	/* Phase 2: all probes succeeded; create the long-lived mappings */
	for (i = 0; i < n_maps; i++) {
		ret = cxl_map_regs(cxlm, &maps[i]);
		if (ret)
			break;
	}

	return ret;
}
499 | ||
/*
 * cxl_pci_probe() - PCI probe: enumerate a CXL memory device and register it
 * with the CXL core (memdev on the CXL bus, plus an nvdimm bridge when a
 * persistent range is present and CONFIG_CXL_PMEM is enabled).
 *
 * Return: 0 on success, negative errno on any setup failure.
 */
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(&pdev->dev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	/* Map the spec-mandated MMIO register blocks */
	rc = cxl_pci_setup_regs(cxlm);
	if (rc)
		return rc;

	/* Wire up the primary mailbox before issuing any commands */
	rc = cxl_pci_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlm);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}
550 | ||
static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.driver	= {
		/* Opt in to asynchronous probing */
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);